Neural Networks in R: keras, tensorflow, and mxnet

library(keras)
# load the MNIST dataset that ships with keras
data<-dataset_mnist()
# separate the train and test sets
train_x<-data$train$x
train_y<-data$train$y
test_x<-data$test$x
test_y<-data$test$y
rm(data)
# flatten each 28x28 image into a 784-element row vector for the MLP and normalise pixel values to [0, 1]
train_x <- array(train_x, dim = c(dim(train_x)[1], prod(dim(train_x)[-1]))) / 255
test_x <- array(test_x, dim = c(dim(test_x)[1], prod(dim(test_x)[-1]))) / 255
# convert the target variable to one-hot encoded vectors using the keras built-in function
train_y<-to_categorical(train_y,10)
test_y<-to_categorical(test_y,10)
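# quick sanity check (assuming the standard MNIST split of 60,000 training and
# 10,000 test images): expect 60000 x 784 and 60000 x 10 here
dim(train_x)
dim(train_y)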
#defining a keras sequential model
model <- keras_model_sequential()
# define the model: 784 input features, one hidden layer of 784 neurons with dropout rate 0.4
# and ReLU activation, and an output layer of 10 neurons, one per digit from 0 to 9
model %>%
  layer_dense(units = 784, input_shape = 784) %>%
  layer_dropout(rate=0.4)%>%
  layer_activation(activation = 'relu') %>%
  layer_dense(units = 10) %>%
  layer_activation(activation = 'softmax')
# compile the model with categorical cross-entropy loss, the adam optimizer, and accuracy as the metric
model %>% compile(
  loss = 'categorical_crossentropy',
  optimizer = 'adam',
  metrics = c('accuracy')
)
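# optional: print the layer output shapes and parameter counts of the compiled model
summary(model)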
#fitting the model on the training dataset
model %>% fit(train_x, train_y, epochs = 100, batch_size = 128)
# evaluate the model on the held-out test set

loss_and_metrics <- model %>% evaluate(test_x, test_y, batch_size = 128)
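# loss_and_metrics holds the test loss and accuracy (a named list or vector,
# depending on the keras package version); a sketch of turning the predicted
# class probabilities into digit labels with base R:
loss_and_metrics
pred_probs <- model %>% predict(test_x)
pred_label <- max.col(pred_probs) - 1   # column with the highest probability, shifted to 0-9
head(pred_label)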


###################################################################


library(tensorflow)
# TF 1.x "hello world": build a constant node, then evaluate it in a session
sess = tf$Session()
hello <- tf$constant('Hello, TensorFlow!')
sess$run(hello)
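# the same session can evaluate any other node in the graph; a minimal sketch
# of TF 1.x graph mode (define the constants first, then run the op):
a <- tf$constant(2)
b <- tf$constant(3)
sess$run(tf$add(a, b))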




library(tensorflow)


# Create 100 phony x, y data points, y = x * 0.1 + 0.3
x_data <- runif(100, min=0, max=1)
y_data <- x_data * 0.1 + 0.3


# Try to find values for W and b that compute y_data = W * x_data + b
# (We know that W should be 0.1 and b 0.3, but TensorFlow will
# figure that out for us.)
W <- tf$Variable(tf$random_uniform(shape(1L), -1.0, 1.0))
b <- tf$Variable(tf$zeros(shape(1L)))
y <- W * x_data + b


# Minimize the mean squared errors.
loss <- tf$reduce_mean((y - y_data) ^ 2)
optimizer <- tf$train$GradientDescentOptimizer(0.5)
train <- optimizer$minimize(loss)


# Launch the graph and initialize the variables.
# (tf$initialize_all_variables() is deprecated in TF 1.x; use the replacement below)
sess = tf$Session()
sess$run(tf$global_variables_initializer())


# Fit the line (Learns best fit is W: 0.1, b: 0.3)
for (step in 1:201) {
  sess$run(train)
  if (step %% 20 == 0)
    cat(step, "-", sess$run(W), sess$run(b), "\n")
}
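# as a cross-check, ordinary least squares on the same data should recover
# roughly the same line (intercept ~ 0.3, slope ~ 0.1)
lm(y_data ~ x_data)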



################################################################


library(mlbench)
library(mxnet)
data(Sonar, package="mlbench")
str(Sonar)




# recode the class label (factor levels M/R) as 0/1
Sonar[,61] = as.numeric(Sonar[,61])-1
train.ind = c(1:50, 100:150)
train.x = data.matrix(Sonar[train.ind, 1:60])
train.y = Sonar[train.ind, 61]
test.x = data.matrix(Sonar[-train.ind, 1:60])
test.y = Sonar[-train.ind, 61]

mx.set.seed(0)
# train an MLP: num.round is the number of training iterations,
# array.batch.size the batch size
model <- mx.mlp(train.x, train.y, hidden_node=10, out_node=2, out_activation="softmax",
                num.round=20, array.batch.size=15, learning.rate=0.07, momentum=0.9,
                eval.metric=mx.metric.accuracy)
 
 
preds = predict(model, test.x)
# predict() returns class probabilities with one column per test sample,
# so transpose before picking the most likely class
pred.label = max.col(t(preds))-1
table(pred.label, test.y)
# accuracy read off the confusion matrix above: correct predictions / total
(24+33) / (24+14+36+33)
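# equivalently, compute accuracy directly instead of reading counts off the table
mean(pred.label == test.y)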

