Submitted by Electronic-Clerk868 t3_11m49hd in deeplearning
# Packages used below
library(keras)
library(dplyr)    # mutate(), case_when()
library(stringr)  # str_extract()
library(caret)    # confusionMatrix()

target_size = c(200, 200)
batch_size = 100
# Rescale pixels to [0, 1], add light augmentation, hold out 20% for validation
train_data_gen = image_data_generator(rescale = 1/255, horizontal_flip = TRUE, vertical_flip = TRUE,
                                      rotation_range = 45, zoom_range = 0.25, validation_split = 0.2)

# train
train_image_array_gen = flow_images_from_directory(directory = "imagenes/TRAIN/", shuffle = TRUE,
                                                   target_size = target_size, color_mode = "grayscale",
                                                   batch_size = batch_size, subset = "training",
                                                   generator = train_data_gen)
# validation
val_image_array_gen = flow_images_from_directory(directory = "imagenes/TRAIN/", shuffle = TRUE,
                                                 target_size = target_size, color_mode = "grayscale",
                                                 batch_size = batch_size, subset = "validation",
                                                 generator = train_data_gen)
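Since the decoding step further down assumes Control = 0 and PD = 1, it is worth confirming the split sizes and the class mapping the generator actually inferred from the folder names. A minimal sanity check using the iterator's own attributes:

# Sanity check: split sizes and inferred class-to-index mapping
train_image_array_gen$n              # number of training images
val_image_array_gen$n                # number of validation images
train_image_array_gen$class_indices  # expected: Control = 0, PD = 1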
initializer = initializer_random_normal(seed = 100)
output_n = 2  # two classes: Control and PD

model = keras_model_sequential(name = 'simple_model') %>%
  layer_conv_2d(filters = 16,
                kernel_size = c(3, 3),
                padding = 'same',
                activation = 'relu',
                kernel_initializer = initializer,
                bias_initializer = initializer,
                input_shape = c(target_size, 1)  # 200x200 grayscale
                ) %>%
  layer_max_pooling_2d(pool_size = c(2, 2)) %>%
  layer_flatten() %>%
  layer_dense(units = 16,
              activation = 'relu',
              kernel_initializer = initializer,
              bias_initializer = initializer) %>%
  # softmax (not sigmoid) is what pairs with categorical_crossentropy and the
  # one-hot labels produced by the default class_mode = "categorical"
  layer_dense(units = output_n,
              activation = 'softmax',
              name = 'Output',
              kernel_initializer = initializer,
              bias_initializer = initializer)
model
model %>%
  compile(loss = 'categorical_crossentropy',
          optimizer = optimizer_adam(learning_rate = 0.0001),
          metrics = 'accuracy')
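With only two classes, an equivalent formulation is a single sigmoid unit trained with binary cross-entropy. A sketch under two assumptions: both flow_images_from_directory() calls are rebuilt with class_mode = "binary" so labels are 0/1 rather than one-hot, and model_bin is just a name introduced here (the custom initializers are omitted for brevity):

# Alternative two-class setup (sketch): one sigmoid unit + binary cross-entropy
model_bin = keras_model_sequential(name = 'simple_model_binary') %>%
  layer_conv_2d(filters = 16, kernel_size = c(3, 3), padding = 'same',
                activation = 'relu', input_shape = c(target_size, 1)) %>%
  layer_max_pooling_2d(pool_size = c(2, 2)) %>%
  layer_flatten() %>%
  layer_dense(units = 16, activation = 'relu') %>%
  layer_dense(units = 1, activation = 'sigmoid', name = 'Output')
model_bin %>% compile(loss = 'binary_crossentropy',
                      optimizer = optimizer_adam(learning_rate = 0.0001),
                      metrics = 'accuracy')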
train_samples = train_image_array_gen$n  # images in the training split
valid_samples = val_image_array_gen$n    # images in the validation split

history = model %>%
  fit(train_image_array_gen,
      steps_per_epoch = as.integer(train_samples / batch_size),
      epochs = 40,
      validation_data = val_image_array_gen,
      validation_steps = as.integer(valid_samples / batch_size))
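Rather than committing to a fixed 40 epochs, early stopping can end training once validation loss stops improving. A minimal sketch; es_cb is a name introduced here, and the patience value is an assumption to experiment with, not a tuned setting:

# Early stopping (sketch): stop when val_loss plateaus and keep the best weights
es_cb = callback_early_stopping(monitor = "val_loss", patience = 8,
                                restore_best_weights = TRUE)
# then pass callbacks = list(es_cb) to the fit() call above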
plot(history)  # training/validation loss and accuracy curves (results shown in the original post)
# Recover each validation file's true class (Control vs PD) from its path
val_data = data.frame(file_name = paste0('imagenes/TRAIN/', val_image_array_gen$filenames)) %>%
  mutate(class = str_extract(file_name, 'Control|PD'))
image_prep = function(x) {
  arrays = lapply(x, function(path) {
    img = image_load(path, target_size = c(200, 200), grayscale = TRUE)
    x = image_to_array(img)
    x = array_reshape(x, c(1, dim(x)))
    x = x / 255  # normalize pixel values, matching the generator's rescale
  })
  # Stack the single-image arrays into one (n, 200, 200, 1) batch
  do.call(abind::abind, c(arrays, list(along = 1)))
}
test_x = image_prep(val_data$file_name)
dim(test_x)  # expect c(n_validation_images, 200, 200, 1)
pred_test = model %>%
  predict(test_x) %>%
  k_argmax() %>%
  as.array()  # convert the tensor of class indices to a plain R vector
head(pred_test, 10)
# Map predicted class indices back to the folder labels
decode = function(x) {
  case_when(x == 0 ~ 'Control',
            x == 1 ~ 'PD')
}
pred_test = decode(pred_test)  # case_when() is vectorized, so no sapply() needed
head(pred_test, 10)
confusionMatrix(table(as.factor(pred_test), as.factor(val_data$class)))  # confusion matrix (results shown in the original post)
history$metrics$accuracy[40]  # final-epoch training accuracy (result shown in the original post)
Ferocious_Armadillo t1_jbhura1 wrote
Increase the learning rate of your optimizer. Something else you could try: look into learning-rate scheduling to adjust the rate as training progresses.
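A concrete sketch of that suggestion in R keras; lr_cb is a name introduced here, and the values are assumptions to experiment with, not tuned settings:

# Sketch: 10x higher base learning rate, reduced automatically on plateau
model %>% compile(loss = 'categorical_crossentropy',
                  optimizer = optimizer_adam(learning_rate = 0.001),
                  metrics = 'accuracy')
lr_cb = callback_reduce_lr_on_plateau(monitor = "val_loss", factor = 0.5, patience = 3)
# pass callbacks = list(lr_cb) to fit() and retrain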