Submitted by Electronic-Clerk868 t3_11m49hd in deeplearning

library(keras)
library(dplyr)    # mutate()
library(stringr)  # str_extract()
library(caret)    # confusionMatrix()

target_size <- c(200, 200)
batch_size <- 100

# Augmentation plus an 80/20 train/validation split from the same folder
train_data_gen <- image_data_generator(rescale = 1/255,
                                       horizontal_flip = TRUE, vertical_flip = TRUE,
                                       rotation_range = 45, zoom_range = 0.25,
                                       validation_split = 0.2)

# train
train_image_array_gen <- flow_images_from_directory(directory = "imagenes/TRAIN/",
                                                    target_size = target_size, color_mode = "grayscale",
                                                    batch_size = batch_size, shuffle = TRUE,
                                                    subset = "training", generator = train_data_gen)

# validation
val_image_array_gen <- flow_images_from_directory(directory = "imagenes/TRAIN/",
                                                  target_size = target_size, color_mode = "grayscale",
                                                  batch_size = batch_size, shuffle = TRUE,
                                                  subset = "validation", generator = train_data_gen)
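Several names used below (tamaño_imagen, output_n, train_samples, valid_samples) are not defined anywhere in the snippet. A minimal sketch of what they are presumably meant to be, derived from the generators above; these are assumptions, not the original definitions:

tamaño_imagen <- target_size                        # "image size": assumed equal to target_size, i.e. c(200, 200)
output_n      <- train_image_array_gen$num_classes  # number of classes (Control, PD)
train_samples <- train_image_array_gen$n            # number of training images found
valid_samples <- val_image_array_gen$n              # number of validation images found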


initializer <- initializer_random_normal(seed = 100)

# Simple CNN: one conv/pool block followed by a small dense head
model <- keras_model_sequential(name = 'simple_model') %>%
  layer_conv_2d(filters = 16,
                kernel_size = c(3, 3),
                padding = 'same',
                activation = 'relu',
                kernel_initializer = initializer,
                bias_initializer = initializer,
                input_shape = c(tamaño_imagen, 1)) %>%  # 200 x 200 x 1 grayscale input
  layer_max_pooling_2d(pool_size = c(2, 2)) %>%
  layer_flatten() %>%
  layer_dense(units = 16,
              activation = 'relu',
              kernel_initializer = initializer,
              bias_initializer = initializer) %>%
  layer_dense(units = output_n,  # one unit per class (Control, PD)
              activation = 'sigmoid',
              name = 'Output',
              kernel_initializer = initializer,
              bias_initializer = initializer)

model

model %>%
  compile(loss = 'categorical_crossentropy',
          optimizer = optimizer_adam(learning_rate = 0.0001),
          metrics = 'accuracy')

history <- model %>%
  fit(train_image_array_gen,
      steps_per_epoch = as.integer(train_samples / batch_size),
      epochs = 40,
      validation_data = val_image_array_gen,
      validation_steps = as.integer(valid_samples / batch_size))

plot(history)   # results shown below:

https://preview.redd.it/5ekgkqqk8kma1.png?width=663&format=png&auto=webp&v=enabled&s=d0b7a81091f377f823f1b52a7bb0ec713c28ca4f

# Recover the true class of each validation image from its file path
val_data <- data.frame(file_name = paste0('imagenes/TRAIN/', val_image_array_gen$filenames)) %>%
  mutate(class = str_extract(file_name, 'Control|PD'))

# Load each image from disk and stack them into one array, mirroring the training preprocessing
image_prep <- function(x) {
  arrays <- lapply(x, function(path) {
    img <- image_load(path, target_size = c(200, 200), grayscale = TRUE)
    x <- image_to_array(img)
    x <- array_reshape(x, c(1, dim(x)))
    x <- x / 255  # normalize the image pixels to [0, 1]
    x
  })
  do.call(abind::abind, c(arrays, list(along = 1)))
}

test_x <- image_prep(val_data$file_name)
dim(test_x)

pred_test <- model %>%
  predict(test_x) %>%
  k_argmax()
head(pred_test, 10)

# Map predicted class indices back to their labels
decode <- function(x) {
  case_when(x == 0 ~ 'Control',
            x == 1 ~ 'PD')
}

pred_test <- sapply(pred_test, decode)
head(pred_test, 10)

confusionMatrix(table(as.factor(pred_test), as.factor(val_data$class)))   # results shown below:

https://preview.redd.it/fo3d3sdx8kma1.png?width=642&format=png&auto=webp&v=enabled&s=c6a76cc9e5dd5ce2aee904e95544286b466214e0

history$metrics$accuracy[40]   # results shown below:

https://preview.redd.it/21t0vyby8kma1.png?width=254&format=png&auto=webp&v=enabled&s=5200c07f5af184ec34e3bde5cef828487758320c
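For comparison with the training curve above, the validation-side numbers for the same epoch can be read from the same history object; a small sketch, assuming the default metric names keras records when metrics = 'accuracy' and validation data are supplied:

history$metrics$val_accuracy[40]  # validation accuracy after the last epoch
history$metrics$val_loss[40]      # validation loss after the last epoch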


Comments


Ferocious_Armadillo t1_jbhura1 wrote

Increase the learning rate of your optimizer. Something else you could try: look into "back propagation" to tune your learning rate after each layer.
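A minimal sketch of the first suggestion, reusing the model and generators from the post; the 0.001 value and the optional reduce-on-plateau callback are illustrative choices, not something from the original code:

# recompile with a larger learning rate than the original 0.0001
model %>%
  compile(loss = 'categorical_crossentropy',
          optimizer = optimizer_adam(learning_rate = 0.001),
          metrics = 'accuracy')

# optionally let keras shrink the learning rate during training when validation loss plateaus
history <- model %>%
  fit(train_image_array_gen,
      steps_per_epoch = as.integer(train_samples / batch_size),
      epochs = 40,
      validation_data = val_image_array_gen,
      validation_steps = as.integer(valid_samples / batch_size),
      callbacks = list(callback_reduce_lr_on_plateau(monitor = "val_loss",
                                                     factor = 0.5,
                                                     patience = 3)))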
