classifier.py

#!/usr/bin/env python
# coding: utf-8

import matplotlib.pylab as plt
import numpy as np
import tensorflow as tf
import tensorflow_hub as hub
from tensorflow.keras import layers

print("TF version:", tf.__version__)
print("Hub version:", hub.__version__)
print("GPU is", "available" if tf.config.list_physical_devices("GPU") else "NOT AVAILABLE")

# Input size expected by the network and the batch size used by both generators.
IMAGE_SIZE = (224, 224)
BATCH_SIZE = 32

# Rescale pixels to [0, 1]; validation_split reserves 20% of each directory.
datagen_kwargs = dict(rescale=1./255, validation_split=.20)
dataflow_kwargs = dict(target_size=IMAGE_SIZE, batch_size=BATCH_SIZE,
                       interpolation="bilinear")

train_datagen = tf.keras.preprocessing.image.ImageDataGenerator(**datagen_kwargs)
train_generator = train_datagen.flow_from_directory(
    "data/seg_train/seg_train", subset="training", shuffle=True, **dataflow_kwargs)
print(f"Data shape {train_generator[1][0].shape}")

valid_generator = train_datagen.flow_from_directory(
    "data/seg_test/seg_test", subset="validation", shuffle=False, **dataflow_kwargs)

# Small CNN: three Conv2D/MaxPooling2D blocks followed by a dense classifier head.
model = tf.keras.models.Sequential()
model.add(layers.Conv2D(32, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(128, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Flatten())
model.add(layers.Dense(64, activation='relu'))
model.add(layers.Dense(train_generator.num_classes, activation='softmax'))
model.build(input_shape=(None, 224, 224, 3))  # None keeps the batch dimension flexible
model.summary()
# Categorical cross-entropy matches the softmax output and the one-hot labels
# that flow_from_directory yields by default (class_mode="categorical").
optimizer = tf.keras.optimizers.Adam(learning_rate=0.001)
model.compile(optimizer=optimizer,
              loss=tf.keras.losses.CategoricalCrossentropy(),
              metrics=['accuracy'])

# The generators already batch the data, so no batch_size argument is passed here.
history = model.fit(train_generator, epochs=30, validation_data=valid_generator)
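
# Optional sketch (an addition, not part of the original script): plot the training
# curves recorded by model.fit above, using the matplotlib import that is otherwise
# unused. Assumes the 'accuracy'/'val_accuracy' keys Keras records when
# metrics=['accuracy']; the output filename is arbitrary.
plt.figure(figsize=(8, 4))
plt.plot(history.history['accuracy'], label='train accuracy')
plt.plot(history.history['val_accuracy'], label='validation accuracy')
plt.xlabel('epoch')
plt.ylabel('accuracy')
plt.legend()
plt.savefig('training_curves.png')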