classifier.py
#!/usr/bin/env python
# coding: utf-8
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'  # silence TensorFlow info/warning logs

import matplotlib.pylab as plt
import numpy as np
import tensorflow as tf
import tensorflow_hub as hub
from tensorflow.keras import datasets, layers, models, preprocessing

print("TF version:", tf.__version__)
print("Hub version:", hub.__version__)
print("GPU is", "available" if tf.config.list_physical_devices('GPU') else "NOT AVAILABLE")
IMAGE_SIZE = (224, 224)
BATCH_SIZE = 32

# Rescale pixels to [0, 1] and hold out 20% of the training folder for validation.
datagen_kwargs = dict(rescale=1./255, validation_split=.20)
dataflow_kwargs = dict(target_size=IMAGE_SIZE, batch_size=BATCH_SIZE,
                       interpolation="bilinear")

train_datagen = tf.keras.preprocessing.image.ImageDataGenerator(**datagen_kwargs)
train_generator = train_datagen.flow_from_directory(
    "data/seg_train/seg_train", subset="training", shuffle=True, **dataflow_kwargs)
print(f"Batch shape: {train_generator[1][0].shape}")
valid_generator = train_datagen.flow_from_directory(
    "data/seg_train/seg_train", subset="validation", shuffle=False, **dataflow_kwargs)
# Simple CNN: three conv/max-pool blocks followed by a small dense classifier head.
model = tf.keras.models.Sequential()
model.add(layers.Conv2D(32, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(128, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Flatten())
model.add(layers.Dense(64, activation='relu'))
model.add(layers.Dense(train_generator.num_classes, activation='softmax'))
# Build with a flexible batch dimension rather than hard-coding the batch size.
model.build(input_shape=(None, 224, 224, 3))
model.summary()  # summary() prints the architecture itself and returns None
optimizer = tf.keras.optimizers.Adam(learning_rate=0.001)
# flow_from_directory yields one-hot labels by default and the model ends in a
# softmax, so categorical cross-entropy is the appropriate loss (not mean squared error).
model.compile(optimizer=optimizer, loss=tf.keras.losses.CategoricalCrossentropy(),
              metrics=['accuracy'])
# The generators already batch the data, so no batch_size argument is needed here.
history = model.fit(train_generator, epochs=30, validation_data=valid_generator)
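# Optional sketch (not part of the original script) that plots the training curves
# collected in `history`, using the matplotlib import above. It assumes the fit
# completed and that the 'accuracy'/'val_accuracy' keys exist, which they do when
# metrics=['accuracy'] and validation_data are passed to compile/fit.
plt.figure()
plt.plot(history.history['accuracy'], label='train accuracy')
plt.plot(history.history['val_accuracy'], label='validation accuracy')
plt.xlabel('epoch')
plt.ylabel('accuracy')
plt.legend()
plt.savefig('training_curves.png')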
# Predict on a single test image, applying the same resizing and rescaling used in training.
img = preprocessing.image.load_img("data/seg_test/buildings/20057.jpg",
                                    target_size=IMAGE_SIZE)
img_arr = preprocessing.image.img_to_array(img) / 255.0
img_arr = tf.expand_dims(img_arr, 0)
true_labels = valid_generator.classes
predictions = model.predict(img_arr)
y_pred = np.array([np.argmax(x) for x in predictions])
print(y_pred)
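# Optional sketch (an assumption about intent, not in the original): the script
# already grabs valid_generator.classes, and because the validation generator is
# created with shuffle=False its label order matches the prediction order, so
# overall validation accuracy can be computed directly.
val_predictions = model.predict(valid_generator)
val_pred_labels = np.argmax(val_predictions, axis=1)
val_accuracy = np.mean(val_pred_labels == true_labels)
print(f"Validation accuracy: {val_accuracy:.3f}")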