#!/usr/bin/env python
# coding: utf-8

# In[29]:

import itertools
import os

os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'

import matplotlib.pylab as plt
import numpy as np
import tensorflow as tf
import tensorflow_hub as hub
from tensorflow.keras import datasets, layers, models, preprocessing

print("TF version:", tf.__version__)
print("Hub version:", hub.__version__)
print("GPU is", "available" if tf.config.list_physical_devices('GPU') else "NOT AVAILABLE")

IMAGE_SIZE = (224, 224)
BATCH_SIZE = 32

# Rescale pixel values to [0, 1] and hold out 20% of the training images for validation.
datagen_kwargs = dict(rescale=1./255, validation_split=.20)
dataflow_kwargs = dict(target_size=IMAGE_SIZE, batch_size=BATCH_SIZE,
                       interpolation="bilinear")

train_datagen = tf.keras.preprocessing.image.ImageDataGenerator(**datagen_kwargs)
train_generator = train_datagen.flow_from_directory(
    "data/seg_train/seg_train", subset="training", shuffle=True, **dataflow_kwargs)
print(f"Data shape: {train_generator[1][0].shape}")

valid_generator = train_datagen.flow_from_directory(
    "data/seg_train/seg_train", subset="validation", shuffle=False, **dataflow_kwargs)
# Simple CNN: three Conv/MaxPool blocks followed by a dense classifier head.
model = tf.keras.models.Sequential()
model.add(layers.Conv2D(32, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(128, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Flatten())
model.add(layers.Dense(64, activation='relu'))
model.add(layers.Dense(train_generator.num_classes, activation='softmax'))

# Build with a flexible batch dimension rather than hard-coding batch size 32.
model.build(input_shape=(None, *IMAGE_SIZE, 3))
model.summary()

optimizer = tf.keras.optimizers.Adam(learning_rate=0.001)
# flow_from_directory yields one-hot labels by default, so this softmax
# classifier needs categorical cross-entropy, not mean squared error.
model.compile(optimizer=optimizer, loss=tf.keras.losses.CategoricalCrossentropy(),
              metrics=['accuracy'])

# batch_size is ignored when fitting from a generator; the generator's own
# BATCH_SIZE applies.
history = model.fit(train_generator, epochs=30, validation_data=valid_generator)
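
# Optional sketch: plot the training curves captured in `history`, since
# matplotlib is already imported above as plt. The dictionary keys assume the
# 'accuracy' metric and the validation data passed to model.fit above.
plt.figure(figsize=(10, 4))
plt.subplot(1, 2, 1)
plt.plot(history.history['loss'], label='train loss')
plt.plot(history.history['val_loss'], label='val loss')
plt.legend()
plt.subplot(1, 2, 2)
plt.plot(history.history['accuracy'], label='train acc')
plt.plot(history.history['val_accuracy'], label='val acc')
plt.legend()
plt.show()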

# Predict on a single test image, applying the same resizing and rescaling
# used during training.
img = preprocessing.image.load_img("data/seg_test/buildings/20057.jpg",
                                   target_size=IMAGE_SIZE)
img_arr = preprocessing.image.img_to_array(img) / 255.
img_arr = tf.expand_dims(img_arr, 0)

true_labels = valid_generator.classes  # ground-truth indices (shuffle=False above)
predictions = model.predict(img_arr)
y_pred = np.array([np.argmax(x) for x in predictions])
print(y_pred)
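
# Optional sketch: map the predicted index back to a class name and compare
# full validation-set predictions against `true_labels`. This assumes the
# generators defined above; class_indices maps class name -> index.
index_to_class = {v: k for k, v in train_generator.class_indices.items()}
print("Predicted class:", index_to_class[int(y_pred[0])])

val_predictions = model.predict(valid_generator)
val_pred = np.argmax(val_predictions, axis=1)
print("Validation accuracy:", np.mean(val_pred == true_labels))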