{_val_f1:.4f}"
f" — val_precision: {_val_precision:.4f}"
f" — val_recall: {_val_recall:.4f}"))
return
In [6]:
# Training configuration: hyperparameters and where the trained model is saved.
batch_size = 64  # samples per gradient update
num_classes = 14  # number of target categories (size of the softmax output)
epochs = 30  # maximum passes over the training data
val_split = 0.1  # fraction of x_train held out for validation by model.fit
save_dir = os.path.join(os.getcwd(), 'models')  # output directory for the model file
model_name = 'keras_cnn_model.h5'  # saved in legacy Keras HDF5 format
In [7]:
# CNN classifier: two conv blocks (32 then 64 filters, each with ReLU,
# 2x2 max-pooling and 25% dropout), followed by a 512-unit dense head
# and a softmax over `num_classes`. Identical layer stack to the
# incremental .add() form, built via the Sequential list constructor.
model = Sequential([
    Conv2D(32, (3, 3), padding='same', input_shape=x_train.shape[1:]),
    Activation('relu'),
    Conv2D(32, (3, 3)),
    Activation('relu'),
    MaxPooling2D(pool_size=(2, 2)),
    Dropout(0.25),

    Conv2D(64, (3, 3), padding='same'),
    Activation('relu'),
    Conv2D(64, (3, 3)),
    Activation('relu'),
    MaxPooling2D(pool_size=(2, 2)),
    Dropout(0.25),

    Flatten(),
    Dense(512),
    Activation('relu'),
    Dropout(0.5),
    Dense(num_classes),
    Activation('softmax'),
])
WARNING:tensorflow:From /opt/conda/lib/python3.6/site-packages/tensorflow/python/framework/op_def_library.py:263: colocate_with (from tensorflow.python.framework.ops) is deprecated and will be removed in a future version.
Instructions for updating:
Colocations handled automatically by placer.
WARNING:tensorflow:From /opt/conda/lib/python3.6/site-packages/keras/backend/tensorflow_backend.py:3445: calling dropout (from tensorflow.python.ops.nn_ops) with keep_prob is deprecated and will be removed in a future version.
Instructions for updating:
Please use `rate` instead of `keep_prob`. Rate should be set to `rate = 1 - keep_prob`.
In [8]:
# Compile with categorical cross-entropy + Adam, then train. The Metrics
# callback computes F1/precision/recall on the validation split after
# each epoch (printed below the built-in loss/accuracy line).
f1_metrics = Metrics()

model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])

fit_options = dict(
    batch_size=batch_size,
    epochs=epochs,
    callbacks=[f1_metrics],
    validation_split=val_split,
    shuffle=True,
)
hist = model.fit(x_train, y_train, **fit_options)
WARNING:tensorflow:From /opt/conda/lib/python3.6/site-packages/tensorflow/python/ops/math_ops.py:3066: to_int32 (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.
Instructions for updating:
Use tf.cast instead.
Train on 176669 samples, validate on 19630 samples
Epoch 1/30
176669/176669 [==============================] - 25s 139us/step - loss: 0.7065 - acc: 0.7801 - val_loss: 0.4995 - val_acc: 0.8322
val_f1: 0.4737 — val_precision: 0.5971 — val_recall: 0.4506
Epoch 2/30
1088/176669 [..............................] - ETA: 20s - loss: 0.5210 - acc: 0.8346
/opt/conda/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.
'precision', 'predicted', average, warn_for)
/opt/conda/lib/python3.6/site-packages/sklearn/metrics/classification.py:1143: UndefinedMetricWarning: Precision is ill-defined and being set to 0.0 in labels with no predicted samples.
'precision', 'predicted', average, warn_for)
176669/176669 [==============================] - 20s 111us/step - loss: 0.5137 - acc: 0.8341 - val_loss: 0.4165 - val_acc: 0.8625
val_f1: 0.6290 — val_precision: 0.7200 — val_recall: 0.5941
Epoch 3/30
176669/176669 [==============================] - 20s 111us/step - loss: 0.4522 - acc: 0.8519 - val_loss: 0.3753 - val_acc: 0.8790
val_f1: 0.7002 — val_precision: 0.7599 — val_recall: 0.6748
Epoch 4/30
176669/176669 [==============================] - 20s 111us/step - loss: 0.4148 - acc: 0.8626 - val_loss: 0.3463 - val_acc: 0.8842
val_f1: 0.7160 — val_precision: 0.7663 — val_recall: 0.6952
Epoch 5/30
176669/176669 [==============================] - 20s 112us/step - loss: 0.3910 - acc: 0.8700 - val_loss: 0.3262 - val_acc: 0.8927
val_f1: 0.7180 — val_precision: 0.7776 — val_recall: 0.6866
Epoch 6/30
176669/176669 [==============================] - 20s 111us/step - loss: 0.3710 - acc: 0.8758 - val_loss: 0.3225 - val_acc: 0.8918
val_f1: 0.7178 — val_precision: 0.7933 — val_recall: 0.6846
Epoch 7/30
176669/176669 [==============================] - 20s 111us/step - loss: 0.3541 - acc: 0.8809 - val_loss: 0.2957 - val_acc: 0.9013
val_f1: 0.7680 — val_precision: 0.7901 — val_recall: 0.7544
Epoch 8/30
176669/176669 [==============================] - 20s 113us/step - loss: 0.3408 - acc: 0.8855 - val_loss: 0.2802 - val_acc: 0.9071
val_f1: 0.7833 — val_precision: 0.8065 — val_recall: 0.7684
Epoch 9/30
176669/176669 [==============================] - 20s 111us/step - loss: 0.3303 - acc: 0.8886 - val_loss: 0.2747 - val_acc: 0.9090
val_f1: 0.7855 — val_precision: 0.8201 — val_recall: 0.7706
Epoch 10/30
176669/176669 [==============================] - 20s 111us/step - loss: 0.3210 - acc: 0.8912 - val_loss: 0.2724 - val_acc: 0.9096
val_f1: 0.7905 — val_precision: 0.8167 — val_recall: 0.7755
Epoch 11/30
176669/176669 [==============================] - 20s 112us/step - loss: 0.3152 - acc: 0.8930 - val_loss: 0.2702 - val_acc: 0.9121
val_f1: 0.7730 — val_precision: 0.8145 — val_recall: 0.7514
Epoch 12/30
176669/176669 [==============================] - 20s 111us/step - loss: 0.3073 - acc: 0.8956 - val_loss: 0.2615 - val_acc: 0.9138
val_f1: 0.7628 — val_precision: 0.7943 — val_recall: 0.7451
Epoch 13/30
176669/176669 [==============================] - 20s 111us/step - loss: 0.3013 - acc: 0.8981 - val_loss: 0.2598 - val_acc: 0.9150
val_f1: 0.8018 — val_precision: 0.8323 — val_recall: 0.7893
Epoch 14/30
176669/176669 [==============================] - 20s 111us/step - loss: 0.2966 - acc: 0.8988 - val_loss: 0.2506 - val_acc: 0.9163
val_f1: 0.8082 — val_precision: 0.8147 — val_recall: 0.8059
Epoch 15/30
176669/176669 [==============================] - 20s 111us/step - loss: 0.2893 - acc: 0.9017 - val_loss: 0.2577 - val_acc: 0.9157
val_f1: 0.8010 — val_precision: 0.8308 — val_recall: 0.7830
Epoch 16/30
176669/176669 [==============================] - 20s 111us/step - loss: 0.2838 - acc: 0.9026 - val_loss: 0.2418 - val_acc: 0.9213
val_f1: 0.8145 — val_precision: 0.8275 — val_recall: 0.8065
Epoch 17/30
176669/176669 [==============================] - 20s 111us/step - loss: 0.2814 - acc: 0.9035 - val_loss: 0.2419 - val_acc: 0.9213
val_f1: 0.8157 — val_precision: 0.8357 — val_recall: 0.8044
Epoch 18/30
12224/176669 [=>............................] - ETA: 17s - loss: 0.2751 - acc: 0.9074
In [9]:
# Persist the trained model under save_dir/model_name.
# os.makedirs(..., exist_ok=True) replaces the check-then-create
# (isdir + makedirs) pattern, which is both more verbose and racy
# (the directory could appear between the check and the call).
os.makedirs(save_dir, exist_ok=True)
model_path = os.path.join(save_dir, model_name)
model.save(model_path)
print('Saved trained model at %s ' % model_path)
Saved trained model at /kaggle/working/models/keras_cnn_model.h5
In [10]:
# Combine Keras' per-epoch history with the callback's custom validation
# metrics in a single DataFrame, then plot loss, accuracy, and the
# F1/precision/recall curves (one figure per group).
history_df = pd.DataFrame(hist.history).assign(
    val_f1=f1_metrics.val_f1s,
    val_precision=f1_metrics.val_precisions,
    val_recall=f1_metrics.val_recalls,
)

for column_group in (['loss', 'val_loss'],
                     ['acc', 'val_acc'],
                     ['val_f1', 'val_precision', 'val_recall']):
    history_df[column_group].plot()
Out[10]:
In [11]:
# Predict class probabilities for the test set and fill the submission
# template with the most likely class id per image.
# NOTE(review): `y_test` holds model *predictions*, not ground-truth labels;
# the name is kept unchanged in case later cells reference it.
y_test = model.predict(x_test)
predicted_ids = y_test.argmax(axis=1)

submission_df = (
    pd.read_csv('../input/iwildcam-2019-fgvc6/sample_submission.csv')
      .assign(Predicted=predicted_ids)
)
print(submission_df.shape)
submission_df.head()
(153730, 2)
Out[11]:
|
Id
|
Predicted
|
0
|
b005e5b2-2c0b-11e9-bcad-06f10d5896c4
|
0
|
1
|
f2347cfe-2c11-11e9-bcad-06f10d5896c4
|
6
|
2
|
27cf8d26-2c0e-11e9-bcad-06f10d5896c4
|
0
|
3
|
f82f52c7-2c1d-11e9-bcad-06f10d5896c4
|
0
|
4
|
e133f50d-2c1c-11e9-bcad-06f10d5896c4
|
0
|
In [12]:
submission_df.to_csv('submission.csv',index=False)
history_df.to_csv('history.csv', index=False)
with open('history.json', 'w') as f:
json.dump(hist.history, f)