Check the backend used by Keras

In [1]:
from keras import backend
# Use the public backend() accessor rather than the private _BACKEND attribute
print(backend.backend())
Using TensorFlow backend.
tensorflow
In [2]:
from keras import backend as K
# Force Theano-style (channels-first, i.e. (channels, rows, cols)) image ordering,
# even when running on the TensorFlow backend.
if K.backend() == 'tensorflow':
    K.set_image_dim_ordering("th")
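Note: newer Keras 2 releases expose the same channels-first switch through set_image_data_format; a minimal sketch of the equivalent call, assuming Keras >= 2.0:

In [ ]:
from keras import backend as K
# 'channels_first' is the Keras 2 name for the Theano-style "th" ordering used above
if K.backend() == 'tensorflow':
    K.set_image_data_format('channels_first')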
In [3]:
import time
import matplotlib.pyplot as plt
import numpy as np
%matplotlib inline
np.random.seed(2017)  # fix the random seed for reproducibility
from keras.models import Sequential
from keras.layers.convolutional import Convolution2D, MaxPooling2D
from keras.layers import Activation, Flatten, Dense, Dropout
from keras.layers.normalization import BatchNormalization
from keras.utils import np_utils
In [4]:
from keras.datasets import cifar10
(train_features, train_labels), (test_features, test_labels) = cifar10.load_data()
num_train, img_channels, img_rows, img_cols = train_features.shape
num_test, _, _, _ = test_features.shape
num_classes = len(np.unique(train_labels))
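
As a quick sanity check, the array shapes and a few raw samples can be inspected; a minimal sketch (the expected shapes and the transpose assume the channels-first ordering set above):

In [ ]:
print(train_features.shape, test_features.shape)  # expected: (50000, 3, 32, 32) (10000, 3, 32, 32)
print(num_classes)                                 # expected: 10
# Show the first few training images; imshow expects (rows, cols, channels)
fig, axes = plt.subplots(1, 5, figsize=(10, 2))
for i, ax in enumerate(axes):
    ax.imshow(np.transpose(train_features[i], (1, 2, 0)))
    ax.axis('off')
plt.show()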

Normalize the input pixel values to the [0, 1] range

In [5]:
train_features = train_features.astype('float32')/255
test_features = test_features.astype('float32')/255
In [6]:
# One-hot encode the integer class labels
train_labels = np_utils.to_categorical(train_labels, num_classes)
test_labels = np_utils.to_categorical(test_labels, num_classes)
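
to_categorical turns each integer label into a one-hot vector; a quick check of the resulting shapes:

In [ ]:
print(train_labels.shape)  # expected: (50000, 10)
print(test_labels.shape)   # expected: (10000, 10)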
In [18]:
model = Sequential()
# Single conv block: 48 3x3 filters on the channels-first 3x32x32 input
model.add(Convolution2D(48, (3, 3), padding='same', input_shape=(3, 32, 32)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
# Classifier head
model.add(Flatten())
model.add(Dense(512))
model.add(Activation('relu'))
model.add(Dense(num_classes, activation='softmax'))
# A deeper, alternative architecture kept for reference (not used in this run):
'''
model.add(Convolution2D(48,(3,3),padding='same',input_shape=(3,32,32)))
model.add(Activation('relu'))
model.add(Convolution2D(48,(3,3),padding='same'))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Dropout(0.25))
model.add(Convolution2D(96,(3,3),padding='same'))
model.add(Activation('relu'))
model.add(Convolution2D(96,(3,3),padding='same'))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Dropout(0.25))
model.add(Convolution2D(192,(3,3),padding='same'))
model.add(Activation('relu'))
model.add(Convolution2D(192,(3,3),padding='same'))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(512))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(256))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes,activation='softmax'))
'''
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
In [20]:
start = time.time()
model_info = model.fit(train_features, train_labels,
                       batch_size=1024, epochs=10,
                       validation_data=(test_features, test_labels), verbose=1)
end = time.time()
Train on 50000 samples, validate on 10000 samples
Epoch 1/10
50000/50000 [==============================] - 206s - loss: 1.9141 - acc: 0.3370 - val_loss: 1.6492 - val_acc: 0.4319
Epoch 2/10
50000/50000 [==============================] - 207s - loss: 1.5519 - acc: 0.4593 - val_loss: 1.4593 - val_acc: 0.4867
Epoch 3/10
50000/50000 [==============================] - 209s - loss: 1.3965 - acc: 0.5144 - val_loss: 1.3425 - val_acc: 0.5289
Epoch 4/10
50000/50000 [==============================] - 221s - loss: 1.3022 - acc: 0.5456 - val_loss: 1.2670 - val_acc: 0.5566
Epoch 5/10
50000/50000 [==============================] - 222s - loss: 1.2292 - acc: 0.5749 - val_loss: 1.2177 - val_acc: 0.5795
Epoch 6/10
50000/50000 [==============================] - 221s - loss: 1.1749 - acc: 0.5932 - val_loss: 1.1825 - val_acc: 0.5878
Epoch 7/10
50000/50000 [==============================] - 220s - loss: 1.1328 - acc: 0.6097 - val_loss: 1.1501 - val_acc: 0.5982
Epoch 8/10
50000/50000 [==============================] - 220s - loss: 1.0958 - acc: 0.6191 - val_loss: 1.1161 - val_acc: 0.6138
Epoch 9/10
50000/50000 [==============================] - 223s - loss: 1.0491 - acc: 0.6372 - val_loss: 1.0839 - val_acc: 0.6229
Epoch 10/10
50000/50000 [==============================] - 224s - loss: 1.0243 - acc: 0.6456 - val_loss: 1.0685 - val_acc: 0.6312
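
The fit call returns a History object whose .history dict holds the per-epoch metrics; a minimal sketch for plotting the learning curves and reporting the wall-clock training time (the 'acc'/'val_acc' key names assume this Keras version's naming, which matches the log above):

In [ ]:
print("Training took %0.1f seconds" % (end - start))
hist = model_info.history
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 4))
ax1.plot(hist['loss'], label='train')
ax1.plot(hist['val_loss'], label='validation')
ax1.set_title('Loss')
ax1.set_xlabel('epoch')
ax1.legend()
ax2.plot(hist['acc'], label='train')
ax2.plot(hist['val_acc'], label='validation')
ax2.set_title('Accuracy')
ax2.set_xlabel('epoch')
ax2.legend()
plt.show()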
In [21]:
model.summary()
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
conv2d_21 (Conv2D)           (None, 48, 32, 32)        1344      
_________________________________________________________________
activation_27 (Activation)   (None, 48, 32, 32)        0         
_________________________________________________________________
max_pooling2d_12 (MaxPooling (None, 48, 16, 16)        0         
_________________________________________________________________
dropout_18 (Dropout)         (None, 48, 16, 16)        0         
_________________________________________________________________
flatten_4 (Flatten)          (None, 12288)             0         
_________________________________________________________________
dense_12 (Dense)             (None, 512)               6291968   
_________________________________________________________________
activation_28 (Activation)   (None, 512)               0         
_________________________________________________________________
dense_13 (Dense)             (None, 10)                5130      
=================================================================
Total params: 6,298,442
Trainable params: 6,298,442
Non-trainable params: 0
_________________________________________________________________
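
The parameter counts in the summary can be verified by hand, and the trained model scored on the held-out test set; a short sketch:

In [ ]:
# Conv2D: (3x3 kernel * 3 input channels + 1 bias) per filter, 48 filters
print((3 * 3 * 3 + 1) * 48)        # 1344
# Dense(512) on the flattened 48 * 16 * 16 = 12288 features
print((48 * 16 * 16 + 1) * 512)    # 6291968
# Output Dense(10)
print((512 + 1) * 10)              # 5130

# Accuracy on the test set (should roughly match the final val_acc above)
loss, acc = model.evaluate(test_features, test_labels, verbose=0)
print("Test accuracy: %.4f" % acc)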