Top 6% in the Digit Recognizer Competition


Learn computer vision fundamentals with the famous MNIST data

Built with Keras + TensorFlow.

Predictions were made with both a VGG-style network and a ResNet-style network, which scored 0.99300 and 0.99614 respectively.

Straight to the code:

# coding=utf-8
from pandas import read_csv
import numpy as np
from keras.models import Sequential, Model
from keras.layers import Dense, Dropout, BatchNormalization, Flatten, add, Input
from keras.layers.convolutional import Conv2D, MaxPooling2D, AveragePooling2D
from keras.utils.np_utils import to_categorical
from keras.optimizers import SGD
from keras.callbacks import ModelCheckpoint
from keras.preprocessing.image import ImageDataGenerator
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import cPickle
import gzip

# ResNet reaches 0.99614 after 6 epochs; VGG reaches 0.99300.

# Extra MNIST data (train/valid/test splits of the classic mnist.pkl.gz) is merged
# with the Kaggle training set to enlarge the training pool.
data = gzip.open(r'/media/wmy/document/BigData/kaggle/Digit Recognizer/mnist.pkl.gz')
train_set, valid_set, test_set = cPickle.load(data)
train_data = np.concatenate([train_set[0], valid_set[0], test_set[0]])
train_label = np.concatenate([train_set[1], valid_set[1], test_set[1]])

# Kaggle CSVs; header=None assumes the header row has already been stripped from the files.
train = read_csv(r'/media/wmy/document/BigData/kaggle/Digit Recognizer/train.csv', header=None, delimiter=',')
test = read_csv(r'/media/wmy/document/BigData/kaggle/Digit Recognizer/test.csv', header=None, delimiter=',')
train = train.values
train_x = np.concatenate((train[:, 1:], train_data))
train_y = np.concatenate((train[:, 0], train_label))
train_x = train_x.reshape((-1, 28, 28, 1)) / 255.
train_y = to_categorical(train_y)
print(train_x.shape)
print(train_y.shape)
test = test.values
test_x = test[:, :].reshape((-1, 28, 28, 1)) / 255.

seed = 7
np.random.seed(seed=seed)


def CNN_Model_1():
    # Simple baseline CNN: three conv/pool stages followed by two dense layers.
    model = Sequential()
    model.add(Conv2D(64, (3, 3), input_shape=(28, 28, 1), activation='relu', padding='same'))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Conv2D(96, (3, 3), activation='relu', padding='same'))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Conv2D(96, (2, 2), activation='relu', padding='valid'))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Flatten())
    model.add(Dense(100, activation='relu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.5))
    model.add(Dense(100, activation='relu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.5))
    model.add(Dense(10, activation='softmax'))
    sgd = SGD(lr=0.001, momentum=0.9)
    model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])
    model.summary()
    return model


def VGG():
    # VGG-style network: stacks of 3x3 (then 2x2) convolutions with batch norm,
    # two max-pooling stages, 7x7 average pooling and a softmax head.
    model = Sequential()
    model.add(Conv2D(64, (3, 3), input_shape=(28, 28, 1), activation='relu', padding='same'))
    model.add(BatchNormalization())
    model.add(Conv2D(64, (3, 3), activation='relu', padding='same'))
    model.add(BatchNormalization())
    model.add(Conv2D(64, (3, 3), activation='relu', padding='same'))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Conv2D(128, (3, 3), activation='relu', padding='same'))
    model.add(BatchNormalization())
    model.add(Conv2D(128, (3, 3), activation='relu', padding='same'))
    model.add(BatchNormalization())
    model.add(Conv2D(128, (3, 3), activation='relu', padding='same'))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Conv2D(256, (2, 2), activation='relu', padding='same'))
    model.add(BatchNormalization())
    model.add(Conv2D(256, (2, 2), activation='relu', padding='same'))
    model.add(BatchNormalization())
    model.add(Conv2D(256, (2, 2), activation='relu', padding='same'))
    model.add(BatchNormalization())
    model.add(AveragePooling2D(pool_size=(7, 7)))
    model.add(Flatten())
    model.add(Dense(10, activation='softmax'))
    sgd = SGD(lr=0.001, momentum=0.9)
    model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])
    model.summary()
    return model


def Conv2d_BN(x, nb_filter, kernel_size, strides=(1, 1), padding='same'):
    # Convolution + batch normalization helper.
    x = Conv2D(nb_filter, kernel_size, padding=padding, strides=strides, activation='relu')(x)
    x = BatchNormalization(axis=3)(x)
    return x


def Conv_Block(inpt, nb_filter, kernel_size, strides=(1, 1), with_conv_shortcut=False):
    # Residual block: two conv+BN layers whose output is added back to the block input.
    # When the filter count or stride changes, a convolutional shortcut projects the
    # input to the matching shape.
    x = Conv2d_BN(inpt, nb_filter=nb_filter, kernel_size=kernel_size, strides=strides, padding='same')
    x = Conv2d_BN(x, nb_filter=nb_filter, kernel_size=kernel_size, padding='same')
    if with_conv_shortcut:
        shortcut = Conv2d_BN(inpt, nb_filter=nb_filter, strides=strides, kernel_size=kernel_size)
        x = add([x, shortcut])
        return x
    else:
        x = add([x, inpt])
        return x


def Resnet():
    inpt = Input(shape=(28, 28, 1))
    x = Conv2d_BN(inpt, nb_filter=64, kernel_size=(3, 3))
    x = Conv_Block(x, nb_filter=64, kernel_size=(3, 3))
    x = Conv_Block(x, nb_filter=64, kernel_size=(3, 3))
    x = Conv_Block(x, nb_filter=64, kernel_size=(3, 3))
    x = Conv_Block(x, nb_filter=128, kernel_size=(3, 3), strides=(1, 1), with_conv_shortcut=True)
    x = Conv_Block(x, nb_filter=128, kernel_size=(3, 3))
    x = Conv_Block(x, nb_filter=128, kernel_size=(3, 3))
    x = Conv_Block(x, nb_filter=128, kernel_size=(3, 3))
    x = Conv_Block(x, nb_filter=256, kernel_size=(3, 3), strides=(2, 2), with_conv_shortcut=True)
    x = Conv_Block(x, nb_filter=256, kernel_size=(3, 3))
    x = Conv_Block(x, nb_filter=256, kernel_size=(3, 3))
    x = Conv_Block(x, nb_filter=256, kernel_size=(3, 3))
    x = Conv_Block(x, nb_filter=256, kernel_size=(3, 3))
    x = Conv_Block(x, nb_filter=256, kernel_size=(3, 3))
    x = Conv_Block(x, nb_filter=512, kernel_size=(3, 3), strides=(2, 2), with_conv_shortcut=True)
    x = Conv_Block(x, nb_filter=512, kernel_size=(3, 3))
    x = Conv_Block(x, nb_filter=512, kernel_size=(3, 3))
    x = AveragePooling2D(pool_size=(7, 7))(x)
    x = Flatten()(x)
    x = Dense(10, activation='softmax')(x)
    model = Model(inpt, x)
    sgd = SGD(lr=0.001, momentum=0.9)
    model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])
    model.summary()
    return model


# Keep only the weights that achieve the best validation accuracy.
callback = []
model_check = ModelCheckpoint(filepath='best_params.h5', monitor='val_acc', save_best_only=True, mode='max')
callback.append(model_check)

#model = VGG()
model = Resnet()

'''
datagen = ImageDataGenerator(
    featurewise_center=True,
    samplewise_center=True,
    featurewise_std_normalization=True,
    samplewise_std_normalization=True,
    zca_whitening=False,
    rotation_range=10.,
    width_shift_range=.2,
    height_shift_range=.2,
    rescale=None,
)
datagen.fit(train_x)
'''
'''
model.fit_generator(
    datagen.flow(train_x, train_y, batch_size=20),
    steps_per_epoch=train_x.shape[0],
    epochs=30,
    verbose=2,
    validation_data=(valid_x, valid_y)
)
'''
# Training is commented out here; the script loads the previously saved best weights and only predicts.
#model.fit(train_x,train_y,validation_split=0.1,epochs=50,batch_size=20,verbose=2,callbacks=callback)
model.load_weights('best_params.h5')

#yPred = model.predict_classes(test_x,batch_size=20,verbose=2)
yPred = model.predict(test_x, batch_size=20, verbose=2)
yPred = np.argmax(yPred, axis=1)
np.savetxt('wmy_pred.csv', np.c_[range(1, len(yPred) + 1), yPred],
           delimiter=',', header='ImageId,Label', comments='', fmt='%d')
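
Two notes on the commented-out augmentation path above: valid_x / valid_y are never defined, and steps_per_epoch is set to the number of samples rather than the number of batches. If you want to enable that path, here is a minimal sketch continuing the script above (the batch size of 20 mirrors the original fit calls; the 10% hold-out ratio and the augmentation ranges are assumptions):

# Continuation of the listing: train_x, train_y, model, callback and seed
# are assumed to be defined as above.
batch_size = 20

# Hold out part of the merged training data for validation (ratio is an assumption).
tr_x, valid_x, tr_y, valid_y = train_test_split(train_x, train_y, test_size=0.1, random_state=seed)

# Mild augmentation only; digits should not be flipped or shifted too far.
datagen = ImageDataGenerator(rotation_range=10,
                             width_shift_range=0.1,
                             height_shift_range=0.1,
                             zoom_range=0.1)

model.fit_generator(datagen.flow(tr_x, tr_y, batch_size=batch_size),
                    steps_per_epoch=len(tr_x) // batch_size,  # batches per epoch, not samples
                    epochs=30,
                    verbose=2,
                    validation_data=(valid_x, valid_y),
                    callbacks=callback)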
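
The listing targets Python 2 (cPickle, old-style Keras imports). Under Python 3, the same mnist.pkl.gz can be read with the standard pickle module; a minimal sketch, assuming the same file path:

import gzip
import pickle

# The classic mnist.pkl.gz was pickled under Python 2, so latin1 decoding is needed in Python 3.
with gzip.open('/media/wmy/document/BigData/kaggle/Digit Recognizer/mnist.pkl.gz', 'rb') as f:
    train_set, valid_set, test_set = pickle.load(f, encoding='latin1')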

Please credit the original source when reposting: https://www.6miu.com/read-4196.html
