|
- # Copyright 2020 Huawei Technologies Co., Ltd
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- # http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
- # ============================================================================
"""
#################### train MNIST CNN example ####################
Trains a small VGG-style Keras CNN on the MNIST dataset stored as CSV
(label in column 0, 784 pixel columns after). Intended to run on
ModelArts, which copies the dataset from OBS via moxing:
python train.py --data_url <obs-dataset-path> --train_url <obs-model-path>
"""
-
- # download and put Mnist dataset into ./Data/train and ./Data/test directories
- # python train.py --device_target=CPU > log_train.txt 2>&1
-
- import os
- import argparse
- import moxing as mox
- import numpy as np
- from matplotlib import pyplot as plt
- import matplotlib.image as mpimg
- from tensorflow.keras.utils import to_categorical
- from keras.models import Sequential
- from keras.layers import Dense, Activation, Dropout, Conv2D, MaxPooling2D
- from keras.optimizers import SGD
- import pandas as pd
- import tensorflow as tf
- from keras.layers.core import Flatten
-
parser = argparse.ArgumentParser(description='MindSpore Lenet Example')
# define 2 parameters for running on modelArts
parser.add_argument('--data_url',
                    help='path to training/inference dataset folder',
                    default='./data')

parser.add_argument('--train_url',
                    help='model folder to save/load',
                    default='./model')
# type=int: without it a CLI-supplied value arrives as a string and
# model.fit(epochs='5') would fail; the help text was previously a
# copy-paste of --train_url's.
parser.add_argument('--epoch_size',
                    type=int,
                    help='number of training epochs',
                    default=5)
-
-
def load_data(data_path):
    """Load an MNIST-style CSV and return (images, one-hot labels).

    The CSV is expected to have a header row, the class label in
    column 0 and the 784 pixel values in the remaining columns.

    Args:
        data_path: path to the CSV file.

    Returns:
        x: array of shape (N, 28, 28, 1) with raw pixel values
           (scaling to [0, 1] is left to the caller).
        y: float32 one-hot labels of shape (N, 10).
    """
    data = pd.read_csv(data_path)  # Read dataset from csv file (header row skipped)
    data = data.values             # Extract the underlying numpy array
    np.random.shuffle(data)        # In-place row shuffle; unseeded, so not reproducible
    x = data[:, 1:].reshape(-1, 28, 28, 1)
    y = data[:, 0].astype(np.int32)
    # One-hot encode with numpy instead of tf.keras.utils.to_categorical:
    # identical result (float32, shape (N, 10)) without needing TensorFlow
    # inside this function. Assumes labels are in range 0..9.
    y = np.eye(10, dtype=np.float32)[y]
    return x, y
-
-
if __name__ == "__main__":
    args = parser.parse_args()

    # Remember the OBS (object-storage) URLs, then repoint the args at the
    # local container paths that moxing will copy the data into.
    obs_data_url = args.data_url
    args.data_url = '/home/work/user-job-dir/inputs/data/'
    obs_train_url = args.train_url
    args.train_url = '/home/work/user-job-dir/outputs/model/'
    try:
        # Download the dataset from OBS into the local container path.
        mox.file.copy_parallel(obs_data_url, args.data_url)
        print("Successfully Download {} to {}".format(obs_data_url,
                                                      args.data_url))
    except Exception as e:
        # Best-effort: log and continue; load_data below will fail loudly
        # if the files really are missing.
        print('moxing download {} to {} failed: '.format(
            obs_data_url, args.data_url) + str(e))

    X_train, y_train = load_data("/home/work/user-job-dir/inputs/data/mnist_csv/mnist_train.csv")
    X_test, y_test = load_data("/home/work/user-job-dir/inputs/data/mnist_csv/mnist_test.csv")

    # Scale raw pixel values from [0, 255] down to [0, 1].
    X_train = X_train.astype('float32')
    X_test = X_test.astype('float32')
    X_train /= 255
    X_test /= 255

    print(X_train.shape)
    print(X_test.shape)
    print(y_train.shape)
    print(y_test.shape)

    # VGG-style CNN: three stacked-conv blocks (32 -> 64 -> 128 filters),
    # each followed by pooling, then a dense head with softmax over 10 classes.
    model = Sequential()
    model.add(Conv2D(32, (3, 3), activation = 'relu', kernel_initializer='he_uniform', padding = 'same', input_shape = (28, 28, 1)))
    model.add(Conv2D(32, (3, 3), activation = 'relu', kernel_initializer='he_uniform', padding = 'same', input_shape = (28, 28, 1)))
    model.add(MaxPooling2D((2,2)))
    model.add(Conv2D(64, (3, 3), activation = 'relu', kernel_initializer='he_uniform', padding = 'same'))
    model.add(Conv2D(64, (3, 3), activation = 'relu', kernel_initializer='he_uniform', padding = 'same'))
    model.add(MaxPooling2D((3, 3)))
    model.add(Conv2D(128, (3, 3), activation = 'relu', kernel_initializer='he_uniform', padding = 'same'))
    model.add(Conv2D(128, (3, 3), activation = 'relu', kernel_initializer='he_uniform', padding = 'same'))
    model.add(MaxPooling2D((3, 3)))
    # Flatten the feature maps before the fully-connected layers.
    model.add(Flatten())
    model.add(Dense(128, activation = 'relu', kernel_initializer='he_uniform'))
    model.add(Dense(10, activation = 'softmax'))

    # NOTE(review): `lr` is the legacy Keras argument name (newer Keras uses
    # `learning_rate`) — kept as-is to match the keras version this file imports.
    opt = SGD(lr = 0.01, momentum = 0.9)
    model.compile(optimizer=opt, loss = 'categorical_crossentropy', metrics=['accuracy'])

    # int(...) guards against argparse handing back a string when
    # --epoch_size is supplied on the command line.
    history = model.fit(X_train, y_train, epochs = int(args.epoch_size), batch_size=64, validation_data=(X_test, y_test), verbose= 1)
-
|