# Transfer learning with VGG16 using Keras
```
# Pull a CUDA image and start a GPU-enabled container
sudo docker pull nvidia/
sudo docker run --runtime=nvidia --name yoshitake03 -it nvidia/
apt-get update
apt install -y ssh python3-pip nano git unzip screen eog
# GNU screen settings
echo "
defscrollback 100000
screen /

python3 -m pip install --upgrade pip
python3 -m pip install keras
python3 -m pip install tensorflow-gpu
python3 -m pip install ipython
python3 -m pip install opencv-python
python3 -m pip install tqdm
python3 -m pip install Pillow
python3 -m pip install matplotlib

apt install -y libglib2.0 libsm6 libxext6 libfontconfig1 libxrender1 python3-tk

# Copy libcudnn7-dev_7.0.5.15-1+cuda9.0_amd64.deb and libcudnn7_7.0.5.15-1+cuda9.0_amd64.deb
# into the container by some means, then install them:
dpkg -i libcudnn7*

# If you have left the container partway through and want to resume it:
sudo docker start -i yoshitake03

# How to inspect image data stored as raw bytes:
od -An -v -tu1 -j16 -w28 train-images-idx3-ubyte |more
```
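
In the od command, -j16 skips the 16-byte IDX header and -w28 prints one 28-pixel row per line. As a cross-check, here is a minimal Python sketch that reads the same train-images-idx3-ubyte file directly, assuming the standard MNIST IDX3 layout (big-endian header of magic number, image count, rows, columns, followed by unsigned-byte pixels):

```
# Minimal sketch: read the IDX3 header and dump the first image (standard MNIST layout assumed)
import struct

with open('train-images-idx3-ubyte', 'rb') as f:
    magic, n_images, n_rows, n_cols = struct.unpack('>IIII', f.read(16))
    print(magic, n_images, n_rows, n_cols)          # 2051 60000 28 28 for MNIST
    first = f.read(n_rows * n_cols)                 # pixel values of the first image, row-major
    for r in range(n_rows):
        row = first[r * n_cols:(r + 1) * n_cols]
        print(' '.join('%3d' % b for b in row))
```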

Create the directories used below, and inside each one make a sub-folder per category and put the images there (for example, dog photos go in their own folder under train/).

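As a sanity check of that layout, here is a small sketch, assuming the directories are simply named train and validation and contain only category sub-folders; it prints the number of images per category, which is the structure flow_from_directory expects:

```
# Count images per category folder; flow_from_directory derives the class labels
# from these sub-directory names ('train' and 'validation' are assumed names)
import os

for root in ('train', 'validation'):
    for category in sorted(os.listdir(root)):
        n_files = len(os.listdir(os.path.join(root, category)))
        print('%s/%s: %d images' % (root, category, n_files))
```
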
Python script for training.

To run it, do something like python3 jiseki-vgg16.py.
```
from keras.models import Model
from keras.layers import Dense, GlobalAveragePooling2D, Input, Flatten
from keras.applications.vgg16 import VGG16
from keras.preprocessing.image import ImageDataGenerator
from keras.optimizers import SGD
from keras.models import Sequential
from keras.callbacks import CSVLogger

n_categories=132
batch_size=32
train_dir='train'              # training images: one sub-folder per category (adjust to your layout)
validation_dir='validation'    # validation images, same layout (adjust to your layout)
file_name='vgg16_transfer'     # base name for the saved model/weights/log (placeholder; pick any name)

base_model=VGG16(weights='imagenet', include_top=False,
                 input_tensor=Input(shape=(224,224,3)))

#add new layers instead of FC networks
x=base_model.output
#x=GlobalAveragePooling2D()(x)
x=Flatten()(x)
x=Dense(1024, activation='relu')(x)
prediction=Dense(n_categories, activation='softmax')(x)
model=Model(inputs=base_model.input, outputs=prediction)

#alternative: build the classifier head as a separate Sequential model
#top_model = Sequential()
#top_model.add(Flatten(input_shape=base_model.output_shape[1:]))
#top_model.add(Dense(1024, activation='relu'))
#top_model.add(Dense(n_categories, activation='softmax'))
#model = Model(input=base_model.input, output=top_model(base_model.output))

#fix weights before VGG16 14layers
for layer in base_model.layers[:15]:
    layer.trainable=False

#model.compile(optimizer='adam',
#              loss='categorical_crossentropy',
#              metrics=['accuracy'])
model.compile(loss='categorical_crossentropy',
              optimizer=SGD(lr=1e-3, momentum=0.9),   # momentum=0.9 is a typical choice here
              metrics=['accuracy'])

model.summary()

#save model architecture as JSON
json_string=model.to_json()
open(file_name+'.json', 'w').write(json_string)

train_datagen=ImageDataGenerator(
    rescale=1.0/255,
    rotation_range=180,
#    width_shift_range=0.1,
    height_shift_range=0.1,
#    shear_range=0.2,
#    zoom_range=0.2,
    horizontal_flip=True)

validation_datagen=ImageDataGenerator(rescale=1.0/255)

train_generator=train_datagen.flow_from_directory(
    train_dir,
    target_size=(224,224),
    batch_size=batch_size,
    class_mode='categorical',
    shuffle=True
)

validation_generator=validation_datagen.flow_from_directory(
    validation_dir,
    target_size=(224,224),
    batch_size=batch_size,
    class_mode='categorical',
    shuffle=True
)

hist=model.fit_generator(train_generator,
                         epochs=50,                   # adjust the number of epochs as needed
                         verbose=1,
                         validation_data=validation_generator,
                         callbacks=[CSVLogger(file_name+'.csv')])

#save weights
model.save(file_name+'.h5')
```
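
The hist object returned by fit_generator holds the per-epoch loss and accuracy. Here is a minimal sketch, appended to the end of the training script, that saves the learning curves as PNG files (matplotlib was installed above; output file names are built from file_name):

```
# Save accuracy/loss curves from the History object as PNG files
import matplotlib
matplotlib.use('Agg')                 # write files instead of opening a window
import matplotlib.pyplot as plt

acc_key = 'acc' if 'acc' in hist.history else 'accuracy'   # key name differs between Keras versions
for key in (acc_key, 'loss'):
    plt.figure()
    plt.plot(hist.history[key], label='train')
    plt.plot(hist.history['val_' + key], label='validation')
    plt.xlabel('epoch')
    plt.ylabel(key)
    plt.legend()
    plt.savefig(file_name + '_' + key + '.png')
```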

Prediction script.

Usage:

python3 jiseki_predict.py xxx.jpg

```
from keras.models import model_from_json
import matplotlib.pyplot as plt
import numpy as np
import os,random
from keras.preprocessing.image import img_to_array, load_img
from keras.preprocessing.image import ImageDataGenerator
from keras.optimizers import SGD
import sys

batch_size=32
file_name='vgg16_transfer'    # same base name used when training (placeholder)
test_dir='test'               # test images, one sub-folder per category (placeholder)
display_dir='display'         # images for display (placeholder; not used below)

# Class labels in the same order as the training generator's class indices.
# The original script spells out all 132 family-genus names by hand
# (pairs such as 'Plecoglossidae-Plecoglossus' and 'Harpadontidae-Harpadon');
# deriving them from the training directory reproduces the alphanumeric order
# that flow_from_directory used (path 'train' assumed, as in the training script).
label=sorted(os.listdir('train'))

#load model and weights
json_string=open(file_name+'.json').read()
model=model_from_json(json_string)
model.load_weights(file_name+'.h5')

model.compile(optimizer=SGD(lr=0.0001, momentum=0.9),   # momentum=0.9 is a typical choice here
              loss='categorical_crossentropy',
              metrics=['accuracy'])

#data generator
test_datagen=ImageDataGenerator(rescale=1.0/255)

#test_generator=test_datagen.flow_from_directory(
#    test_dir,
#    target_size=(224,224),
#    batch_size=batch_size,
#    class_mode='categorical',
#    shuffle=True
#)

#evaluate model
#score=model.evaluate_generator(test_generator)
#print('loss: '+str(score[0]))
#print('accuracy: '+str(score[1]))

#predict model and display images
for i in sys.argv[1:]:
    temp_img=load_img(i, target_size=(224,224))
#    plt.imshow(temp_img)
    temp_img_array=img_to_array(temp_img)
    temp_img_array=temp_img_array.astype('float32')/255.0
    temp_img_array=temp_img_array.reshape((1,224,224,3))
    img_pred=model.predict(temp_img_array)
    print(i+":")
    # rank all classes by predicted probability and print the top five
    temp_res_list={}
    k=0
    for j in img_pred[0]:
        temp_res_list[k]=j
        k=k+1
    temp_res_sort=sorted(temp_res_list.items(), key=lambda x: x[1], reverse=True)
    print("1st: "+label[temp_res_sort[0][0]]+" "+str(temp_res_sort[0][1]))
    print("2nd: "+label[temp_res_sort[1][0]]+" "+str(temp_res_sort[1][1]))
    print("3rd: "+label[temp_res_sort[2][0]]+" "+str(temp_res_sort[2][1]))
    print("4th: "+label[temp_res_sort[3][0]]+" "+str(temp_res_sort[3][1]))
    print("5th: "+label[temp_res_sort[4][0]]+" "+str(temp_res_sort[4][1]))
    del temp_img
    del img_pred
```
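
For an overall score on a held-out set instead of per-image predictions, the commented-out generator above can be revived and passed to evaluate_generator. A minimal sketch, appended to the end of the prediction script, assuming test_dir uses the same one-folder-per-category layout as train:

```
# Minimal sketch: overall loss/accuracy on a test directory laid out like train/
test_generator=test_datagen.flow_from_directory(
    test_dir,
    target_size=(224,224),
    batch_size=batch_size,
    class_mode='categorical',
    shuffle=False)                    # keep a stable order

score=model.evaluate_generator(test_generator)
print('test loss: '+str(score[0]))
print('test accuracy: '+str(score[1]))
```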