CNN with Keras

Transfer learning with VGG16 using Keras

m208.s has a GTX 1050 GPU, so first log in to m208.s via ssh.

sudo docker pull nvidia/cuda:9.0-cudnn7-devel
sudo docker run --runtime=nvidia --name yoshitake03 -it nvidia/cuda:9.0-cudnn7-devel /bin/bash #change yoshitake03 to a name of your choice
apt-get update
apt install -y ssh python3-pip nano git unzip screen eog
apt install -y libglib2.0 libsm6 libxext6 libfontconfig1 libxrender1 python3-tk
apt install -y liblapack-dev libhdf5-dev zlib1g-dev libfreetype6-dev libpng12-dev locate
echo "termcapinfo xterm 'is=\E[r\E[m\E[2J\E[H\E[?7h\E[?1;4;6l'
defscrollback 100000
screen /bin/bash" > ~/.screenrc

python3 -m pip install --upgrade pip
python3 -m pip install keras
python3 -m pip install tensorflow-gpu
python3 -m pip install ipython
python3 -m pip install opencv-python
python3 -m pip install tqdm
python3 -m pip install Pillow
python3 -m pip install matplotlib

Copy libcudnn7-dev_7.0.5.15-1+cuda9.0_amd64.deb and libcudnn7_7.0.5.15-1+cuda9.0_amd64.deb into the container by whatever means, then install them:
dpkg -i libcudnn7*
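
After installing cuDNN, it is worth checking that tensorflow-gpu can actually load the CUDA/cuDNN libraries and see the GTX 1050. A minimal sketch, run from python3 inside the container (device names in the output may differ):

import tensorflow as tf
from tensorflow.python.client import device_lib

print(tf.__version__)
# list the devices TensorFlow can use; a '/device:GPU:0' entry should appear
print([d.name for d in device_lib.list_local_devices()])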

If you have left the Docker container partway through and want to resume it later:
sudo docker start -i yoshitake03

How to check image data stored in raw byte format:
od -An -v -tu1 -j16 -w28 train-images-idx3-ubyte |more
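
The od options skip the 16-byte idx header (-j16) and print 28 unsigned bytes per line (-w28), so each 28x28 image appears as a block of pixel values. The same check can be done in Python; a small sketch, assuming train-images-idx3-ubyte is in the current directory:

import numpy as np

# read the idx file as unsigned bytes, skipping the 16-byte header
with open('train-images-idx3-ubyte', 'rb') as f:
    pixels = np.frombuffer(f.read(), dtype=np.uint8, offset=16)

images = pixels.reshape(-1, 28, 28)
print(images.shape)   # e.g. (60000, 28, 28) for the MNIST training set
print(images[0])      # first image as a 28x28 block of pixel values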

Create the directories listed below, and inside each of them create one sub-folder per category and put the images there. (Example: for dog photos, create the directory train/dog and copy dog01.jpg, dog02.jpg, … into it.) A sanity check for this layout is sketched after the list.

train, validation, test
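
A quick way to confirm the layout matches what the scripts below expect (a sketch; it assumes the base directory jiseki/ used in the training script and simply counts images per category):

import os

base_dir = 'jiseki'
for split in ('train', 'validation', 'test'):
    split_dir = os.path.join(base_dir, split)
    for category in sorted(os.listdir(split_dir)):
        n_images = len(os.listdir(os.path.join(split_dir, category)))
        print(split, category, n_images, 'images')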

Python script for training

To run it, use something like: python3 jiseki-vgg16.py

from keras.models import Model
from keras.layers import Dense, GlobalAveragePooling2D,Input, Flatten, Activation, Dropout
from keras.applications.vgg16 import VGG16
from keras.preprocessing.image import ImageDataGenerator
from keras.optimizers import SGD
from keras.models import Sequential
from keras.callbacks import CSVLogger

n_categories=132
batch_size=32
train_dir='jiseki/train'
validation_dir='jiseki/validation'
file_name='jiseki_vgg16'

base_model=VGG16(weights='imagenet',include_top=False,
                 input_tensor=Input(shape=(224,224,3)))

#add new layers instead of FC networks

x=base_model.output
#x=GlobalAveragePooling2D()(x)
x=Flatten()(x)
x=Dense(1024,activation='relu')(x)
prediction=Dense(n_categories,activation='softmax')(x)
model=Model(inputs=base_model.input,outputs=prediction)
#top_model = Sequential()
#top_model.add(Flatten(input_shape=base_model.output_shape[1:]))
#top_model.add(Dense(256))
#top_model.add(Activation("relu"))
#top_model.add(Dropout(0.5))
#top_model.add(Dense(n_categories))
#top_model.add(Activation("softmax"))
#model = Model(input=base_model.input, output=top_model(base_model.output))

#freeze the weights of the first 15 layers of VGG16 (up through block4); only block5 and the new top layers are trained
for layer in base_model.layers[:15]:
    layer.trainable=False

#model.compile(optimizer=SGD(lr=0.0001,momentum=0.9),
#              loss='categorical_crossentropy',
#              metrics=['accuracy'])
model.compile(loss='categorical_crossentropy',
              optimizer=SGD(lr=1e-3, decay=1e-6, momentum=0.9, nesterov=True),
              metrics=['accuracy'])

model.summary()

#save the model architecture as JSON
json_string=model.to_json()
open(file_name+'.json','w').write(json_string)

train_datagen=ImageDataGenerator(
    rescale=1.0/255,
    rotation_range=180,
#    width_shift_range=0.1,
    height_shift_range=0.1,
#    shear_range=0.2,
#    zoom_range=0.2,
    horizontal_flip=True)

validation_datagen=ImageDataGenerator(rescale=1.0/255)

train_generator=train_datagen.flow_from_directory(
    train_dir,
    target_size=(224,224),
    batch_size=batch_size,
    class_mode='categorical',
    shuffle=True
)

validation_generator=validation_datagen.flow_from_directory(
    validation_dir,
    target_size=(224,224),
    batch_size=batch_size,
    class_mode='categorical',
    shuffle=True
)

hist=model.fit_generator(train_generator,
                         epochs=500,
                         verbose=1,
                         validation_data=validation_generator,
                         callbacks=[CSVLogger(file_name+'.csv')])

#save the trained model (architecture and weights) to HDF5
model.save(file_name+'.h5')

Prediction script

Usage:

python3 jiseki_predict.py xxx.jpg

from keras.models import model_from_json
import matplotlib.pyplot as plt
import numpy as np
import os,random
from keras.preprocessing.image import img_to_array, load_img
from keras.preprocessing.image import ImageDataGenerator
from keras.optimizers import SGD
import sys

batch_size=32
file_name='jiseki_vgg16'
test_dir='jiseki/test'
display_dir='jiseki/display'
label=['Albulidae-Albula','Albulidae-Pterothrissus','Alepocephalidae-Alepocephalus','Alepocephalidae-Xenodermichthys','Anguillidae-Anguilla','Antennariidae-Antennarius',
       'Antennariidae-Histrio','Argentinidae-Argentina','Argentinidae-Glossanodon','Ariidae-Arius','Ariidae-Netuma','Ariidae-Plicofollis','Ateleopodidae-Ateleopus',
       'Ateleopodidae-Ijimaia','Atherinidae-Atherinomorus','Atherinidae-Hypoatherina','Aulopidae-Hime','Bagridae-Pseudobagrus','Bregmacerotidae-Bregmaceros',
       'Carapidae-Pyramodon','Chanidae-Chanos','Chaunacidae-Chaunax','Chlorophthalmidae-Chlorophthalmus','Clupeidae-Amblygaster','Clupeidae-Etrumeus',
       'Clupeidae-Herklotsichthys','Clupeidae-Nematalosa','Clupeidae-Sardinella','Congridae-Bathymyrus','Congridae-Conger','Congridae-Macrocephenchelys',
       'Congridae-Rhynchoconger','Congridae-Uroconger','Cyprinidae-Acrossocheilus','Cyprinidae-Aristichthys','Cyprinidae-Candidia','Cyprinidae-Carassius',
       'Cyprinidae-Chanodichthys','Cyprinidae-Clarias','Cyprinidae-Culter','Cyprinidae-Cyprinus','Cyprinidae-Gobiobotia','Cyprinidae-Hemiculter',
       'Cyprinidae-Hypsibarbus','Cyprinidae-Mylopharyngodon','Cyprinidae-Onychostoma','Cyprinidae-Opsariichthys','Cyprinidae-Pseudorasbora','Cyprinidae-Puntius',
       'Cyprinidae-Rhodeus','Cyprinidae-Spinibarbus','Cyprinidae-Tanakia','Elopidae-Elops','Engraulidae-Engraulis','Engraulidae-Setipinna','Engraulidae-Stolephorus',
       'Engraulidae-Thryssa','Exocoetidae-Cheilopogon','Exocoetidae-Cypselurus','Exocoetidae-Exocoetus','Exocoetidae-Hirundichthys','Exocoetidae-Oxyporhamphus',
       'Gigantactinidae-Gigantactis','Gonorynchidae-Gonorynchus','Gonostomatidae-Gonostoma','Gonostomatidae-Sigmops','Lophiidae-Lophiodes','Lophiidae-Lophiomus',
       'Lophiidae-Lophius','Lophotidae-Lophotus','Macrouridae-Coelorinchus','Macrouridae-Coelorinclus','Macrouridae-Coryphaenoides','Macrouridae-Hymenocephalus',
       'Macrouridae-Lucigadus','Macrouridae-Malacocephalus','Macrouridae-Nezumia','Macrouridae-Ventrifossa','Megalopidae-Megalops','Melanocetidae-Melanocetus',
       'Moridae-Gadella','Moridae-Laemonema','Moridae-Physiculus','Moringuidae-Moringua','Mugilidae-Chelon','Mugilidae-Moolgarda','Mugilidae-Mugil',
       'Muraenesocidae-Gavialiceps','Muraenesocidae-Muraenesox','Muraenidae-Echidna','Muraenidae-Enchelycore','Muraenidae-Gymmothorax','Myctophidae-Benthosema',
       'Myctophidae-Diaphus','Myctophidae-Electrona','Myctophidae-Myctophum','Nemichthyidae-Nemichthys','Neoscopelidae-Neoscopelus','Notacanthidae-Notacanthus',
       'Ogcocephalildae-Halieutaea','Ogcocephalildae-Malthopsis','Ogcocephalildae-Solocisquama','Ophidiidae-Glyptophidium','Ophidiidae-Hoplobrotula',
       'Ophidiidae-Neobythites','Ophidiidae-Ophidion','Osmeridae-Plecoglossus','Paralepididae-Lestidium','Paralepididae-Lestrolepis','Plotosidae-Plotosus',
       'Polymixiidae-Polymixia','Pristigasteridae-Ilisha','Regalecidae-Regalecus','Salmonidae-Oncorhynchus','Sternoptychidae-Argyropelecus','Sternoptychidae-Maurolicus',
       'Sternoptychidae-Polyipnus','Stomiidae-Astronesthes','Stomiidae-Chauliodus','Stomiidae-Photonectes','Stomiidae-Stomias','Synaphobranchidae-Dysomma',
       'Synaphobranchidae-Echelus','Synaphobranchidae-Meadia','Synaphobranchidae-Neenchelys','Synaphobranchidae-Ophichthus','Synaphobranchidae-Pisodonophis',
       'Synodontidae-Harpadon','Synodontidae-Saurida','Synodontidae-Synodus','Synodontidae-Trachinocephalus','Trachipteridae-Trachipterus']

#load model and weights
json_string=open(file_name+'.json').read()
model=model_from_json(json_string)
model.load_weights(file_name+'.h5')

model.compile(optimizer=SGD(lr=0.0001,momentum=0.9),
              loss='categorical_crossentropy',
              metrics=['accuracy'])

#data generate
test_datagen=ImageDataGenerator(rescale=1.0/255)

#test_generator=test_datagen.flow_from_directory(
#    test_dir,
#    target_size=(224,224),
#    batch_size=batch_size,
#    class_mode='categorical',
#    shuffle=True
#)

#evaluate model
#score=model.evaluate_generator(test_generator)
#print('\n test loss:',score[0])
#print('\n test_acc:',score[1])

#predict model and display images
#files=os.listdir(display_dir)
#img=random.sample(files,2)

for i in sys.argv[1:]:
  temp_img=load_img(i,target_size=(224,224))
#  plt.imshow(temp_img)
  temp_img_array=img_to_array(temp_img)
  temp_img_array=temp_img_array.astype('float32')/255.0
  temp_img_array=temp_img_array.reshape((1,224,224,3))
  img_pred=model.predict(temp_img_array)
  print(i+": "+label[np.argmax(img_pred)])
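  #rank all class probabilities and print the top five with their scores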
  temp_res_list={}
  k=0
  for j in img_pred[0]:
    temp_res_list[k]=j
    k=k+1
  temp_res_sort=sorted(temp_res_list.items(), key=lambda x:x[1], reverse=True)
  print("1: "+label[temp_res_sort[0][0]]+" "+str(temp_res_sort[0][1]))
  print("2: "+label[temp_res_sort[1][0]]+" "+str(temp_res_sort[1][1]))
  print("3: "+label[temp_res_sort[2][0]]+" "+str(temp_res_sort[2][1]))
  print("4: "+label[temp_res_sort[3][0]]+" "+str(temp_res_sort[3][1]))
  print("5: "+label[temp_res_sort[4][0]]+" "+str(temp_res_sort[4][1]))
  del temp_img
  del img_pred