# SqueezeNet for MNIST
Based on https://www.kaggle.com/somshubramajumdar/squeezenet-for-mnist
(Apache License 2.0), ported to Keras 2 with original code added, by marsee.
```python
# SqueezeNet for MNIST
# 2018/08/02 by marsee
# Reuses Python code from the Qiita article https://qiita.com/yampy/items/706d44417c433e68db0d
# ("Getting started with deep learning using Keras / TensorFlow")
# Based on https://www.kaggle.com/somshubramajumdar/squeezenet-for-mnist (Apache License 2.0),
# ported to Keras 2 with original code added
import keras
from keras.datasets import mnist
from keras import backend as K
import pandas as pd
import numpy as np
from keras.models import Model, Input
from keras.layers import Dense, Dropout, Flatten, Concatenate
from keras.layers import Conv2D, MaxPooling2D, Activation, AveragePooling2D
import keras.utils.np_utils as kutils
batch_size = 128
num_classes = 10
epochs = 10
img_rows, img_cols = 28, 28
(x_train, y_train), (x_test, y_test) = mnist.load_data()
# TensorFlow and Theano (the Keras backends) order the input channels differently,
# so handle both cases
if K.image_data_format() == 'channels_first':
    x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)
    x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)
    input_shape = (1, img_rows, img_cols)
else:
    x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
    x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
    input_shape = (img_rows, img_cols, 1)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
y_train = y_train.astype('int32')
y_test = y_test.astype('int32')
y_train = kutils.to_categorical(y_train, num_classes)
y_test = kutils.to_categorical(y_test, num_classes)
# Setup of SqueezeNet (http://arxiv.org/abs/1602.07360), which offers performance
# similar to AlexNet while using drastically fewer parameters. Tested on CIFAR10,
# it also performs well on the MNIST problem.
# Originally written for the Keras 1.0.2 functional API; updated here for Keras 2.
input_layer = Input(shape=input_shape, name="input")
#conv 1
conv1 = Conv2D(20, (3, 3), padding="valid", kernel_initializer="glorot_uniform", strides=(2, 2))(input_layer)
relu1 = Activation(activation='relu')(conv1)
#maxpool 1
maxpool1 = MaxPooling2D(pool_size=(2,2))(relu1)
#fire module (fire2, following the paper's numbering)
fire2_squeeze = Conv2D(10, (1, 1), padding="same", kernel_initializer="glorot_uniform")(maxpool1)
relu2_squeeze = Activation(activation='relu')(fire2_squeeze)
fire2_expand1 = Conv2D(10, (1, 1), padding="same", kernel_initializer="glorot_uniform")(relu2_squeeze)
relu2_expand1 = Activation(activation='relu')(fire2_expand1)
fire2_expand2 = Conv2D(10, (3, 3), padding="same", kernel_initializer="glorot_uniform")(relu2_squeeze)
relu2_expand2 = Activation(activation='relu')(fire2_expand2)
merge1 = Concatenate()([relu2_expand1, relu2_expand2])
fire2 = Activation("linear")(merge1)  # identity activation (no-op)
#conv 10
conv10 = Conv2D(10, (1, 1), padding="valid", kernel_initializer="glorot_uniform")(fire2)
# The original SqueezeNet also includes this average-pooling layer. But since MNIST
# images (1, 28, 28) are much smaller than the ImageNet-sized inputs (3, 224, 224) the
# network was designed for, AveragePooling2D would reduce the feature map to (10, 0, 0),
# crashing the script, so it is commented out.
#avgpool 1
#avgpool10 = AveragePooling2D((13,13))(conv10)
flatten = Flatten()(conv10)
softmax = Dense(10, activation="softmax")(flatten)
model = Model(inputs=input_layer, outputs=softmax)
model.compile(loss=keras.losses.categorical_crossentropy,
optimizer=keras.optimizers.Adadelta(),
metrics=['accuracy'])
history = model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs,
verbose=1, validation_data=(x_test, y_test))
```
Using TensorFlow backend.
x_train shape: (60000, 28, 28, 1)
60000 train samples
10000 test samples
Train on 60000 samples, validate on 10000 samples
Epoch 1/10
60000/60000 [==============================] - 8s 128us/step - loss: 0.4393 - acc: 0.8600 - val_loss: 0.1674 - val_acc: 0.9492
Epoch 2/10
60000/60000 [==============================] - 7s 118us/step - loss: 0.1489 - acc: 0.9546 - val_loss: 0.1089 - val_acc: 0.9657
Epoch 3/10
60000/60000 [==============================] - 7s 118us/step - loss: 0.1123 - acc: 0.9660 - val_loss: 0.0884 - val_acc: 0.9727
Epoch 4/10
60000/60000 [==============================] - 7s 118us/step - loss: 0.0940 - acc: 0.9708 - val_loss: 0.0787 - val_acc: 0.9742
Epoch 5/10
60000/60000 [==============================] - 7s 120us/step - loss: 0.0820 - acc: 0.9753 - val_loss: 0.0665 - val_acc: 0.9794
Epoch 6/10
60000/60000 [==============================] - 7s 118us/step - loss: 0.0745 - acc: 0.9772 - val_loss: 0.0605 - val_acc: 0.9806
Epoch 7/10
60000/60000 [==============================] - 7s 119us/step - loss: 0.0689 - acc: 0.9793 - val_loss: 0.0637 - val_acc: 0.9816
Epoch 8/10
60000/60000 [==============================] - 7s 118us/step - loss: 0.0643 - acc: 0.9803 - val_loss: 0.0585 - val_acc: 0.9813
Epoch 9/10
60000/60000 [==============================] - 7s 117us/step - loss: 0.0600 - acc: 0.9814 - val_loss: 0.0569 - val_acc: 0.9809
Epoch 10/10
60000/60000 [==============================] - 7s 120us/step - loss: 0.0574 - acc: 0.9819 - val_loss: 0.0540 - val_acc: 0.9825
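The fire2 block above follows SqueezeNet's fire-module pattern: a 1x1 squeeze convolution followed by parallel 1x1 and 3x3 expand convolutions whose outputs are concatenated along the channel axis. As a sketch only (the `fire_module` helper below is my own name, not part of the original code), the same pattern can be wrapped in a reusable function:
```python
from keras.layers import Conv2D, Concatenate

def fire_module(x, squeeze=10, expand=10):
    # 1x1 squeeze convolution reduces the channel count
    s = Conv2D(squeeze, (1, 1), padding="same",
               kernel_initializer="glorot_uniform", activation="relu")(x)
    # parallel 1x1 and 3x3 expand convolutions
    e1 = Conv2D(expand, (1, 1), padding="same",
                kernel_initializer="glorot_uniform", activation="relu")(s)
    e3 = Conv2D(expand, (3, 3), padding="same",
                kernel_initializer="glorot_uniform", activation="relu")(s)
    # concatenate the expand outputs along the channel axis
    return Concatenate()([e1, e3])

# e.g. fire2 = fire_module(maxpool1, squeeze=10, expand=10)
```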
```python
# Reuses Python code from the Qiita article https://qiita.com/yampy/items/706d44417c433e68db0d
%matplotlib inline
import pandas as pd
import matplotlib.pyplot as plt
# plot the accuracy
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
# plot the loss
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
```
(Figure: model accuracy, train vs. test, per epoch)
(Figure: model loss, train vs. test, per epoch)
```python
# Save the trained model
model.save('squeezenet4mnist_model.h5')  # creates an HDF5 file 'squeezenet4mnist_model.h5'
```
```python
# Load the trained model
from keras.models import load_model
model = load_model('squeezenet4mnist_model.h5')
```
Using TensorFlow backend.
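To confirm the reload, the restored model can be scored against the test set. A minimal check, assuming `x_test` and `y_test` from the preprocessing cell are still in scope:
```python
# sanity check: the reloaded model should reproduce the ~0.98 val_acc seen during training
score = model.evaluate(x_test, y_test, verbose=0)
print('test loss:', score[0])
print('test accuracy:', score[1])
```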
```python
# My MNIST CNN (5 feature maps in the convolution layer)
# Conv2D - ReLU - MaxPooling - Dense - ReLU - Dense
# 2018/05/25 by marsee
# Reuses Python code from the Qiita article https://qiita.com/yampy/items/706d44417c433e68db0d
import keras
from keras.datasets import mnist
from keras import backend as K
import pandas as pd
import numpy as np
from keras.models import Model, Input
from keras.layers import Dense, Dropout, Flatten, Concatenate
from keras.layers import Conv2D, MaxPooling2D, Activation, AveragePooling2D
import keras.utils.np_utils as kutils
batch_size = 128
num_classes = 10
epochs = 5
img_rows, img_cols = 28, 28
(x_train, y_train), (x_test, y_test) = mnist.load_data()
# TensorFlow and Theano (the Keras backends) order the input channels differently,
# so handle both cases
if K.image_data_format() == 'channels_first':
    x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)
    x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)
    input_shape = (1, img_rows, img_cols)
else:
    x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
    x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
    input_shape = (img_rows, img_cols, 1)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
y_train = y_train.astype('int32')
y_test = y_test.astype('int32')
y_train = kutils.to_categorical(y_train, num_classes)
y_test = kutils.to_categorical(y_test, num_classes)
```
x_train shape: (60000, 28, 28, 1)
60000 train samples
10000 test samples
```python
model.summary()
```
__________________________________________________________________________________________________
Layer (type) Output Shape Param # Connected to
==================================================================================================
input (InputLayer) (None, 28, 28, 1) 0
__________________________________________________________________________________________________
conv2d_1 (Conv2D) (None, 13, 13, 20) 200 input[0][0]
__________________________________________________________________________________________________
activation_1 (Activation) (None, 13, 13, 20) 0 conv2d_1[0][0]
__________________________________________________________________________________________________
max_pooling2d_1 (MaxPooling2D) (None, 6, 6, 20) 0 activation_1[0][0]
__________________________________________________________________________________________________
conv2d_2 (Conv2D) (None, 6, 6, 10) 210 max_pooling2d_1[0][0]
__________________________________________________________________________________________________
activation_2 (Activation) (None, 6, 6, 10) 0 conv2d_2[0][0]
__________________________________________________________________________________________________
conv2d_3 (Conv2D) (None, 6, 6, 10) 110 activation_2[0][0]
__________________________________________________________________________________________________
conv2d_4 (Conv2D) (None, 6, 6, 10) 910 activation_2[0][0]
__________________________________________________________________________________________________
activation_3 (Activation) (None, 6, 6, 10) 0 conv2d_3[0][0]
__________________________________________________________________________________________________
activation_4 (Activation) (None, 6, 6, 10) 0 conv2d_4[0][0]
__________________________________________________________________________________________________
concatenate_1 (Concatenate) (None, 6, 6, 20) 0 activation_3[0][0]
activation_4[0][0]
__________________________________________________________________________________________________
activation_5 (Activation) (None, 6, 6, 20) 0 concatenate_1[0][0]
__________________________________________________________________________________________________
conv2d_5 (Conv2D) (None, 6, 6, 10) 210 activation_5[0][0]
__________________________________________________________________________________________________
flatten_1 (Flatten) (None, 360) 0 conv2d_5[0][0]
__________________________________________________________________________________________________
dense_1 (Dense) (None, 10) 3610 flatten_1[0][0]
==================================================================================================
Total params: 5,250
Trainable params: 5,250
Non-trainable params: 0
__________________________________________________________________________________________________
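The Param # column can be verified by hand: a Conv2D layer with an h x w kernel, c_in input channels, and c_out filters has h*w*c_in*c_out weights plus c_out biases. A quick check against the summary above:
```python
def conv_params(h, w, c_in, c_out):
    # weights + biases of a Conv2D layer
    return h * w * c_in * c_out + c_out

print(conv_params(3, 3, 1, 20))   # conv2d_1: 200
print(conv_params(1, 1, 20, 10))  # conv2d_2 (fire2_squeeze): 210
print(conv_params(1, 1, 10, 10))  # conv2d_3 (fire2_expand1): 110
print(conv_params(3, 3, 10, 10))  # conv2d_4 (fire2_expand2): 910
print(conv_params(1, 1, 20, 10))  # conv2d_5 (conv10): 210
print(360 * 10 + 10)              # dense_1: 3610; total = 5,250
```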
```python
# Write the convolution layer weights out to a C header file
# 2018/05/31 by marsee
def fwrite_conv_weight(weight, wfile_name, float_wt_name, fixed_wt_name, MAGNIFICATION):
    import datetime
    import numpy as np

    f = open(wfile_name, 'w')
    todaytime = datetime.datetime.today()
    f.write('// '+wfile_name+'\n')
    strdtime = todaytime.strftime("%Y/%m/%d %H:%M:%S")
    f.write('// {0} by marsee\n'.format(strdtime))
    f.write("\n")

    # float weight array
    f.write('const float '+float_wt_name+'['+str(weight.shape[0])+']['+str(weight.shape[1])+']['+str(weight.shape[2])+']['+str(weight.shape[3])+'] = \n{\n')
    for i in range(weight.shape[0]):
        f.write("\t{\n")
        for j in range(weight.shape[1]):
            f.write("\t\t{\n")
            for k in range(weight.shape[2]):
                f.write("\t\t\t{")
                for m in range(weight.shape[3]):
                    f.write(str(weight[i][j][k][m]))
                    if (m==weight.shape[3]-1):
                        f.write("}")
                    else:
                        f.write(",")
                if (k==weight.shape[2]-1):
                    f.write("\n\t\t}\n")
                else:
                    f.write(",\n")
            if (j==weight.shape[1]-1):
                f.write("\t}\n")
            else:
                f.write(",\n")
        if (i==weight.shape[0]-1):
            f.write("};\n")
        else:
            f.write("\t,\n")
    f.write("\n")

    # fixed-point weight array, scaled by MAGNIFICATION and saturated
    f.write('const ap_fixed<'+str(int(np.log2(MAGNIFICATION))+1)+', 1, AP_TRN, AP_WRAP> '+fixed_wt_name+'['+str(weight.shape[0])+']['+str(weight.shape[1])+']['+str(weight.shape[2])+']['+str(weight.shape[3])+'] = \n{\n')
    for i in range(weight.shape[0]):
        f.write("\t{\n")
        for j in range(weight.shape[1]):
            f.write("\t\t{\n")
            for k in range(weight.shape[2]):
                f.write("\t\t\t{")
                for m in range(weight.shape[3]):
                    w_int = int(weight[i][j][k][m]*MAGNIFICATION+0.5)
                    if (w_int > MAGNIFICATION-1):
                        w_int = MAGNIFICATION-1
                    elif (w_int < -MAGNIFICATION):
                        w_int = -MAGNIFICATION
                    f.write(str(float(w_int)/float(MAGNIFICATION)))
                    if (m==weight.shape[3]-1):
                        f.write("}")
                    else:
                        f.write(",")
                if (k==weight.shape[2]-1):
                    f.write("\n\t\t}\n")
                else:
                    f.write(",\n")
            if (j==weight.shape[1]-1):
                f.write("\t}\n")
            else:
                f.write(",\n")
        if (i==weight.shape[0]-1):
            f.write("};\n")
        else:
            f.write("\t,\n")
    f.close()
```
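For a one-off export (hypothetical file and array names; the batch export further below is the real usage), the function can be applied to a single layer. Note that the Keras weight layout (kernel h, kernel w, input ch, output ch) is transposed to (output ch, input ch, kernel h, kernel w) before writing:
```python
# hypothetical single-layer export, assuming the trained model is in scope
w = model.get_layer('conv2d_1').get_weights()[0]  # Keras layout: (kh, kw, c_in, c_out)
MAGNIFICATION = 2 ** (9 - 1)  # 8 fractional bits, as in the batch export below
fwrite_conv_weight(w.transpose(3, 2, 0, 1), 'conv1_weight.h',
                   'conv1_fweight', 'conv1_weight', MAGNIFICATION)
```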
```python
# Write the convolution and fully-connected layer biases out to a C header file
# 2018/05/31 by marsee
def fwrite_bias(bias, wfile_name, float_b_name, fixed_wt_name, MAGNIFICATION):
    import datetime
    import numpy as np

    f = open(wfile_name, 'w')
    todaytime = datetime.datetime.today()
    f.write('// '+wfile_name+'\n')
    strdtime = todaytime.strftime("%Y/%m/%d %H:%M:%S")
    f.write('// {0} by marsee\n'.format(strdtime))
    f.write("\n")

    # float bias array
    f.write('const float '+float_b_name+'['+str(bias.shape[0])+'] = {\n\t')
    for i in range(bias.shape[0]):
        f.write(str(bias[i]))
        if (i < bias.shape[0]-1):
            f.write(", ")
    f.write("\n};\n")
    f.write("\n")

    # fixed-point bias array, scaled by MAGNIFICATION and saturated
    f.write('const ap_fixed<'+str(int(np.log2(MAGNIFICATION))+1)+', 1, AP_TRN, AP_WRAP> '+fixed_wt_name+'['+str(bias.shape[0])+'] = {\n\t')
    for i in range(bias.shape[0]):
        b_int = int(bias[i]*MAGNIFICATION+0.5)
        if (b_int > MAGNIFICATION-1):
            b_int = MAGNIFICATION-1
        elif (b_int < -MAGNIFICATION):
            b_int = -MAGNIFICATION
        f.write(str(float(b_int)/float(MAGNIFICATION)))
        if (i < bias.shape[0]-1):
            f.write(", ")
    f.write("\n};\n")
    f.close()
```
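In both writers the fixed-point conversion is the same: scale by MAGNIFICATION, truncate toward zero after adding 0.5, saturate to [-MAGNIFICATION, MAGNIFICATION-1], then divide back out. A standalone sketch of that mapping (`quantize` is my own helper name, for illustration):
```python
def quantize(v, magnification=2 ** (9 - 1)):
    # scale and round as in fwrite_conv_weight / fwrite_bias
    # (int() truncates toward zero, so negatives round slightly differently)
    q = int(v * magnification + 0.5)
    # saturate to the representable ap_fixed<9,1> range
    q = max(-magnification, min(magnification - 1, q))
    return float(q) / float(magnification)

print(quantize(0.4946))  # 0.49609375 (127/256)
print(quantize(1.5))     # 0.99609375 (saturated to 255/256)
print(quantize(-1.2))    # -1.0 (saturated to -256/256)
```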
```python
# Dump a layer's output data to a C header file (float)
# 2018/09/20 by marsee
def fwrite_layer_output(layer_out, wfile_name, layer_output_name):
    import datetime
    import numpy as np

    f = open(wfile_name, 'w')
    todaytime = datetime.datetime.today()
    f.write('// '+wfile_name+'\n')
    strdtime = todaytime.strftime("%Y/%m/%d %H:%M:%S")
    f.write('// {0} by marsee\n'.format(strdtime))
    f.write("\n")

    f.write('const float '+layer_output_name+'['+str(layer_out.shape[0])+']['+str(layer_out.shape[1])+']['+str(layer_out.shape[2])+']['+str(layer_out.shape[3])+'] = \n{\n')
    for i in range(layer_out.shape[0]):
        f.write("\t{\n")
        for j in range(layer_out.shape[1]):
            f.write("\t\t{\n")
            for k in range(layer_out.shape[2]):
                f.write("\t\t\t{")
                for m in range(layer_out.shape[3]):
                    f.write(str(layer_out[i][j][k][m]))
                    if (m==layer_out.shape[3]-1):
                        f.write("}")
                    else:
                        f.write(",")
                if (k==layer_out.shape[2]-1):
                    f.write("\n\t\t}\n")
                else:
                    f.write(",\n")
            if (j==layer_out.shape[1]-1):
                f.write("\t}\n")
            else:
                f.write(",\n")
        if (i==layer_out.shape[0]-1):
            f.write("};\n")
        else:
            f.write("\t,\n")
    f.write("\n")
    f.close()
```
```python
# Get a layer's output data (test for conv2d_1 only)
from keras.models import Model
conv_layer_name = 'conv2d_1'
conv_layer = model.get_layer(conv_layer_name)
conv_layer_wb = conv_layer.get_weights()
conv_layer_model = Model(inputs=model.input,
outputs=model.get_layer(conv_layer_name).output)
x_test_limit = x_test[:1]
y_test_limit = y_test[:10]
print(y_test_limit)
conv_output = conv_layer_model.predict(x_test_limit, verbose=1)
print(conv_output.shape)
fwrite_layer_output(conv_output, 'conv2d_1_output.h', 'conv2d_1_output')
```
[[0. 0. 0. 0. 0. 0. 0. 1. 0. 0.]
[0. 0. 1. 0. 0. 0. 0. 0. 0. 0.]
[0. 1. 0. 0. 0. 0. 0. 0. 0. 0.]
[1. 0. 0. 0. 0. 0. 0. 0. 0. 0.]
[0. 0. 0. 0. 1. 0. 0. 0. 0. 0.]
[0. 1. 0. 0. 0. 0. 0. 0. 0. 0.]
[0. 0. 0. 0. 1. 0. 0. 0. 0. 0.]
[0. 0. 0. 0. 0. 0. 0. 0. 0. 1.]
[0. 0. 0. 0. 0. 1. 0. 0. 0. 0.]
[0. 0. 0. 0. 0. 0. 0. 0. 0. 1.]]
1/1 [==============================] - 0s 74ms/step
(1, 13, 13, 20)
# Layer statistics
```python
# Extract the intermediate outputs of the convolution layers
from keras.models import Model
import numpy as np

x_test_limit = x_test[:1]

for num in range(1, 6):
    conv_layer_name = 'conv2d_' + str(num)
    conv_layer = model.get_layer(conv_layer_name)
    conv_layer_wb = conv_layer.get_weights()
    conv_layer_model = Model(inputs=model.input,
                             outputs=model.get_layer(conv_layer_name).output)
    conv_output = conv_layer_model.predict(x_test, verbose=1)
    conv_layer_weight = conv_layer_wb[0]
    conv_layer_bias = conv_layer_wb[1]

    print(conv_layer_name)
    print(conv_layer_weight.shape)
    print(conv_layer_weight.transpose(3,2,0,1).shape)
    print(conv_layer_bias.shape)
    print(conv_output.shape)

    print("np.max(conv_layer_weight) = {0}".format(np.max(conv_layer_weight)))
    print("np.min(conv_layer_weight) = {0}".format(np.min(conv_layer_weight)))
    abs_conv_layer_weight = np.absolute(conv_layer_weight)
    print("np.max(abs_conv_layer_weight) = {0}".format(np.max(abs_conv_layer_weight)))
    print("np.min(abs_conv_layer_weight) = {0}".format(np.min(abs_conv_layer_weight)))
    print("np.max(conv_layer_bias) = {0}".format(np.max(conv_layer_bias)))
    print("np.min(conv_layer_bias) = {0}".format(np.min(conv_layer_bias)))
    abs_conv_layer_bias = np.absolute(conv_layer_bias)
    print("np.max(abs_conv_layer_bias) = {0}".format(np.max(abs_conv_layer_bias)))
    print("np.min(abs_conv_layer_bias) = {0}".format(np.min(abs_conv_layer_bias)))
    print("conv_output = {0}".format(conv_output.shape))
    print("np.std(conv_output) = {0}".format(np.std(conv_output)))
    print("np.max(conv_output) = {0}".format(np.max(conv_output)))
    print("np.min(conv_output) = {0}".format(np.min(conv_output)))
    abs_conv_output = np.absolute(conv_output)
    print("np.max(abs_conv) = {0}".format(np.max(abs_conv_output)))
    print("np.min(abs_conv) = {0}".format(np.min(abs_conv_output)))
    print("")

    # Fixed 2018/06/05: the convolution weight array layout is
    # (kernel height, kernel width, input channels, output channels),
    # so the Python code was revised to transpose it. Thanks to @NORA__0013.
    MAGNIFICATION_CONV = 2 ** (9-1)
    fwrite_conv_weight(conv_layer_weight.transpose(3,2,0,1), 'conv'+str(num)+'_weight.h', 'conv'+str(num)+'_fweight', 'conv'+str(num)+'_weight', MAGNIFICATION_CONV)
    fwrite_bias(conv_layer_bias, 'conv'+str(num)+'_bias.h', 'conv'+str(num)+'_fbias', 'conv'+str(num)+'_bias', MAGNIFICATION_CONV)
    conv_output = conv_layer_model.predict(x_test_limit, verbose=1)
    fwrite_layer_output(conv_output, 'conv'+str(num)+'_output.h', 'conv'+str(num)+'_output')
```
10000/10000 [==============================] - 0s 38us/step
conv2d_1
(3, 3, 1, 20)
(20, 1, 3, 3)
(20,)
(10000, 13, 13, 20)
np.max(conv_layer_weight) = 0.4946177303791046
np.min(conv_layer_weight) = -0.5343244671821594
np.max(abs_conv_layer_weight) = 0.5343244671821594
np.min(abs_conv_layer_weight) = 0.00023465760750696063
np.max(conv_layer_bias) = 0.057732950896024704
np.min(conv_layer_bias) = -0.08072157949209213
np.max(abs_conv_layer_bias) = 0.08072157949209213
np.min(abs_conv_layer_bias) = 2.5581393856555223e-05
conv_output = (10000, 13, 13, 20)
np.std(conv_output) = 0.303703248500824
np.max(conv_output) = 1.8376622200012207
np.min(conv_output) = -1.7203431129455566
np.max(abs_conv) = 1.8376622200012207
np.min(abs_conv) = 4.842877388000488e-08
1/1 [==============================] - 0s 0us/step
10000/10000 [==============================] - 0s 40us/step
conv2d_2
(1, 1, 20, 10)
(10, 20, 1, 1)
(10,)
(10000, 6, 6, 10)
np.max(conv_layer_weight) = 0.913070023059845
np.min(conv_layer_weight) = -0.9718273878097534
np.max(abs_conv_layer_weight) = 0.9718273878097534
np.min(abs_conv_layer_weight) = 0.0080069899559021
np.max(conv_layer_bias) = 0.10839419811964035
np.min(conv_layer_bias) = -0.04171771556138992
np.max(abs_conv_layer_bias) = 0.10839419811964035
np.min(abs_conv_layer_bias) = 0.010898223146796227
conv_output = (10000, 6, 6, 10)
np.std(conv_output) = 0.9102568626403809
np.max(conv_output) = 4.971376895904541
np.min(conv_output) = -2.940309762954712
np.max(abs_conv) = 4.971376895904541
np.min(abs_conv) = 1.1175870895385742e-08
1/1 [==============================] - 0s 0us/step
10000/10000 [==============================] - 0s 44us/step
conv2d_3
(1, 1, 10, 10)
(10, 10, 1, 1)
(10,)
(10000, 6, 6, 10)
np.max(conv_layer_weight) = 0.8710610270500183
np.min(conv_layer_weight) = -0.6806471347808838
np.max(abs_conv_layer_weight) = 0.8710610270500183
np.min(abs_conv_layer_weight) = 0.0008496924419887364
np.max(conv_layer_bias) = 0.1616363525390625
np.min(conv_layer_bias) = -0.11668514460325241
np.max(abs_conv_layer_bias) = 0.1616363525390625
np.min(abs_conv_layer_bias) = 0.0003539775207173079
conv_output = (10000, 6, 6, 10)
np.std(conv_output) = 1.006908655166626
np.max(conv_output) = 4.752679347991943
np.min(conv_output) = -3.4257307052612305
np.max(abs_conv) = 4.752679347991943
np.min(abs_conv) = 2.9848888516426086e-07
1/1 [==============================] - 0s 999us/step
10000/10000 [==============================] - 1s 56us/step
conv2d_4
(3, 3, 10, 10)
(10, 10, 3, 3)
(10,)
(10000, 6, 6, 10)
np.max(conv_layer_weight) = 0.6119384765625
np.min(conv_layer_weight) = -0.6267021894454956
np.max(abs_conv_layer_weight) = 0.6267021894454956
np.min(abs_conv_layer_weight) = 0.000530939141754061
np.max(conv_layer_bias) = 0.09105820208787918
np.min(conv_layer_bias) = -0.13521511852741241
np.max(abs_conv_layer_bias) = 0.13521511852741241
np.min(abs_conv_layer_bias) = 0.000247095333179459
conv_output = (10000, 6, 6, 10)
np.std(conv_output) = 1.4005329608917236
np.max(conv_output) = 7.894415378570557
np.min(conv_output) = -7.611080169677734
np.max(abs_conv) = 7.894415378570557
np.min(abs_conv) = 6.484333425760269e-08
1/1 [==============================] - 0s 0us/step
10000/10000 [==============================] - 1s 63us/step
conv2d_5
(1, 1, 20, 10)
(10, 20, 1, 1)
(10,)
(10000, 6, 6, 10)
np.max(conv_layer_weight) = 1.1239129304885864
np.min(conv_layer_weight) = -0.9796998500823975
np.max(abs_conv_layer_weight) = 1.1239129304885864
np.min(abs_conv_layer_weight) = 0.0004102856619283557
np.max(conv_layer_bias) = 0.3322758972644806
np.min(conv_layer_bias) = -0.23224467039108276
np.max(abs_conv_layer_bias) = 0.3322758972644806
np.min(abs_conv_layer_bias) = 0.021110178902745247
conv_output = (10000, 6, 6, 10)
np.std(conv_output) = 1.4941357374191284
np.max(conv_output) = 9.327534675598145
np.min(conv_output) = -11.037702560424805
np.max(abs_conv) = 11.037702560424805
np.min(abs_conv) = 1.5832483768463135e-07
1/1 [==============================] - 0s 999us/step
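These statistics motivate the fixed-point formats: with MAGNIFICATION_CONV = 2**(9-1), the headers declare ap_fixed<9, 1, AP_TRN, AP_WRAP> values, i.e. 1 integer bit (sign) and 8 fractional bits, covering [-1.0, 0.99609375]. That fits most of the weight ranges printed above, but conv2d_5's maximum of about 1.124 exceeds it and gets saturated. A rough sketch of sizing the integer bits from the printed maxima (`integer_bits_needed` is my own helper, not from the original post):
```python
import numpy as np

def integer_bits_needed(max_abs):
    # smallest signed fixed-point integer width (incl. sign bit) covering max_abs
    return int(np.ceil(np.log2(max_abs))) + 1

# np.max(abs_conv_layer_weight) values printed by the run above
for name, m in [('conv2d_1', 0.534), ('conv2d_2', 0.972), ('conv2d_3', 0.871),
                ('conv2d_4', 0.627), ('conv2d_5', 1.124)]:
    print(name, integer_bits_needed(m))  # conv2d_5 needs 2 integer bits
```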