set_property IOSTANDARD LVCMOS33 [get_ports clk125]
set_property IOSTANDARD LVCMOS33 [get_ports {LD0[0]}]
set_property IOSTANDARD LVCMOS33 [get_ports BTN0]
set_property PACKAGE_PIN K17 [get_ports clk125]
set_property PACKAGE_PIN M14 [get_ports {LD0[0]}]
set_property PACKAGE_PIN K18 [get_ports BTN0]
create_pblock pblock_Blink1
add_cells_to_pblock [get_pblocks pblock_Blink1] [get_cells -quiet [list DFX_test2_i/Blink1]]
resize_pblock [get_pblocks pblock_Blink1] -add {SLICE_X88Y51:SLICE_X97Y99}
resize_pblock [get_pblocks pblock_Blink1] -add {DSP48_X3Y22:DSP48_X3Y39}
resize_pblock [get_pblocks pblock_Blink1] -add {RAMB18_X4Y22:RAMB18_X4Y39}
resize_pblock [get_pblocks pblock_Blink1] -add {RAMB36_X4Y11:RAMB36_X4Y19}
set_property RESET_AFTER_RECONFIG true [get_pblocks pblock_Blink1]
set_property SNAPPING_MODE ROUTING [get_pblocks pblock_Blink1]
// Blink1.cpp
// 2021/07/28 by marsee
//
#include <ap_int.h>
#include <ap_utils.h> // for ap_wait()

#define PS_CLK_FREQ 125000000 // Hz
#define HALF_PS_CLK_FREQ (PS_CLK_FREQ/2) // Hz

int Blink1(volatile ap_uint<1> &out){
#pragma HLS INTERFACE mode=ap_ctrl_none port=return
#pragma HLS INTERFACE mode=ap_none port=out register
    for(int i=0; i<100; i++){ // blink 100 times at a 1 s period
        for(int j=0; j<HALF_PS_CLK_FREQ; j++){ // drive high for half the period
#pragma HLS PIPELINE II=1 rewind
            out = 1;
            ap_wait();
        }
        for(int j=0; j<HALF_PS_CLK_FREQ; j++){ // drive low for the other half
#pragma HLS PIPELINE II=1 rewind
            out = 0;
            ap_wait();
        }
    }
    return(0);
}
// Blink05.cpp
// 2021/07/28 by marsee
//
#include <ap_int.h>
#include <ap_utils.h> // for ap_wait()

#define PS_CLK_FREQ 125000000 // Hz
#define HALF_PS_CLK_FREQ (PS_CLK_FREQ/2) // Hz
#define QUARTER_PS_CLK_FREQ (PS_CLK_FREQ/4) // Hz

int Blink05(volatile ap_uint<1> &out){
#pragma HLS INTERFACE mode=ap_ctrl_none port=return
#pragma HLS INTERFACE mode=ap_none port=out register
    for(int i=0; i<200; i++){ // blink 200 times at a 0.5 s period
        for(int j=0; j<QUARTER_PS_CLK_FREQ; j++){
#pragma HLS PIPELINE II=1 rewind
            out = 1;
            ap_wait();
        }
        for(int j=0; j<QUARTER_PS_CLK_FREQ; j++){
#pragma HLS PIPELINE II=1 rewind
            out = 0;
            ap_wait();
        }
    }
    return(0);
}
// DMA_square_root8.c
// 2021/07/26 by marsee
//
#include <stdio.h>
#include "xil_io.h"
#include "xil_cache.h" // Xil_DCacheFlush(), Xil_DCacheInvalidate()
#include "xdma_square_root8_hw.h"

volatile int data[10] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
volatile int result[10];

#define XPAR_XDMA_SQUARE_ROOT8_0_BASEADDR 0x40010000

int main(){
    int i;

    Xil_Out32((XPAR_XDMA_SQUARE_ROOT8_0_BASEADDR+XDMA_SQUARE_ROOT8_CONTROL_ADDR_DATA_DATA), (u32)&data[0]);
    Xil_Out32((XPAR_XDMA_SQUARE_ROOT8_0_BASEADDR+XDMA_SQUARE_ROOT8_CONTROL_ADDR_RESULT_DATA), (u32)&result[0]);
    Xil_DCacheFlush(); // Flush data[10] cache to memory
    Xil_Out32(XPAR_XDMA_SQUARE_ROOT8_0_BASEADDR,(u32)1); // Start
    while(!(Xil_In32(XPAR_XDMA_SQUARE_ROOT8_0_BASEADDR)&0x2)); // Wait ap_done
    Xil_DCacheInvalidate(); // Invalidate result[10]

    for(i=0; i<10; i++){
        printf("data[%d] = %d, result[%d] = %d\n", i, data[i], i, result[i]);
    }

    return 0;
}
// DMA_pow2_test.c
// 2021/07/25 by marsee
//
#include <stdio.h>
#include "xdma_pow2.h"
#include "xparameters.h"
#include "xil_io.h"
#include "xil_cache.h"  // Xil_DCacheFlush(), Xil_DCacheInvalidate()
#include "xil_printf.h" // inbyte()
#include "xdma_square_root8_hw.h"

volatile int data[10] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
volatile int result[10];

#define XPAR_XDMA_SQUARE_ROOT8_0_BASEADDR 0x40010000

int main(){
    XDma_pow2 XDMA_pow2_ap;
    XDma_pow2_Config *XDMA_pow2_apPtr;
    int i, inbyte_in;

    // Look up the device configuration
    XDMA_pow2_apPtr = XDma_pow2_LookupConfig(0);
    if (!XDMA_pow2_apPtr){
        fprintf(stderr, "XDma_pow2 configuration failed.\n");
        return(-1);
    }

    // Initialize the device
    int Xlap_status = XDma_pow2_CfgInitialize(&XDMA_pow2_ap, XDMA_pow2_apPtr);
    if (Xlap_status != XST_SUCCESS){
        fprintf(stderr, "Could not Initialize XDma_pow2\n");
        return(-1);
    }

    // Run DMA_pow2 (the initially configured reconfigurable module)
    XDma_pow2_Set_data(&XDMA_pow2_ap, (u32)&data[0]);
    XDma_pow2_Set_result(&XDMA_pow2_ap, (u32)&result[0]);
    Xil_DCacheFlush(); // Flush data[10] cache to memory
    XDma_pow2_Start(&XDMA_pow2_ap);
    while(!XDma_pow2_IsDone(&XDMA_pow2_ap)) ;
    Xil_DCacheInvalidate(); // Invalidate result[10]
    for(i=0; i<10; i++){
        printf("data[%d] = %d, result[%d] = %d\n", i, data[i], i, result[i]);
    }

    printf("Configure the partial bit file in Vivado and then press any key.");
    fflush(stdout);
    inbyte_in = inbyte();
    printf("\n"); fflush(stdout);

    // Run DMA_square_root8 (the module loaded by partial reconfiguration)
    Xil_Out32((XPAR_XDMA_SQUARE_ROOT8_0_BASEADDR+XDMA_SQUARE_ROOT8_CONTROL_ADDR_DATA_DATA), (u32)&data[0]);
    Xil_Out32((XPAR_XDMA_SQUARE_ROOT8_0_BASEADDR+XDMA_SQUARE_ROOT8_CONTROL_ADDR_RESULT_DATA), (u32)&result[0]);
    Xil_DCacheFlush(); // Flush data[10] cache to memory
    Xil_Out32(XPAR_XDMA_SQUARE_ROOT8_0_BASEADDR,(u32)1); // Start
    while(!(Xil_In32(XPAR_XDMA_SQUARE_ROOT8_0_BASEADDR)&0x2)); // Wait ap_done
    Xil_DCacheInvalidate(); // Invalidate result[10]
    for(i=0; i<10; i++){
        printf("data[%d] = %d, result[%d] = %d\n", i, data[i], i, result[i]);
    }

    return 0;
}
// DMA_pow2_test.c
// 2021/07/25 by marsee
//
#include <stdio.h>
#include "xdma_pow2.h"
#include "xparameters.h"
#include "xil_cache.h" // Xil_DCacheFlush(), Xil_DCacheInvalidate()

volatile int data[10] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
volatile int result[10];

int main(){
    XDma_pow2 XDMA_pow2_ap;
    XDma_pow2_Config *XDMA_pow2_apPtr;
    int i;

    // Look up the device configuration
    XDMA_pow2_apPtr = XDma_pow2_LookupConfig(0);
    if (!XDMA_pow2_apPtr){
        fprintf(stderr, "XDma_pow2 configuration failed.\n");
        return(-1);
    }

    // Initialize the device
    int Xlap_status = XDma_pow2_CfgInitialize(&XDMA_pow2_ap, XDMA_pow2_apPtr);
    if (Xlap_status != XST_SUCCESS){
        fprintf(stderr, "Could not Initialize XDma_pow2\n");
        return(-1);
    }

    XDma_pow2_Set_data(&XDMA_pow2_ap, (u32)&data[0]);
    XDma_pow2_Set_result(&XDMA_pow2_ap, (u32)&result[0]);
    Xil_DCacheFlush(); // Flush data[10] cache to memory
    XDma_pow2_Start(&XDMA_pow2_ap);
    while(!XDma_pow2_IsDone(&XDMA_pow2_ap)) ;
    Xil_DCacheInvalidate(); // Invalidate result[10]

    for(i=0; i<10; i++){
        printf("data[%d] = %d, result[%d] = %d\n", i, data[i], i, result[i]);
    }

    return 0;
}
create_pblock pblock_DMA_pow2
add_cells_to_pblock [get_pblocks pblock_DMA_pow2] [get_cells -quiet [list DFX_test_i/DMA_pow2]]
resize_pblock [get_pblocks pblock_DMA_pow2] -add {SLICE_X88Y51:SLICE_X103Y99}
resize_pblock [get_pblocks pblock_DMA_pow2] -add {DSP48_X3Y22:DSP48_X4Y39}
resize_pblock [get_pblocks pblock_DMA_pow2] -add {RAMB18_X4Y22:RAMB18_X4Y39}
resize_pblock [get_pblocks pblock_DMA_pow2] -add {RAMB36_X4Y11:RAMB36_X4Y19}
set_property RESET_AFTER_RECONFIG true [get_pblocks pblock_DMA_pow2]
set_property SNAPPING_MODE ROUTING [get_pblocks pblock_DMA_pow2]
// DMA_square_root8.cpp
// 2021/07/01 by marsee
//
#include <stdint.h>

int DMA_square_root8(volatile int32_t *data, volatile int32_t *result){
#pragma HLS INTERFACE mode=s_axilite port=return
#pragma HLS INTERFACE mode=m_axi depth=10 port=result offset=slave
#pragma HLS INTERFACE mode=m_axi depth=10 port=data offset=slave
    int32_t temp, data_temp;
    int32_t square;

    for(int j=0; j<10; j++){
#pragma HLS PIPELINE II=1
        temp = 0;
        data_temp = data[j];
        // 8-bit integer square root: try setting each result bit from the MSB down
        for(int i=7; i>=0; --i){
            temp += (1 << i);
            square = temp * temp;
            if(square > data_temp){ // overshoot, so clear the bit again
                temp -= (1 << i);
            }
        }
        result[j] = temp;
    }
    return(0);
}
// DMA_square_root8_tb.cpp
// 2021/07/01 by marsee
#include <stdio.h>
#include <stdint.h>

int DMA_square_root8(volatile int32_t *data, volatile int32_t *result);

int main(){
    int32_t data[10] = {8, 10, 12, 14, 16, 18, 20, 22, 24, 26};
    int32_t result[10];

    DMA_square_root8(data, result);

    for(int i=0; i<10; i++){
        printf("data[%d] = %d, result[%d] = %d \n", i, data[i], i, result[i]);
    }
    return(0);
}
// DMA_pow2.cpp
// 2021/07/18 by marsee
//
#include <stdint.h>

int DMA_pow2(volatile int32_t *data, volatile int32_t *result){
#pragma HLS INTERFACE s_axilite port=result
#pragma HLS INTERFACE s_axilite port=data
#pragma HLS INTERFACE m_axi depth=10 port=result offset=slave
#pragma HLS INTERFACE m_axi depth=10 port=data offset=slave
#pragma HLS INTERFACE mode=s_axilite port=return
    for (int i=0; i<10; i++){
        int temp = data[i];
        result[i] = temp * temp; // square each element
    }
    return(0);
}
// DMA_pow2_tb.cpp
// 2021/07/ by marsee
//
#include <iostream>
#include <cstdint> // int32_t

int DMA_pow2(volatile int32_t *data, volatile int32_t *result);

int main(){
    int32_t data[10] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
    int32_t result[10];

    DMA_pow2(data, result);

    for(int i=0; i<10; i++){
        std::cout << "data[" << i << "]= " << data[i] <<
            ", result[" << i << "] = " <<
            result[i] << std::endl;
    }
    return 0;
}
As shown in this figure, the function implemented in Reconfig Block A can be changed by downloading one of the partial BIT files A1.bit, A2.bit, A3.bit, or A4.bit. The logic in an FPGA design falls into two types: reconfigurable logic and static logic. The gray areas of the FPGA block represent static logic, and the portion labeled Reconfig Block "A" represents reconfigurable logic. The static logic keeps operating and is unaffected by the loading of a partial BIT file. The reconfigurable logic is replaced by the contents of the partial BIT file.
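In practice, a partial BIT file can be downloaded from the Vivado Hardware Manager Tcl console. A minimal sketch, assuming a connected target and using a placeholder file name for the partial bitstream:

open_hw_manager
connect_hw_server
open_hw_target
# point the device at the partial BIT file (placeholder name)
set_property PROGRAM.FILE {pblock_Blink1_partial.bit} [current_hw_device]
program_hw_devices [current_hw_device]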
Design Considerations
Design Requirements and Guidelines
• Dynamic Function eXchange requires Vivado 2013.3 or later.
• Defining a reconfigurable region requires a floorplan for each element type.
• Standard timing constraints are supported, and additional timing budgeting capabilities are available as needed.
• Dedicated design rule checks (DRCs) are provided to help complete the design.
• A DFX design must consider not only initiating partial reconfiguration but also delivering partial BIT files, either within the FPGA or as part of the system design.
• The Vivado Design Suite supports the Dynamic Function eXchange (DFX) Controller IP. This customizable IP manages the core tasks of partial reconfiguration on Xilinx devices.
• An RP must include all pins used by the various reconfigurable modules (RMs) implemented in that partition. If an RM uses different inputs or outputs than the other RMs, the resulting RM can end up with inputs or outputs that are unconnected inside that RM. In this situation, the tools insert LUT1 buffers on all unused inputs and outputs in the RM. Output LUT1s are tied to a constant value, and this constant can be controlled through the HD.PARTPIN_TIEOFF property on the unused output pin. For details on this property, see Black Boxes.
• Bitstream generation supports black boxes. For details on tying ports to constant values, see Black Boxes.
• For user reset signals, determine whether the logic in the RM is level-sensitive or edge-sensitive. If the reset circuitry is level-sensitive (as is the case for some IP such as FIFOs), do not apply the RM reset until reconfiguration is complete.
• DFX designs are compatible with the Xilinx Isolation Design Flow (IDF) for Zynq MPSoC devices. For details on this solution, see Isolation Design Flow for UltraScale+ FPGAs and Zynq UltraScale+ MPSoCs (XAPP1335).
1. Synthesize the static module and the reconfigurable modules (RMs) separately. For details, see Synthesis.
2. Create physical constraints (Pblocks) to define the reconfigurable regions. For details, see Floorplanning for Reconfigurable Regions.
3. Set the HD.RECONFIGURABLE property on each reconfigurable partition (RP). For details, see Defining a Module as Reconfigurable.
4. Implement the complete design (the static module plus one RM per RP) in context. For details, see Implementation.
5. Save a design checkpoint of the fully routed design. For details, see Implementation.
6. Remove the RMs from this design and save a static-only design checkpoint. For details, see Implementation.
7. Lock the placement and routing of the static portion. For details, see Preserving Implementation Data.
8. Add a new RM to the static design, implement the new configuration, and save a checkpoint of the fully routed design.
9. Repeat step 8 until all RMs are implemented.
10. Run the verification utility (pr_verify) on all configurations. For details, see Verifying Configurations.
11. Generate a bitstream for each configuration. For details, see Generating Bitstreams. (A Tcl sketch of steps 3 through 11 is shown below.)
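Concretely, in the non-project Tcl flow, steps 3 through 11 look roughly like the following. This is a minimal sketch that reuses the cell name DFX_test2_i/Blink1 from this design and assumes placeholder checkpoint names; it is not the complete script.

# steps 3-5: first configuration (static + Blink1)
open_checkpoint static_synth.dcp
read_checkpoint -cell DFX_test2_i/Blink1 Blink1_synth.dcp
set_property HD.RECONFIGURABLE true [get_cells DFX_test2_i/Blink1]
opt_design
place_design
route_design
write_checkpoint -force config1_routed.dcp
# steps 6-7: remove the RM and lock the static implementation
update_design -cell DFX_test2_i/Blink1 -black_box
lock_design -level routing
write_checkpoint -force static_routed.dcp
# steps 8-9: second configuration (locked static + Blink05)
read_checkpoint -cell DFX_test2_i/Blink1 Blink05_synth.dcp
opt_design
place_design
route_design
write_checkpoint -force config2_routed.dcp
# steps 10-11: verify the configurations, then write full and partial bitstreams
pr_verify config1_routed.dcp config2_routed.dcp
open_checkpoint config1_routed.dcp
write_bitstream -force config1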
dmesg -D                     # suppress kernel messages on the console
cd dpu_sw_optimize/zynqmp
./zynqmp_dpu_optimize.sh     # run the Vitis AI DPU software optimization script
export DISPLAY=:0.0          # use the board's local X display
xrandr --output DP-1 --mode 640x480   # set the DisplayPort monitor resolution
The number of images generated for use in calibration is set by the CALIB_IMAGES environment variable in the 0_setenv.sh script. Care must be taken that the number of calibration iterations (--calib_iter) multiplied by the calibration batch size (set in the image_input_fn.py script) does not exceed the total number of available images (CALIB_IMAGES).
When quantization completes, the quantized deployment model (deploy_model.pb) and the evaluation model (quantize_eval_model.pb) are created in the ./files/build/quantize folder.
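For reference, the 4_quant.sh script presumably boils down to a vai_q_tensorflow call along the following lines. This is a hedged sketch: the input_fn entry point (image_input_fn.calib_input) and the batch size are assumptions, while the node names and paths are taken from the logs in this article. Note the arithmetic behind the constraint above: if the batch size in image_input_fn.py is 100, then 10 calibration iterations consume 10 x 100 = 1000 images, exactly the 1000-image total shown in the log below.

vai_q_tensorflow quantize \
    --input_frozen_graph ./build/freeze/frozen_graph.pb \
    --input_fn           image_input_fn.calib_input \
    --input_nodes        conv2d_1_input \
    --input_shapes       ?,224,224,3 \
    --output_nodes       activation_2/Softmax \
    --calib_iter         10 \
    --output_dir         ./build/quantize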
(vitis-ai-tensorflow) Vitis-AI /workspace/dobble_classification_on_ultra96v2/files > source ./4_quant.sh
-----------------------------------------
QUANTIZE STARTED..
-----------------------------------------
Making calibration images..
Using TensorFlow backend.
Command line options:
--image_dir : ./build/quantize/images
--calib_list : calib_list.txt
--dataset : train
--max_images : 1000
Calib images generated
Vai_q_tensorflow v1.2.0 build for Tensorflow 1.15.2 git version
heads/1.3-0-gc680f744
100% (10 of 10) |########################| Elapsed Time: 0:04:41 Time: 0:04:41
INFO: Checking Float Graph...
INFO: Float Graph Check Done.
INFO: Calibrating for 10 iterations...
INFO: Calibration Done.
INFO: Generating Deploy Model...
INFO: Deploy Model Generated.
********************* Quantization Summary *********************
INFO: Output:
quantize_eval_model: ./build/quantize/quantize_eval_model.pb
deploy_model: ./build/quantize/deploy_model.pb
-----------------------------------------
QUANTIZE COMPLETED
-----------------------------------------
The DPU IP is a soft-core IP whose sole function is to accelerate the execution of convolutional neural networks. It is a co-processor with its own instruction set. These instructions are passed to the DPU in the Xmodel file format.
The Vitis AI compiler converts the quantized deployment model into a set of micro-instructions, optimizing them where possible, and then writes them out to an Xmodel file.
The generated instructions are specific to a particular configuration of the DPU. The DPU's parameters are contained in an arch.json file, which must be created for each target board. For details, see the Vitis AI User Guide.
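The compile step below presumably wraps a vai_c_tensorflow invocation roughly like this sketch. The arch.json path is a placeholder for this board's B2304 DPU configuration; the input model and output names match the compile log that follows.

vai_c_tensorflow \
    --frozen_pb  ./build/quantize/quantize_eval_model.pb \
    --arch       <path_to>/arch.json \
    --output_dir ./build/compile_B2304_LR \
    --net_name   dobble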
(vitis-ai-tensorflow) Vitis-AI /workspace/dobble_classification_on_ultra96v2/files > source ./6_compile.sh
-----------------------------------------
COMPILE STARTED..
-----------------------------------------
[INFO] parse raw model : 0%| | 0/33 [00:00<?, ?it/s]
[INFO] parse raw model :100%|██████████| 33/33 [00:00<00:00, 18611.27it/s]
[INFO] infer shape (NHWC) : 0%| | 0/38 [00:00<?, ?it/s]
[INFO] infer shape (NHWC) :100%|██████████| 38/38 [00:00<00:00, 14147.31it/s]
[INFO] infer shape (NHWC) : 0%| | 0/35 [00:00<?, ?it/s]
[INFO] infer shape (NHWC) :100%|██████████| 35/35 [00:00<00:00, 5574.99it/s]
[INFO] generate xmodel : 0%| | 0/35 [00:00<?, ?it/s]
[INFO] generate xmodel : 9%|▊ | 3/35 [00:00<00:05, 5.49it/s]
[INFO] generate xmodel :100%|██████████| 35/35 [00:00<00:00, 63.60it/s]
[INFO] Namespace(inputs_shape=None, layout='NHWC', model_files=['./build/quantize/quantize_eval_model.pb'], model_type='tensorflow', out_filename='./build/compile_B2304_LR/dobble_org.xmodel', proto=None)
[INFO] tensorflow model: build/quantize/quantize_eval_model.pb
[INFO] generate xmodel: /workspace/dobble_classification_on_ultra96v2/files/build/compile_B2304_LR/dobble_org.xmodel
[UNILOG][INFO] The compiler log will be dumped at "/tmp/vitis-ai-user/log/xcompiler-20210713-134253-298"
[UNILOG][INFO] Target architecture: DPUCZDX8G_ISA0_B2304_MAX_BG2
[UNILOG][INFO] Compile mode: dpu
[UNILOG][INFO] Debug mode: function
[UNILOG][INFO] Target architecture: DPUCZDX8G_ISA0_B2304_MAX_BG2
[UNILOG][INFO] Graph name: quantize_eval_model, with op num: 55
[UNILOG][INFO] Begin to compile...
[UNILOG][INFO] Total device subgraph number 3, DPU subgraph number 1
[UNILOG][INFO] Compile done.
[UNILOG][INFO] The meta json is saved to "/workspace/dobble_classification_on_ultra96v2/files/./build/compile_B2304_LR/meta.json"
[UNILOG][INFO] The compiled xmodel is saved to "/workspace/dobble_classification_on_ultra96v2/files/./build/compile_B2304_LR/dobble.xmodel"
[UNILOG][INFO] The compiled xmodel's md5sum is 5cc2ffa0cc1e1c45e439fdc62268f1ae, and been saved to "/workspace/dobble_classification_on_ultra96v2/files/./build/compile_B2304_LR/md5sum.txt"
**************************************************
* VITIS_AI Compilation - Xilinx Inc.
**************************************************
-----------------------------------------
COMPILE COMPLETED
-----------------------------------------
(vitis-ai-tensorflow) Vitis-AI /workspace/dobble_classification_on_ultra96v2/files > source ./3_eval_frozen.sh
-----------------------------------------
EVALUATING THE FROZEN GRAPH..
-----------------------------------------
Using TensorFlow backend.
100% (12 of 12) |########################| Elapsed Time: 0:00:10 Time: 0:00:10
------------------------------------
TensorFlow version : 1.15.2
3.6.12 |Anaconda, Inc.| (default, Sep 8 2020, 23:10:56)
[GCC 7.3.0]
------------------------------------
Command line options:
--graph : ./build/freeze/frozen_graph.pb
--input_node : conv2d_1_input
--output_node: activation_2/Softmax
--batchsize : 100
--gpu : 0
------------------------------------
TEST DATA SETS:
Shape of test data (X) is : (1267, 224, 224, 3)
Shape of test data (y) is : (1267, 58)
Graph accuracy with validation dataset: 0.9942
-----------------------------------------
EVALUATION COMPLETED
-----------------------------------------
#input_video = 0 # laptop camera
input_video = 1 # USB webcam
Rotation - has already been done on line 177
Horizontal and vertical flip - not good for our dataset (also, currently doing on line 179?)
featurewise_center
samplewise_center
featurewise_std_normalization
zca_epsilon
zca_whitening
shear_range
channel_shift_range
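These notes correspond to parameters of the Keras ImageDataGenerator. A minimal sketch of what such an augmentation setup might look like; the parameter values here are illustrative assumptions, not the script's actual settings:

from keras.preprocessing.image import ImageDataGenerator

datagen = ImageDataGenerator(
    rotation_range=15,             # rotation (already applied in the script)
    horizontal_flip=False,         # flips judged not good for this dataset
    vertical_flip=False,
    featurewise_center=False,      # remaining candidates from the list above,
    samplewise_center=False,       # all left at their Keras defaults here
    featurewise_std_normalization=False,
    zca_whitening=False,
    zca_epsilon=1e-6,
    shear_range=0.0,
    channel_shift_range=0.0,
)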
(base) masaaki@masaaki-H110M4-M01:/media/masaaki/Ubuntu_Disk/AI/dobble_buddy$ python dobble_tutorial.py
PARAMETERS:
Normalized shape of images : 224 x 224
Card Decks : 10 ['dobble_deck01_cards_57-augmented', 'dobble_deck02_cards_55', 'dobble_deck03_cards_55-augmented', 'dobble_deck04_cards_55', 'dobble_deck05_cards_55-augmented', 'dobble_deck06_cards_55', 'dobble_deck07_cards_55-augmented', 'dobble_deck08_cards_55', 'dobble_deck09_cards_55-augmented', 'dobble_deck10_cards_55']
TRAINING/VALIDATION DATA SETS:
Shape of training data (X) is : (22465, 224, 224, 3)
Shape of training data (y) is : (22465,)
Shape of validation data (X) is : (5617, 224, 224, 3)
Shape of validation data (y) is : (5617,)
2021-07-10 17:53:35.693689: I tensorflow/core/platform/cpu_feature_guard.cc:143] Your CPU supports instructions that this TensorFlow binary was not compiled to use: SSE4.1 SSE4.2 AVX AVX2 FMA
2021-07-10 17:53:36.012876: I tensorflow/core/platform/profile_utils/cpu_utils.cc:102] CPU Frequency: 3199980000 Hz
2021-07-10 17:53:36.057387: I tensorflow/compiler/xla/service/service.cc:168] XLA service 0x559761433360 initialized for platform Host (this does not guarantee that XLA will be used). Devices:
2021-07-10 17:53:36.057436: I tensorflow/compiler/xla/service/service.cc:176] StreamExecutor device (0): Host, Default Version
2021-07-10 17:53:36.281905: I tensorflow/core/common_runtime/process_util.cc:147] Creating new thread pool with default inter op setting: 2. Tune using inter_op_parallelism_threads for best performance.
MODEL SUMMARY:
Model: "sequential"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
conv2d (Conv2D) (None, 222, 222, 32) 896
_________________________________________________________________
max_pooling2d (MaxPooling2D) (None, 111, 111, 32) 0
_________________________________________________________________
conv2d_1 (Conv2D) (None, 109, 109, 64) 18496
_________________________________________________________________
max_pooling2d_1 (MaxPooling2 (None, 54, 54, 64) 0
_________________________________________________________________
conv2d_2 (Conv2D) (None, 52, 52, 128) 73856
_________________________________________________________________
max_pooling2d_2 (MaxPooling2 (None, 26, 26, 128) 0
_________________________________________________________________
conv2d_3 (Conv2D) (None, 24, 24, 128) 147584
_________________________________________________________________
max_pooling2d_3 (MaxPooling2 (None, 12, 12, 128) 0
_________________________________________________________________
dropout (Dropout) (None, 12, 12, 128) 0
_________________________________________________________________
flatten (Flatten) (None, 18432) 0
_________________________________________________________________
dense (Dense) (None, 512) 9437696
_________________________________________________________________
activation (Activation) (None, 512) 0
_________________________________________________________________
dense_1 (Dense) (None, 58) 29754
_________________________________________________________________
activation_1 (Activation) (None, 58) 0
=================================================================
Total params: 9,708,282
Trainable params: 9,708,282
Non-trainable params: 0
_________________________________________________________________
TRAIN MODEL:
Epoch 1/59
702/702 [==============================] - 649s 925ms/step - loss: 3.3797 - val_loss: 2.4933
Epoch 2/59
702/702 [==============================] - 619s 881ms/step - loss: 1.8767 - val_loss: 1.2642
Epoch 3/59
702/702 [==============================] - 620s 883ms/step - loss: 0.9784 - val_loss: 0.6677
Epoch 4/59
702/702 [==============================] - 623s 888ms/step - loss: 0.5961 - val_loss: 0.4000
Epoch 5/59
702/702 [==============================] - 619s 882ms/step - loss: 0.5095 - val_loss: 0.5559
Epoch 6/59
702/702 [==============================] - 621s 884ms/step - loss: 0.4237 - val_loss: 0.2355
Epoch 7/59
702/702 [==============================] - 618s 880ms/step - loss: 0.2760 - val_loss: 0.2104
Epoch 8/59
702/702 [==============================] - 618s 880ms/step - loss: 0.2350 - val_loss: 0.1733
Epoch 9/59
702/702 [==============================] - 617s 879ms/step - loss: 0.2408 - val_loss: 0.1829
Epoch 10/59
702/702 [==============================] - 614s 875ms/step - loss: 0.2283 - val_loss: 0.3288
Epoch 11/59
702/702 [==============================] - 613s 874ms/step - loss: 0.2097 - val_loss: 0.1317
Epoch 12/59
702/702 [==============================] - 616s 877ms/step - loss: 0.1609 - val_loss: 0.1008
Epoch 13/59
702/702 [==============================] - 615s 876ms/step - loss: 0.1632 - val_loss: 0.1554
Epoch 14/59
702/702 [==============================] - 627s 894ms/step - loss: 0.1713 - val_loss: 0.0993
Epoch 15/59
702/702 [==============================] - 622s 886ms/step - loss: 0.1276 - val_loss: 0.2646
Epoch 16/59
702/702 [==============================] - 617s 878ms/step - loss: 0.1852 - val_loss: 0.1097
Epoch 17/59
702/702 [==============================] - 619s 882ms/step - loss: 0.1387 - val_loss: 0.1637
Epoch 18/59
702/702 [==============================] - 617s 878ms/step - loss: 0.1229 - val_loss: 0.1576
Epoch 19/59
702/702 [==============================] - 616s 877ms/step - loss: 0.1321 - val_loss: 0.1307
Epoch 20/59
702/702 [==============================] - 617s 879ms/step - loss: 0.1246 - val_loss: 0.0790
Epoch 21/59
702/702 [==============================] - 614s 875ms/step - loss: 0.1165 - val_loss: 0.0906
Epoch 22/59
702/702 [==============================] - 614s 875ms/step - loss: 0.1205 - val_loss: 0.1210
Epoch 23/59
702/702 [==============================] - 614s 875ms/step - loss: 0.1106 - val_loss: 0.0839
Epoch 24/59
702/702 [==============================] - 614s 875ms/step - loss: 0.0977 - val_loss: 0.0636
Epoch 25/59
702/702 [==============================] - 616s 877ms/step - loss: 0.1171 - val_loss: 0.1314
Epoch 26/59
702/702 [==============================] - 634s 903ms/step - loss: 0.1127 - val_loss: 0.0733
Epoch 27/59
702/702 [==============================] - 635s 904ms/step - loss: 0.1047 - val_loss: 0.0715
Epoch 28/59
702/702 [==============================] - 617s 880ms/step - loss: 0.1182 - val_loss: 0.0712
Epoch 29/59
702/702 [==============================] - 635s 904ms/step - loss: 0.0948 - val_loss: 0.0857
Epoch 30/59
702/702 [==============================] - 618s 881ms/step - loss: 0.1238 - val_loss: 0.0927
Epoch 31/59
702/702 [==============================] - 617s 878ms/step - loss: 0.0966 - val_loss: 0.0701
Epoch 32/59
702/702 [==============================] - 617s 879ms/step - loss: 0.0970 - val_loss: 0.0876
Epoch 33/59
702/702 [==============================] - 617s 880ms/step - loss: 0.1322 - val_loss: 0.0762
Epoch 34/59
702/702 [==============================] - 617s 878ms/step - loss: 0.0835 - val_loss: 0.0815
Epoch 35/59
702/702 [==============================] - 617s 879ms/step - loss: 0.1001 - val_loss: 0.0716
Epoch 36/59
702/702 [==============================] - 616s 878ms/step - loss: 0.1000 - val_loss: 0.0888
Epoch 37/59
702/702 [==============================] - 618s 880ms/step - loss: 0.1183 - val_loss: 0.0640
Epoch 38/59
702/702 [==============================] - 618s 880ms/step - loss: 0.1058 - val_loss: 0.0871
Epoch 39/59
702/702 [==============================] - 617s 878ms/step - loss: 0.1179 - val_loss: 0.0759
Epoch 40/59
702/702 [==============================] - 617s 879ms/step - loss: 0.1015 - val_loss: 0.1003
Epoch 41/59
702/702 [==============================] - 617s 879ms/step - loss: 0.1082 - val_loss: 0.0679
Epoch 42/59
702/702 [==============================] - 617s 880ms/step - loss: 0.0968 - val_loss: 0.0693
Epoch 43/59
702/702 [==============================] - 616s 878ms/step - loss: 0.1161 - val_loss: 0.1391
Epoch 44/59
702/702 [==============================] - 616s 877ms/step - loss: 0.0920 - val_loss: 0.0960
Epoch 45/59
702/702 [==============================] - 618s 880ms/step - loss: 0.0870 - val_loss: 0.1002
Epoch 46/59
702/702 [==============================] - 616s 878ms/step - loss: 0.1016 - val_loss: 0.1532
Epoch 47/59
702/702 [==============================] - 614s 874ms/step - loss: 0.0802 - val_loss: 0.0577
Epoch 48/59
702/702 [==============================] - 615s 876ms/step - loss: 0.0731 - val_loss: 0.0723
Epoch 49/59
702/702 [==============================] - 614s 875ms/step - loss: 0.0901 - val_loss: 0.0977
Epoch 50/59
702/702 [==============================] - 616s 877ms/step - loss: 0.0947 - val_loss: 0.1096
Epoch 51/59
702/702 [==============================] - 615s 876ms/step - loss: 0.0747 - val_loss: 0.0812
Epoch 52/59
702/702 [==============================] - 615s 876ms/step - loss: 0.0922 - val_loss: 0.1069
Epoch 53/59
702/702 [==============================] - 615s 876ms/step - loss: 0.1031 - val_loss: 0.0655
Epoch 54/59
702/702 [==============================] - 615s 877ms/step - loss: 0.0882 - val_loss: 0.0971
Epoch 55/59
702/702 [==============================] - 615s 876ms/step - loss: 0.1150 - val_loss: 0.0693
Epoch 56/59
702/702 [==============================] - 614s 875ms/step - loss: 0.0963 - val_loss: 0.0707
Epoch 57/59
702/702 [==============================] - 615s 877ms/step - loss: 0.0993 - val_loss: 0.0638
Epoch 58/59
702/702 [==============================] - 616s 877ms/step - loss: 0.1011 - val_loss: 0.0984
Epoch 59/59
702/702 [==============================] - 615s 876ms/step - loss: 0.1028 - val_loss: 0.1870
Model: "sequential"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
conv2d (Conv2D) (None, 222, 222, 32) 896
_________________________________________________________________
max_pooling2d (MaxPooling2D) (None, 111, 111, 32) 0
_________________________________________________________________
conv2d_1 (Conv2D) (None, 109, 109, 64) 18496
_________________________________________________________________
max_pooling2d_1 (MaxPooling2 (None, 54, 54, 64) 0
_________________________________________________________________
conv2d_2 (Conv2D) (None, 52, 52, 128) 73856
_________________________________________________________________
max_pooling2d_2 (MaxPooling2 (None, 26, 26, 128) 0
_________________________________________________________________
conv2d_3 (Conv2D) (None, 24, 24, 128) 147584
_________________________________________________________________
max_pooling2d_3 (MaxPooling2 (None, 12, 12, 128) 0
_________________________________________________________________
dropout (Dropout) (None, 12, 12, 128) 0
_________________________________________________________________
flatten (Flatten) (None, 18432) 0
_________________________________________________________________
dense (Dense) (None, 512) 9437696
_________________________________________________________________
activation (Activation) (None, 512) 0
_________________________________________________________________
dense_1 (Dense) (None, 58) 29754
_________________________________________________________________
activation_1 (Activation) (None, 58) 0
=================================================================
Total params: 9,708,282
Trainable params: 9,708,282
Non-trainable params: 0
_________________________________________________________________
Shape of test data (X) is : (12, 224, 224, 3)
Shape of test data (y) is : (12, 58)
EVALUATE MODEL:
1/1 [==============================] - 0s 266us/step - loss: 23.8451
./dobble_dataset/dobble_test01_cards : Test Accuracy = 0.9166666666666666
(base) masaaki@masaaki-H110M4-M01:/media/masaaki/Ubuntu_Disk/AI/dobble_buddy$ python dobble_test.py
2021-07-11 05:01:50.044420: I tensorflow/core/platform/cpu_feature_guard.cc:143] Your CPU supports instructions that this TensorFlow binary was not compiled to use: SSE4.1 SSE4.2 AVX AVX2 FMA
2021-07-11 05:01:50.136887: I tensorflow/core/platform/profile_utils/cpu_utils.cc:102] CPU Frequency: 3199980000 Hz
2021-07-11 05:01:50.137389: I tensorflow/compiler/xla/service/service.cc:168] XLA service 0x563dae8954c0 initialized for platform Host (this does not guarantee that XLA will be used). Devices:
2021-07-11 05:01:50.137433: I tensorflow/compiler/xla/service/service.cc:176] StreamExecutor device (0): Host, Default Version
2021-07-11 05:01:50.138320: I tensorflow/core/common_runtime/process_util.cc:147] Creating new thread pool with default inter op setting: 2. Tune using inter_op_parallelism_threads for best performance.
Shape of test data (X) is : (1267, 224, 224, 3)
Shape of test data (y) is : (1267, 58)
EVALUATE MODEL:
40/40 [==============================] - 9s 221ms/step - loss: 4.3736
./dobble_dataset/dobble_test02_cards : Test Accuracy = 0.6803472770323599
0.50% accuracy bound: 0.6716 - 0.6891
0.80% accuracy bound: 0.6636 - 0.6971
0.90% accuracy bound: 0.6589 - 0.7018
0.95% accuracy bound: 0.6547 - 0.7060
0.99% accuracy bound: 0.6465 - 0.7141
Layer (type) Output Shape Param #
=================================================================
conv2d (Conv2D) (None, 222, 222, 32) 896
_________________________________________________________________
max_pooling2d (MaxPooling2D) (None, 111, 111, 32) 0
_________________________________________________________________
conv2d_1 (Conv2D) (None, 109, 109, 64) 18496
_________________________________________________________________
max_pooling2d_1 (MaxPooling2 (None, 54, 54, 64) 0
_________________________________________________________________
conv2d_2 (Conv2D) (None, 52, 52, 128) 73856
_________________________________________________________________
max_pooling2d_2 (MaxPooling2 (None, 26, 26, 128) 0
_________________________________________________________________
conv2d_3 (Conv2D) (None, 24, 24, 128) 147584
_________________________________________________________________
max_pooling2d_3 (MaxPooling2 (None, 12, 12, 128) 0
_________________________________________________________________
dropout (Dropout) (None, 12, 12, 128) 0
_________________________________________________________________
flatten (Flatten) (None, 18432) 0
_________________________________________________________________
dense (Dense) (None, 512) 9437696
_________________________________________________________________
activation (Activation) (None, 512) 0
_________________________________________________________________
dense_1 (Dense) (None, 58) 29754
_________________________________________________________________
activation_1 (Activation) (None, 58) 0
=================================================================
Total params: 9,708,282
Trainable params: 9,708,282
Non-trainable params: 0
_________________________________________________________________
(base) masaaki@masaaki-H110M4-M01:/media/masaaki/Ubuntu_Disk/AI/dobble_buddy$ python dobble_tutorial.py
PARAMETERS:
Normalized shape of images : 224 x 224
Card Decks : 10 ['dobble_deck01_cards_57', 'dobble_deck02_cards_55', 'dobble_deck03_cards_55', 'dobble_deck04_cards_55', 'dobble_deck05_cards_55', 'dobble_deck06_cards_55', 'dobble_deck07_cards_55', 'dobble_deck08_cards_55', 'dobble_deck09_cards_55', 'dobble_deck10_cards_55']
TRAINING/VALIDATION DATA SETS:
Shape of training data (X) is : (449, 224, 224, 3)
Shape of training data (y) is : (449,)
Shape of validation data (X) is : (113, 224, 224, 3)
Shape of validation data (y) is : (113,)
2021-07-09 04:58:02.724720: I tensorflow/core/platform/cpu_feature_guard.cc:143] Your CPU supports instructions that this TensorFlow binary was not compiled to use: SSE4.1 SSE4.2 AVX AVX2 FMA
2021-07-09 04:58:02.746653: I tensorflow/core/platform/profile_utils/cpu_utils.cc:102] CPU Frequency: 3199980000 Hz
2021-07-09 04:58:02.746905: I tensorflow/compiler/xla/service/service.cc:168] XLA service 0x555b709c3a20 initialized for platform Host (this does not guarantee that XLA will be used). Devices:
2021-07-09 04:58:02.746924: I tensorflow/compiler/xla/service/service.cc:176] StreamExecutor device (0): Host, Default Version
2021-07-09 04:58:02.746997: I tensorflow/core/common_runtime/process_util.cc:147] Creating new thread pool with default inter op setting: 2. Tune using inter_op_parallelism_threads for best performance.
MODEL SUMMARY:
Model: "sequential"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
conv2d (Conv2D) (None, 222, 222, 32) 896
_________________________________________________________________
max_pooling2d (MaxPooling2D) (None, 111, 111, 32) 0
_________________________________________________________________
conv2d_1 (Conv2D) (None, 109, 109, 64) 18496
_________________________________________________________________
max_pooling2d_1 (MaxPooling2 (None, 54, 54, 64) 0
_________________________________________________________________
conv2d_2 (Conv2D) (None, 52, 52, 128) 73856
_________________________________________________________________
max_pooling2d_2 (MaxPooling2 (None, 26, 26, 128) 0
_________________________________________________________________
conv2d_3 (Conv2D) (None, 24, 24, 128) 147584
_________________________________________________________________
max_pooling2d_3 (MaxPooling2 (None, 12, 12, 128) 0
_________________________________________________________________
dropout (Dropout) (None, 12, 12, 128) 0
_________________________________________________________________
flatten (Flatten) (None, 18432) 0
_________________________________________________________________
dense (Dense) (None, 512) 9437696
_________________________________________________________________
activation (Activation) (None, 512) 0
_________________________________________________________________
dense_1 (Dense) (None, 58) 29754
_________________________________________________________________
activation_1 (Activation) (None, 58) 0
=================================================================
Total params: 9,708,282
Trainable params: 9,708,282
Non-trainable params: 0
_________________________________________________________________
TRAIN MODEL:
Epoch 1/59
14/14 [==============================] - 14s 975ms/step - loss: 4.1059 - val_loss: 4.0662
Epoch 2/59
14/14 [==============================] - 13s 959ms/step - loss: 4.0710 - val_loss: 4.1179
Epoch 3/59
14/14 [==============================] - 13s 936ms/step - loss: 4.0521 - val_loss: 4.1025
Epoch 4/59
14/14 [==============================] - 13s 939ms/step - loss: 4.0509 - val_loss: 4.1296
Epoch 5/59
14/14 [==============================] - 13s 948ms/step - loss: 4.0449 - val_loss: 4.0707
Epoch 6/59
14/14 [==============================] - 13s 950ms/step - loss: 4.0505 - val_loss: 4.1071
Epoch 7/59
14/14 [==============================] - 13s 958ms/step - loss: 4.0433 - val_loss: 4.1346
Epoch 8/59
14/14 [==============================] - 13s 954ms/step - loss: 4.0504 - val_loss: 4.0856
Epoch 9/59
14/14 [==============================] - 13s 964ms/step - loss: 4.0330 - val_loss: 4.1261
Epoch 10/59
14/14 [==============================] - 14s 983ms/step - loss: 3.9718 - val_loss: 3.9883
Epoch 11/59
14/14 [==============================] - 13s 933ms/step - loss: 3.7644 - val_loss: 4.0952
Epoch 12/59
14/14 [==============================] - 13s 933ms/step - loss: 3.6456 - val_loss: 3.6974
Epoch 13/59
14/14 [==============================] - 13s 951ms/step - loss: 3.3521 - val_loss: 3.5460
Epoch 14/59
14/14 [==============================] - 13s 962ms/step - loss: 3.2253 - val_loss: 3.2729
Epoch 15/59
14/14 [==============================] - 15s 1s/step - loss: 2.9422 - val_loss: 3.2000
Epoch 16/59
14/14 [==============================] - 13s 962ms/step - loss: 2.7437 - val_loss: 3.0561
Epoch 17/59
14/14 [==============================] - 13s 962ms/step - loss: 2.6164 - val_loss: 3.0905
Epoch 18/59
14/14 [==============================] - 13s 950ms/step - loss: 2.5613 - val_loss: 2.7422
Epoch 19/59
14/14 [==============================] - 13s 948ms/step - loss: 2.5242 - val_loss: 2.9230
Epoch 20/59
14/14 [==============================] - 13s 954ms/step - loss: 2.3480 - val_loss: 2.6601
Epoch 21/59
14/14 [==============================] - 14s 1s/step - loss: 2.1049 - val_loss: 2.5116
Epoch 22/59
14/14 [==============================] - 14s 967ms/step - loss: 1.9024 - val_loss: 2.4045
Epoch 23/59
14/14 [==============================] - 13s 944ms/step - loss: 1.7881 - val_loss: 2.3397
Epoch 24/59
14/14 [==============================] - 13s 935ms/step - loss: 1.6218 - val_loss: 2.3310
Epoch 25/59
14/14 [==============================] - 13s 960ms/step - loss: 1.4528 - val_loss: 1.9856
Epoch 26/59
14/14 [==============================] - 13s 950ms/step - loss: 1.4757 - val_loss: 1.9892
Epoch 27/59
14/14 [==============================] - 13s 947ms/step - loss: 1.2487 - val_loss: 1.8838
Epoch 28/59
14/14 [==============================] - 13s 945ms/step - loss: 1.0968 - val_loss: 1.8848
Epoch 29/59
14/14 [==============================] - 14s 999ms/step - loss: 1.3052 - val_loss: 1.8807
Epoch 30/59
14/14 [==============================] - 13s 942ms/step - loss: 1.1706 - val_loss: 1.8130
Epoch 31/59
14/14 [==============================] - 13s 947ms/step - loss: 0.9345 - val_loss: 1.4584
Epoch 32/59
14/14 [==============================] - 13s 934ms/step - loss: 1.0239 - val_loss: 1.6938
Epoch 33/59
14/14 [==============================] - 13s 942ms/step - loss: 0.8465 - val_loss: 1.0062
Epoch 34/59
14/14 [==============================] - 13s 947ms/step - loss: 0.7187 - val_loss: 1.5815
Epoch 35/59
14/14 [==============================] - 13s 946ms/step - loss: 0.6261 - val_loss: 1.3732
Epoch 36/59
14/14 [==============================] - 13s 944ms/step - loss: 0.5681 - val_loss: 1.6571
Epoch 37/59
14/14 [==============================] - 13s 961ms/step - loss: 0.8443 - val_loss: 0.8410
Epoch 38/59
14/14 [==============================] - 13s 945ms/step - loss: 0.5554 - val_loss: 1.3257
Epoch 39/59
14/14 [==============================] - 13s 933ms/step - loss: 0.5168 - val_loss: 1.3477
Epoch 40/59
14/14 [==============================] - 13s 938ms/step - loss: 0.4323 - val_loss: 1.2225
Epoch 41/59
14/14 [==============================] - 13s 917ms/step - loss: 0.4062 - val_loss: 1.7126
Epoch 42/59
14/14 [==============================] - 13s 931ms/step - loss: 0.4107 - val_loss: 0.6231
Epoch 43/59
14/14 [==============================] - 14s 1s/step - loss: 0.4382 - val_loss: 1.3422
Epoch 44/59
14/14 [==============================] - 13s 937ms/step - loss: 0.3241 - val_loss: 1.4085
Epoch 45/59
14/14 [==============================] - 13s 933ms/step - loss: 0.2740 - val_loss: 0.6285
Epoch 46/59
14/14 [==============================] - 13s 941ms/step - loss: 0.2144 - val_loss: 1.7058
Epoch 47/59
14/14 [==============================] - 14s 967ms/step - loss: 0.4136 - val_loss: 1.6700
Epoch 48/59
14/14 [==============================] - 13s 953ms/step - loss: 0.2629 - val_loss: 1.4909
Epoch 49/59
14/14 [==============================] - 14s 984ms/step - loss: 0.2949 - val_loss: 1.4362
Epoch 50/59
14/14 [==============================] - 14s 1s/step - loss: 0.1808 - val_loss: 1.5405
Epoch 51/59
14/14 [==============================] - 14s 1s/step - loss: 0.1484 - val_loss: 1.7669
Epoch 52/59
14/14 [==============================] - 14s 966ms/step - loss: 0.1679 - val_loss: 1.4220
Epoch 53/59
14/14 [==============================] - 13s 940ms/step - loss: 0.1705 - val_loss: 1.3934
Epoch 54/59
14/14 [==============================] - 14s 967ms/step - loss: 0.1828 - val_loss: 0.5581
Epoch 55/59
14/14 [==============================] - 14s 967ms/step - loss: 0.2769 - val_loss: 1.3616
Epoch 56/59
14/14 [==============================] - 14s 974ms/step - loss: 0.1672 - val_loss: 0.5885
Epoch 57/59
14/14 [==============================] - 14s 969ms/step - loss: 0.1644 - val_loss: 1.4092
Epoch 58/59
14/14 [==============================] - 13s 950ms/step - loss: 0.1321 - val_loss: 1.7724
Epoch 59/59
14/14 [==============================] - 13s 931ms/step - loss: 0.1618 - val_loss: 0.8644
Model: "sequential"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
conv2d (Conv2D) (None, 222, 222, 32) 896
_________________________________________________________________
max_pooling2d (MaxPooling2D) (None, 111, 111, 32) 0
_________________________________________________________________
conv2d_1 (Conv2D) (None, 109, 109, 64) 18496
_________________________________________________________________
max_pooling2d_1 (MaxPooling2 (None, 54, 54, 64) 0
_________________________________________________________________
conv2d_2 (Conv2D) (None, 52, 52, 128) 73856
_________________________________________________________________
max_pooling2d_2 (MaxPooling2 (None, 26, 26, 128) 0
_________________________________________________________________
conv2d_3 (Conv2D) (None, 24, 24, 128) 147584
_________________________________________________________________
max_pooling2d_3 (MaxPooling2 (None, 12, 12, 128) 0
_________________________________________________________________
dropout (Dropout) (None, 12, 12, 128) 0
_________________________________________________________________
flatten (Flatten) (None, 18432) 0
_________________________________________________________________
dense (Dense) (None, 512) 9437696
_________________________________________________________________
activation (Activation) (None, 512) 0
_________________________________________________________________
dense_1 (Dense) (None, 58) 29754
_________________________________________________________________
activation_1 (Activation) (None, 58) 0
=================================================================
Total params: 9,708,282
Trainable params: 9,708,282
Non-trainable params: 0
_________________________________________________________________
Shape of test data (X) is : (12, 224, 224, 3)
Shape of test data (y) is : (12, 58)
EVALUATE MODEL:
1/1 [==============================] - 0s 259us/step - loss: 0.9788
./dobble_dataset/dobble_test01_cards : Test Accuracy = 0.9166666666666666
(base) masaaki@masaaki-H110M4-M01:/media/masaaki/Ubuntu_Disk/AI/dobble_buddy$ python dobble_test.py
2021-07-09 05:12:45.148697: I tensorflow/core/platform/cpu_feature_guard.cc:143] Your CPU supports instructions that this TensorFlow binary was not compiled to use: SSE4.1 SSE4.2 AVX AVX2 FMA
2021-07-09 05:12:45.170665: I tensorflow/core/platform/profile_utils/cpu_utils.cc:102] CPU Frequency: 3199980000 Hz
2021-07-09 05:12:45.170934: I tensorflow/compiler/xla/service/service.cc:168] XLA service 0x55eaef3a9300 initialized for platform Host (this does not guarantee that XLA will be used). Devices:
2021-07-09 05:12:45.170993: I tensorflow/compiler/xla/service/service.cc:176] StreamExecutor device (0): Host, Default Version
2021-07-09 05:12:45.171209: I tensorflow/core/common_runtime/process_util.cc:147] Creating new thread pool with default inter op setting: 2. Tune using inter_op_parallelism_threads for best performance.
Shape of test data (X) is : (1267, 224, 224, 3)
Shape of test data (y) is : (1267, 58)
EVALUATE MODEL:
40/40 [==============================] - 8s 209ms/step - loss: 6.6324
./dobble_dataset/dobble_test02_cards : Test Accuracy = 0.4617205998421468
0.50% accuracy bound: 0.4523 - 0.4711
0.80% accuracy bound: 0.4438 - 0.4796
0.90% accuracy bound: 0.4388 - 0.4847
0.95% accuracy bound: 0.4343 - 0.4892
0.99% accuracy bound: 0.4256 - 0.4979
The game uses a deck of 55 cards, each printed with eight different symbols. Any two cards always share exactly one matching symbol. The object of the game is to be the first to announce the symbol common to two given cards.
opencv-contrib-python
tensorflow
keras
kaggle
SSD : label=1 x,y,w,h=2,1,272,304 confidence=0.906937
PlateDetect : x,y,w,h=103,257,63,21 confidence=0.99977
PlateNum : size=288,96 color=Blue number=[jingQ2P6J2]
Perhaps because the commands are being run over ssh "IP address" -l root -X, a window opens on the Ubuntu 18.04 main machine and the image is displayed there. Please note that this may be slowing down the frame rate.