シミュレーション波形を下に示す。cd C:/Users/Masaaki/Documents/Vivado_HLS/lap_filter_axis2_2014_4/solution1/sim/verilog/
current_fileset
open_wave_database lap_filter_axis.wdb
open_wave_config lap_filter_axis.wcfg
@I [HLS-10] ----------------------------------------------------------------
@I [HLS-10] -- Scheduling module 'lap_filter_rgb'
@I [HLS-10] ----------------------------------------------------------------
@I [SCHED-11] Starting scheduling ...
@I [SCHED-61] Pipelining loop 'Loop 1'.
@W [SCHED-64] Unable to schedule the loop exit test ('icmp' operation ('tmp')) in the first II cycles (II = 1).
@W [SCHED-64] Unable to schedule the loop exit test ('icmp' operation ('tmp')) in the first II cycles (II = 2).
@W [SCHED-64] Unable to schedule the loop exit test ('icmp' operation ('tmp')) in the first II cycles (II = 3).
@W [SCHED-64] Unable to schedule the loop exit test ('icmp' operation ('tmp')) in the first II cycles (II = 4).
@I [SCHED-61] Pipelining result: Target II: 1, Final II: 5, Depth: 6.
@W [SCHED-21] Estimated clock period (24.6ns) exceeds the target (target clock period: 10ns, clock uncertainty: 1.25ns, effective delay budget: 8.75ns).
@W [SCHED-21] The critical path consists of the following:
'load' operation ('line_buf_0_load', lap_filter_rgb_14_4/lap_filter_rgb.cpp:80) on array 'line_buf[0]' (2.39 ns)
'add' operation ('tmp3', lap_filter_rgb_14_4/lap_filter_rgb.cpp:169->lap_filter_rgb_14_4/lap_filter_rgb.cpp:89) (2.44 ns)
'add' operation ('sum2_i', lap_filter_rgb_14_4/lap_filter_rgb.cpp:169->lap_filter_rgb_14_4/lap_filter_rgb.cpp:89) (1.97 ns)
'sub' operation ('sum3_neg_i', lap_filter_rgb_14_4/lap_filter_rgb.cpp:169->lap_filter_rgb_14_4/lap_filter_rgb.cpp:89) (1.97 ns)
'sub' operation ('tmp_i', lap_filter_rgb_14_4/lap_filter_rgb.cpp:169->lap_filter_rgb_14_4/lap_filter_rgb.cpp:89) (2.44 ns)
'sub' operation ('tmp_5_i', lap_filter_rgb_14_4/lap_filter_rgb.cpp:169->lap_filter_rgb_14_4/lap_filter_rgb.cpp:89) (2.44 ns)
'sub' operation ('tmp_6_i', lap_filter_rgb_14_4/lap_filter_rgb.cpp:169->lap_filter_rgb_14_4/lap_filter_rgb.cpp:89) (2.44 ns)
'sub' operation ('y', lap_filter_rgb_14_4/lap_filter_rgb.cpp:169->lap_filter_rgb_14_4/lap_filter_rgb.cpp:89) (2.44 ns)
'mul' operation ('y_i88_op', lap_filter_rgb_14_4/lap_filter_rgb.cpp:169->lap_filter_rgb_14_4/lap_filter_rgb.cpp:89) (6.08 ns)
@I [SCHED-11] Finished scheduling.
シミュレーション波形が表示された。cd c:/Users/Masaaki/Documents/Vivado_HLS/ZYBO/lap_filter_rgb_14_4/solution1/sim/verilog
current_fileset
open_wave_database lap_filter_rgb.wdb
open_wave_config lap_filter_rgb.wcfg
//
// lap_fil_rgb_tb.cpp
// 2015/08/22 by marsee
//
#include <stdio.h>
#include <string.h>
#include <ap_int.h>
#include <hls_stream.h>
#include "lap_filter_rgb.h"
#include "bmp_header.h"
#define LOOP_COUNT 5
void lap_filter_rgb(ap_uint<1> lap_fil_enable, hls::stream<vid_in<24> >& video_in, hls::stream<RGB<24> >& video_out);
int laplacian_fil_soft(int x0y0, int x1y0, int x2y0, int x0y1, int x1y1, int x2y1, int x0y2, int x1y2, int x2y2);
int conv_rgb2y_soft(int rgb);
void lap_filter_rgb_soft(ap_uint<1> lap_fil_enable, hls::stream<vid_in<24> >& video_in, hls::stream<RGB<24> >& video_out);
// Testbench for the lap_filter_rgb HW function.
// Builds one frame of DVI-style video timing (VSync, porches, HSync and the
// active pixels of test.bmp), feeds identical streams to the hardware
// function (lap_filter_rgb) and the software golden model
// (lap_filter_rgb_soft), compares every output word, and writes the HW
// result to temp_lap.bmp.
// Returns 0 on success, 1 on an HW/SW mismatch; calls exit(1) on file or
// allocation failure.
int main(){
    using namespace std;
    hls::stream<vid_in<24> > ins;       // stimulus to the HW filter
    hls::stream<RGB<24> > outs;         // HW filter output
    hls::stream<vid_in<24> > ins_soft;  // identical stimulus to the SW model
    hls::stream<RGB<24> > outs_soft;    // SW model output
    vid_in<24> pix;
    RGB<24> vals;
    RGB<24> vals_soft;
    BITMAPFILEHEADER bmpfhr; // BMP file header (read side)
    BITMAPINFOHEADER bmpihr; // BMP info header (read side)
    FILE *fbmpr, *fbmpw;
    int *rd_bmp, *hw_lapd;
    int blue, green, red;
    int i, j;

    if ((fbmpr = fopen("test.bmp", "rb")) == NULL){ // open test.bmp
        fprintf(stderr, "Can't open test.bmp by binary read mode\n");
        exit(1);
    }
    // Read the BMP headers field by field.
    // NOTE(review): these reads assume a 4-byte 'long' (32-bit ABI); on LP64
    // platforms sizeof(long)==8 would mis-align the header fields — confirm
    // the field types declared in bmp_header.h.
    fread(&bmpfhr.bfType, sizeof(char), 2, fbmpr);
    fread(&bmpfhr.bfSize, sizeof(long), 1, fbmpr);
    fread(&bmpfhr.bfReserved1, sizeof(short), 1, fbmpr);
    fread(&bmpfhr.bfReserved2, sizeof(short), 1, fbmpr);
    fread(&bmpfhr.bfOffBits, sizeof(long), 1, fbmpr);
    fread(&bmpihr, sizeof(BITMAPINFOHEADER), 1, fbmpr);

    // Allocate the input and result pixel buffers.
    if ((rd_bmp =(int *)malloc(sizeof(int) * (bmpihr.biWidth * bmpihr.biHeight))) == NULL){
        fprintf(stderr, "Can't allocate rd_bmp memory\n");
        exit(1);
    }
    if ((hw_lapd =(int *)malloc(sizeof(int) * (bmpihr.biWidth * bmpihr.biHeight))) == NULL){
        fprintf(stderr, "Can't allocate hw_lapd memory\n");
        exit(1);
    }

    // Copy the BMP pixels into rd_bmp. BMP stores rows bottom-up, so the
    // rows are flipped vertically while reading.
    for (int y=0; y<bmpihr.biHeight; y++){
        for (int x=0; x<bmpihr.biWidth; x++){
            blue = fgetc(fbmpr);
            green = fgetc(fbmpr);
            red = fgetc(fbmpr);
            rd_bmp[((bmpihr.biHeight-1)-y)*bmpihr.biWidth+x] = (blue & 0xff) | ((green & 0xff)<<8) | ((red & 0xff)<<16);
        }
    }
    fclose(fbmpr);

    // Build the stimulus: vertical sync, vertical back porch, then per line
    // the active pixels followed by H front porch / HSync / H back porch,
    // and finally a dummy line plus a second vertical sync that terminates
    // the filter's state machine.
    for(int i=0; i<LOOP_COUNT; i++){ // Vertical Sync
        pix.rgb_pData = 0;
        pix.rgb_pVDE = 0;
        pix.rgb_pHSync = 0;
        pix.rgb_pVSync = 1;
        ins << pix;
        ins_soft << pix;
    }
    for(int i=0; i<LOOP_COUNT; i++){ // Vertical back porch
        pix.rgb_pData = 0;
        pix.rgb_pVDE = 0;
        pix.rgb_pHSync = 0;
        pix.rgb_pVSync = 0;
        ins << pix;
        ins_soft << pix;
    }
    for(j=0; j < bmpihr.biHeight; j++){
        for(i=0; i < bmpihr.biWidth; i++){ // active video
            pix.rgb_pData = (ap_int<32>)rd_bmp[(j*bmpihr.biWidth)+i];
            pix.rgb_pVDE = 1;
            pix.rgb_pHSync = 0;
            pix.rgb_pVSync = 0;
            ins << pix;
            ins_soft << pix;
        }
        for(int i=0; i<LOOP_COUNT; i++){ // Horizontal front porch
            pix.rgb_pData = 0;
            pix.rgb_pVDE = 0;
            pix.rgb_pHSync = 0;
            pix.rgb_pVSync = 0;
            ins << pix;
            ins_soft << pix;
        }
        for(int i=0; i<LOOP_COUNT; i++){ // Horizontal Sync
            pix.rgb_pData = 0;
            pix.rgb_pVDE = 0;
            pix.rgb_pHSync = 1;
            pix.rgb_pVSync = 0;
            ins << pix;
            ins_soft << pix;
        }
        for(int i=0; i<LOOP_COUNT; i++){ // Horizontal back porch
            pix.rgb_pData = 0;
            pix.rgb_pVDE = 0;
            pix.rgb_pHSync = 0;
            pix.rgb_pVSync = 0;
            ins << pix;
            ins_soft << pix;
        }
    }
    for(i=0; i < bmpihr.biWidth; i++){ // No Display, Horizontal Sync
        pix.rgb_pData = 0;
        pix.rgb_pVDE = 0;
        pix.rgb_pHSync = 0;
        pix.rgb_pVSync = 0;
        ins << pix;
        ins_soft << pix;
    }
    for(int i=0; i<LOOP_COUNT; i++){ // Horizontal front porch
        pix.rgb_pData = 0;
        pix.rgb_pVDE = 0;
        pix.rgb_pHSync = 0;
        pix.rgb_pVSync = 0;
        ins << pix;
        ins_soft << pix;
    }
    for(int i=0; i<LOOP_COUNT; i++){ // Horizontal Sync
        pix.rgb_pData = 0;
        pix.rgb_pVDE = 0;
        pix.rgb_pHSync = 1;
        pix.rgb_pVSync = 0;
        ins << pix;
        ins_soft << pix;
    }
    for(int i=0; i<LOOP_COUNT; i++){ // Horizontal back porch
        pix.rgb_pData = 0;
        pix.rgb_pVDE = 0;
        pix.rgb_pHSync = 0;
        pix.rgb_pVSync = 0;
        ins << pix;
        ins_soft << pix;
    }
    for(int i=0; i<LOOP_COUNT; i++){ // Vertical Sync (terminates the filters)
        pix.rgb_pData = 0;
        pix.rgb_pVDE = 0;
        pix.rgb_pHSync = 0;
        pix.rgb_pVSync = 1;
        ins << pix;
        ins_soft << pix;
    }

    lap_filter_rgb(1, ins, outs);
    lap_filter_rgb_soft(1, ins_soft, outs_soft);

    // Compare the HW and SW outputs word by word. Only active-video words
    // (VDE asserted) advance the pixel index i; blanking words are consumed
    // without being stored.
    cout << endl;
    cout << "outs" << endl;
    for(j=0; j < bmpihr.biHeight; j++){
        for(i=0; i < bmpihr.biWidth; ){
            outs >> vals;
            outs_soft >> vals_soft;
            if (vals.vid_pData != vals_soft.vid_pData || vals.vid_pVDE != vals_soft.vid_pVDE ||
                vals.vid_pHSync != vals_soft.vid_pHSync || vals.vid_pVSync != vals_soft.vid_pVSync){
                // BUG FIX: i and j are int, so the conversion specifier must
                // be %d — the original "%ld" was undefined behavior.
                printf("ERROR HW and SW results mismatch i = %d, j = %d\n", i, j);
                printf("HW : Data = %x, VDE = %x, HSYNC = %X, VSYNC = %x\n", (int)vals.vid_pData, (int)vals.vid_pVDE, (int)vals.vid_pHSync, (int)vals.vid_pVSync);
                printf("SW : Data = %x, VDE = %x, HSYNC = %X, VSYNC = %x\n", (int)vals_soft.vid_pData, (int)vals_soft.vid_pVDE, (int)vals_soft.vid_pHSync, (int)vals_soft.vid_pVSync);
                free(rd_bmp);   // release the buffers on the error path too
                free(hw_lapd);
                return(1);
            }
            if (!vals.vid_pVDE) // blanking word: no pixel stored
                continue;
            unsigned int val = (unsigned int)vals.vid_pData;
            hw_lapd[(j*bmpihr.biWidth)+i] = (int)val;
            i++;
        }
    }
    cout << "Success HW and SW results match" << endl;
    cout << endl;

    // Write the HW result out as temp_lap.bmp (rows flipped back to the
    // BMP bottom-up order).
    if ((fbmpw=fopen("temp_lap.bmp", "wb")) == NULL){
        fprintf(stderr, "Can't open temp_lap.bmp by binary write mode\n");
        exit(1);
    }
    // BMP file header.
    fwrite(&bmpfhr.bfType, sizeof(char), 2, fbmpw);
    fwrite(&bmpfhr.bfSize, sizeof(long), 1, fbmpw);
    fwrite(&bmpfhr.bfReserved1, sizeof(short), 1, fbmpw);
    fwrite(&bmpfhr.bfReserved2, sizeof(short), 1, fbmpw);
    fwrite(&bmpfhr.bfOffBits, sizeof(long), 1, fbmpw);
    fwrite(&bmpihr, sizeof(BITMAPINFOHEADER), 1, fbmpw);
    // RGB data, written bottom-up.
    for (int y=0; y<bmpihr.biHeight; y++){
        for (int x=0; x<bmpihr.biWidth; x++){
            blue = hw_lapd[((bmpihr.biHeight-1)-y)*bmpihr.biWidth+x] & 0xff;
            green = (hw_lapd[((bmpihr.biHeight-1)-y)*bmpihr.biWidth+x] >> 8) & 0xff;
            red = (hw_lapd[((bmpihr.biHeight-1)-y)*bmpihr.biWidth+x]>>16) & 0xff;
            fputc(blue, fbmpw);
            fputc(green, fbmpw);
            fputc(red, fbmpw);
        }
    }
    fclose(fbmpw);
    free(rd_bmp);
    free(hw_lapd);
    return 0;
}
// Software reference (golden) model of the Laplacian-filter video pipeline.
// A state machine tracks the DVI-style timing (VSync, back porch, active
// video, front porch, HSync) carried in the input stream, converts active
// pixels to luma, applies the 3x3 Laplacian, and emits exactly one output
// word per consumed input word until the second VSync period (vsync_assert2).
// lap_fil_enable : 1 = output the filtered value during active video,
//                  0 = pass the input pixel through unchanged.
void lap_filter_rgb_soft(ap_uint<1> lap_fil_enable, hls::stream<vid_in<24> >& video_in, hls::stream<RGB<24> >& video_out){
enum {idle, vsync_assert, v_back_porch, h_video, h_front_porch, hsync_assert, h_back_porch, v_front_porch, vsync_assert2};
int cstate = idle; // current timing state
int cstateb = idle; // timing state of the previous loop iteration
unsigned int line_buf[2][HORIZONTAL_PIXEL_WIDTH]; // luma of the two previous lines
int pix_mat[3][3]; // 3x3 sliding window; columns shift left each pixel
int lap_fil_val;
int x=0, y=0; // column / row counters inside the active frame
int y_val; // luma of the current pixel
int val;
int first_h_video;
vid_in<24> pix;
RGB<24> lap;
while(cstate != vsync_assert2){
if(!(cstate==h_video && (cstateb==v_back_porch || cstateb==h_back_porch))){ // do not re-read the stream when h_video was entered from v_back_porch or h_back_porch: that pixel was already consumed there
video_in >> pix;
first_h_video = 0;
} else
first_h_video = 1;
cstateb = cstate;
switch (cstate){
case idle :
if (pix.rgb_pVSync)
cstate = vsync_assert;
x = 0; y = 0;
lap_fil_val = pix.rgb_pData;
break;
case vsync_assert :
if (!pix.rgb_pVSync)
cstate = v_back_porch;
lap_fil_val = pix.rgb_pData;
break;
case v_back_porch :
if (pix.rgb_pVDE){
cstate = h_video;
lap_fil_val = 0;
} else
lap_fil_val = pix.rgb_pData;
break;
case h_video :
if (!pix.rgb_pVDE){
cstate = h_front_porch;
lap_fil_val = pix.rgb_pData;
} else {
// Shift the 3x3 window one column to the left.
for (int k=0; k<3; k++){
for (int m=0; m<2; m++){
pix_mat[k][m] = pix_mat[k][m+1];
}
}
// New right-hand column: the two buffered lines plus this pixel's luma.
pix_mat[0][2] = line_buf[0][x];
pix_mat[1][2] = line_buf[1][x];
y_val = conv_rgb2y_soft((unsigned int)pix.rgb_pData);
pix_mat[2][2] = y_val;
line_buf[0][x] = line_buf[1][x]; // rotate the line history
line_buf[1][x] = y_val;
lap_fil_val = laplacian_fil_soft( pix_mat[0][0], pix_mat[0][1], pix_mat[0][2],
pix_mat[1][0], pix_mat[1][1], pix_mat[1][2],
pix_mat[2][0], pix_mat[2][1], pix_mat[2][2]);
lap_fil_val = (lap_fil_val<<16)+(lap_fil_val<<8)+lap_fil_val; // replicate the 8-bit result into R, G and B
if (x<2 || y<2) // first two rows, and first two columns of each row, hold invalid window data, so force 0
lap_fil_val = 0;
x++;
if (first_h_video) // no stream word was consumed this iteration, so emit no output word
continue;
}
break;
case h_front_porch:
if (pix.rgb_pHSync)
cstate = hsync_assert;
lap_fil_val = pix.rgb_pData;
break;
case hsync_assert :
if (!pix.rgb_pHSync)
cstate = h_back_porch;
lap_fil_val = pix.rgb_pData;
break;
case h_back_porch :
if (pix.rgb_pHSync) // HSync arrived before VDE, so the display period has ended
cstate = v_front_porch;
else if (pix.rgb_pVDE){
cstate = h_video;
y++;
x = 0;
lap_fil_val = 0;
} else
lap_fil_val = pix.rgb_pData;
break;
case v_front_porch :
if (pix.rgb_pVSync)
cstate = vsync_assert2;
break;
}
// Forward timing signals unchanged; substitute the filtered value only
// during active video with the filter enabled.
if (cstate == h_video){
if (lap_fil_enable)
lap.vid_pData = (ap_uint<24>)lap_fil_val;
else
lap.vid_pData = pix.rgb_pData;
}else
lap.vid_pData = pix.rgb_pData;
lap.vid_pHSync = pix.rgb_pHSync;
lap.vid_pVDE = pix.rgb_pVDE;
lap.vid_pVSync = pix.rgb_pVSync;
video_out << lap;
}
}
// RGBからYへの変換
// RGBのフォーマットは、{8'd0, R(8bits), G(8bits), B(8bits)}, 1pixel = 32bits
// 輝度信号Yのみに変換する。変換式は、Y = 0.299R + 0.587G + 0.114B
// "YUVフォーマット及び YUV<->RGB変換"を参考にした。http://vision.kuee.kyoto-u.ac.jp/~hiroaki/firewire/yuv.html
// 2013/09/27 : float を止めて、すべてint にした
// Convert a packed {R,G,B} pixel to its luma (Y) component.
// Integer approximation of Y = 0.299R + 0.587G + 0.114B using coefficients
// scaled by 256: Y = (77*R + 150*G + 29*B) >> 8.
int conv_rgb2y_soft(int rgb){
    const int blue  = rgb         & 0xff;
    const int green = (rgb >> 8)  & 0xff;
    const int red   = (rgb >> 16) & 0xff;
    return (77 * red + 150 * green + 29 * blue) >> 8;
}
// ラプラシアンフィルタ
// x0y0 x1y0 x2y0 -1 -1 -1
// x0y1 x1y1 x2y1 -1 8 -1
// x0y2 x1y2 x2y2 -1 -1 -1
// 3x3 Laplacian kernel:
//   -1 -1 -1
//   -1  8 -1
//   -1 -1 -1
// The response is clamped to the 8-bit range [0, 255].
int laplacian_fil_soft(int x0y0, int x1y0, int x2y0, int x0y1, int x1y1, int x2y1, int x0y2, int x1y2, int x2y2)
{
    int acc = 8 * x1y1;
    acc -= x0y0 + x1y0 + x2y0 + x0y1 + x2y1 + x0y2 + x1y2 + x2y2;
    if (acc < 0)
        return 0;
    if (acc > 255)
        return 255;
    return acc;
}
// AXILiteS
// 0x00 : Control signals
// bit 0 - ap_start (Read/Write/COH)
// bit 1 - ap_done (Read/COR)
// bit 2 - ap_idle (Read)
// bit 3 - ap_ready (Read)
// bit 7 - auto_restart (Read/Write)
// others - reserved
// 0x04 : Global Interrupt Enable Register
// bit 0 - Global Interrupt Enable (Read/Write)
// others - reserved
// 0x08 : IP Interrupt Enable Register (Read/Write)
// bit 0 - Channel 0 (ap_done)
// bit 1 - Channel 1 (ap_ready)
// others - reserved
// 0x0c : IP Interrupt Status Register (Read/TOW)
// bit 0 - Channel 0 (ap_done)
// bit 1 - Channel 1 (ap_ready)
// others - reserved
// 0x10 : Data signal of ap_return
// bit 31~0 - ap_return[31:0] (Read)
// 0x18 : Data signal of cam_fb_offset
// bit 31~0 - cam_fb_offset[31:0] (Read/Write)
// 0x1c : reserved
// 0x20 : Data signal of lap_fb_offset
// bit 31~0 - lap_fb_offset[31:0] (Read/Write)
// 0x24 : reserved
// (SC = Self Clear, COR = Clear on Read, TOW = Toggle on Write, COH = Clear on Handshake)
// laplacian_filter3.c
// m_axi offset=slave version
// 2015/08/26
//
#include <stdio.h>
#include <string.h>
#define HORIZONTAL_PIXEL_WIDTH 64
#define VERTICAL_PIXEL_WIDTH 48
//#define HORIZONTAL_PIXEL_WIDTH 800
//#define VERTICAL_PIXEL_WIDTH 600
#define ALL_PIXEL_VALUE (HORIZONTAL_PIXEL_WIDTH*VERTICAL_PIXEL_WIDTH)
int laplacian_fil(int x0y0, int x1y0, int x2y0, int x0y1, int x1y1, int x2y1, int x0y2, int x1y2, int x2y2);
int conv_rgb2y(int rgb);
// Laplacian filter over an AXI4-Master frame buffer (memcpy line-buffered
// HLS version). Reads RGB lines from cam_fb, converts each pixel to luma,
// applies the 3x3 Laplacian via a prev/current/next sliding window, and
// writes the result lines back to lap_fb. Border pixels are forced to 0.
// NOTE(review): the row loop runs y < VERTICAL_PIXEL_WIDTH-1, so the last
// frame row of lap_fb is never written — confirm whether that is intended.
int lap_filter_axim(volatile int *cam_fb, volatile int *lap_fb)
{
#pragma HLS INTERFACE s_axilite port=return
#pragma HLS INTERFACE m_axi depth=3072 port=cam_fb offset=slave bundle=cam_fb
#pragma HLS INTERFACE m_axi depth=3072 port=lap_fb offset=slave bundle=lap_fb
int line_buf[3][HORIZONTAL_PIXEL_WIDTH]; // three rotating line buffers
#pragma HLS array_partition variable=line_buf block factor=3 dim=1
#pragma HLS resource variable=line_buf core=RAM_2P
int lap_buf[HORIZONTAL_PIXEL_WIDTH]; // one output line
int x, y;
int lap_fil_val;
int a, b;
int fl, sl, tl; // buffer roles: first / second / third row of the window
int line_sel; // rotates 0,1,2,3 then 1,2,3,... selecting the fl/sl/tl mapping
int prev[3],current[3],next[3]; // [0]=1st line, [1]=2nd line, [2]=3rd line; prev = 1 pixel back, current = this pixel, next = next pixel
#pragma HLS array_partition variable=prev complete dim=0
#pragma HLS array_partition variable=current complete dim=0
#pragma HLS array_partition variable=next complete dim=0
// Convert RGB to luma (Y) only and apply the Laplacian filter.
for (y=0, line_sel=0; y<VERTICAL_PIXEL_WIDTH-1; y++){
// first line; y=1 -> 012, y=2 -> 120, y=3 -> 201, y=4 -> 012
switch(line_sel){
case 1 :
fl = 0; sl = 1; tl = 2;
break;
case 2 :
fl = 1; sl = 2; tl = 0;
break;
case 3 :
fl = 2; sl = 0; tl = 1;
break;
default :
fl = 0; sl = 1; tl = 2;
}
if (y == 1){
#ifndef __SYNTHESIS__
printf("copy 3 lines\n");
#endif
for (a=0; a<3; a++){
// prime all three line buffers
memcpy(line_buf[a], (const int*)&cam_fb[a*(HORIZONTAL_PIXEL_WIDTH)], HORIZONTAL_PIXEL_WIDTH*sizeof(int));
}
}else{ // not the first line: only one new line needs loading, the other two are already buffered
memcpy(line_buf[tl], (const int*)&cam_fb[(y+1)*(HORIZONTAL_PIXEL_WIDTH)], HORIZONTAL_PIXEL_WIDTH*sizeof(int));
}
if (y==0 || y==VERTICAL_PIXEL_WIDTH-1){
// vertical border rows carry no valid window: output zeros
for(b=0; b<HORIZONTAL_PIXEL_WIDTH; b++){
lap_buf[b] = 0;
}
} else {
// Pre-load the 'next' column before entering the pixel loop.
next[0] = conv_rgb2y(line_buf[fl][0]);
next[1] = conv_rgb2y(line_buf[sl][0]);
next[2] = conv_rgb2y(line_buf[tl][0]);
for (x = 0; x < HORIZONTAL_PIXEL_WIDTH; x++){
if (x == 0 || x == HORIZONTAL_PIXEL_WIDTH-1){
// Horizontal border: output 0 but keep advancing the window.
lap_fil_val = 0;
current[0] = next[0];
next[0] = conv_rgb2y(line_buf[fl][1]);
current[1] = next[1];
next[1] = conv_rgb2y(line_buf[sl][1]);
current[2] = next[2];
next[2] = conv_rgb2y(line_buf[tl][1]);
}else{
prev[0] = current[0];
current[0] = next[0];
next[0] = conv_rgb2y(line_buf[fl][x+1]);
prev[1] = current[1];
current[1] = next[1];
next[1] = conv_rgb2y(line_buf[sl][x+1]);
prev[2] = current[2];
current[2] = next[2];
next[2] = conv_rgb2y(line_buf[tl][x+1]);
// NOTE(review): pragma placed mid-body — presumably intended to
// pipeline the enclosing x loop; confirm HLS picks it up here.
#pragma HLS pipeline
lap_fil_val = laplacian_fil(prev[0], current[0], next[0],
prev[1], current[1], next[1],
prev[2], current[2], next[2]);
}
lap_buf[x] = (lap_fil_val<<16)+(lap_fil_val<<8)+lap_fil_val; // replicate the result into R, G and B
}
}
#ifndef __SYNTHESIS__
printf("write back:%d\n", y);
#endif
memcpy((int*)&lap_fb[y*(HORIZONTAL_PIXEL_WIDTH)], (const int*)lap_buf, HORIZONTAL_PIXEL_WIDTH*sizeof(int));
line_sel++;
if (line_sel > 3){
line_sel = 1;
}
}
return(0);
}
// RGBからYへの変換
// RGBのフォーマットは、{8'd0, R(8bits), G(8bits), B(8bits)}, 1pixel = 32bits
// 輝度信号Yのみに変換する。変換式は、Y = 0.299R + 0.587G + 0.114B
// "YUVフォーマット及び YUV<->RGB変換"を参考にした。http://vision.kuee.kyoto-u.ac.jp/~hiroaki/firewire/yuv.html
// 2013/09/27 : float を止めて、すべてint にした
// Convert a packed {R,G,B} pixel to its luma (Y) component.
// Integer approximation of Y = 0.299R + 0.587G + 0.114B using coefficients
// scaled by 256: Y = (77*R + 150*G + 29*B) >> 8.
int conv_rgb2y(int rgb){
    const int blue  = rgb         & 0xff;
    const int green = (rgb >> 8)  & 0xff;
    const int red   = (rgb >> 16) & 0xff;
    return (77 * red + 150 * green + 29 * blue) >> 8;
}
// ラプラシアンフィルタ
// x0y0 x1y0 x2y0 -1 -1 -1
// x0y1 x1y1 x2y1 -1 8 -1
// x0y2 x1y2 x2y2 -1 -1 -1
// 3x3 Laplacian kernel:
//   -1 -1 -1
//   -1  8 -1
//   -1 -1 -1
// The response is clamped to the 8-bit range [0, 255].
int laplacian_fil(int x0y0, int x1y0, int x2y0, int x0y1, int x1y1, int x2y1, int x0y2, int x1y2, int x2y2)
{
    int acc = 8 * x1y1;
    acc -= x0y0 + x1y0 + x2y0 + x0y1 + x2y1 + x0y2 + x1y2 + x2y2;
    if (acc < 0)
        return 0;
    if (acc > 255)
        return 255;
    return acc;
}
という訳で、わざわざオフセット用の入力ポートを追加する必要が無くなった。• off : オフセットアドレスを適用しません。これがデフォルトです。
• direct : アドレスオフセットを適用するために 32 ビットポートを追加します。
• slave : アドレスオフセットを適用するために AXI4-Lite インターフェイス内に 32 ビットポートを追加します。
とのことだ。depth オプションの値が小さすぎると、C/RTL 協調シミュレーションがデッドロック状態になります。
このように設定するのが良いようだ。#pragma HLS INTERFACE m_axi depth=3072 port=cam_fb offset=slave bundle=cam_fb
#pragma HLS INTERFACE m_axi depth=3072 port=lap_fb offset=slave bundle=lap_fb
// lap_filter_rgb.h
// 2015/08/22
//#define HORIZONTAL_PIXEL_WIDTH 800
//#define VERTICAL_PIXEL_WIDTH 600
#ifndef __LAP_FILTER_RGB__
#define __LAP_FILTER_RGB__
#include "ap_int.h"
#define HORIZONTAL_PIXEL_WIDTH 64
#define VERTICAL_PIXEL_WIDTH 48
#define ALL_PIXEL_VALUE (HORIZONTAL_PIXEL_WIDTH*VERTICAL_PIXEL_WIDTH)
// One word of the incoming video stream: pixel data plus the DVI-style
// timing signals that accompany it.
template<int Data>
struct vid_in{
ap_uint<Data> rgb_pData; // pixel value (24-bit RGB when Data == 24)
ap_uint<1> rgb_pHSync; // horizontal sync
ap_uint<1> rgb_pVSync; // vertical sync
ap_uint<1> rgb_pVDE; // video data enable (asserted during active display)
};
// One word of the outgoing video stream: filtered pixel data plus the
// forwarded timing signals.
template<int Data>
struct RGB{
ap_uint<Data> vid_pData; // pixel value (24-bit RGB when Data == 24)
ap_uint<1> vid_pHSync; // horizontal sync
ap_uint<1> vid_pVSync; // vertical sync
ap_uint<1> vid_pVDE; // video data enable (asserted during active display)
};
#endif
@E [XFORM-801] Stream port 'video_in.V.rgb_pData.V' (lap_filter_rgb_14_4/lap_filter_rgb.cpp:16) has invalid interface mode 'ap_none' (lap_filter_rgb_14_4/lap_filter_rgb.cpp:19:1). Stream port only supports ap_hs, ap_fifo and axis modes.
@E [XFORM-801] Stream port 'video_in.V.rgb_pHSync.V' (lap_filter_rgb_14_4/lap_filter_rgb.cpp:16) has invalid interface mode 'ap_none' (lap_filter_rgb_14_4/lap_filter_rgb.cpp:19:1). Stream port only supports ap_hs, ap_fifo and axis modes.
@E [XFORM-801] Stream port 'video_in.V.rgb_pVSync.V' (lap_filter_rgb_14_4/lap_filter_rgb.cpp:16) has invalid interface mode 'ap_none' (lap_filter_rgb_14_4/lap_filter_rgb.cpp:19:1). Stream port only supports ap_hs, ap_fifo and axis modes.
@E [XFORM-801] Stream port 'video_in.V.rgb_pVDE.V' (lap_filter_rgb_14_4/lap_filter_rgb.cpp:16) has invalid interface mode 'ap_none' (lap_filter_rgb_14_4/lap_filter_rgb.cpp:19:1). Stream port only supports ap_hs, ap_fifo and axis modes.
@E [XFORM-801] Stream port 'video_out.V.vid_pData.V' (lap_filter_rgb_14_4/lap_filter_rgb.cpp:16) has invalid interface mode 'ap_none' (lap_filter_rgb_14_4/lap_filter_rgb.cpp:18:1). Stream port only supports ap_hs, ap_fifo and axis modes.
@E [XFORM-801] Stream port 'video_out.V.vid_pHSync.V' (lap_filter_rgb_14_4/lap_filter_rgb.cpp:16) has invalid interface mode 'ap_none' (lap_filter_rgb_14_4/lap_filter_rgb.cpp:18:1). Stream port only supports ap_hs, ap_fifo and axis modes.
@E [XFORM-801] Stream port 'video_out.V.vid_pVSync.V' (lap_filter_rgb_14_4/lap_filter_rgb.cpp:16) has invalid interface mode 'ap_none' (lap_filter_rgb_14_4/lap_filter_rgb.cpp:18:1). Stream port only supports ap_hs, ap_fifo and axis modes.
@E [XFORM-801] Stream port 'video_out.V.vid_pVDE.V' (lap_filter_rgb_14_4/lap_filter_rgb.cpp:16) has invalid interface mode 'ap_none' (lap_filter_rgb_14_4/lap_filter_rgb.cpp:18:1). Stream port only supports ap_hs, ap_fifo and axis modes.
@E [HLS-70] Synthesizability check failed.
//
// lap_filter_rgb.cpp
// 2015/08/21 by marsee
//
#include <stdio.h>
#include <string.h>
#include <ap_int.h>
#include <hls_stream.h>
#include "lap_filter_rgb.h"
int laplacian_fil(int x0y0, int x1y0, int x2y0, int x0y1, int x1y1, int x2y1, int x0y2, int x1y2, int x2y2);
int conv_rgb2y(int rgb);
// Hardware Laplacian filter over an hls::stream video interface.
// Identical state machine to lap_filter_rgb_soft: tracks VSync / porches /
// HSync / active video from the input stream, converts active pixels to
// luma, applies the 3x3 Laplacian, and emits one output word per consumed
// input word until the second VSync period (vsync_assert2).
// lap_fil_enable : 1 = output the filtered value during active video,
//                  0 = pass the input pixel through unchanged.
void lap_filter_rgb(ap_uint<1> lap_fil_enable, hls::stream<vid_in<24> >& video_in, hls::stream<RGB<24> >& video_out){
#pragma HLS INTERFACE ap_ctrl_hs port=return
#pragma HLS INTERFACE ap_hs register port=video_out
#pragma HLS INTERFACE ap_hs register port=video_in
#pragma HLS INTERFACE ap_none register port=lap_fil_enable
enum {idle, vsync_assert, v_back_porch, h_video, h_front_porch, hsync_assert, h_back_porch, v_front_porch, vsync_assert2};
int cstate = idle; // current timing state
int cstateb = idle; // timing state of the previous loop iteration
unsigned int line_buf[2][HORIZONTAL_PIXEL_WIDTH]; // luma of the two previous lines
#pragma HLS array_partition variable=line_buf block factor=2 dim=1
#pragma HLS resource variable=line_buf core=RAM_2P
int pix_mat[3][3]; // 3x3 sliding window; columns shift left each pixel
#pragma HLS array_partition variable=pix_mat complete
int lap_fil_val;
int x=0, y=0; // column / row counters inside the active frame
int y_val; // luma of the current pixel
int val;
int first_h_video;
vid_in<24> pix;
RGB<24> lap;
while(cstate != vsync_assert2){
#pragma HLS PIPELINE rewind
if(!(cstate==h_video && (cstateb==v_back_porch || cstateb==h_back_porch))){ // do not re-read the stream when h_video was entered from v_back_porch or h_back_porch: that pixel was already consumed there
video_in >> pix;
first_h_video = 0;
} else
first_h_video = 1;
cstateb = cstate;
switch (cstate){
case idle :
if (pix.rgb_pVSync)
cstate = vsync_assert;
x = 0; y = 0;
lap_fil_val = pix.rgb_pData;
break;
case vsync_assert :
if (!pix.rgb_pVSync)
cstate = v_back_porch;
lap_fil_val = pix.rgb_pData;
break;
case v_back_porch :
if (pix.rgb_pVDE){
cstate = h_video;
lap_fil_val = 0;
} else
lap_fil_val = pix.rgb_pData;
break;
case h_video :
if (!pix.rgb_pVDE){
cstate = h_front_porch;
lap_fil_val = pix.rgb_pData;
} else {
// Shift the 3x3 window one column to the left.
for (int k=0; k<3; k++){
for (int m=0; m<2; m++){
#pragma HLS UNROLL
pix_mat[k][m] = pix_mat[k][m+1];
}
}
// New right-hand column: the two buffered lines plus this pixel's luma.
pix_mat[0][2] = line_buf[0][x];
pix_mat[1][2] = line_buf[1][x];
y_val = conv_rgb2y((unsigned int)pix.rgb_pData);
pix_mat[2][2] = y_val;
line_buf[0][x] = line_buf[1][x]; // rotate the line history
line_buf[1][x] = y_val;
lap_fil_val = laplacian_fil( pix_mat[0][0], pix_mat[0][1], pix_mat[0][2],
pix_mat[1][0], pix_mat[1][1], pix_mat[1][2],
pix_mat[2][0], pix_mat[2][1], pix_mat[2][2]);
lap_fil_val = (lap_fil_val<<16)+(lap_fil_val<<8)+lap_fil_val; // replicate the 8-bit result into R, G and B
if (x<2 || y<2) // first two rows, and first two columns of each row, hold invalid window data, so force 0
lap_fil_val = 0;
x++;
if (first_h_video) // no stream word was consumed this iteration, so emit no output word
continue;
}
break;
case h_front_porch:
if (pix.rgb_pHSync)
cstate = hsync_assert;
lap_fil_val = pix.rgb_pData;
break;
case hsync_assert :
if (!pix.rgb_pHSync)
cstate = h_back_porch;
lap_fil_val = pix.rgb_pData;
break;
case h_back_porch :
if (pix.rgb_pHSync) // HSync arrived before VDE, so the display period has ended
cstate = v_front_porch;
else if (pix.rgb_pVDE){
cstate = h_video;
y++;
x = 0;
lap_fil_val = 0;
} else
lap_fil_val = pix.rgb_pData;
break;
case v_front_porch :
if (pix.rgb_pVSync)
cstate = vsync_assert2;
break;
}
// Forward timing signals unchanged; substitute the filtered value only
// during active video with the filter enabled.
if (cstate == h_video){
if (lap_fil_enable)
lap.vid_pData = (ap_uint<24>)lap_fil_val;
else
lap.vid_pData = pix.rgb_pData;
}else
lap.vid_pData = pix.rgb_pData;
lap.vid_pHSync = pix.rgb_pHSync;
lap.vid_pVDE = pix.rgb_pVDE;
lap.vid_pVSync = pix.rgb_pVSync;
video_out << lap;
}
}
// RGBからYへの変換
// RGBのフォーマットは、{8'd0, R(8bits), G(8bits), B(8bits)}, 1pixel = 32bits
// 輝度信号Yのみに変換する。変換式は、Y = 0.299R + 0.587G + 0.114B
// "YUVフォーマット及び YUV<->RGB変換"を参考にした。http://vision.kuee.kyoto-u.ac.jp/~hiroaki/firewire/yuv.html
// 2013/09/27 : float を止めて、すべてint にした
// Convert a packed {R,G,B} pixel to its luma (Y) component.
// Integer approximation of Y = 0.299R + 0.587G + 0.114B using coefficients
// scaled by 256: Y = (77*R + 150*G + 29*B) >> 8.
int conv_rgb2y(int rgb){
    const int blue  = rgb         & 0xff;
    const int green = (rgb >> 8)  & 0xff;
    const int red   = (rgb >> 16) & 0xff;
    return (77 * red + 150 * green + 29 * blue) >> 8;
}
// ラプラシアンフィルタ
// x0y0 x1y0 x2y0 -1 -1 -1
// x0y1 x1y1 x2y1 -1 8 -1
// x0y2 x1y2 x2y2 -1 -1 -1
// 3x3 Laplacian kernel:
//   -1 -1 -1
//   -1  8 -1
//   -1 -1 -1
// The response is clamped to the 8-bit range [0, 255].
int laplacian_fil(int x0y0, int x1y0, int x2y0, int x0y1, int x1y1, int x2y1, int x0y2, int x1y2, int x2y2)
{
    int acc = 8 * x1y1;
    acc -= x0y0 + x1y0 + x2y0 + x0y1 + x2y1 + x0y2 + x1y2 + x2y2;
    if (acc < 0)
        return 0;
    if (acc > 255)
        return 255;
    return acc;
}
lap_fil_enable が 1 の時にはラプラシアンフィルタ処理、0 の時には通常のビデオ信号が出力されるはずだ。void lap_filter_rgb(ap_uint<1> lap_fil_enable, ap_uint<24> rgb_pData, ap_uint<1> rgb_pHSync, ap_uint<1> rgb_pVSync, ap_uint<1> rgb_pVDE,
ap_uint<24> *vid_pData, ap_uint<1> *vid_pHSync, ap_uint<1> *vid_pVSync, ap_uint<1> *vid_pVDE);
//
// lap_filter_rgb.cpp
// 2015/08/21 by marsee
//
#include <stdio.h>
#include <string.h>
#include <ap_int.h>
#include "lap_filter_rgb.h"
int laplacian_fil(int x0y0, int x1y0, int x2y0, int x0y1, int x1y1, int x2y1, int x0y2, int x1y2, int x2y2);
int conv_rgb2y(int rgb);
// Signal-level variant of the Laplacian filter: discrete ap_none ports and a
// free-running ap_ctrl_none control interface, intended to sit directly in a
// video signal path sampling the rgb_* inputs every cycle after synthesis.
// NOTE(review): the rgb_* arguments are value parameters, so in C simulation
// they cannot change inside the while loop — this function only behaves as
// intended as synthesized RTL, where the input ports are re-sampled each
// clock; confirm that C co-simulation is not expected to exercise it.
// lap_fil_enable : 1 = drive the filtered value during active video,
//                  0 = pass the input pixel through.
void lap_filter_rgb(ap_uint<1> lap_fil_enable, ap_uint<24> rgb_pData, ap_uint<1> rgb_pHSync, ap_uint<1> rgb_pVSync, ap_uint<1> rgb_pVDE,
ap_uint<24> *vid_pData, ap_uint<1> *vid_pHSync, ap_uint<1> *vid_pVSync, ap_uint<1> *vid_pVDE){
#pragma HLS INTERFACE ap_ctrl_none register port=return
#pragma HLS INTERFACE ap_none register port=vid_pVDE
#pragma HLS INTERFACE ap_none register port=vid_pVSync
#pragma HLS INTERFACE ap_none register port=vid_pHSync
#pragma HLS INTERFACE ap_none register port=vid_pData
#pragma HLS INTERFACE ap_none register port=rgb_pVDE
#pragma HLS INTERFACE ap_none register port=rgb_pVSync
#pragma HLS INTERFACE ap_none register port=rgb_pHSync
#pragma HLS INTERFACE ap_none register port=rgb_pData
#pragma HLS INTERFACE ap_none register port=lap_fil_enable
enum {idle, vsync_assert, v_back_porch, h_video, h_front_porch, hsync_assert, h_back_porch, v_front_porch};
int cstate = idle; // current timing state
unsigned int pix; // latched pixel value for the current active pixel
unsigned int line_buf[2][HORIZONTAL_PIXEL_WIDTH]; // luma of the two previous lines
#pragma HLS array_partition variable=line_buf block factor=2 dim=1
#pragma HLS resource variable=line_buf core=RAM_2P
int pix_mat[3][3]; // 3x3 sliding window; columns shift left each pixel
#pragma HLS array_partition variable=pix_mat complete
int lap_fil_val;
int x, y; // column / row counters inside the active frame
int y_val; // luma of the current pixel
// Initialize the outputs just in case.
*vid_pHSync=0; *vid_pVSync=0; *vid_pVDE=0; *vid_pData=0;
while(cstate != v_front_porch){
#pragma HLS PIPELINE rewind
switch (cstate){
case idle :
if (rgb_pHSync)
*vid_pHSync = 1;
else
*vid_pHSync = 0;
if (rgb_pVSync){
*vid_pVSync = 1;
cstate = vsync_assert;
}
x = 0; y = 0;
break;
case vsync_assert :
if (rgb_pHSync)
*vid_pHSync = 1;
else
*vid_pHSync = 0;
if (!rgb_pVSync){
*vid_pVSync = 0;
cstate = v_back_porch;
}
break;
case v_back_porch :
if (rgb_pHSync)
*vid_pHSync = 1;
else
*vid_pHSync = 0;
if (rgb_pVDE){
pix = (unsigned int)rgb_pData;
cstate = h_video;
}
break;
case h_video :
if (!(x==0 && y==0)) // the very first pixel was already latched on entry to h_video
pix = (unsigned int)rgb_pData;
// Shift the 3x3 window one column to the left.
for (int k=0; k<3; k++){
for (int m=0; m<2; m++){
#pragma HLS UNROLL
pix_mat[k][m] = pix_mat[k][m+1];
}
}
// New right-hand column: the two buffered lines plus this pixel's luma.
pix_mat[0][2] = line_buf[0][x];
pix_mat[1][2] = line_buf[1][x];
y_val = conv_rgb2y(pix);
pix_mat[2][2] = y_val;
line_buf[0][x] = line_buf[1][x]; // rotate the line history
line_buf[1][x] = y_val;
lap_fil_val = laplacian_fil( pix_mat[0][0], pix_mat[0][1], pix_mat[0][2],
pix_mat[1][0], pix_mat[1][1], pix_mat[1][2],
pix_mat[2][0], pix_mat[2][1], pix_mat[2][2]);
lap_fil_val = (lap_fil_val<<16)+(lap_fil_val<<8)+lap_fil_val; // replicate the 8-bit result into R, G and B
if (x<2 || y<2) // first two rows, and first two columns of each row, hold invalid window data, so force 0
lap_fil_val = 0;
if (lap_fil_enable)
*vid_pData = lap_fil_val;
else
*vid_pData = pix;
*vid_pVDE = 1;
if (!rgb_pVDE){
*vid_pVDE = 0;
cstate = h_front_porch;
}
x++;
break;
case h_front_porch:
if (rgb_pHSync){
*vid_pHSync = 1;
cstate = hsync_assert;
}
break;
case hsync_assert :
if (!rgb_pHSync){
*vid_pHSync = 0;
cstate = h_back_porch;
}
break;
case h_back_porch :
if (rgb_pHSync){ // HSync arrived before VDE, so the display period has ended
*vid_pHSync = 1;
cstate = v_front_porch;
} else if (rgb_pVDE){
pix = (unsigned int)rgb_pData;
y++;
cstate = h_video;
}
break;
}
}
}
// RGBからYへの変換
// RGBのフォーマットは、{8'd0, R(8bits), G(8bits), B(8bits)}, 1pixel = 32bits
// 輝度信号Yのみに変換する。変換式は、Y = 0.299R + 0.587G + 0.114B
// "YUVフォーマット及び YUV<->RGB変換"を参考にした。http://vision.kuee.kyoto-u.ac.jp/~hiroaki/firewire/yuv.html
// 2013/09/27 : float を止めて、すべてint にした
// Convert a packed {R,G,B} pixel to its luma (Y) component.
// Integer approximation of Y = 0.299R + 0.587G + 0.114B using coefficients
// scaled by 256: Y = (77*R + 150*G + 29*B) >> 8.
int conv_rgb2y(int rgb){
    const int blue  = rgb         & 0xff;
    const int green = (rgb >> 8)  & 0xff;
    const int red   = (rgb >> 16) & 0xff;
    return (77 * red + 150 * green + 29 * blue) >> 8;
}
// ラプラシアンフィルタ
// x0y0 x1y0 x2y0 -1 -1 -1
// x0y1 x1y1 x2y1 -1 8 -1
// x0y2 x1y2 x2y2 -1 -1 -1
// 3x3 Laplacian kernel:
//   -1 -1 -1
//   -1  8 -1
//   -1 -1 -1
// The response is clamped to the 8-bit range [0, 255].
int laplacian_fil(int x0y0, int x1y0, int x2y0, int x0y1, int x1y1, int x2y1, int x0y2, int x1y2, int x2y2)
{
    int acc = 8 * x1y1;
    acc -= x0y0 + x1y0 + x2y0 + x0y1 + x2y1 + x0y2 + x1y2 + x2y2;
    if (acc < 0)
        return 0;
    if (acc > 255)
        return 255;
    return acc;
}
void lap_filter_rgb(ap_uint<24> rgb_pData, ap_uint<1> rgb_pHSync, ap_uint<1> rgb_pVSync, ap_uint<1>rgb_pVDE,
ap_uint<24> *vid_pData, ap_uint<1> *vid_pHSync, ap_uint<1> *vid_pVSync, ap_uint<1> *vid_pVDE){
/* * laplacian_fitter3.c * * Created on: 2015/08/12 * Author: Masaaki, tu1978 */
#include <stdio.h>
#include <string.h>
#define HORIZONTAL_PIXEL_WIDTH 64
#define VERTICAL_PIXEL_WIDTH 48
//#define width 800
//#define VERTICAL_PIXEL_WIDTH 600
#define ALL_PIXEL_VALUE (HORIZONTAL_PIXEL_WIDTH*VERTICAL_PIXEL_WIDTH)
int laplacian_fil(int x0y0, int x1y0, int x2y0, int x0y1, int x1y1, int x2y1, int x0y2, int x1y2, int x2y2);
int conv_rgb2y(int rgb);
// Apply the 3x3 Laplacian to one line of the frame.
// lap_buf  : output line (8-bit result replicated into R, G and B)
// fl/sl/tl : first / second / third source lines of the window (RGB words)
// Keeps a prev/current/next luma column per line so each source pixel is
// converted to luma only once per output pixel position.
void filter_line(unsigned int* lap_buf, unsigned int* fl, unsigned int* sl, unsigned int* tl){
int lap_fil_val;
int prev[3],current[3],next[3]; // [0]=1st line, [1]=2nd line, [2]=3rd line; prev = 1 pixel back, current = this pixel, next = next pixel
#pragma HLS array_partition variable=prev complete dim=0
#pragma HLS array_partition variable=current complete dim=0
#pragma HLS array_partition variable=next complete dim=0
int x;
// Pre-load the first 'next' column before entering the loop.
next[0] = conv_rgb2y(fl[0]);
next[1] = conv_rgb2y(sl[0]);
next[2] = conv_rgb2y(tl[0]);
for (x = 0; x < HORIZONTAL_PIXEL_WIDTH; x++){
#pragma HLS pipeline
if (x == 0 || x == HORIZONTAL_PIXEL_WIDTH-1){
// Horizontal border: output 0 but keep the window advancing.
// At the right border index 1 is read as harmless filler (x+1
// would be out of range there).
lap_fil_val = 0;
current[0] = next[0];
next[0] = conv_rgb2y(fl[1]);
current[1] = next[1];
next[1] = conv_rgb2y(sl[1]);
current[2] = next[2];
next[2] = conv_rgb2y(tl[1]);
}else{
prev[0] = current[0];
current[0] = next[0];
next[0] = conv_rgb2y(fl[x+1]);
prev[1] = current[1];
current[1] = next[1];
next[1] = conv_rgb2y(sl[x+1]);
prev[2] = current[2];
current[2] = next[2];
next[2] = conv_rgb2y(tl[x+1]);
lap_fil_val = laplacian_fil(prev[0], current[0], next[0],
prev[1], current[1], next[1],
prev[2], current[2], next[2]);
}
lap_buf[x] = (lap_fil_val<<16)+(lap_fil_val<<8)+lap_fil_val; // replicate the result into R, G and B
}
}
//int lap_filter_axim(int cam_fb[ALL_PIXEL_VALUE], int lap_fb[ALL_PIXEL_VALUE], int width, int height)
// Laplacian filter over AXI4-Master frame buffers; line-buffered version
// that delegates the per-line work to filter_line().
// cam_fb : input frame (packed RGB, one int per pixel)
// lap_fb : output frame (grayscale value replicated into R, G and B)
// width / height : frame dimensions in pixels
// Rows 1..VERTICAL_PIXEL_WIDTH-2 are filtered; the first and the last
// (height-1) rows are cleared to 0. Always returns 0.
int lap_filter_axim(int *cam_fb, int *lap_fb, int width, int height)
{
#pragma HLS INTERFACE m_axi port=cam_fb depth=800 offset=slave
#pragma HLS INTERFACE m_axi port=lap_fb depth=800 offset=slave
#pragma HLS INTERFACE s_axilite port=return
    // Three rotating line buffers, one RGB line each.
    // NOTE(review): rows hold HORIZONTAL_PIXEL_WIDTH ints, but the memcpys
    // below copy width*sizeof(int) bytes — this overruns if width exceeds
    // HORIZONTAL_PIXEL_WIDTH; confirm the caller's width.
    unsigned int line_buf[3][HORIZONTAL_PIXEL_WIDTH];
    // BUG FIX: the pragma previously read "variable line_buf" (missing '='),
    // so the ARRAY_PARTITION directive did not correctly name its target.
#pragma HLS array_partition variable=line_buf block factor=3 dim=1
#pragma HLS resource variable=line_buf core=RAM_2P
    unsigned int lap_buf[HORIZONTAL_PIXEL_WIDTH]; // one output line
    int x, y;
    int a;
    int fl, sl, tl; // buffer roles: first / second / third row of the window
    int line_sel;   // rotates 0,1,2,3 then 1,2,3,... selecting the fl/sl/tl mapping

    // Convert RGB to luma and apply the Laplacian, one line at a time.
    for (y=1, line_sel=0; y<VERTICAL_PIXEL_WIDTH-1; y++){
        // Rotation table: y=1 -> 012, y=2 -> 120, y=3 -> 201, y=4 -> 012, ...
        switch(line_sel){
        case 1 :
            fl = 0; sl = 1; tl = 2;
            break;
        case 2 :
            fl = 1; sl = 2; tl = 0;
            break;
        case 3 :
            fl = 2; sl = 0; tl = 1;
            break;
        default :
            fl = 0; sl = 1; tl = 2;
        }
        if (y == 1){
#ifndef __SYNTHESIS__
            printf("copy 3 lines\n");
#endif
            // First filtered line: prime all three line buffers.
            for (a=0; a<3; a++){
                memcpy(line_buf[a], (unsigned int*)(&cam_fb[a*(width)]), width*sizeof(int));
            }
        }else{ // Not the first line: only the new third row needs loading.
            memcpy(line_buf[tl], (unsigned int*)(&cam_fb[(y+1)*(width)]), width*sizeof(int));
        }
        filter_line(lap_buf, line_buf[fl], line_buf[sl], line_buf[tl]);
#ifndef __SYNTHESIS__
        printf("write back:%d\n", y);
#endif
        memcpy((unsigned int*)(&lap_fb[y*width]), (unsigned int*)lap_buf, width*sizeof(int));
        line_sel++;
        if (line_sel > 3){
            line_sel = 1;
        }
    }
    // The first and last rows carry no valid window data: clear them.
    for (x = 0; x < HORIZONTAL_PIXEL_WIDTH; x++)
        lap_buf[x] = 0;
    memcpy((unsigned int*)&lap_fb[0], (unsigned int*)lap_buf, width*sizeof(int));
    memcpy((unsigned int*)&lap_fb[width*(height-1)], (unsigned int*)lap_buf, width*sizeof(int));
    return(0);
}
// RGBからYへの変換
// RGBのフォーマットは、{8'd0, R(8bits), G(8bits), B(8bits)}, 1pixel = 32bits
// 輝度信号Yのみに変換する。変換式は、Y = 0.299R + 0.587G + 0.114B
// "YUVフォーマット及び YUV<->RGB変換"を参考にした。http://vision.kuee.kyoto-u.ac.jp/~hiroaki/firewire/yuv.html
// 2013/09/27 : float を止めて、すべてint にした
// Convert one packed {8'd0, R, G, B} pixel (1 pixel = 32 bits) to luminance Y.
// Y = 0.299R + 0.587G + 0.114B, evaluated in fixed point with the coefficients
// pre-scaled by 256 (see "YUV format and YUV<->RGB conversion",
// http://vision.kuee.kyoto-u.ac.jp/~hiroaki/firewire/yuv.html).
int conv_rgb2y(int rgb){
	const int blue  = rgb         & 0xff;
	const int green = (rgb >> 8)  & 0xff;
	const int red   = (rgb >> 16) & 0xff;
	// Weighted sum, then >>8 to undo the 256x coefficient scaling.
	return (77*red + 150*green + 29*blue) >> 8;
}
// ラプラシアンフィルタ
// x0y0 x1y0 x2y0 -1 -1 -1
// x0y1 x1y1 x2y1 -1 8 -1
// x0y2 x1y2 x2y2 -1 -1 -1
// 3x3 Laplacian kernel: center weight +8, every neighbour -1.
//  x0y0 x1y0 x2y0     -1 -1 -1
//  x0y1 x1y1 x2y1     -1  8 -1
//  x0y2 x1y2 x2y2     -1 -1 -1
// The response is saturated to the 8-bit pixel range [0, 255].
int laplacian_fil(int x0y0, int x1y0, int x2y0, int x0y1, int x1y1, int x2y1, int x0y2, int x1y2, int x2y2)
{
	int acc = 8 * x1y1;
	acc -= x0y0 + x1y0 + x2y0; // top row
	acc -= x0y1 + x2y1;        // left and right neighbours
	acc -= x0y2 + x1y2 + x2y2; // bottom row
	if (acc > 255)
		return 255;
	if (acc < 0)
		return 0;
	return acc;
}
// laplacian_filter1.c
// lap_filter_axim()
#include <stdio.h>
#include <string.h>
#define HORIZONTAL_PIXEL_WIDTH 64
#define VERTICAL_PIXEL_WIDTH 48
//#define width 800
//#define VERTICAL_PIXEL_WIDTH 600
#define ALL_PIXEL_VALUE (HORIZONTAL_PIXEL_WIDTH*VERTICAL_PIXEL_WIDTH)
int laplacian_fil(int x0y0, int x1y0, int x2y0, int x0y1, int x1y1, int x2y1, int x0y2, int x1y2, int x2y2);
int conv_rgb2y(int rgb);
//int lap_filter_axim(int cam_fb[ALL_PIXEL_VALUE], int lap_fb[ALL_PIXEL_VALUE], int width, int height)
int lap_filter_axim(int *cam_fb, int *lap_fb, int width, int height)
{
#pragma HLS INTERFACE m_axi port=cam_fb depth=800 offset=slave
#pragma HLS INTERFACE m_axi port=lap_fb depth=800 offset=slave
#pragma HLS INTERFACE s_axilite port=return
unsigned int line_buf[3][ALL_PIXEL_VALUE];
unsigned int lap_buf[ALL_PIXEL_VALUE];
int x, y, i;
int lap_fil_val;
int a, b;
int fl, sl, tl;
int line_sel;
// RGB値をY(輝度成分)のみに変換し、ラプラシアンフィルタを掛けた。
for (y=0, line_sel=0; y<height; y++){
// 最初のライン, y=1 012, y=2 120, y=3 201, y=4 012
switch(line_sel){
case 1 :
fl = 0; sl = 1; tl = 2;
break;
case 2 :
fl = 1; sl = 2; tl = 0;
break;
case 3 :
fl = 2; sl = 0; tl = 1;
break;
default :
fl = 0; sl = 1; tl = 2;
}
for (x=0; x<width; x++){
if (y==0 || y==height-1){ // 縦の境界の時の値は0とする
lap_fil_val = 0;
}else if (x==0 || x==width-1){ // 横の境界の時も値は0とする
lap_fil_val = 0;
}else{
if (x == 1){ // ラインの最初でラインの画素を読み出す
if (y == 1){ // 最初のラインでは3ライン分の画素を読み出す
for (a=0; a<3; a++){ // 3ライン分
memcpy(&line_buf[a][0], (const int*)(&cam_fb[a*(width)]), width*sizeof(int));
for (b=0; b<width; b++){
#pragma HLS PIPELINE
// ライン
line_buf[a][b] = conv_rgb2y(line_buf[a][b]); // カラーから白黒へ
}
}
} else { // 最初のラインではないので、1ラインだけ読み込む。すでに他の2ラインは読み込まれている
memcpy(line_buf[tl], (const int*)(&cam_fb[(y+1)*(width)]), width*sizeof(int));
for (b=0; b<width; b++){
#pragma HLS PIPELINE
// ライン
line_buf[tl][b] = conv_rgb2y(line_buf[tl][b]); // カラーから白黒へ
}
}
}
lap_fil_val = laplacian_fil(line_buf[fl][x-1], line_buf[fl][x], line_buf[fl][x+1], line_buf[sl][x-1], line_buf[sl][x], line_buf[sl][x+1], line_buf[tl][x-1], line_buf[tl][x], line_buf[tl][x+1]);
}
lap_buf[x] = (lap_fil_val<<16)+(lap_fil_val<<8)+lap_fil_val; // RGB同じ値を入れる
}
memcpy(&(lap_fb[y*width]), (const int*)(&lap_buf[0]), width*sizeof(int));
line_sel++;
if (line_sel > 3){
line_sel = 1;
}
}
return(0);
}
// RGBからYへの変換
// RGBのフォーマットは、{8'd0, R(8bits), G(8bits), B(8bits)}, 1pixel = 32bits
// 輝度信号Yのみに変換する。変換式は、Y = 0.299R + 0.587G + 0.114B
// "YUVフォーマット及び YUV<->RGB変換"を参考にした。http://vision.kuee.kyoto-u.ac.jp/~hiroaki/firewire/yuv.html
// 2013/09/27 : float を止めて、すべてint にした
// Luminance (Y) from a packed {0,R,G,B} pixel using 8.8 fixed-point weights:
// Y = (77*R + 150*G + 29*B) >> 8, i.e. Y = 0.299R + 0.587G + 0.114B.
int conv_rgb2y(int rgb){
	int luma;
	luma  =  77 * ((rgb >> 16) & 0xff); // red contribution
	luma += 150 * ((rgb >> 8)  & 0xff); // green contribution
	luma +=  29 * (rgb & 0xff);         // blue contribution
	return luma >> 8;                   // drop the 256x scaling
}
// ラプラシアンフィルタ
// x0y0 x1y0 x2y0 -1 -1 -1
// x0y1 x1y1 x2y1 -1 8 -1
// x0y2 x1y2 x2y2 -1 -1 -1
// Apply the 3x3 Laplacian kernel (-1 everywhere, +8 at the center) and
// saturate the response to the 8-bit range [0, 255].
int laplacian_fil(int x0y0, int x1y0, int x2y0, int x0y1, int x1y1, int x2y1, int x0y2, int x1y2, int x2y2)
{
	// 9*center - (sum of all nine taps) == 8*center - (sum of the eight neighbours)
	int total = x0y0 + x1y0 + x2y0 + x0y1 + x1y1 + x2y1 + x0y2 + x1y2 + x2y2;
	int v = 9 * x1y1 - total;
	return v < 0 ? 0 : (v > 255 ? 255 : v);
}
ということで、直接ハードウェア化する関数にINTERFACEディレクティブを書くのは禁じ手のようだ。#pragma SDS data access_pattern(A:SEQUENTIAL)属性を使用し、sdscc で自動的に配列転送が AXI4-Stream チャネルにマップされるようにします。
// Testbench of laplacian_filter.c
// BMPデータをハードウェアとソフトウェアで、ラプラシアン・フィルタを掛けて、それを比較する
//
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
#include "sds_lib.h"
#include "bmp_header.h"
#define HORIZONTAL_PIXEL_WIDTH 64
#define VERTICAL_PIXEL_WIDTH 48
//#define HORIZONTAL_PIXEL_WIDTH 800
//#define VERTICAL_PIXEL_WIDTH 600
#define ALL_PIXEL_VALUE (HORIZONTAL_PIXEL_WIDTH*VERTICAL_PIXEL_WIDTH)
int laplacian_fil_soft(int x0y0, int x1y0, int x2y0, int x0y1, int x1y1, int x2y1, int x0y2, int x1y2, int x2y2);
int conv_rgb2y_soft(int rgb);
int lap_filter_axim(int *cam_fb, int *lap_fb, int width, int height); // hardware
//int lap_filter_axim(int cam_fb[ALL_PIXEL_VALUE], int lap_fb[ALL_PIXEL_VALUE], int width, int height); // hardware
void laplacian_filter_soft(volatile int *cam_fb, volatile int *lap_fb, long width, long height); // software
// SDSoC testbench entry point: runs the Laplacian filter in hardware
// (lap_filter_axim, buffers from sds_alloc so the SDSoC data movers can reach
// them) and in software (plain malloc buffers), compares the two results pixel
// by pixel, reports the elapsed time of each, and writes the hardware result
// to temp_lap.bmp.
// Returns 0 on success, 1 on HW/SW mismatch; exits on file/allocation errors.
// Fix: removed the unused local 'temp'.
int main()
{
int *s, *h; // cursors over the SW / HW result buffers during comparison
long x, y;
BITMAPFILEHEADER bmpfhr; // BMP file header (for read)
BITMAPINFOHEADER bmpihr; // BMP info header (for read)
FILE *fbmpr, *fbmpw;
int *hw_rd_bmp, *sw_rd_bmp, *hw_lapd, *sw_lapd;
int blue, green, red;
struct timeval start_time_hw, end_time_hw;
struct timeval start_time_sw, end_time_sw;
if ((fbmpr = fopen("test.bmp", "rb")) == NULL){ // open test.bmp
fprintf(stderr, "Can't open test.bmp by binary read mode\n");
exit(1);
}
// Read the BMP header field by field (avoids struct padding between fields).
// NOTE(review): sizeof(long) matches the 4-byte on-disk fields only on ILP32
// targets such as 32-bit ARM -- confirm the target ABI before porting.
fread(&bmpfhr.bfType, sizeof(char), 2, fbmpr);
fread(&bmpfhr.bfSize, sizeof(long), 1, fbmpr);
fread(&bmpfhr.bfReserved1, sizeof(short), 1, fbmpr);
fread(&bmpfhr.bfReserved2, sizeof(short), 1, fbmpr);
fread(&bmpfhr.bfOffBits, sizeof(long), 1, fbmpr);
fread(&bmpihr, sizeof(BITMAPINFOHEADER), 1, fbmpr);
// Allocate the pixel buffers (hardware-side ones via sds_alloc).
if ((hw_rd_bmp =(int *)sds_alloc(sizeof(int) * (bmpihr.biWidth * bmpihr.biHeight))) == NULL){
fprintf(stderr, "Can't allocate rd_bmp memory\n");
exit(1);
}
if ((sw_rd_bmp =(int *)malloc(sizeof(int) * (bmpihr.biWidth * bmpihr.biHeight))) == NULL){
fprintf(stderr, "Can't allocate rd_bmp memory\n");
exit(1);
}
if ((hw_lapd =(int *)sds_alloc(sizeof(int) * (bmpihr.biWidth * bmpihr.biHeight))) == NULL){
fprintf(stderr, "Can't allocate hw_lapd memory\n");
exit(1);
}
//rd_bmp = (int *)sds_mmap((void *)(0x80000000), sizeof(int) * (bmpihr.biWidth * bmpihr.biHeight), rd_bmp);
//hw_lapd = (int *)sds_mmap((void *)(0x80000000+(ALL_PIXEL_VALUE*sizeof(int))), sizeof(int) * (bmpihr.biWidth * bmpihr.biHeight), hw_lapd);
if ((sw_lapd =(int *)malloc(sizeof(int) * (bmpihr.biWidth * bmpihr.biHeight))) == NULL){
fprintf(stderr, "Can't allocate sw_lapd memory\n");
exit(1);
}
// Copy the BMP pixels into the input buffers; BMP stores rows bottom-up,
// so flip vertically while packing B/G/R into one {0,R,G,B} int per pixel.
for (y=0; y<bmpihr.biHeight; y++){
for (x=0; x<bmpihr.biWidth; x++){
blue = fgetc(fbmpr);
green = fgetc(fbmpr);
red = fgetc(fbmpr);
hw_rd_bmp[((bmpihr.biHeight-1)-y)*bmpihr.biWidth+x] = (blue & 0xff) | ((green & 0xff)<<8) | ((red & 0xff)<<16);
sw_rd_bmp[((bmpihr.biHeight-1)-y)*bmpihr.biWidth+x] = (blue & 0xff) | ((green & 0xff)<<8) | ((red & 0xff)<<16);
}
}
fclose(fbmpr);
//lap_filter_axim(rd_bmp, hw_lapd, (int)bmpihr.biWidth, (int)bmpihr.biHeight); // dummy run (warm the cache)
gettimeofday(&start_time_hw, NULL);
lap_filter_axim(hw_rd_bmp, hw_lapd, (int)bmpihr.biWidth, (int)bmpihr.biHeight); // hardware Laplacian filter
gettimeofday(&end_time_hw, NULL);
gettimeofday(&start_time_sw, NULL);
laplacian_filter_soft(sw_rd_bmp, sw_lapd, bmpihr.biWidth, bmpihr.biHeight); // software Laplacian filter
gettimeofday(&end_time_sw, NULL);
// Compare the hardware and software results pixel by pixel.
for (y=0, h=hw_lapd, s=sw_lapd; y<bmpihr.biHeight; y++){
for (x=0; x<bmpihr.biWidth; x++){
if (*h != *s){
printf("ERROR HW and SW results mismatch x = %ld, y = %ld, HW = %d, SW = %d\n", x, y, *h, *s);
return(1);
} else {
h++;
s++;
}
}
}
printf("Success HW and SW results match\n");
// Print elapsed times; borrow a second when tv_usec wrapped.
if (end_time_hw.tv_usec < start_time_hw.tv_usec) {
printf("lap_filter2 HW time = %ld.%06ld sec\n", end_time_hw.tv_sec - start_time_hw.tv_sec - 1, 1000000 + end_time_hw.tv_usec - start_time_hw.tv_usec);
}
else {
printf("lap_filter2 HW time = %ld.%06ld sec\n", end_time_hw.tv_sec - start_time_hw.tv_sec, end_time_hw.tv_usec - start_time_hw.tv_usec);
}
if (end_time_sw.tv_usec < start_time_sw.tv_usec) {
printf("lap_filter2 SW time = %ld.%06ld sec\n", end_time_sw.tv_sec - start_time_sw.tv_sec - 1, 1000000 + end_time_sw.tv_usec - start_time_sw.tv_usec);
}
else {
printf("lap_filter2 SW time = %ld.%06ld sec\n", end_time_sw.tv_sec - start_time_sw.tv_sec, end_time_sw.tv_usec - start_time_sw.tv_usec);
}
// Write the hardware result out to temp_lap.bmp.
if ((fbmpw=fopen("temp_lap.bmp", "wb")) == NULL){
fprintf(stderr, "Can't open temp_lap.bmp by binary write mode\n");
exit(1);
}
// Write the BMP headers back out, field by field.
fwrite(&bmpfhr.bfType, sizeof(char), 2, fbmpw);
fwrite(&bmpfhr.bfSize, sizeof(long), 1, fbmpw);
fwrite(&bmpfhr.bfReserved1, sizeof(short), 1, fbmpw);
fwrite(&bmpfhr.bfReserved2, sizeof(short), 1, fbmpw);
fwrite(&bmpfhr.bfOffBits, sizeof(long), 1, fbmpw);
fwrite(&bmpihr, sizeof(BITMAPINFOHEADER), 1, fbmpw);
// Write the RGB data, flipping back to the BMP bottom-up row order.
for (y=0; y<bmpihr.biHeight; y++){
for (x=0; x<bmpihr.biWidth; x++){
blue = hw_lapd[((bmpihr.biHeight-1)-y)*bmpihr.biWidth+x] & 0xff;
green = (hw_lapd[((bmpihr.biHeight-1)-y)*bmpihr.biWidth+x] >> 8) & 0xff;
red = (hw_lapd[((bmpihr.biHeight-1)-y)*bmpihr.biWidth+x]>>16) & 0xff;
fputc(blue, fbmpw);
fputc(green, fbmpw);
fputc(red, fbmpw);
}
}
fclose(fbmpw);
// Paired frees: sds_alloc -> sds_free, malloc -> free.
if (hw_rd_bmp) sds_free(hw_rd_bmp);
if (sw_rd_bmp) free(sw_rd_bmp);
if (hw_lapd) sds_free(hw_lapd);
if (sw_lapd) free(sw_lapd);
return(0);
}
// Software reference implementation of the Laplacian filter.
// cam_fb : packed {0,R,G,B} input frame; lap_fb : output frame (the saturated
// filter response is replicated into R, G and B). The first/last rows and
// columns are forced to 0 (no full 3x3 neighbourhood there).
// Fix: the former lap_buf line buffer was allocated and freed but never used;
// the dead allocation has been removed.
void laplacian_filter_soft(volatile int *cam_fb, volatile int *lap_fb, long width, long height)
{
unsigned int **line_buf;
int x, y, i;
int lap_fil_val;
int a, b;
int fl, sl, tl;
// Allocate the first dimension of line_buf.
if ((line_buf =(unsigned int **)malloc(sizeof(unsigned int *) * 3)) == NULL){
fprintf(stderr, "Can't allocate line_buf[3][]\n");
exit(1);
}
// Allocate one line per row of the 3-line window.
for (i=0; i<3; i++){
if ((line_buf[i]=(unsigned int *)malloc(sizeof(unsigned int) * width)) == NULL){
fprintf(stderr, "Can't allocate line_buf[%d]\n", i);
exit(1);
}
}
// Convert RGB to luminance and apply the Laplacian filter.
for (y=0; y<height; y++){
for (x=0; x<width; x++){
if (y==0 || y==height-1){ // top/bottom border -> 0
lap_fil_val = 0;
}else if (x==0 || x==width-1){ // left/right border -> 0
lap_fil_val = 0;
}else{
if (y == 1 && x == 1){ // very first interior pixel: prefetch the first 2 lines
for (a=0; a<2; a++){ // 2 lines
for (b=0; b<width; b++){ // one line
line_buf[a][b] = cam_fb[(a*width)+b];
line_buf[a][b] = conv_rgb2y_soft(line_buf[a][b]);
}
}
}
if (x == 1) { // start of a line: prefetch the first 2 pixels of line y+1
for (b=0; b<2; b++){
line_buf[(y+1)%3][b] = cam_fb[((y+1)*width)+b];
// (y+1)%3 recycles the slot no longer needed: y=2 -> line[0], y=3 -> line[1], y=4 -> line[2]
line_buf[(y+1)%3][b] = conv_rgb2y_soft(line_buf[(y+1)%3][b]);
}
}
// Read one pixel ahead while running the Laplacian filter.
line_buf[(y+1)%3][x+1] = cam_fb[((y+1)*width)+(x+1)];
// (y+1)%3 recycles the slot no longer needed: y=2 -> line[0], y=3 -> line[1], y=4 -> line[2]
line_buf[(y+1)%3][x+1] = conv_rgb2y_soft(line_buf[(y+1)%3][x+1]);
fl = (y-1)%3; // first window line; y=1 -> 012, y=2 -> 120, y=3 -> 201, y=4 -> 012
sl = y%3; // second window line
tl = (y+1)%3; // third window line
lap_fil_val = laplacian_fil_soft(line_buf[fl][x-1], line_buf[fl][x], line_buf[fl][x+1], line_buf[sl][x-1], line_buf[sl][x], line_buf[sl][x+1], line_buf[tl][x-1], line_buf[tl][x], line_buf[tl][x+1]);
}
// Write the filtered value, gray replicated into R, G and B.
lap_fb[(y*width)+x] = (lap_fil_val<<16)+(lap_fil_val<<8)+lap_fil_val ;
}
}
for (i=0; i<3; i++)
if (line_buf[i]) free(line_buf[i]);
if (line_buf) free(line_buf);
}
// RGBからYへの変換
// RGBのフォーマットは、{8'd0, R(8bits), G(8bits), B(8bits)}, 1pixel = 32bits
// 輝度信号Yのみに変換する。変換式は、Y = 0.299R + 0.587G + 0.114B
// "YUVフォーマット及び YUV<->RGB変換"を参考にした。http://vision.kuee.kyoto-u.ac.jp/~hiroaki/firewire/yuv.html
// 2013/09/27 : float を止めて、すべてint にした
// Software twin of conv_rgb2y: packed {0,R,G,B} pixel -> 8-bit luminance,
// Y = 0.299R + 0.587G + 0.114B evaluated as (77R + 150G + 29B) >> 8.
int conv_rgb2y_soft(int rgb){
	// Per-channel fixed-point weights (x256), indexed by byte position: B, G, R.
	const int weights[3] = {29, 150, 77};
	int ch, acc = 0;
	for (ch = 0; ch < 3; ch++)
		acc += weights[ch] * ((rgb >> (8 * ch)) & 0xff);
	return acc >> 8; // undo the 256x scaling
}
// ラプラシアンフィルタ
// x0y0 x1y0 x2y0 -1 -1 -1
// x0y1 x1y1 x2y1 -1 8 -1
// x0y2 x1y2 x2y2 -1 -1 -1
// Software twin of laplacian_fil: 3x3 kernel with -1 on the eight neighbours
// and +8 at the center, saturated to [0, 255].
int laplacian_fil_soft(int x0y0, int x1y0, int x2y0, int x0y1, int x1y1, int x2y1, int x0y2, int x1y2, int x2y2)
{
	int v = 8 * x1y1;
	v -= x0y0; v -= x1y0; v -= x2y0;
	v -= x0y1;            v -= x2y1;
	v -= x0y2; v -= x1y2; v -= x2y2;
	// Saturate to the 8-bit pixel range.
	if (v > 255) v = 255;
	if (v < 0) v = 0;
	return v;
}
// laplacian_filter2.c
// lap_filter_axim()
#include <stdio.h>
#include <string.h>
#define HORIZONTAL_PIXEL_WIDTH 64
#define VERTICAL_PIXEL_WIDTH 48
//#define HORIZONTAL_PIXEL_WIDTH 800
//#define VERTICAL_PIXEL_WIDTH 600
#define ALL_PIXEL_VALUE (HORIZONTAL_PIXEL_WIDTH*VERTICAL_PIXEL_WIDTH)
int laplacian_fil(int x0y0, int x1y0, int x2y0, int x0y1, int x1y1, int x2y1, int x0y2, int x1y2, int x2y2);
int conv_rgb2y(int rgb);
//int lap_filter_axim(int cam_fb[ALL_PIXEL_VALUE], int lap_fb[ALL_PIXEL_VALUE], int width, int height)
int lap_filter_axim(int *cam_fb, int *lap_fb, int width, int height)
{
#pragma HLS INTERFACE m_axi port=cam_fb depth=800 offset=slave
#pragma HLS INTERFACE m_axi port=lap_fb depth=800 offset=slave
#pragma HLS INTERFACE s_axilite port=return
unsigned int line_buf[3][ALL_PIXEL_VALUE];
unsigned int lap_buf[ALL_PIXEL_VALUE];
int x, y, i;
int lap_fil_val;
int a, b;
int fl, sl, tl;
// RGB値をY(輝度成分)のみに変換し、ラプラシアンフィルタを掛けた。
for (y=0; y<height; y++){
for (x=0; x<width; x++){
if (y==0 || y==height-1){ // 縦の境界の時の値は0とする
lap_fil_val = 0;
}else if (x==0 || x==width-1){ // 横の境界の時も値は0とする
lap_fil_val = 0;
}else{
if (y == 1 && x == 1){ // 最初のラインの最初のピクセルでは2ライン分の画素を読み出す
for (a=0; a<2; a++){ // 2ライン分
for (b=0; b<width; b++){ // ライン
line_buf[a][b] = cam_fb[(a*width)+b];
line_buf[a][b] = conv_rgb2y(line_buf[a][b]);
}
}
}
if (x == 1) { // ラインの最初なので、2つのピクセルを読み込む
for (b=0; b<2; b++){ // ライン
line_buf[(y+1)%3][b] = cam_fb[((y+1)*width)+b];
// (y+1)%3 は、使用済みのラインがに読み込む、y=2 の時 line[0], y=3の時 line[1], y=4の時 line[2]
line_buf[(y+1)%3][b] = conv_rgb2y(line_buf[(y+1)%3][b]);
}
}
// 1つのピクセルを読み込みながらラプラシアン・フィルタを実行する
line_buf[(y+1)%3][x+1] = cam_fb[((y+1)*width)+(x+1)];
// (y+1)%3 は、使用済みのラインがに読み込む、y=2 の時 line[0], y=3の時 line[1], y=4の時 line[2]
line_buf[(y+1)%3][x+1] = conv_rgb2y(line_buf[(y+1)%3][x+1]);
fl = (y-1)%3; // 最初のライン, y=1 012, y=2 120, y=3 201, y=4 012
sl = y%3; // 2番めのライン
tl = (y+1)%3; // 3番目のライン
lap_fil_val = laplacian_fil(line_buf[fl][x-1], line_buf[fl][x], line_buf[fl][x+1], line_buf[sl][x-1], line_buf[sl][x], line_buf[sl][x+1], line_buf[tl][x-1], line_buf[tl][x], line_buf[tl][x+1]);
}
// ラプラシアンフィルタ・データの書き込み
lap_fb[(y*width)+x] = (lap_fil_val<<16)+(lap_fil_val<<8)+lap_fil_val ;
}
}
return 0;
}
// RGBからYへの変換
// RGBのフォーマットは、{8'd0, R(8bits), G(8bits), B(8bits)}, 1pixel = 32bits
// 輝度信号Yのみに変換する。変換式は、Y = 0.299R + 0.587G + 0.114B
// "YUVフォーマット及び YUV<->RGB変換"を参考にした。http://vision.kuee.kyoto-u.ac.jp/~hiroaki/firewire/yuv.html
// 2013/09/27 : float を止めて、すべてint にした
// Convert a packed {8'd0, R, G, B} pixel to its luminance component.
// Y = 0.299R + 0.587G + 0.114B; the weights are pre-scaled by 256 so the whole
// conversion stays in integer arithmetic.
int conv_rgb2y(int rgb){
	int r = (rgb >> 16) & 0xff;
	int g = (rgb >> 8) & 0xff;
	int b = rgb & 0xff;
	int scaled = 77*r + 150*g + 29*b; // fixed-point Y, 256x too large
	scaled >>= 8;                     // back to the 0..255 range
	return scaled;
}
// ラプラシアンフィルタ
// x0y0 x1y0 x2y0 -1 -1 -1
// x0y1 x1y1 x2y1 -1 8 -1
// x0y2 x1y2 x2y2 -1 -1 -1
// 3x3 Laplacian: response = 8*center - (sum of the eight neighbours),
// saturated to the 8-bit pixel range [0, 255].
int laplacian_fil(int x0y0, int x1y0, int x2y0, int x0y1, int x1y1, int x2y1, int x0y2, int x1y2, int x2y2)
{
	int neighbours = x0y0 + x1y0 + x2y0 + x0y1 + x2y1 + x0y2 + x1y2 + x2y2;
	int response = 8 * x1y1 - neighbours;
	// Clamp negative responses to 0 and overshoots to 255.
	if (response < 0)
		response = 0;
	else if (response > 255)
		response = 255;
	return response;
}
U-Boot 2014.07-00005-gd2f850a (Jan 27 2015 - 13:33:33)
Board: Xilinx Zynq
I2C: ready
DRAM: ECC disabled 512 MiB
MMC: zynq_sdhci: 0
SF: Detected S25FL128S_64K with page size 256 Bytes, erase size 64 KiB, total 16 MiB
*** Warning - bad CRC, using default environment
In: serial
Out: serial
Err: serial
Net: Gem.e000b000
Hit any key to stop autoboot: 0
Device: zynq_sdhci
Manufacturer ID: 74
OEM: 4a45
Name: USD
Tran Speed: 50000000
Rd Block Len: 512
SD version 3.0
High Capacity: Yes
Capacity: 7.5 GiB
Bus Width: 4-bit
reading uEnv.txt
** Unable to read file uEnv.txt **
Copying Linux from SD to RAM...
reading uImage
3488184 bytes read in 344 ms (9.7 MiB/s)
reading devicetree.dtb
8438 bytes read in 52 ms (158.2 KiB/s)
reading uramdisk.image.gz
5044192 bytes read in 472 ms (10.2 MiB/s)
## Booting kernel from Legacy Image at 02080000 ...
Image Name: Linux-3.19.0-xilinx-apf
Image Type: ARM Linux Kernel Image (uncompressed)
Data Size: 3488120 Bytes = 3.3 MiB
Load Address: 00008000
Entry Point: 00008000
Verifying Checksum ... OK
## Loading init Ramdisk from Legacy Image at 04000000 ...
Image Name:
Image Type: ARM Linux RAMDisk Image (gzip compressed)
Data Size: 5044128 Bytes = 4.8 MiB
Load Address: 00000000
Entry Point: 00000000
Verifying Checksum ... OK
## Flattened Device Tree blob at 02000000
Booting using the fdt blob at 0x2000000
Loading Kernel Image ... OK
Loading Ramdisk to 1e855000, end 1ed247a0 ... OK
Loading Device Tree to 1e84f000, end 1e8540f5 ... OK
Starting kernel ...
Booting Linux on physical CPU 0x0
Linux version 3.19.0-xilinx-apf (yogeshc@xsjpsgv105) (gcc version 4.9.1 (Sourcery CodeBench Lite 2014.11-30) ) #22 SMP PREEMPT Mon Jul 6 17:59:10 PDT 2015
CPU: ARMv7 Processor [413fc090] revision 0 (ARMv7), cr=18c5387d
CPU: PIPT / VIPT nonaliasing data cache, VIPT aliasing instruction cache
Machine model: Zynq ZYBO Development Board
cma: Reserved 256 MiB at 0x0e800000
Memory policy: Data cache writealloc
PERCPU: Embedded 9 pages/cpu @5fb5d000 s8128 r8192 d20544 u36864
Built 1 zonelists in Zone order, mobility grouping on. Total pages: 130048
Kernel command line: console=ttyPS0,115200 earlyprintk
PID hash table entries: 2048 (order: 1, 8192 bytes)
Dentry cache hash table entries: 65536 (order: 6, 262144 bytes)
Inode-cache hash table entries: 32768 (order: 5, 131072 bytes)
Memory: 244956K/524288K available (4708K kernel code, 251K rwdata, 1644K rodata, 220K init, 250K bss, 17188K reserved, 262144K cma-reserved, 0K highmem)
Virtual kernel memory layout:
vector : 0xffff0000 - 0xffff1000 ( 4 kB)
fixmap : 0xffc00000 - 0xfff00000 (3072 kB)
vmalloc : 0x60800000 - 0xff000000 (2536 MB)
lowmem : 0x40000000 - 0x60000000 ( 512 MB)
pkmap : 0x3fe00000 - 0x40000000 ( 2 MB)
modules : 0x3f000000 - 0x3fe00000 ( 14 MB)
.text : 0x40008000 - 0x4063c614 (6354 kB)
.init : 0x4063d000 - 0x40674000 ( 220 kB)
.data : 0x40674000 - 0x406b2f20 ( 252 kB)
.bss : 0x406b2f20 - 0x406f1ab8 ( 251 kB)
Preemptible hierarchical RCU implementation.
RCU restricting CPUs from NR_CPUS=4 to nr_cpu_ids=2.
RCU: Adjusting geometry for rcu_fanout_leaf=16, nr_cpu_ids=2
NR_IRQS:16 nr_irqs:16 16
L2C-310 erratum 769419 enabled
L2C-310 enabling early BRESP for Cortex-A9
L2C-310 full line of zeros enabled for Cortex-A9
L2C-310 ID prefetch enabled, offset 1 lines
L2C-310 dynamic clock gating enabled, standby mode enabled
L2C-310 cache controller enabled, 8 ways, 512 kB
L2C-310: CACHE_ID 0x410000c8, AUX_CTRL 0x76360001
slcr mapped to 60804000
zynq_clock_init: clkc starts at 60804100
Zynq clock init
sched_clock: 64 bits at 325MHz, resolution 3ns, wraps every 3383112499200ns
timer #0 at 60806000, irq=17
Console: colour dummy device 80x30
Calibrating delay loop... 1292.69 BogoMIPS (lpj=6463488)
pid_max: default: 32768 minimum: 301
Mount-cache hash table entries: 1024 (order: 0, 4096 bytes)
Mountpoint-cache hash table entries: 1024 (order: 0, 4096 bytes)
CPU: Testing write buffer coherency: ok
CPU0: thread -1, cpu 0, socket 0, mpidr 80000000
Setting up static identity map for 0x475660 - 0x4756b8
CPU1: thread -1, cpu 1, socket 0, mpidr 80000001
Brought up 2 CPUs
SMP: Total of 2 processors activated (2591.94 BogoMIPS).
CPU: All CPU(s) started in SVC mode.
devtmpfs: initialized
VFP support v0.3: implementor 41 architecture 3 part 30 variant 9 rev 4
NET: Registered protocol family 16
DMA: preallocated 256 KiB pool for atomic coherent allocations
cpuidle: using governor ladder
cpuidle: using governor menu
hw-breakpoint: found 5 (+1 reserved) breakpoint and 1 watchpoint registers.
hw-breakpoint: maximum watchpoint size is 4 bytes.
zynq-ocm f800c000.ocmc: ZYNQ OCM pool: 256 KiB @ 0x60880000
vgaarb: loaded
SCSI subsystem initialized
usbcore: registered new interface driver usbfs
usbcore: registered new interface driver hub
usbcore: registered new device driver usb
media: Linux media interface: v0.10
Linux video capture interface: v2.00
pps_core: LinuxPPS API ver. 1 registered
pps_core: Software ver. 5.3.6 - Copyright 2005-2007 Rodolfo Giometti <giometti@linux.it>
PTP clock support registered
EDAC MC: Ver: 3.0.0
Advanced Linux Sound Architecture Driver Initialized.
Switched to clocksource arm_global_timer
NET: Registered protocol family 2
TCP established hash table entries: 4096 (order: 2, 16384 bytes)
TCP bind hash table entries: 4096 (order: 3, 32768 bytes)
TCP: Hash tables configured (established 4096 bind 4096)
TCP: reno registered
UDP hash table entries: 256 (order: 1, 8192 bytes)
UDP-Lite hash table entries: 256 (order: 1, 8192 bytes)
NET: Registered protocol family 1
RPC: Registered named UNIX socket transport module.
RPC: Registered udp transport module.
RPC: Registered tcp transport module.
RPC: Registered tcp NFSv4.1 backchannel transport module.
Trying to unpack rootfs image as initramfs...
Freeing initrd memory: 4928K (5e855000 - 5ed25000)
hw perfevents: enabled with armv7_cortex_a9 PMU driver, 7 counters available
futex hash table entries: 512 (order: 3, 32768 bytes)
jffs2: version 2.2. (NAND) (SUMMARY) c 2001-2006 Red Hat, Inc.
io scheduler noop registered
io scheduler deadline registered
io scheduler cfq registered (default)
dma-pl330 f8003000.dmac: Loaded driver for PL330 DMAC-241330
dma-pl330 f8003000.dmac: DBUFF-128x8bytes Num_Chans-8 Num_Peri-4 Num_Events-16
e0001000.serial: ttyPS0 at MMIO 0xe0001000 (irq = 142, base_baud = 3125000) is a xuartps
console [ttyPS0] enabled
xdevcfg f8007000.devcfg: ioremap 0xf8007000 to 6086c000
[drm] Initialized drm 1.1.0 20060810
brd: module loaded
loop: module loaded
CAN device driver interface
libphy: MACB_mii_bus: probed
macb e000b000.ethernet eth0: Cadence GEM rev 0x00020118 at 0xe000b000 irq 143 (00:0a:35:00:01:22)
macb e000b000.ethernet eth0: attached PHY driver [Generic PHY] (mii_bus:phy_addr=e000b000.etherne:00, irq=-1)
e1000e: Intel(R) PRO/1000 Network Driver - 2.3.2-k
e1000e: Copyright(c) 1999 - 2014 Intel Corporation.
ehci_hcd: USB 2.0 'Enhanced' Host Controller (EHCI) Driver
ehci-pci: EHCI PCI platform driver
usbcore: registered new interface driver usb-storage
mousedev: PS/2 mouse device common for all mice
i2c /dev entries driver
zynq-edac f8006000.memory-controller: ecc not enabled
Xilinx Zynq CpuIdle Driver started
Driver 'mmcblk' needs updating - please use bus_type methods
sdhci: Secure Digital Host Controller Interface driver
sdhci: Copyright(c) Pierre Ossman
sdhci-pltfm: SDHCI platform and OF driver helper
sdhci-arasan e0100000.sdhci: No vmmc regulator found
sdhci-arasan e0100000.sdhci: No vqmmc regulator found
mmc0: SDHCI controller on e0100000.sdhci [e0100000.sdhci] using ADMA
ledtrig-cpu: registered to indicate activity on CPUs
usbcore: registered new interface driver usbhid
usbhid: USB HID core driver
xlnk major 245
xlnk driver loaded
xlnk_pdev is not null
TCP: cubic registered
NET: Registered protocol family 17
can: controller area network core (rev 20120528 abi 9)
NET: Registered protocol family 29
can: raw protocol (rev 20120528)
can: broadcast manager protocol (rev 20120528 t)
can: netlink gateway (rev 20130117) max_hops=1
Registering SWP/SWPB emulation handler
drivers/rtc/hctosys.c: unable to open rtc device (rtc0)
ALSA device list:
No soundcards found.
Freeing unused kernel memory: 220K (4063d000 - 40674000)
INIT: mmc0: new high speed SDHC card at address b368
mmcblk0: mmc0:b368 USD 7.45 GiB
mmcblk0: p1 p2
version 2.88 booting
mkdir: can't create directory '/run/media/mmcblk0p1': No such file or directory
mount: mounting /dev/mmcblk0p1 on /run/media/mmcblk0p1 failed: No such file or directory
mkdir: can't create directory '/run/media/mmcblk0p2': No such file or directory
mount: mounting /dev/mmcblk0p2 on /run/media/mmcblk0p2 failed: No such file or directory
Creating /dev/flash/* device nodes
random: dd urandom read with 1 bits of entropy available
starting Busybox inet Daemon: inetd... done.
update-rc.d: /etc/init.d/run-postinsts exists during rc.d purge (continuing)
Removing any system startup links for run-postinsts ...
/etc/rcS.d/S99run-postinsts
INIT: Entering runlevel: 5
Configuring network interfaces... udhcpc (v1.23.1) started
Sending discover...
Sending discover...
Sending select for 192.168.3.63...
Lease of 192.168.3.63 obtained, lease time 86400
/etc/udhcpc.d/50default: Adding DNS 192.168.3.1
done.
Starting Dropbear SSH server: Generating key, this may take a while...
Public key portion is:
省略
dropbear.
Starting tcf-agent: OK
sh-4.3#
ERROR: [SDSoC 0-0] Function "lap_filter_axim" argument "cam_fb" is mapped to RAM interface, but it's size is bigger than 16384. Please specify #pragma SDS data zero_copy(cam_fb) or #pragma SDS data access_pattern(cam_fb:SEQUENTIAL)
#pragma SDS data zero_copy(cam_fb[0:ALL_PIXEL_VALUE])
#pragma SDS data zero_copy(lap_fb[0:ALL_PIXEL_VALUE])
int lap_filter_axim(int cam_fb[ALL_PIXEL_VALUE], int lap_fb[ALL_PIXEL_VALUE], int width, int height)
{
ERROR: [SDSoC 0-0] Hardware function 'lap_filter_axim' BRAM resource requirement (1024) exceeds platform 'zybo' resource capacity (60)
#pragma SDS data access_pattern(cam_fb:SEQUENTIAL)
#pragma SDS data access_pattern(lap_fb:SEQUENTIAL)
int lap_filter_axim(int cam_fb[ALL_PIXEL_VALUE], int lap_fb[ALL_PIXEL_VALUE], int width, int height)
{
で止まっている。ERROR: [SDSoC 0-0] Hardware function 'lap_filter_axim' BRAM resource requirement (1024) exceeds platform 'zybo' resource capacity (60)
#define HORIZONTAL_PIXEL_WIDTH 64
#define VERTICAL_PIXEL_WIDTH 48
//#define HORIZONTAL_PIXEL_WIDTH 800
//#define VERTICAL_PIXEL_WIDTH 600
#define ALL_PIXEL_VALUE (HORIZONTAL_PIXEL_WIDTH*VERTICAL_PIXEL_WIDTH)
//#define HORIZONTAL_PIXEL_WIDTH 64
//#define VERTICAL_PIXEL_WIDTH 48
#define HORIZONTAL_PIXEL_WIDTH 800
#define VERTICAL_PIXEL_WIDTH 600
#define ALL_PIXEL_VALUE (HORIZONTAL_PIXEL_WIDTH*VERTICAL_PIXEL_WIDTH)
/* * bmp_header.h * * Created on: 2015/08/03 * Author: Masaaki */
// bmp_header.h
// 2015/07/17 by Masaaki Ono
//
// BMP ファイルフォーマットから引用
// http://www.kk.iij4u.or.jp/~kondo/bmp/
//
#include <stdio.h>
#pragma once
#ifndef BMP_HEADER_H_
#define BMP_HEADER_H_
// TODO: プログラムに必要な追加ヘッダーをここで参照してください。
// BITMAPFILEHEADER 14bytes
// BMP file header (14 bytes on disk). Read/written field by field elsewhere
// in this file, so no packing pragma is needed.
// NOTE(review): 'unsigned long' matches the 4-byte on-disk fields only on
// ILP32 targets (e.g. 32-bit ARM); on LP64 this struct no longer matches the
// file layout -- confirm the target ABI before porting.
typedef struct tagBITMAPFILEHEADER {
unsigned short bfType; // magic number, "BM" for a valid bitmap
unsigned long bfSize; // total file size in bytes
unsigned short bfReserved1; // reserved, 0
unsigned short bfReserved2; // reserved, 0
unsigned long bfOffBits; // byte offset from the file start to the pixel data
} BITMAPFILEHEADER;
// BITMAPINFOHEADER 40bytes
// BMP info header (40 bytes on disk); read/written with a single fread/fwrite,
// which assumes the in-memory layout matches the file (true on ILP32 targets
// with no padding -- TODO confirm for other ABIs).
typedef struct tagBITMAPINFOHEADER{
unsigned long biSize; // size of this header in bytes (40)
long biWidth; // image width in pixels
long biHeight; // image height in pixels (positive => bottom-up row order)
unsigned short biPlanes; // number of planes (1)
unsigned short biBitCount; // bits per pixel
unsigned long biCompression; // compression type (0 = uncompressed)
unsigned long biSizeImage; // image data size in bytes
long biXPixPerMeter; // horizontal resolution, pixels per meter
long biYPixPerMeter; // vertical resolution, pixels per meter
unsigned long biClrUsed; // number of palette colors actually used
unsigned long biClrImporant; // number of important colors (sic: misspelled field name kept)
} BITMAPINFOHEADER;
// One 24-bit BMP pixel as stored in the file: blue first, then green, then red.
typedef struct BMP24bitsFORMAT {
unsigned char blue; // stored first in the file
unsigned char green;
unsigned char red;
} BMP24FORMAT;
#endif /* BMP_HEADER_H_ */
// Testbench of laplacian_filter.c
// BMPデータをハードウェアとソフトウェアで、ラプラシアン・フィルタを掛けて、それを比較する
//
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
#include "bmp_header.h"
#define HORIZONTAL_PIXEL_WIDTH 64
#define VERTICAL_PIXEL_WIDTH 48
//#define HORIZONTAL_PIXEL_WIDTH 800
//#define VERTICAL_PIXEL_WIDTH 600
#define ALL_PIXEL_VALUE (HORIZONTAL_PIXEL_WIDTH*VERTICAL_PIXEL_WIDTH)
int laplacian_fil_soft(int x0y0, int x1y0, int x2y0, int x0y1, int x1y1, int x2y1, int x0y2, int x1y2, int x2y2);
int conv_rgb2y_soft(int rgb);
int lap_filter_axim(int cam_fb[ALL_PIXEL_VALUE], int lap_fb[ALL_PIXEL_VALUE], int width, int height); // hardware
void laplacian_filter_soft(volatile int *cam_fb, volatile int *lap_fb, long width, long height); // software
// Testbench entry point: runs the Laplacian filter in "hardware"
// (lap_filter_axim) and in software (laplacian_filter_soft) on the same input
// BMP, compares the two results pixel by pixel, reports the elapsed time of
// each, and writes the hardware result to temp_lap.bmp.
// Returns 0 on success, 1 on HW/SW mismatch; exits on file/allocation errors.
int main()
{
int *s, *h; // cursors over the SW / HW result buffers during comparison
long x, y;
BITMAPFILEHEADER bmpfhr; // BMP file header (for read)
BITMAPINFOHEADER bmpihr; // BMP info header (for read)
FILE *fbmpr, *fbmpw;
int *rd_bmp, *hw_lapd, *sw_lapd;
int blue, green, red;
struct timeval start_time_hw, end_time_hw;
struct timeval start_time_sw, end_time_sw;
if ((fbmpr = fopen("test.bmp", "rb")) == NULL){ // open test.bmp
fprintf(stderr, "Can't open test.bmp by binary read mode\n");
exit(1);
}
// Read the BMP header field by field (avoids struct padding between fields).
// NOTE(review): sizeof(long) matches the 4-byte on-disk fields only on ILP32
// targets such as 32-bit ARM -- confirm the target ABI before porting.
fread(&bmpfhr.bfType, sizeof(char), 2, fbmpr);
fread(&bmpfhr.bfSize, sizeof(long), 1, fbmpr);
fread(&bmpfhr.bfReserved1, sizeof(short), 1, fbmpr);
fread(&bmpfhr.bfReserved2, sizeof(short), 1, fbmpr);
fread(&bmpfhr.bfOffBits, sizeof(long), 1, fbmpr);
fread(&bmpihr, sizeof(BITMAPINFOHEADER), 1, fbmpr);
// Allocate the pixel buffers.
if ((rd_bmp =(int *)malloc(sizeof(int) * (bmpihr.biWidth * bmpihr.biHeight))) == NULL){
fprintf(stderr, "Can't allocate rd_bmp memory\n");
exit(1);
}
if ((hw_lapd =(int *)malloc(sizeof(int) * (bmpihr.biWidth * bmpihr.biHeight))) == NULL){
fprintf(stderr, "Can't allocate hw_lapd memory\n");
exit(1);
}
if ((sw_lapd =(int *)malloc(sizeof(int) * (bmpihr.biWidth * bmpihr.biHeight))) == NULL){
fprintf(stderr, "Can't allocate sw_lapd memory\n");
exit(1);
}
// Copy the BMP pixels into rd_bmp; BMP stores rows bottom-up, so flip
// vertically while packing B/G/R into one {0,R,G,B} int per pixel.
for (y=0; y<bmpihr.biHeight; y++){
for (x=0; x<bmpihr.biWidth; x++){
blue = fgetc(fbmpr);
green = fgetc(fbmpr);
red = fgetc(fbmpr);
rd_bmp[((bmpihr.biHeight-1)-y)*bmpihr.biWidth+x] = (blue & 0xff) | ((green & 0xff)<<8) | ((red & 0xff)<<16);
}
}
fclose(fbmpr);
gettimeofday(&start_time_hw, NULL);
lap_filter_axim(rd_bmp, hw_lapd, (int)bmpihr.biWidth, (int)bmpihr.biHeight); // hardware Laplacian filter
gettimeofday(&end_time_hw, NULL);
gettimeofday(&start_time_sw, NULL);
laplacian_filter_soft(rd_bmp, sw_lapd, bmpihr.biWidth, bmpihr.biHeight); // software Laplacian filter
gettimeofday(&end_time_sw, NULL);
// Compare the hardware and software results pixel by pixel.
for (y=0, h=hw_lapd, s=sw_lapd; y<bmpihr.biHeight; y++){
for (x=0; x<bmpihr.biWidth; x++){
if (*h != *s){
printf("ERROR HW and SW results mismatch x = %ld, y = %ld, HW = %d, SW = %d\n", x, y, *h, *s);
return(1);
} else {
h++;
s++;
}
}
}
printf("Success HW and SW results match\n");
// Print elapsed times; borrow a second when tv_usec wrapped.
if (end_time_hw.tv_usec < start_time_hw.tv_usec) {
printf("lap_filter2 HW time = %ld.%06ld sec\n", end_time_hw.tv_sec - start_time_hw.tv_sec - 1, 1000000 + end_time_hw.tv_usec - start_time_hw.tv_usec);
}
else {
printf("lap_filter2 HW time = %ld.%06ld sec\n", end_time_hw.tv_sec - start_time_hw.tv_sec, end_time_hw.tv_usec - start_time_hw.tv_usec);
}
if (end_time_sw.tv_usec < start_time_sw.tv_usec) {
printf("lap_filter2 SW time = %ld.%06ld sec\n", end_time_sw.tv_sec - start_time_sw.tv_sec - 1, 1000000 + end_time_sw.tv_usec - start_time_sw.tv_usec);
}
else {
printf("lap_filter2 SW time = %ld.%06ld sec\n", end_time_sw.tv_sec - start_time_sw.tv_sec, end_time_sw.tv_usec - start_time_sw.tv_usec);
}
// Write the hardware result out to temp_lap.bmp.
if ((fbmpw=fopen("temp_lap.bmp", "wb")) == NULL){
fprintf(stderr, "Can't open temp_lap.bmp by binary write mode\n");
exit(1);
}
// Write the BMP headers back out, field by field.
fwrite(&bmpfhr.bfType, sizeof(char), 2, fbmpw);
fwrite(&bmpfhr.bfSize, sizeof(long), 1, fbmpw);
fwrite(&bmpfhr.bfReserved1, sizeof(short), 1, fbmpw);
fwrite(&bmpfhr.bfReserved2, sizeof(short), 1, fbmpw);
fwrite(&bmpfhr.bfOffBits, sizeof(long), 1, fbmpw);
fwrite(&bmpihr, sizeof(BITMAPINFOHEADER), 1, fbmpw);
// Write the RGB data, flipping back to the BMP bottom-up row order.
for (y=0; y<bmpihr.biHeight; y++){
for (x=0; x<bmpihr.biWidth; x++){
blue = hw_lapd[((bmpihr.biHeight-1)-y)*bmpihr.biWidth+x] & 0xff;
green = (hw_lapd[((bmpihr.biHeight-1)-y)*bmpihr.biWidth+x] >> 8) & 0xff;
red = (hw_lapd[((bmpihr.biHeight-1)-y)*bmpihr.biWidth+x]>>16) & 0xff;
fputc(blue, fbmpw);
fputc(green, fbmpw);
fputc(red, fbmpw);
}
}
fclose(fbmpw);
if (rd_bmp) free(rd_bmp);
if (hw_lapd) free(hw_lapd);
if (sw_lapd) free(sw_lapd);
return(0);
}
// Software reference implementation of the Laplacian filter: converts the
// packed {0,R,G,B} input to luminance on the fly and writes the saturated 3x3
// Laplacian response (gray replicated into R/G/B) to lap_fb.
// The first/last rows and columns are forced to 0 (no full 3x3 neighbourhood).
void laplacian_filter_soft(volatile int *cam_fb, volatile int *lap_fb, long width, long height)
{
unsigned int **line_buf;
unsigned int *lap_buf; // NOTE(review): allocated and freed below but never otherwise used
int x, y, i;
int lap_fil_val;
int a, b;
int fl, sl, tl;
// Allocate the first dimension of line_buf.
if ((line_buf =(unsigned int **)malloc(sizeof(unsigned int *) * 3)) == NULL){
fprintf(stderr, "Can't allocate line_buf[3][]\n");
exit(1);
}
// Allocate one line per row of the 3-line window.
for (i=0; i<3; i++){
if ((line_buf[i]=(unsigned int *)malloc(sizeof(unsigned int) * width)) == NULL){
fprintf(stderr, "Can't allocate line_buf[%d]\n", i);
exit(1);
}
}
if ((lap_buf=(unsigned int *)malloc(sizeof(unsigned int) * (width))) == NULL){
fprintf(stderr, "Can't allocate lap_buf memory\n");
exit(1);
}
// Convert RGB to luminance and apply the Laplacian filter.
for (y=0; y<height; y++){
for (x=0; x<width; x++){
if (y==0 || y==height-1){ // top/bottom border -> 0
lap_fil_val = 0;
}else if (x==0 || x==width-1){ // left/right border -> 0
lap_fil_val = 0;
}else{
if (y == 1 && x == 1){ // very first interior pixel: prefetch the first 2 lines
for (a=0; a<2; a++){ // 2 lines
for (b=0; b<width; b++){ // one line
line_buf[a][b] = cam_fb[(a*width)+b];
line_buf[a][b] = conv_rgb2y_soft(line_buf[a][b]);
}
}
}
if (x == 1) { // start of a line: prefetch the first 2 pixels of line y+1
for (b=0; b<2; b++){
line_buf[(y+1)%3][b] = cam_fb[((y+1)*width)+b];
// (y+1)%3 recycles the slot no longer needed: y=2 -> line[0], y=3 -> line[1], y=4 -> line[2]
line_buf[(y+1)%3][b] = conv_rgb2y_soft(line_buf[(y+1)%3][b]);
}
}
// Read one pixel ahead while running the Laplacian filter.
line_buf[(y+1)%3][x+1] = cam_fb[((y+1)*width)+(x+1)];
// (y+1)%3 recycles the slot no longer needed: y=2 -> line[0], y=3 -> line[1], y=4 -> line[2]
line_buf[(y+1)%3][x+1] = conv_rgb2y_soft(line_buf[(y+1)%3][x+1]);
fl = (y-1)%3; // first window line; y=1 -> 012, y=2 -> 120, y=3 -> 201, y=4 -> 012
sl = y%3; // second window line
tl = (y+1)%3; // third window line
lap_fil_val = laplacian_fil_soft(line_buf[fl][x-1], line_buf[fl][x], line_buf[fl][x+1], line_buf[sl][x-1], line_buf[sl][x], line_buf[sl][x+1], line_buf[tl][x-1], line_buf[tl][x], line_buf[tl][x+1]);
}
// Write the filtered value, gray replicated into R, G and B.
lap_fb[(y*width)+x] = (lap_fil_val<<16)+(lap_fil_val<<8)+lap_fil_val ;
}
}
if(lap_buf) free(lap_buf);
for (i=0; i<3; i++)
if (line_buf[i]) free(line_buf[i]);
if (line_buf) free(line_buf);
}
// Convert a packed RGB pixel ({8'd0, R(8), G(8), B(8)}, 1 pixel = 32 bits)
// to its luminance component Y only.
// Integer approximation of Y = 0.299R + 0.587G + 0.114B using the
// coefficients scaled by 256 (77, 150, 29), then divided by 256.
// Based on "YUV format and YUV<->RGB conversion"
// http://vision.kuee.kyoto-u.ac.jp/~hiroaki/firewire/yuv.html
// 2013/09/27 : switched from float to all-integer arithmetic
int conv_rgb2y_soft(int rgb){
    const int blue  = rgb         & 0xff;
    const int green = (rgb >> 8)  & 0xff;
    const int red   = (rgb >> 16) & 0xff;
    // 77/256 ~= 0.299, 150/256 ~= 0.587, 29/256 ~= 0.114
    const int y_scaled = 77*red + 150*green + 29*blue;
    return y_scaled >> 8; // divide by 256
}
// ラプラシアンフィルタ
// x0y0 x1y0 x2y0 -1 -1 -1
// x0y1 x1y1 x2y1 -1 8 -1
// x0y2 x1y2 x2y2 -1 -1 -1
int laplacian_fil_soft(int x0y0, int x1y0, int x2y0, int x0y1, int x1y1, int x2y1, int x0y2, int x1y2, int x2y2)
{
int y;
y = -x0y0 -x1y0 -x2y0 -x0y1 +8*x1y1 -x2y1 -x0y2 -x1y2 -x2y2;
if (y<0)
y = 0;
else if (y>255)
y = 255;
return(y);
}
// laplacian_filter2.c
// lap_filter_axim()
#include <stdio.h>
#include <string.h>
#define HORIZONTAL_PIXEL_WIDTH 64
#define VERTICAL_PIXEL_WIDTH 48
//#define HORIZONTAL_PIXEL_WIDTH 800
//#define VERTICAL_PIXEL_WIDTH 600
#define ALL_PIXEL_VALUE (HORIZONTAL_PIXEL_WIDTH*VERTICAL_PIXEL_WIDTH)
int laplacian_fil(int x0y0, int x1y0, int x2y0, int x0y1, int x1y1, int x2y1, int x0y2, int x1y2, int x2y2);
int conv_rgb2y(int rgb);
int lap_filter_axim(int cam_fb[ALL_PIXEL_VALUE], int lap_fb[ALL_PIXEL_VALUE], int width, int height)
{
unsigned int line_buf[3][ALL_PIXEL_VALUE];
unsigned int lap_buf[ALL_PIXEL_VALUE];
int x, y, i;
int lap_fil_val;
int a, b;
int fl, sl, tl;
// RGB値をY(輝度成分)のみに変換し、ラプラシアンフィルタを掛けた。
for (y=0; y<height; y++){
for (x=0; x<width; x++){
if (y==0 || y==height-1){ // 縦の境界の時の値は0とする
lap_fil_val = 0;
}else if (x==0 || x==width-1){ // 横の境界の時も値は0とする
lap_fil_val = 0;
}else{
if (y == 1 && x == 1){ // 最初のラインの最初のピクセルでは2ライン分の画素を読み出す
for (a=0; a<2; a++){ // 2ライン分
for (b=0; b<width; b++){ // ライン
line_buf[a][b] = cam_fb[(a*width)+b];
line_buf[a][b] = conv_rgb2y(line_buf[a][b]);
}
}
}
if (x == 1) { // ラインの最初なので、2つのピクセルを読み込む
for (b=0; b<2; b++){ // ライン
line_buf[(y+1)%3][b] = cam_fb[((y+1)*width)+b];
// (y+1)%3 は、使用済みのラインがに読み込む、y=2 の時 line[0], y=3の時 line[1], y=4の時 line[2]
line_buf[(y+1)%3][b] = conv_rgb2y(line_buf[(y+1)%3][b]);
}
}
// 1つのピクセルを読み込みながらラプラシアン・フィルタを実行する
line_buf[(y+1)%3][x+1] = cam_fb[((y+1)*width)+(x+1)];
// (y+1)%3 は、使用済みのラインがに読み込む、y=2 の時 line[0], y=3の時 line[1], y=4の時 line[2]
line_buf[(y+1)%3][x+1] = conv_rgb2y(line_buf[(y+1)%3][x+1]);
fl = (y-1)%3; // 最初のライン, y=1 012, y=2 120, y=3 201, y=4 012
sl = y%3; // 2番めのライン
tl = (y+1)%3; // 3番目のライン
lap_fil_val = laplacian_fil(line_buf[fl][x-1], line_buf[fl][x], line_buf[fl][x+1], line_buf[sl][x-1], line_buf[sl][x], line_buf[sl][x+1], line_buf[tl][x-1], line_buf[tl][x], line_buf[tl][x+1]);
}
// ラプラシアンフィルタ・データの書き込み
lap_fb[(y*width)+x] = (lap_fil_val<<16)+(lap_fil_val<<8)+lap_fil_val ;
}
}
return 0;
}
// Convert a packed RGB pixel ({8'd0, R(8), G(8), B(8)}, 1 pixel = 32 bits)
// to its luminance component Y only.
// Integer approximation of Y = 0.299R + 0.587G + 0.114B using the
// coefficients scaled by 256 (77, 150, 29), then divided by 256.
// Based on "YUV format and YUV<->RGB conversion"
// http://vision.kuee.kyoto-u.ac.jp/~hiroaki/firewire/yuv.html
// 2013/09/27 : switched from float to all-integer arithmetic
int conv_rgb2y(int rgb){
    const int blue  = rgb         & 0xff;
    const int green = (rgb >> 8)  & 0xff;
    const int red   = (rgb >> 16) & 0xff;
    // 77/256 ~= 0.299, 150/256 ~= 0.587, 29/256 ~= 0.114
    const int y_scaled = 77*red + 150*green + 29*blue;
    return y_scaled >> 8; // divide by 256
}
// ラプラシアンフィルタ
// x0y0 x1y0 x2y0 -1 -1 -1
// x0y1 x1y1 x2y1 -1 8 -1
// x0y2 x1y2 x2y2 -1 -1 -1
int laplacian_fil(int x0y0, int x1y0, int x2y0, int x0y1, int x1y1, int x2y1, int x0y2, int x1y2, int x2y2)
{
int y;
y = -x0y0 -x1y0 -x2y0 -x0y1 +8*x1y1 -x2y1 -x0y2 -x1y2 -x2y2;
if (y<0)
y = 0;
else if (y>255)
y = 255;
return(y);
}
日 | 月 | 火 | 水 | 木 | 金 | 土 |
---|---|---|---|---|---|---|
- | - | - | - | - | - | 1 |
2 | 3 | 4 | 5 | 6 | 7 | 8 |
9 | 10 | 11 | 12 | 13 | 14 | 15 |
16 | 17 | 18 | 19 | 20 | 21 | 22 |
23 | 24 | 25 | 26 | 27 | 28 | 29 |
30 | 31 | - | - | - | - | - |