利用 matlab的deep learning toolbox工具箱实现3张数字图片的检测。

首先将三张图片归一化到28*28、数字呈现白色的灰度图像,将该矩阵转置(在修改完mnist数据库并重新训练后,就不再需要对测试数据进行转置了),然后将其展开为行向量,整合成共3行的矩阵(前三行分别对应数字3、2、1),数据文件名为new3.mat。之所以这样处理,是因为mnist数据库里也是按这种格式存储的。

然后重写自己的example文件和test文件。

example.m

% example.m — train a LeNet-style CNN on MNIST and classify 3 custom digit images.
clear; close all; clc;          % 'clear' preferred over 'clear all' (keeps breakpoints/caches)
addpath('../data');
addpath('../util');
load mnist_uint8;               % provides train_x, train_y, test_x, test_y (uint8)
load new3;                      % provides new3: 3x784, rows are the custom digits 3, 2, 1

% Reshape flat row vectors into 28x28 images and scale pixels to [0,1].
train_x1 = double(reshape(train_x', 28, 28, 60000)) / 255;
test_x1  = double(reshape(test_x', 28, 28, 10000)) / 255;
train_y1 = double(train_y');
test_y1  = double(test_y');
newtest3 = double(reshape(new3', 28, 28, 3)) / 255;

%% Network definition
% 1 epoch runs in about 200 s and gives roughly 11% error;
% 100 epochs give around 1.2% error.
cnn.layers = {
    struct('type', 'i')                                    % input layer
    struct('type', 'c', 'outputmaps', 6, 'kernelsize', 5)  % convolution layer
    struct('type', 's', 'scale', 2)                        % subsampling layer
    struct('type', 'c', 'outputmaps', 12, 'kernelsize', 5) % convolution layer
    struct('type', 's', 'scale', 2)                        % subsampling layer
};

% Build the full CNN from the layer spec above.
cnn = cnnsetup(cnn, train_x1, train_y1);

% Learning rate.
opts.alpha = 1;
% Weights are updated once per mini-batch of this size,
% not once per full pass over the whole training set.
opts.batchsize = 50;
% Number of epochs over the same training set. Observed error rates:
%   1 epoch  -> 11.41%
%   5 epochs -> 4.2%
%  10 epochs -> 2.73%
opts.numepochs = 10;  % 1 also works, but may lower accuracy and misclassify digits

% Train the CNN on the training samples.
cnn = cnntrain(cnn, train_x1, train_y1, opts);

% Classify the custom test images.
%[er, bad, h] = cnntest(cnn, test_x1, test_y1);
[h] = newtest(cnn, newtest3);
% Plot mean squared error over training.
plot(cnn.rL);
%disp([num2str(er*100) '% error']);
disp([num2str(h) ' out' ]);



newtest.m

function [h] = newtest(net, x)
% NEWTEST  Classify images with a trained CNN, returning predicted digit labels.
%   net : trained CNN struct (from cnntrain)
%   x   : 28x28xN stack of test images
%   h   : 1xN row vector of predicted digits (0-based)
    net = cnnff(net, x);        % forward pass through the network
    [~, winner] = max(net.o);   % 1-based index of the strongest output unit
    h = winner - 1;             % map index 1..10 to digit label 0..9
end



在实现两位数识别时,数字之间无连笔的情况已经可以正确识别;但当两个数字之间存在连笔时,"在连笔处取中间两点的平均值作为分割点"的方法该如何具体应用,才能取得好的分割与识别效果,仍有待解决。



MATLAB数字识别新写文件

%yjy76.m

% yjy76.m — run the trained CNN on nine two-digit test images t1.jpg .. t9.jpg.
clear; close all; clc;          % 'clear' preferred over 'clear all'
addpath('../data');
addpath('../util');
load('yjycnn.mat');             % provides the trained network 'yjycnn'

imgDir = 'E:\matlab_project\DeepLearnToolbox-master\image';
A = cell(1, 9);
for i = 1:9
    % fullfile/sprintf instead of manual string concatenation.
    A{i} = imread(fullfile(imgDir, sprintf('t%d.jpg', i)));
    testx = A{i};
    [out, tempt] = yjydigit(testx);
    disp([num2str(out) ' out' ]);
end

%yjydigit.m

function [out, tempt] = yjydigit( x )
% YJYDIGIT  Recognize a two-digit number in an RGB image with the trained CNN.
%   x     : RGB image containing two handwritten digits side by side
%   out   : recognized two-digit number (tens digit * 10 + units digit)
%   tempt : 2x784 matrix of the two segmented, binarized 28x28 digits (0/255)
%
% Bug fix: the original first line was x = imread('11.jpg'), which overwrote
% the caller's image, so every call processed the same hard-coded file.

a = rgb2gray(x);
t = graythresh(a);              % Otsu threshold
bw = im2bw(a, t);
imshow(bw);
bw = 1 - bw;                    % invert: digits become foreground (1)

% Bounding box of all foreground pixels.
[rows, cols] = find(bw == 1);
big   = max(rows);
small = min(rows);
left  = min(cols);
right = max(cols);

% Scan rightward from column 10 for the blank gap between the two digits:
% first past the end of the left digit, then to the start of the right one.
% (Assumes the left digit extends past column 10, as in the original code.)
c = 10;
while (sum(bw(:, c)) >= 1)
    c = c + 1;
end
j = c;
while (sum(bw(:, j)) == 0)
    j = j + 1;
end

% Crop each digit with a small margin, clamped so indices stay in bounds
% (the original small-2 / left-2 could run off the image edge).
rowTop = max(small - 2, 1);
rowBot = min(big + 1, size(bw, 1));
colL   = max(left - 2, 1);
temp1 = bw(rowTop:rowBot, colL:c);
temp2 = bw(rowTop:rowBot, j:right);
temp1 = imresize(temp1, [28, 28]);
temp2 = imresize(temp2, [28, 28]);

% Re-binarize: imresize interpolation produces gray values.
t = graythresh(temp1);
temp1 = im2bw(temp1, t);

t = graythresh(temp2);
temp2 = im2bw(temp2, t);

% Flatten to row vectors in the same layout the network was trained on.
temp1 = temp1(:)';
temp2 = temp2(:)';
tempt = [temp1; temp2];
tempt = 255 * tempt;            % scale to 0/255 like the MNIST uint8 data
load('yjycnn.mat')              % provides the trained network 'yjycnn'

newtest3 = double(reshape(tempt', 28, 28, 2)) / 255;
[h] = newtest(yjycnn, newtest3);
out = h(1) * 10 + h(2);
end

%yjyapply.m

% yjyapply.m — classify 12 prepared digit images with the trained CNN.
clear; close all; clc;          % 'clear' preferred over 'clear all'
addpath('../data');
addpath('../util');
load new12;                     % provides new12: 12x784, one flattened digit per row
load('yjycnn.mat')              % provides the trained network 'yjycnn'
newtest3 = double(reshape(new12', 28, 28, 12)) / 255;
%% run prediction
[h] = newtest(yjycnn, newtest3);
disp([num2str(h) ' out' ]);

%newtest.m

function [h] = newtest(net, x)
% NEWTEST  Classify images with a trained CNN, returning predicted digit labels.
%   net : trained CNN struct (from cnntrain)
%   x   : 28x28xN stack of test images
%   h   : 1xN row vector of predicted digits (0-based)
    net = cnnff(net, x);        % forward pass through the network
    [~, winner] = max(net.o);   % 1-based index of the strongest output unit
    h = winner - 1;             % map index 1..10 to digit label 0..9
end

%yjyexample.m

% yjyexample.m — train the CNN on the customized MNIST-style database (yjydb)
% and classify 10 custom digit images; the trained net can then be saved as
% yjycnn.mat for use by yjyapply.m / yjydigit.m.
clear; close all; clc;          % 'clear' preferred over 'clear all'
addpath('../data');
addpath('../util');
load yjydb;                     % provides yjytrain_x, yjytest_x, train_y, test_y
load new10;                     % provides new10: 10x784 custom digit images
%load new2;

% Reshape flat row vectors into 28x28 images and scale pixels to [0,1].
train_x1 = double(reshape(yjytrain_x', 28, 28, 60000)) / 255;
test_x1  = double(reshape(yjytest_x', 28, 28, 10000)) / 255;
train_y1 = double(train_y');
test_y1  = double(test_y');
newtest3 = double(reshape(new10', 28, 28, 10)) / 255;

%% Network definition
% 1 epoch runs in about 200 s and gives roughly 11% error;
% 100 epochs give around 1.2% error.
cnn.layers = {
    struct('type', 'i')                                    % input layer
    struct('type', 'c', 'outputmaps', 6, 'kernelsize', 5)  % convolution layer
    struct('type', 's', 'scale', 2)                        % subsampling layer
    struct('type', 'c', 'outputmaps', 12, 'kernelsize', 5) % convolution layer
    struct('type', 's', 'scale', 2)                        % subsampling layer
};

% Build the full CNN from the layer spec above.
cnn = cnnsetup(cnn, train_x1, train_y1);

% Learning rate.
opts.alpha = 1;
% Weights are updated once per mini-batch of this size,
% not once per full pass over the whole training set.
opts.batchsize = 50;
% Number of epochs. Observed error rates:
%   1 epoch  -> 11.41%
%   5 epochs -> 4.2%
%  10 epochs -> 2.73%
opts.numepochs = 1;

% Train the CNN on the training samples.
cnn = cnntrain(cnn, train_x1, train_y1, opts);

% Classify the custom test images.
%[er, bad, h] = cnntest(cnn, test_x1, test_y1);
[h] = newtest(cnn, newtest3);
% Plot mean squared error over training.
plot(cnn.rL);
%disp([num2str(er*100) '% error']);
disp([num2str(h) ' out' ]);

 

 

 




MATLAB数字识别新写文件

%yjy76.m

% yjy76.m — run the trained CNN on nine two-digit test images t1.jpg .. t9.jpg.
clear; close all; clc;          % 'clear' preferred over 'clear all'
addpath('../data');
addpath('../util');
load('yjycnn.mat');             % provides the trained network 'yjycnn'

imgDir = 'E:\matlab_project\DeepLearnToolbox-master\image';
A = cell(1, 9);
for i = 1:9
    % fullfile/sprintf instead of manual string concatenation.
    A{i} = imread(fullfile(imgDir, sprintf('t%d.jpg', i)));
    testx = A{i};
    [out, tempt] = yjydigit(testx);
    disp([num2str(out) ' out' ]);
end

%yjydigit.m

function [out, tempt] = yjydigit( x )
% YJYDIGIT  Recognize a two-digit number in an RGB image with the trained CNN.
%   x     : RGB image containing two handwritten digits side by side
%   out   : recognized two-digit number (tens digit * 10 + units digit)
%   tempt : 2x784 matrix of the two segmented, binarized 28x28 digits (0/255)
%
% Bug fix: the original first line was x = imread('11.jpg'), which overwrote
% the caller's image, so every call processed the same hard-coded file.

a = rgb2gray(x);
t = graythresh(a);              % Otsu threshold
bw = im2bw(a, t);
imshow(bw);
bw = 1 - bw;                    % invert: digits become foreground (1)

% Bounding box of all foreground pixels.
[rows, cols] = find(bw == 1);
big   = max(rows);
small = min(rows);
left  = min(cols);
right = max(cols);

% Scan rightward from column 10 for the blank gap between the two digits:
% first past the end of the left digit, then to the start of the right one.
% (Assumes the left digit extends past column 10, as in the original code.)
c = 10;
while (sum(bw(:, c)) >= 1)
    c = c + 1;
end
j = c;
while (sum(bw(:, j)) == 0)
    j = j + 1;
end

% Crop each digit with a small margin, clamped so indices stay in bounds
% (the original small-2 / left-2 could run off the image edge).
rowTop = max(small - 2, 1);
rowBot = min(big + 1, size(bw, 1));
colL   = max(left - 2, 1);
temp1 = bw(rowTop:rowBot, colL:c);
temp2 = bw(rowTop:rowBot, j:right);
temp1 = imresize(temp1, [28, 28]);
temp2 = imresize(temp2, [28, 28]);

% Re-binarize: imresize interpolation produces gray values.
t = graythresh(temp1);
temp1 = im2bw(temp1, t);

t = graythresh(temp2);
temp2 = im2bw(temp2, t);

% Flatten to row vectors in the same layout the network was trained on.
temp1 = temp1(:)';
temp2 = temp2(:)';
tempt = [temp1; temp2];
tempt = 255 * tempt;            % scale to 0/255 like the MNIST uint8 data
load('yjycnn.mat')              % provides the trained network 'yjycnn'

newtest3 = double(reshape(tempt', 28, 28, 2)) / 255;
[h] = newtest(yjycnn, newtest3);
out = h(1) * 10 + h(2);
end

%yjyapply.m

% yjyapply.m — classify 12 prepared digit images with the trained CNN.
clear; close all; clc;          % 'clear' preferred over 'clear all'
addpath('../data');
addpath('../util');
load new12;                     % provides new12: 12x784, one flattened digit per row
load('yjycnn.mat')              % provides the trained network 'yjycnn'
newtest3 = double(reshape(new12', 28, 28, 12)) / 255;
%% run prediction
[h] = newtest(yjycnn, newtest3);
disp([num2str(h) ' out' ]);

%newtest.m

function [h] = newtest(net, x)
% NEWTEST  Classify images with a trained CNN, returning predicted digit labels.
%   net : trained CNN struct (from cnntrain)
%   x   : 28x28xN stack of test images
%   h   : 1xN row vector of predicted digits (0-based)
    net = cnnff(net, x);        % forward pass through the network
    [~, winner] = max(net.o);   % 1-based index of the strongest output unit
    h = winner - 1;             % map index 1..10 to digit label 0..9
end

%yjyexample.m

% yjyexample.m — train the CNN on the customized MNIST-style database (yjydb)
% and classify 10 custom digit images; the trained net can then be saved as
% yjycnn.mat for use by yjyapply.m / yjydigit.m.
clear; close all; clc;          % 'clear' preferred over 'clear all'
addpath('../data');
addpath('../util');
load yjydb;                     % provides yjytrain_x, yjytest_x, train_y, test_y
load new10;                     % provides new10: 10x784 custom digit images
%load new2;

% Reshape flat row vectors into 28x28 images and scale pixels to [0,1].
train_x1 = double(reshape(yjytrain_x', 28, 28, 60000)) / 255;
test_x1  = double(reshape(yjytest_x', 28, 28, 10000)) / 255;
train_y1 = double(train_y');
test_y1  = double(test_y');
newtest3 = double(reshape(new10', 28, 28, 10)) / 255;

%% Network definition
% 1 epoch runs in about 200 s and gives roughly 11% error;
% 100 epochs give around 1.2% error.
cnn.layers = {
    struct('type', 'i')                                    % input layer
    struct('type', 'c', 'outputmaps', 6, 'kernelsize', 5)  % convolution layer
    struct('type', 's', 'scale', 2)                        % subsampling layer
    struct('type', 'c', 'outputmaps', 12, 'kernelsize', 5) % convolution layer
    struct('type', 's', 'scale', 2)                        % subsampling layer
};

% Build the full CNN from the layer spec above.
cnn = cnnsetup(cnn, train_x1, train_y1);

% Learning rate.
opts.alpha = 1;
% Weights are updated once per mini-batch of this size,
% not once per full pass over the whole training set.
opts.batchsize = 50;
% Number of epochs. Observed error rates:
%   1 epoch  -> 11.41%
%   5 epochs -> 4.2%
%  10 epochs -> 2.73%
opts.numepochs = 1;

% Train the CNN on the training samples.
cnn = cnntrain(cnn, train_x1, train_y1, opts);

% Classify the custom test images.
%[er, bad, h] = cnntest(cnn, test_x1, test_y1);
[h] = newtest(cnn, newtest3);
% Plot mean squared error over training.
plot(cnn.rL);
%disp([num2str(er*100) '% error']);
disp([num2str(h) ' out' ]);