
function [new_features, targets] = NLPCA(features, targets, params, region)

% Reduce the dimensionality of the data using the non-linear PCA (auto-associative network) algorithm
% Inputs:
% 	features	- Train features
%	targets		- Train targets
%	params		- [Number of output dimensions, Number of hidden units]
%	region		- Decision region vector: [-x x -y y number_of_points] (not used by this routine)
%
% Outputs:
%	new_features	- Features projected onto the reduced-dimension space
%	targets		- Train targets (returned unchanged)
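%
% Example (a minimal sketch with made-up data; assumes params is split by
% process_params as [Dout, Nh], as documented above, and that the toolbox
% helpers are on the path):
%	X = randn(4, 200);			% 4-dimensional features, 200 samples
%	t = ones(1, 200);			% targets are passed through unchanged
%	[Xr, t] = NLPCA(X, t, [2, 8], []);	% project onto 2 non-linear components,
%						% using 8 units in each non-linear hidden layer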

[Ni, M] = size(features);
[Dout, Nh] = process_params(params);

Theta	= 0.05;		%Convergence threshold on the change in error
eta		= 0.1/M;	%Learning rate
iter	= 0;
IterDisp = 1;		%Display the error every IterDisp iterations

features	= [features; ones(1,M)];	%Append a constant bias input
Ni			= Ni + 1;

if (Dout > Ni),
   error('Error: The output dimension should not exceed the input dimension')
end

%Initialize the net: 
%This net has two non-linear hidden layers separated by a linear layer
%The output layer is linear
w0		= max(abs(std(features')'));	%Weights are drawn uniformly from [-w0, w0]
Wnl1	= rand(Nh, Ni).*w0*2-w0;	%First non-linear hidden layer weights
Wl		= rand(Dout, Nh).*w0*2-w0;	%Linear bottleneck weights
Wnl2	= rand(Nh, Dout).*w0*2-w0;	%Second non-linear hidden layer weights
Wo		= rand(Ni, Nh).*w0*2-w0;	%Output (reconstruction) layer weights
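
%The resulting network is an auto-associative (bottleneck) MLP:
%	input (Ni) -> non-linear hidden (Nh) -> linear bottleneck (Dout) -> non-linear hidden (Nh) -> linear output (Ni)
%It is trained to reconstruct its own input; after training, the Dout-dimensional
%bottleneck activations serve as the new (non-linear principal) features.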

J       = M;
gradJ   = 1e3;

while (gradJ > Theta),
    deltaWnl1	= 0;
    deltaWl		= 0;
    deltaWnl2	= 0;
    deltaWo		= 0;
    
    for m = 1:M,
        Xm = features(:,m);
        tk = features(:,m);
        
        %Forward propagate the input:
        %...to the first NL hidden units
        gh1				= Wnl1*Xm;
        [y1, dfh1]	= activation(gh1);
        
        %...to the linear hidden units
        y2				= Wl*y1;
        
        %...to the second NL hidden units
        gh3				= Wnl2*y2;
        [y3, dfh3]	= activation(gh3);

        %...to the output unit
        zk				= Wo*y3;
        
        %Backpropagation!
        %Evaluate delta_k at the output (the output layer is linear, so f'(net) = 1): delta_k = (tk-zk)
        delta_k		= (tk - zk);
        
        %...and delta_j at the hidden layers: delta_j = f'(net)*sum_k(w_kj*delta_k)
        delta_j3		= dfh3.*sum((Wo.*(delta_k*ones(1,Nh))))';
        %The bottleneck layer is linear, so its delta carries no derivative factor
        delta_j2		= sum((Wnl2.*(delta_j3*ones(1,Dout))))';
        delta_j1		= dfh1.*sum((Wl.*(delta_j2*ones(1,Nh))))';
        
        %Accumulate the weight updates: delta_w_kj <- delta_w_kj + eta*delta_k*y_j
        deltaWo		= deltaWo   + eta*delta_k*y3';
        deltaWnl2		= deltaWnl2 + eta*delta_j3*y2';
        deltaWl		= deltaWl   + eta*delta_j2*y1';
        deltaWnl1		= deltaWnl1 + eta*delta_j1*Xm';
        
    end
    
    %Batch update once per epoch: w_kj <- w_kj + delta_w_kj (eta is already folded into the accumulated updates)
    Wo				= Wo   + deltaWo;
    Wnl2				= Wnl2 + deltaWnl2;
    Wl				= Wl   + deltaWl;
    Wnl1				= Wnl1 + deltaWnl1;
    
    %Calculate the RMS reconstruction error over the training set
    oldJ = J;
    J    = 0;
    for i = 1:M,
       Xm				= features(:,i);
       gh1				= Wnl1*Xm;
       [y1, dfh1]		= activation(gh1);
       y2				= Wl*y1;
       gh3				= Wnl2*y2;
       [y3, dfh3]		= activation(gh3);
       zk				= Wo*y3;
       J = J + sum((zk-Xm).^2);
    end
    J		 = sqrt(J/M);
    gradJ = abs(J - oldJ);	%Change in error between epochs, used as the stopping criterion
    
    iter 			= iter + 1;
    if (mod(iter, IterDisp) == 0),
        disp(['Iteration ' num2str(iter) ': Total error is ' num2str(J)])
    end
    
end

disp(['Backpropagation converged after ' num2str(iter) ' iterations.'])

%Project the features onto the bottleneck by propagating them through the first two layers (the encoder half) of the network
new_features = zeros(Dout,M);
for i = 1:M,
   Xm				= features(:,i);
   gh1			= Wnl1*Xm;
   [y1, dfh1]	= activation(gh1);
   new_features(:,i)	= Wl*y1;
end




function [f, df] = activation(x)
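%Scaled hyperbolic tangent activation f(x) = a*tanh(b*x) and its element-wise
%derivative f'(x) = a*b*sech(b*x).^2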

a = 1.716;
b = 2/3;
f	= a*tanh(b*x);
df	= a*b*sech(b*x).^2;