
function [eigvectors, eigvalues, meanData, newTrainData, newTestData] = TDPCA(trainData, testData, height, width, numvecs)
%TDPCA  Two-Dimensional Principal Component Analysis (2DPCA)
%   Usage:
%   [eigvectors, eigvalues, meanData, newTrainData, newTestData] = TDPCA(trainData, testData, height, width, numvecs)
%
%   trainData: rows of vectorized training images
%   testData:  rows of vectorized testing images
%   height:    height of each image matrix
%   width:     width of each image matrix
%   numvecs:   number of eigenvectors to keep
%
%   meanData:     mean of all training data, reshaped to height-by-width
%   newTrainData: training data after projection (mean removed)
%   newTestData:  testing data after projection (mean removed)
%   eigvectors:   each column is an eigenvector of the image covariance
%                 (scatter) matrix defined in 2DPCA
%   eigvalues:    eigenvalues of that covariance matrix
%
%   Reference: J. Yang, D. Zhang, A. F. Frangi, and J.-y. Yang,
%              "Two-Dimensional PCA: A New Approach to Appearance-Based
%              Face Representation and Recognition," IEEE Trans. on PAMI,
%              vol. 26, no. 1, pp. 131-137, 2004.
%   Written by Zhonghua Shen (cnjsnt_s@yahoo.com.cn), 2006.07
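%
%   Example (illustrative only; fea_Train and fea_Test are hypothetical
%   matrices whose rows are column-stacked 32x32 face images):
%       [V, D, mu, Ytrain, Ytest] = TDPCA(fea_Train, fea_Test, 32, 32, 10);
%       % each row of Ytrain/Ytest is a 32-by-10 feature matrix, flattened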

% Check arguments

if nargin ~= 5
    error('usage: [eigvectors, eigvalues, meanData, newTrainData, newTestData] = TDPCA(trainData, testData, height, width, numvecs)');
end;

[nSam,nFea] = size(trainData);

fprintf(1,'Computing average matrix...\n');
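% Each row of trainData/testData is assumed to hold an image stacked
% column by column (MATLAB's column-major order), so that
% reshape(...,height,width) recovers the original image matrix.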
meanDataVector = mean(trainData);
meanData = reshape(meanDataVector,height,width);

fprintf(1,'Calculating matrix differences from avg and 2DPCA covariance matrix L...\n');
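% Image covariance (scatter) matrix of 2DPCA (Yang et al., 2004):
%   L = (1/M) * sum_i (A_i - Abar)' * (A_i - Abar)
% where A_i is the i-th height-by-width training image, Abar is the mean
% image, and M = nSam; L is width-by-width.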
L = zeros(width,width);
ddata = zeros(nSam,nFea);
for i = 1:nSam
    ddata(i,:) = double(trainData(i,:))-meanDataVector;
    dummyMat = reshape(ddata(i,:),height,width);
    L = L + dummyMat'*dummyMat;
end;
L = L/nSam;
L = (L + L')/2;


fprintf(1,'Calculating eigenvectors of L...\n');
[eigvectors,eigvalues] = eig(L);

fprintf(1,'Sorting eigenvectors according to eigenvalues...\n');
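% sortem is an external helper (not included in this file); it is assumed
% to return the eigenvector/eigenvalue pairs sorted by descending
% eigenvalue, which the truncation below relies on.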
[eigvectors,eigvalues] = sortem(eigvectors,eigvalues);
eigvalues = diag(eigvalues);

fprintf(1,'Normalizing eigenvectors to unit length and discarding those with tiny eigenvalues...\n');
num_good = 0;
for i = 1:size(eigvectors,2)
    eigvectors(:,i) = eigvectors(:,i)/norm(eigvectors(:,i));
    if eigvalues(i) < 0.00001
        % Set the vector to the 0 vector; set the value to 0.
        eigvalues(i) = 0;
        eigvectors(:,i) = zeros(size(eigvectors,1),1);
    else
        num_good = num_good + 1;
    end;
end;
if (numvecs > num_good)
    fprintf(1,'Warning: numvecs is %d; only %d exist.\n',numvecs,num_good);
    numvecs = num_good;
end;
eigvectors = eigvectors(:,1:numvecs);
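% Projection step (below): each centered image A is mapped to its 2DPCA
% feature matrix Y = A * eigvectors (height-by-numvecs), which is then
% flattened into one row of the output.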

if nargout >= 4
    fprintf(1,'Feature extraction: projecting training data...\n');
    newTrainData = zeros(nSam,height*numvecs);
    for i = 1:nSam
        dummyMat = reshape(ddata(i,:),height,width);
        newTrainData(i,:) = reshape(dummyMat*eigvectors,1,height*numvecs);
    end
end;

if nargout == 5
    % Test data: center with the training mean, then project onto the
    % same eigenvectors.
    fprintf(1,'Feature extraction: projecting testing data...\n');
    nt = size(testData,1);
    newTestData = zeros(nt,height*numvecs);
    tdata = zeros(size(testData));
    for i = 1:nt
        tdata(i,:) = double(testData(i,:))-meanDataVector;
        dummyMat = reshape(tdata(i,:),height,width);
        newTestData(i,:) = reshape(dummyMat*eigvectors,1,height*numvecs);
    end;
end;