In: Electrical Engineering
kNN-based classification with a PCA/SVD (pcasvd) reduction technique, applied for classification. The function parts are needed.
Write MATLAB code.
Please answer only if you know the solution; otherwise leave it for others.
image processing, ELECTRICAL
ans:
function result = knnclassification(testsamplesX, samplesX, samplesY, Knn, type)
% Classify using the K-nearest-neighbour algorithm
% Inputs:
%   samplesX     - Training samples (one sample per row)
%   samplesY     - Training labels
%   testsamplesX - Test samples (one sample per row)
%   Knn          - Number of nearest neighbours
%   type         - Distance measure: '2norm' (default), '1norm', or 'match'
%
% Outputs:
%   result       - Predicted labels for the test samples

if nargin < 5
    type = '2norm';
end

L  = length(samplesY);
Uc = unique(samplesY);
if L < Knn
    error('You specified more neighbors than there are points.')
end

N      = size(testsamplesX, 1);
result = zeros(N, 1);

switch type
    case '2norm'   % squared Euclidean distance
        for i = 1:N
            dist = sum((samplesX - ones(L,1)*testsamplesX(i,:)).^2, 2); % row-wise sum
            [~, indices] = sort(dist);
            n = hist(samplesY(indices(1:Knn)), Uc);  % vote among the Knn nearest labels
            [~, best] = max(n);
            result(i) = Uc(best);
        end
    case '1norm'   % city-block (Manhattan) distance
        for i = 1:N
            dist = sum(abs(samplesX - ones(L,1)*testsamplesX(i,:)), 2);
            [~, indices] = sort(dist);
            n = hist(samplesY(indices(1:Knn)), Uc);
            [~, best] = max(n);
            result(i) = Uc(best);
        end
    case 'match'   % Hamming distance for discrete features: count mismatches, so smaller = closer
        for i = 1:N
            dist = sum(samplesX ~= ones(L,1)*testsamplesX(i,:), 2);
            [~, indices] = sort(dist);
            n = hist(samplesY(indices(1:Knn)), Uc);
            [~, best] = max(n);
            result(i) = Uc(best);
        end
    otherwise
        error('Unknown measure function');
end
end
%% RUN command (note: this classifies the training data against itself, so with
%% Knn = 1 it trivially returns the training labels; use a held-out test set for
%% a meaningful accuracy estimate):
%% result = knnclassification(featureVectorNorm, featureVectorNorm, targetVector, 1, '2norm')
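%
% A quick sketch of running it with a held-out split instead. It assumes
% featureVectorNorm and targetVector as in the run command above; the 1-in-5
% split and K = 3 are illustrative choices, not from the original post.
%
% idxTest   = 1:5:size(featureVectorNorm, 1);           % hold out every 5th sample
% idxTrain  = setdiff(1:size(featureVectorNorm, 1), idxTest);
% predicted = knnclassification(featureVectorNorm(idxTest,:), ...
%                               featureVectorNorm(idxTrain,:), ...
%                               targetVector(idxTrain), 3, '2norm');
% accuracy  = mean(predicted(:) == targetVector(idxTest(:)));  % fraction classified correctly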
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% PCA via the SVD of the data matrix. This is sometimes faster than the
% transpose (covariance) eigenvalue method when the dimension is very large
% and the number of examples is small.
function [U,d] = pcasvd(X, n, smallest)
% X        : nt x n data matrix (nt examples, feature vectors of size n);
%            mean-centre X before calling for standard PCA
% n        : number of components (dimensions) to return
% smallest : 1 to return the vectors associated with the smallest eigenvalues
% U        : left singular vectors of X (eigenvectors of X*X')
% d        : corresponding eigenvalues (squared singular values)
% Copyright (c) 2013, Vipin Vijayan.

if nargin < 3, smallest = 0; end

[U, S] = svd(X, 'econ');
s = diag(S);
if ~smallest
    % Keep only components above the numerical-rank tolerance
    tol = max(size(X)) * max(s) * eps(class(X));
    n = min(n, sum(s > tol));
    U = U(:, 1:n);
    s = s(1:n);
else
    % Return the components with the smallest singular values
    n = min(n, size(U, 2));
    U = U(:, end:-1:end-n+1);
    s = s(end:-1:end-n+1);
end
d = s.^2; % eigenvalues of X*X' (equivalently of X'*X)
end
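To tie the two parts together (PCA/SVD reduction followed by kNN classification, as the question asks), a minimal sketch is below. It assumes featureVectorNorm (one sample per row) and targetVector exist as in the run command above; testFeatures and the number of retained components k are illustrative placeholders, not from the original post.

%% PCA-reduced kNN classification (sketch)
k  = 10;                                      % number of principal components to keep (illustrative)
mu = mean(featureVectorNorm, 1);
Xc = bsxfun(@minus, featureVectorNorm, mu);   % mean-centre the training data before PCA

[U, d] = pcasvd(Xc, k);                       % U: eigenvectors of Xc*Xc', d: eigenvalues
V = Xc' * U * diag(1 ./ sqrt(d));             % recover principal directions (features x k)

trainScores = Xc * V;                         % reduced training features (nt x k)
testScores  = bsxfun(@minus, testFeatures, mu) * V;  % project test data with the same mean/basis

result = knnclassification(testScores, trainScores, targetVector, 3, '2norm');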