-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathalgorithmVersion.m
More file actions
executable file
·115 lines (76 loc) · 2.45 KB
/
algorithmVersion.m
File metadata and controls
executable file
·115 lines (76 loc) · 2.45 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
function [bestLambda] = algorithmVersion(x_train, y_train)
% ALGORITHMVERSION  Choose the regularization parameter lambda by
% split-sample (hold-out) validation.
%
%   bestLambda = algorithmVersion(x_train, y_train) splits the rows of
%   the training data in half: the first ceil(N/2) rows form the
%   learning set D_{n_l}, the remaining rows form the validation set.
%   For each candidate lambda in generateLambda() ({2^-10, ..., 2^10})
%   it fits on the learning set via getYpredict and evaluates the
%   empirical L2 risk on the validation set via LTwoRisk. The candidate
%   with minimal validation risk is returned.
%
%   Inputs:
%     x_train - N-by-p matrix of covariates (one observation per row)
%     y_train - N-by-q matrix of responses; row i matches x_train(i,:)
%   Output:
%     bestLambda - candidate lambda achieving the smallest hold-out risk
%
%   NOTE(review): generateLambda, getYpredict and LTwoRisk are project
%   helpers defined elsewhere; their exact contracts are assumed from
%   usage here.

%% Step 1: split the training data into a learning half and a test half.
N = size(x_train, 1);
midValue = ceil(N / 2);                 % learning set gets the (larger) first half

% Vectorized row-slicing replaces the original element-copy loops.
learningData_x = x_train(1:midValue, :);
learningData_y = y_train(1:midValue, :);
testData_x     = x_train(midValue+1:N, :);
testData_y     = y_train(midValue+1:N, :);

learnDim = size(learningData_x);        % forwarded to LTwoRisk for normalization
testDim  = size(testData_x);

%% Step 2: candidate grid {2^-10, ..., 2^10} and per-candidate risk storage.
lambda = generateLambda();
n = size(lambda, 2);
riskResult = zeros(n, 1);

%% Step 3: empirical L2 risk of each candidate on the held-out half:
%   min_lambda  (1/n_t) * sum_{i = n_l+1}^{n_l+n_t} |m^{lambda}_{n_l}(X_i) - Y_i|^2
% where N = n_l + n_t.
for j = 1:n
    % Fit on the learning half, predict the test half for this lambda.
    yPredict = getYpredict(learningData_x, learningData_y, testData_x, lambda(j));
    riskResult(j) = LTwoRisk(yPredict, testData_y, learnDim, testDim);
end

%% Step 4: m_n(x) = m^{lambda*}_{n_l}(x, D_{n_l}) — pick the risk minimizer.
% Semicolon added: the original statement was unterminated and printed
% the min result to the console on every call. min returns the first
% index on ties.
[~, indexMinRisk] = min(riskResult);
bestLambda = lambda(indexMinRisk);
end