import numpy as np

# Helper functions assumed by the model below; relu and MSE were used in
# this listing but never defined.
def relu(x):
    return np.maximum(x, 0.0)

def MSE(t, y):
    return np.mean((t - y) ** 2)

class MLP_regressor:
    def __init__(self):
        # Define the weights.
        # np.random.randn draws from a standard normal distribution
        # (mean 0, variance 1, i.e. standard deviation 1);
        # the shape is (number of inputs, number of units).
        self.w1 = np.random.randn(2, 50) * 0.1
        self.w2 = np.random.randn(50, 50) * 0.1
        self.w3 = np.random.randn(50, 10) * 0.1
        self.w4 = np.random.randn(10, 1) * 0.1
        # Define the biases.
        self.b1 = np.zeros(50, dtype=float)
        self.b2 = np.zeros(50, dtype=float)
        self.b3 = np.zeros(10, dtype=float)
        self.b4 = np.zeros(1, dtype=float)
    def forward(self, x):
        self.layer0 = x
        self.layer1 = relu(np.dot(self.layer0, self.w1) + self.b1)
        self.layer2 = relu(np.dot(self.layer1, self.w2) + self.b2)
        self.layer3 = relu(np.dot(self.layer2, self.w3) + self.b3)
        self.out = np.dot(self.layer3, self.w4) + self.b4
        return self.out
    def backward(self, t, y):
        # Backpropagation: propagate the error signal from the output back.
        # delta4 is the per-sample error: d(t - y)^2 / dy = -2 * (t - y);
        # the batch average is taken in the gradient lines below.
        delta4 = -2 * (t - y)
        delta3 = np.dot(delta4, self.w4.transpose())
        delta2 = np.dot(delta3 * np.where(self.layer3 > 0, 1, 0), self.w3.transpose())
        delta1 = np.dot(delta2 * np.where(self.layer2 > 0, 1, 0), self.w2.transpose())
        # Gradients of the cost e with respect to the biases b
        # (averaged over the batch).
        self.dedb4 = np.mean(delta4, axis=0)
        self.dedb3 = np.mean(delta3 * (self.layer3 > 0), axis=0)
        self.dedb2 = np.mean(delta2 * (self.layer2 > 0), axis=0)
        self.dedb1 = np.mean(delta1 * (self.layer1 > 0), axis=0)
        # Gradients of the cost e with respect to the weights w.
        self.dedw4 = np.dot(self.layer3.T, delta4) / delta4.shape[0]
        self.dedw3 = np.dot(self.layer2.T, delta3 * (self.layer3 > 0)) / delta3.shape[0]
        self.dedw2 = np.dot(self.layer1.T, delta2 * (self.layer2 > 0)) / delta2.shape[0]
        self.dedw1 = np.dot(self.layer0.T, delta1 * (self.layer1 > 0)) / delta1.shape[0]
    def optimize_GradientDescent(self, lr):
        # Vanilla gradient descent: step each parameter against its gradient.
        self.b1 -= lr * self.dedb1
        self.b2 -= lr * self.dedb2
        self.b3 -= lr * self.dedb3
        self.b4 -= lr * self.dedb4
        self.w1 -= lr * self.dedw1
        self.w2 -= lr * self.dedw2
        self.w3 -= lr * self.dedw3
        self.w4 -= lr * self.dedw4
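# --- Gradient check (a minimal sketch; not part of the original code) ---
# Before training, it can help to verify backward() against a central
# finite-difference estimate of de/dw4 on a tiny random batch. The names
# gradient_check, x_chk, and t_chk are hypothetical.
def gradient_check(eps=1e-5):
    rng = np.random.default_rng(0)
    x_chk = rng.normal(size=(4, 2))
    t_chk = rng.normal(size=(4, 1))
    m = MLP_regressor()
    m.backward(t_chk, m.forward(x_chk))
    numeric = np.zeros_like(m.w4)
    for i in range(m.w4.shape[0]):
        for j in range(m.w4.shape[1]):
            m.w4[i, j] += eps
            loss_plus = MSE(t_chk, m.forward(x_chk))
            m.w4[i, j] -= 2 * eps
            loss_minus = MSE(t_chk, m.forward(x_chk))
            m.w4[i, j] += eps
            numeric[i, j] = (loss_plus - loss_minus) / (2 * eps)
    # The analytic and numeric gradients should agree to near eps**2.
    print('max |analytic - numeric| for w4:', np.abs(m.dedw4 - numeric).max())

gradient_check()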
# Training
model = MLP_regressor()
lr = 0.01
n_epoch = 500
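# --- Stand-in data (an assumption: train_data and test_data were not
# defined in this listing). They are assumed to be (N, 3) arrays, two input
# columns followed by one target column; make_data is a hypothetical helper
# generating a toy regression problem so the script runs end to end.
rng = np.random.default_rng(0)

def make_data(n):
    x = rng.uniform(-1.0, 1.0, size=(n, 2))
    t = np.sin(np.pi * x[:, 0:1]) + 0.5 * x[:, 1:2]
    return np.hstack([x, t])

train_data = make_data(200)
test_data = make_data(50)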
x_train = train_data[:, 0:2]  # first two columns: inputs
t_train = train_data[:, 2:3]  # last column: target
x_test = test_data[:, 0:2]
t_test = test_data[:, 2:3]
for n in range(n_epoch):
    y = model.forward(x_train)
    train_loss = MSE(t_train, y)
    model.backward(t_train, y)
    model.optimize_GradientDescent(lr)
    y = model.forward(x_test)
    test_loss = MSE(t_test, y)
    print('EPOCH', n + 1, '| TRAIN LOSS', train_loss, '| TEST LOSS', test_loss)
regression_loss = test_loss
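# --- Usage sketch (illustrative; x_new is a hypothetical input) ---
# Once trained, the model predicts a target value for any pair of inputs.
x_new = np.array([[0.5, -0.2], [0.0, 0.8]])
print('predictions for x_new:', model.forward(x_new).ravel())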