We use a deep neural network to predict Boston housing prices. The point of deep learning is to find a suitable function that maps inputs to the outputs we want. Deep learning is a research direction within machine learning: its goal is to give machines a human-like ability to analyze and learn, so that they can recognize text, images, sound, and other data. In my view, the main thing that separates deep learning from classical machine learning is the neuron.
Basic structure
Why activation functions are needed
Common types of activation functions
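A minimal sketch tying these points together (my own illustration, assuming PaddlePaddle 2.x; not code from the original post): a fully connected layer computes a weighted sum plus a bias, and an activation function such as ReLU, Sigmoid, or Tanh supplies the non-linearity that keeps stacked layers from collapsing into a single linear mapping.

import paddle

x = paddle.rand([1, 13])           # one sample with 13 features
fc = paddle.nn.Linear(13, 32)      # basic building block: y = xW + b
relu = paddle.nn.ReLU()            # common activations: ReLU, Sigmoid, Tanh
sigmoid = paddle.nn.Sigmoid()
tanh = paddle.nn.Tanh()
h = relu(fc(x))                    # non-linearity applied to the layer output
print(h.shape)                     # [1, 32]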
Neural networks are applied to many kinds of problems, such as classification, prediction, regression, and so on. Here we consider two problem types (a short sketch contrasting them follows):
Classification
Prediction
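As a rough illustration of the difference (my own sketch, assuming PaddlePaddle 2.x): a classifier typically ends in several class scores trained with cross-entropy, while a regressor such as the house-price model below ends in a single continuous output trained with mean squared error.

import paddle

clf_head = paddle.nn.Linear(13, 3)            # classification: 3 class scores
clf_loss = paddle.nn.CrossEntropyLoss()
reg_head = paddle.nn.Linear(13, 1)            # regression: 1 continuous value
reg_loss = paddle.nn.MSELoss()

x = paddle.rand([4, 13])
cls_labels = paddle.to_tensor([0, 2, 1, 0])   # class indices
reg_labels = paddle.rand([4, 1])              # continuous targets
print(clf_loss(clf_head(x), cls_labels))
print(reg_loss(reg_head(x), reg_labels))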
We use the PaddlePaddle Boston housing dataset (UCIHousing):
https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/text/UCIHousing_cn.html
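A quick way to see what the dataset contains (my own sketch, assuming PaddlePaddle 2.x): each sample is a 13-dimensional feature vector plus a single house-price label.

import paddle

train_dataset = paddle.text.datasets.UCIHousing(mode="train")
feature, label = train_dataset[0]
print(feature.shape, label.shape)   # (13,) features, (1,) price label
print(len(train_dataset))           # number of training samples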
## Deep learning framework and helper libraries
import paddle
import numpy as np
import os
import matplotlib.pyplot as plt
## Plotting helpers
Batch = 0
Batchs = []
all_train_accs = []
def draw_train_acc(Batchs, train_accs):   # accuracy curve (not used in this regression example)
    title = "training accs"
    plt.title(title)
    plt.xlabel("batch")
    plt.ylabel("acc")
    plt.plot(Batchs, train_accs, color='green', label='training accs')
    plt.legend()
    plt.grid()
    plt.show()
all_train_loss = []
def draw_train_loss(Batchs, train_loss):
    title = "training loss"
    plt.title(title)
    plt.xlabel("batch")
    plt.ylabel("loss")
    plt.plot(Batchs, train_loss, color='red', label='training loss')
    plt.legend()
    plt.grid()
    plt.show()
## Plot predicted values against ground truth
def draw_infer_result(ground_truths, infer_results):
    title = 'Boston'
    plt.title(title)
    x = np.arange(1, 20)
    y = x
    plt.plot(x, y)   # reference line y = x
    plt.xlabel("ground truth")
    plt.ylabel("infer result")
    plt.scatter(ground_truths, infer_results, color='green', label='infer results')
    plt.grid()
    plt.show()
'''
Dataset loading
'''
train_dataset = paddle.text.datasets.UCIHousing(mode="train")
eval_dataset = paddle.text.datasets.UCIHousing(mode="test")
train_loader = paddle.io.DataLoader(train_dataset, batch_size=32, shuffle=True)
eval_loader = paddle.io.DataLoader(eval_dataset, batch_size=8, shuffle=False)
print(train_dataset[1])   # inspect one sample: 13 features and the price label
'''
Core: network definition
'''
class MyDNN(paddle.nn.Layer):
    def __init__(self):
        super(MyDNN, self).__init__()
        # self.linear1 = paddle.nn.Linear(13, 1, None)  # fully connected layer: paddle.nn.Linear(in_features, out_features, weight_attr)
        self.linear1 = paddle.nn.Linear(13, 32, None)
        self.linear2 = paddle.nn.Linear(32, 64, None)
        self.linear3 = paddle.nn.Linear(64, 32, None)
        self.linear4 = paddle.nn.Linear(32, 1, None)

    def forward(self, inputs):   # forward pass
        x = self.linear1(inputs)
        x = self.linear2(x)
        x = self.linear3(x)
        x = self.linear4(x)
        return x
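# Note (not from the original post): MyDNN applies no activation between its
# Linear layers, so the stack reduces to a single linear mapping. A common
# variant, shown here only as a sketch and not used below, inserts ReLU:
class MyDNNReLU(paddle.nn.Layer):
    def __init__(self):
        super(MyDNNReLU, self).__init__()
        self.linear1 = paddle.nn.Linear(13, 32)
        self.linear2 = paddle.nn.Linear(32, 64)
        self.linear3 = paddle.nn.Linear(64, 32)
        self.linear4 = paddle.nn.Linear(32, 1)
        self.relu = paddle.nn.ReLU()

    def forward(self, inputs):
        x = self.relu(self.linear1(inputs))
        x = self.relu(self.linear2(x))
        x = self.relu(self.linear3(x))
        return self.linear4(x)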
'''
Network training and testing
'''
## Instantiate the model
model = MyDNN()
model.train()
mse_loss = paddle.nn.MSELoss()
opt = paddle.optimizer.SGD(learning_rate=0.001, parameters=model.parameters())
epochs_num = 100
for epochs in range(epochs_num):
    for batch_id, data in enumerate(train_loader()):
        feature = data[0]
        label = data[1]
        predict = model(feature)
        loss = mse_loss(predict, label)
        loss.backward()
        opt.step()
        opt.clear_grad()
        if batch_id != 0 and batch_id % 10 == 0:
            Batch = Batch + 10
            Batchs.append(Batch)
            all_train_loss.append(loss.numpy()[0])
            print("epoch:{},step:{},train_loss:{}".format(epochs, batch_id, loss.numpy()[0]))
paddle.save(model.state_dict(), "UCIHousingDNN")
draw_train_loss(Batchs, all_train_loss)
para_state = paddle.load("UCIHousingDNN")
model = MyDNN()
model.set_state_dict(para_state)
model.eval()
losses = []
for batch_id, data in enumerate(eval_loader()):
    feature = data[0]
    label = data[1]
    predict = model(feature)
    loss = mse_loss(predict, label)
    losses.append(loss.numpy()[0])
avg_loss = np.mean(losses)
print(avg_loss)   # average MSE on the test set
draw_infer_result(label.numpy(), predict.numpy())   # compare the last test batch
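As a follow-up (my own sketch, reusing the MyDNN class, the saved "UCIHousingDNN" parameters, and eval_dataset from the script above), the trained model can also be applied to a single test sample:

infer_model = MyDNN()
infer_model.set_state_dict(paddle.load("UCIHousingDNN"))
infer_model.eval()

feature, label = eval_dataset[0]                      # one test sample
feature = paddle.to_tensor(feature).reshape([1, 13])  # add a batch dimension
pred = infer_model(feature)
print("predicted:", pred.numpy(), "ground truth:", label)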