Modify the data generation method and the network training method
@@ -1,6 +1,8 @@
 import os
+import pickle
+
 import numpy as np
 import torch.nn

 from base_optimizer.optimizer_interface import *
 from generator import *
@@ -9,11 +11,11 @@ os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'


 class Net(torch.nn.Module):
-    def __init__(self, input_size, output_size):
+    def __init__(self, input_size, hidden_size=1024, output_size=1):
         super(Net, self).__init__()
-        self.fc1 = torch.nn.Linear(input_size, 1024)
+        self.fc1 = torch.nn.Linear(input_size, hidden_size)
         self.relu = torch.nn.ReLU()  # activation function
-        self.fc2 = torch.nn.Linear(1024, output_size)
+        self.fc2 = torch.nn.Linear(hidden_size, output_size)

     def forward(self, x):
         x = self.fc1(x)
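Note: the reworked Net above is a plain two-layer MLP (fc1 -> relu -> fc2) whose hidden width is now a constructor argument rather than a hard-coded 1024. A minimal sketch of exercising it; the sizes 32 and 16 below are arbitrary stand-ins, not values from the commit:

    import torch

    net = Net(input_size=32)      # hidden_size=1024, output_size=1 by default
    x = torch.randn(16, 32)       # a batch of 16 feature vectors
    y = net(x)                    # -> shape (16, 1)
    print(y.shape)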
@@ -22,6 +24,19 @@ class Net(torch.nn.Module):
         return x


+class LSTMNet(torch.nn.Module):
+    def __init__(self, input_size, hidden_size=256, output_size=1, num_layers=1):
+        super(LSTMNet, self).__init__()
+
+        self.lstm = torch.nn.LSTM(input_size, hidden_size, num_layers)
+        self.fc = torch.nn.Linear(hidden_size, output_size)
+
+    def forward(self, x):
+        x, _ = self.lstm(x)  # x is input with size (seq_len, batch_size, input_size)
+        x = self.fc(x)
+        return x[-1, :, ]
+
+
 def selective_initialization(component_points, population_size, machine_number):
     # assignment_result = [[0 for _ in range(len(component_points))] for _ in range(machine_number)]
     assignment_result = []
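Note: torch.nn.LSTM defaults to batch_first=False, so the newly added LSTMNet expects input shaped (seq_len, batch_size, input_size), and the final `x[-1, :, ]` keeps only the last time step's output. A minimal shape-check sketch, with arbitrary stand-in sizes:

    import torch

    model = LSTMNet(input_size=8, hidden_size=256)
    seq = torch.randn(20, 4, 8)   # (seq_len=20, batch_size=4, input_size=8)
    out = model(seq)              # last-step prediction, shape (4, 1)
    print(out.shape)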
@@ -44,58 +59,74 @@ def optimizer_hyperheuristc(pcb_data, component_data, machine_number):
 if __name__ == '__main__':
     warnings.simplefilter(action='ignore', category=FutureWarning)

-    train_file, test_file = 'train_data.txt', 'test_data.txt'
-    num_epochs = 30000
+    parser = argparse.ArgumentParser(description='network training implementation')
+    parser.add_argument('--train', default=True, type=bool, help='determine whether training the network')
+    parser.add_argument('--save', default=True, type=bool,
+                        help='determine whether saving the parameters of network, linear regression model, etc.')
+    parser.add_argument('--overwrite', default=False, type=bool,
+                        help='determine whether overwriting the training and testing data')
+    parser.add_argument('--train_file', default='train_data.txt', type=str, help='training file path')
+    parser.add_argument('--test_file', default='test_data.txt', type=str, help='testing file path')
+    parser.add_argument('--num_epochs', default=15000, type=int, help='number of epochs for training process')
+    parser.add_argument('--batch_size', default=100000, type=int, help='size of training batch')
+    parser.add_argument('--lr', default=1e-4, type=float, help='learning rate for the network')
+
+    params = parser.parse_args()

     data_mgr = DataMgr()
     device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

-    # batch_size = 40000
-    # for _ in range(batch_size):
-    #     pcb_data, component_data = data_mgr.generator()  # randomly generate PCB data
-    #     # data_mgr.remover()  # remove the last saved data
-    #     # data_mgr.saver('data/' + train_file, pcb_data)  # save new data
-    #
-    #     info = base_optimizer(1, pcb_data, component_data,
-    #                           feeder_data=pd.DataFrame(columns=['slot', 'part', 'arg']), method='feeder_scan',
-    #                           hinter=True)
-    #
-    #     data_mgr.recorder('opt/' + train_file, info, pcb_data, component_data)
+    if params.overwrite:
+        file = {params.train_file: params.batch_size,
+                params.test_file: params.batch_size // data_mgr.get_update_round() // 5}
+        for file_name, file_batch_size in file.items():
+            for _ in range(int(file_batch_size)):
+                with open('opt/' + file_name, 'a') as f:
+                    mode = file_name.split('.')[0].split('_')[0]
+                    pcb_data, component_data = data_mgr.generator(mode)  # randomly generate PCB data
+                    # data_mgr.remover()  # remove the last saved data
+                    # data_mgr.saver('data/' + file_name, pcb_data)  # save new data
+
-    train, save = True, True
-    learning_rate = 0.0005
+                    info = base_optimizer(1, pcb_data, component_data,
+                                          feeder_data=pd.DataFrame(columns=['slot', 'part', 'arg']),
+                                          method='feeder_scan',
+                                          hinter=True)
+
+                    data_mgr.recorder(f, info, pcb_data, component_data)
+                    f.close()

     net = Net(input_size=data_mgr.get_feature(), output_size=1).to(device)
-    if train:
-        data = data_mgr.loader('opt/' + train_file)
+    if params.train:
+        data = data_mgr.loader('opt/' + params.train_file)
         x_fit, y_fit = np.array(data[2:]).T, np.array([data[1]]).T
         lr = LinearRegression()
         lr.fit(x_fit, y_fit)

-        x_train, y_train = np.array(data[0]), lr.predict(x_fit)  # np.array(data[1])
+        x_train, y_train = np.array(data[0][::10]), lr.predict(x_fit[::10])
         # x_train, y_train = np.array(data[0]), np.array(data[2])

         x_train = torch.from_numpy(x_train.reshape((-1, np.shape(x_train)[1]))).float().to(device)
         y_train = torch.from_numpy(y_train.reshape((-1, 1))).float().to(device)

-        optimizer = torch.optim.Adam(net.parameters(), lr=learning_rate)
-        # scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1000, gamma=0.1)
+        optimizer = torch.optim.Adam(net.parameters(), lr=params.lr)
+        # scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=6000, gamma=0.8)

         loss_func = torch.nn.MSELoss()

-        for epoch in range(num_epochs):
+        for epoch in range(params.num_epochs):
             pred = net(x_train)
             loss = loss_func(pred, y_train)
             optimizer.zero_grad()
             loss.backward()
             optimizer.step()
             # scheduler.step()
-            if epoch % 200 == 0:
+            if epoch % 50 == 0:
                 print('Epoch: ', epoch, ', Loss: ', loss.item())
                 if loss.item() < 1e-4:
                     break

         net_predict = net(x_train).view(-1)
-        # pred_time, real_time = net_predict.cpu().detach().numpy(), y_train.view(-1).cpu().detach().numpy()
-        pred_time, real_time = net_predict.cpu().detach().numpy(), np.array(data[1])
+        pred_time, real_time = net_predict.cpu().detach().numpy(), y_train.view(-1).cpu().detach().numpy()

         pred_error = np.array([])
         for t1, t2 in np.nditer([pred_time, real_time]):
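Note: the training targets now come from a scikit-learn LinearRegression fitted on the handcrafted features in data[2:], and only every tenth sample (the [::10] slices) is fed to the network. One caveat worth flagging for review: argparse's type=bool converts any non-empty string to True, so `--train False` will not disable training; this is a standard argparse pitfall. A self-contained sketch of the fit-then-train pattern, using the Net class from this diff and random stand-in data (all sizes below are illustrative, not from the commit):

    import numpy as np
    import torch
    from sklearn.linear_model import LinearRegression

    x_net = np.random.rand(1000, 32).astype(np.float32)   # network input features (data[0])
    x_hand = np.random.rand(1000, 5)                      # handcrafted features (data[2:])
    t_real = np.random.rand(1000, 1)                      # measured assembly times (data[1])

    lr = LinearRegression().fit(x_hand, t_real)   # linear model on handcrafted features
    y_smooth = lr.predict(x_hand)                 # its predictions become the net's targets

    x_train = torch.from_numpy(x_net[::10])                        # keep every 10th sample
    y_train = torch.from_numpy(y_smooth[::10].astype(np.float32))

    net = Net(input_size=32)
    optimizer = torch.optim.Adam(net.parameters(), lr=1e-4)
    loss_func = torch.nn.MSELoss()
    for epoch in range(200):   # params.num_epochs in the real script
        loss = loss_func(net(x_train), y_train)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()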
@@ -107,21 +138,24 @@ if __name__ == '__main__':

         mse = np.linalg.norm((net_predict - y_train.view(-1)).cpu().detach().numpy())
         print(f'mean square error for training data result : {mse: 2f} ')
-        if save:
-            torch.save(net.state_dict(), 'model_state.pth')
-            with open('lr_model.pkl', 'wb') as f:
+        if params.save:
+            if not os.path.exists('model'):
+                os.mkdir('model')
+            torch.save(net.state_dict(), 'model/net_model.pth')
+            with open('model/lr_model.pkl', 'wb') as f:
                 pickle.dump(lr, f)
-            joblib.dump(lr, "lr_model.m")
-            # torch.save(optimizer.state_dict(), 'optimizer_state.pth')
+            # torch.save(optimizer.state_dict(), 'model/optimizer_state.pth')
     else:
-        with open('lr_model.pkl', 'rb') as f:
+        with open('model/lr_model.pkl', 'rb') as f:
             lr = pickle.load(f)
-        net.load_state_dict(torch.load('model_state.pth'))
+        net.load_state_dict(torch.load('model/net_model.pth'))
         # optimizer = torch.optim.Adam(net.parameters(), lr=learning_rate)
-        # optimizer.load_state_dict(torch.load('optimizer_state.pth'))
+        # optimizer.load_state_dict(torch.load('model/optimizer_state.pth'))

-    data = data_mgr.loader('opt/' + test_file)
+    data = data_mgr.loader('opt/' + params.test_file)
     # x_test, y_test = np.array(data[0]), np.array(data[1])
     x_test, y_test = np.array(data[0]), lr.predict(np.array(data[2:]).T)

     x_test, y_test = torch.from_numpy(x_test.reshape((-1, np.shape(x_test)[1]))).float().to(device), \
         torch.from_numpy(y_test.reshape((-1, 1))).float().to(device)
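Note: both artifacts now live under a model/ directory, and the redundant joblib copy of the linear model is dropped in favor of the single pickle. A minimal save/restore round-trip, assuming net and lr are the trained models from the script above:

    import os
    import pickle
    import torch

    os.makedirs('model', exist_ok=True)   # one-call equivalent of the exists()/mkdir() pair
    torch.save(net.state_dict(), 'model/net_model.pth')
    with open('model/lr_model.pkl', 'wb') as f:
        pickle.dump(lr, f)

    # later: restore both models
    net.load_state_dict(torch.load('model/net_model.pth'))
    with open('model/lr_model.pkl', 'rb') as f:
        lr = pickle.load(f)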
@@ -133,7 +167,8 @@ if __name__ == '__main__':
     pred_error = np.array([])
     for t1, t2 in np.nditer([pred_time, real_time]):
         pred_error = np.append(pred_error, abs(t1 - t2) / (t2 + 1e-10) * 100)
-
+    print(pred_time)
+    print(real_time)
     print('--------------------------------------')
     print(f'average prediction error for test data : {np.average(pred_error): .2f}% ')
     print(f'maximum prediction error for test data : {np.max(pred_error): .2f}% ')
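Note: the reported metric is the element-wise relative error in percent, with a 1e-10 guard against division by zero. The same computation can be vectorized instead of looping with np.nditer; a sketch with illustrative values:

    import numpy as np

    pred_time = np.array([10.2, 8.1, 12.7])   # illustrative predictions
    real_time = np.array([10.0, 8.5, 12.0])   # illustrative ground truth

    pred_error = np.abs(pred_time - real_time) / (real_time + 1e-10) * 100
    print(f'average prediction error for test data : {np.average(pred_error): .2f}% ')
    print(f'maximum prediction error for test data : {np.max(pred_error): .2f}% ')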