import os
import pickle
import warnings

import joblib
import numpy as np
import pandas as pd
import torch
from sklearn.linear_model import LinearRegression

from base_optimizer.optimizer_interface import *
from generator import *

os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'


class Net(torch.nn.Module):
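    """Two-layer MLP regressor; in this script it serves as a surrogate that maps the
    features produced by DataMgr to a single predicted value (the assembly time,
    judging by how it is trained and evaluated below)."""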
    def __init__(self, input_size, output_size):
        super(Net, self).__init__()
        self.fc1 = torch.nn.Linear(input_size, 1024)
        self.relu = torch.nn.ReLU()  # activation function
        self.fc2 = torch.nn.Linear(1024, output_size)
    def forward(self, x):
        x = self.fc1(x)
        x = self.relu(x)
        x = self.fc2(x)
        return x


def selective_initialization(component_points, population_size, machine_number):
    # assignment_result = [[0 for _ in range(len(component_points))] for _ in range(machine_number)]
    assignment_result = []
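    # A minimal sketch of one option (an assumption, not the project's settled scheme):
    # seed each individual by scattering every component's placement points uniformly at
    # random across the machines. component_points is assumed to hold one placement-point
    # count per component.
    for _ in range(population_size):
        individual = [[0 for _ in range(len(component_points))] for _ in range(machine_number)]
        for part_index, points in enumerate(component_points):
            for _ in range(points):
                individual[np.random.randint(machine_number)][part_index] += 1
        assignment_result.append(individual)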
    return assignment_result


def optimizer_hyperheuristic(pcb_data, component_data, machine_number):
    # genetic-based hyper-heuristic
    crossover_rate, mutation_rate = 0.8, 0.1
    population_size, n_generations = 200, 500

    # todo: how to generate initial population (random?)
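    # One possible preparatory step (an assumption, not the original design): derive the
    # per-component placement-point counts from pcb_data before seeding the population,
    # e.g. assuming pcb_data holds one row per placement with a 'part' column that can be
    # matched against component_data:
    # component_points = [0] * len(component_data)
    # for _, row in pcb_data.iterrows():
    #     component_points[component_data[component_data['part'] == row['part']].index[0]] += 1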
    # assignment_result = selective_initialization(component_points, population_size, machine_number)
    assignment_result = []
    return assignment_result


if __name__ == '__main__':
    warnings.simplefilter(action='ignore', category=FutureWarning)
    train_file, test_file = 'train_data.txt', 'test_data.txt'
    num_epochs = 30000

    data_mgr = DataMgr()
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    # batch_size = 40000
    # for _ in range(batch_size):
    #     pcb_data, component_data = data_mgr.generator()  # randomly generate PCB data
    #     # data_mgr.remover()  # remove the most recently saved data
    #     # data_mgr.saver('data/' + train_file, pcb_data)  # save the new data
    #
    #     info = base_optimizer(1, pcb_data, component_data,
    #                           feeder_data=pd.DataFrame(columns=['slot', 'part', 'arg']), method='feeder_scan',
    #                           hinter=True)
    #
    #     data_mgr.recorder('opt/' + train_file, info, pcb_data, component_data)

    train, save = True, True
    learning_rate = 0.0005
    net = Net(input_size=data_mgr.get_feature(), output_size=1).to(device)
    if train:
        data = data_mgr.loader('opt/' + train_file)
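        # Assumed layout of the loaded data, inferred from its use below: data[0] holds the
        # network input features, data[1] the recorded times, and data[2:] auxiliary
        # regression features. A linear model is fitted on the auxiliary features, and its
        # predictions serve as smoothed training targets for the network.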
        x_fit, y_fit = np.array(data[2:]).T, np.array([data[1]]).T
        lr = LinearRegression()
        lr.fit(x_fit, y_fit)

        x_train, y_train = np.array(data[0]), lr.predict(x_fit)  # np.array(data[1])
        x_train = torch.from_numpy(x_train.reshape((-1, np.shape(x_train)[1]))).float().to(device)
        y_train = torch.from_numpy(y_train.reshape((-1, 1))).float().to(device)
        optimizer = torch.optim.Adam(net.parameters(), lr=learning_rate)
        # scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1000, gamma=0.1)

        loss_func = torch.nn.MSELoss()

        for epoch in range(num_epochs):
            pred = net(x_train)
            loss = loss_func(pred, y_train)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            # scheduler.step()
            if epoch % 200 == 0:
                print('Epoch: ', epoch, ', Loss: ', loss.item())
            if loss.item() < 1e-4:
                break
        net_predict = net(x_train).view(-1)
        # pred_time, real_time = net_predict.cpu().detach().numpy(), y_train.view(-1).cpu().detach().numpy()
        pred_time, real_time = net_predict.cpu().detach().numpy(), np.array(data[1])
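        # Per-sample relative error in percent; the small epsilon guards against division by zero.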
        pred_error = np.array([])
        for t1, t2 in np.nditer([pred_time, real_time]):
            pred_error = np.append(pred_error, abs(t1 - t2) / (t2 + 1e-10) * 100)

        print('--------------------------------------')
        print(f'average prediction error for train data : {np.average(pred_error): .2f}% ')
        print(f'maximum prediction error for train data : {np.max(pred_error): .2f}% ')

        mse = np.mean(((net_predict - y_train.view(-1)).cpu().detach().numpy()) ** 2)
        print(f'mean square error for training data result : {mse: .2f} ')
        if save:
            torch.save(net.state_dict(), 'model_state.pth')
            with open('lr_model.pkl', 'wb') as f:
                pickle.dump(lr, f)
            joblib.dump(lr, "lr_model.m")
            # torch.save(optimizer.state_dict(), 'optimizer_state.pth')
    else:
        with open('lr_model.pkl', 'rb') as f:
            lr = pickle.load(f)
        net.load_state_dict(torch.load('model_state.pth'))
        # optimizer = torch.optim.Adam(net.parameters(), lr=learning_rate)
        # optimizer.load_state_dict(torch.load('optimizer_state.pth'))
    data = data_mgr.loader('opt/' + test_file)
    x_test, y_test = np.array(data[0]), lr.predict(np.array(data[2:]).T)
    x_test, y_test = torch.from_numpy(x_test.reshape((-1, np.shape(x_test)[1]))).float().to(device), \
        torch.from_numpy(y_test.reshape((-1, 1))).float().to(device)
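    # Note: the test targets come from the linear model's predictions (mirroring how the
    # network was trained), not from the raw recorded times in data[1].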
    net.eval()
    with torch.no_grad():
        net_predict = net(x_test).view(-1)
        pred_time, real_time = net_predict.cpu().detach().numpy(), y_test.view(-1).cpu().detach().numpy()
    pred_error = np.array([])
    for t1, t2 in np.nditer([pred_time, real_time]):
        pred_error = np.append(pred_error, abs(t1 - t2) / (t2 + 1e-10) * 100)

    print('--------------------------------------')
    print(f'average prediction error for test data : {np.average(pred_error): .2f}% ')
    print(f'maximum prediction error for test data : {np.max(pred_error): .2f}% ')

    mse = np.mean((pred_time - real_time) ** 2)
    print(f'mean square error for test data result : {mse: .2f} ')