import argparse
import copy
import math
import os
import pickle
import random
import warnings

import numpy as np
import pandas as pd
import torch
from collections import defaultdict
from sklearn.linear_model import LinearRegression
from sklearn.svm import SVR
from tqdm import tqdm

from generator import *
from base_optimizer.optimizer_interface import *
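
# Assembly-time estimators for SMT placement optimization. This module defines:
#   - Net / LSTMNet: neural networks mapping encoded PCB features to assembly time
#   - NeuralEstimator: trains/tests the feed-forward network
#   - HeuristicEstimator / RegressionEstimator: linear regression over hand-crafted
#     heuristic features
#   - SVREstimator: a stacked SVR ensemble tuned by symbiotic organism search (SOS)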


class Net(torch.nn.Module):
    def __init__(self, input_size, hidden_size=1000, output_size=1):
        super(Net, self).__init__()
        self.fc1 = torch.nn.Linear(input_size, hidden_size)
        self.relu = torch.nn.ReLU()  # activation function
        self.fc2 = torch.nn.Linear(hidden_size, hidden_size)
        # self.relu1 = torch.nn.ReLU()  # activation function
        self.fc3 = torch.nn.Linear(hidden_size, output_size)

    def forward(self, x):
        x = self.fc1(x)
        # x = self.relu(x)
        x = self.fc2(x)
        x = self.relu(x)
        x = self.fc3(x)
        return x


class LSTMNet(torch.nn.Module):
    def __init__(self, input_size, hidden_size=256, output_size=1, num_layers=1):
        super(LSTMNet, self).__init__()

        self.lstm = torch.nn.LSTM(input_size, hidden_size, num_layers)
        self.fc = torch.nn.Linear(hidden_size, output_size)

    def forward(self, x):
        x, _ = self.lstm(x)  # x is input with size (seq_len, batch_size, input_size)
        x = self.fc(x)
        return x[-1, :, :]   # keep only the output of the last time step
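
# A minimal shape check for the two networks above (sketch; the real input size
# comes from DataMgr.get_feature() at runtime, 30 here is only an assumption):
#
#   net = Net(input_size=30)
#   y = net(torch.rand(8, 30))        # -> torch.Size([8, 1])
#   lstm = LSTMNet(input_size=30)
#   y = lstm(torch.rand(5, 8, 30))    # -> torch.Size([8, 1]), last time step only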


class Estimator:
    def __init__(self):
        self.data_mgr = DataMgr()

    def training(self, params):
        pass

    def testing(self, params):
        pass

    def predict(self, cp_points, cp_nozzle, board_width=None, board_height=None):
        pass


class NeuralEstimator(Estimator):
    def __init__(self):
        super().__init__()
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

        self.net = Net(input_size=self.data_mgr.get_feature(), output_size=1).to(self.device)
        self.net_file = 'model/net_model.pth'
        if os.path.exists(self.net_file):
            self.net.load_state_dict(torch.load(self.net_file, map_location=self.device))

    def init_weights(self):
        for m in self.net.modules():
            if isinstance(m, torch.nn.Linear):
                torch.nn.init.xavier_uniform_(m.weight)
                torch.nn.init.zeros_(m.bias)

    def training(self, params):
        self.init_weights()  # initialize network parameters
        data = self.data_mgr.loader('opt/' + params.train_file)
        x_train = np.array(self.data_mgr.neural_encode(data[0][::self.data_mgr.get_update_round()]))
        y_train = np.array(data[1][::self.data_mgr.get_update_round()])

        x_train = torch.from_numpy(x_train.reshape((-1, np.shape(x_train)[1]))).float().to(self.device)
        y_train = torch.from_numpy(y_train.reshape((-1, 1))).float().to(self.device)

        optimizer = torch.optim.Adam(self.net.parameters(), lr=params.lr)
        # scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=5000, gamma=0.1)

        loss_func = torch.nn.MSELoss()

        for epoch in range(params.num_epochs):
            pred = self.net(x_train)
            loss = loss_func(pred, y_train)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            # scheduler.step()
            if epoch % 100 == 0:
                print('Epoch: ', epoch, ', Loss: ', loss.item())
            if loss.item() < 1e-4:
                break

        net_predict = self.net(x_train).view(-1)
        pred_time, real_time = net_predict.cpu().detach().numpy(), y_train.view(-1).cpu().detach().numpy()

        pred_error = np.array([])
        for t1, t2 in np.nditer([pred_time, real_time]):
            pred_error = np.append(pred_error, abs(t1 - t2) / (t2 + 1e-10) * 100)

        print('--------------------------------------')
        print(f'average prediction error for train data : {np.average(pred_error): .2f}% ')
        print(f'maximum prediction error for train data : {np.max(pred_error): .2f}% ')

        mse = np.mean((pred_time - real_time) ** 2)
        print(f'mean squared error for training data result : {mse:.2f} ')
        if params.save:
            if not os.path.exists('model'):
                os.mkdir('model')
            torch.save(self.net.state_dict(), self.net_file)
            # self.net.load_state_dict(torch.load(self.net_file))

    def testing(self, params):
        data = self.data_mgr.loader('opt/' + params.test_file)
        x_test, y_test = np.array(self.data_mgr.neural_encode(data[0])), np.array(data[1])

        x_test = torch.from_numpy(x_test.reshape((-1, np.shape(x_test)[1]))).float().to(self.device)

        self.net.eval()
        with torch.no_grad():
            pred_time = self.net(x_test).view(-1).cpu().detach().numpy()
            # x_test = x_test.cpu().detach().numpy()

        over_set = []
        pred_idx, pred_error = 0, np.array([])
        for t1, t2 in np.nditer([pred_time, y_test.reshape(-1)]):
            pred_error = np.append(pred_error, abs(t1 - t2) / (t2 + 1e-10) * 100)

            if pred_error[-1] > 5:
                over_set.append(pred_idx + 1)
                print(f'\033[0;31;31midx: {pred_idx + 1: d}, net: {t1: .3f}, real: {t2: .3f}, '
                      f'gap: {pred_error[-1]: .3f}\033[0m')
            # else:
            #     print(f'idx: {pred_idx + 1: d}, net: {t1: .3f}, real: {t2: .3f}, gap: {pred_error[-1]: .3f}')

            pred_idx += 1

        print('over:', over_set)
        print('size:', len(over_set))

        print('--------------------------------------')
        print(f'average prediction error for test data : {np.average(pred_error): .3f}% ')
        print(f'maximum prediction error for test data : {np.max(pred_error): .3f}% ')

        mse = np.mean((pred_time - y_test.reshape(-1)) ** 2)
        print(f'mean squared error for test data result : {mse:.2f} ')

    def predict(self, cp_points, cp_nozzle, board_width=None, board_height=None):
        assert board_width is not None and board_height is not None
        encoding = np.array(self.data_mgr.encode(cp_points, cp_nozzle, board_width, board_height))
        encoding = torch.from_numpy(encoding.reshape((-1, np.shape(encoding)[0]))).float().to(self.device)
        return self.net(encoding)[0, 0].item()
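
# Typical call flow for NeuralEstimator (sketch; the argument values are assumptions):
#
#   estimator = NeuralEstimator()
#   estimator.training(params)   # fit on 'opt/' + params.train_file
#   estimator.testing(params)    # report errors on 'opt/' + params.test_file
#   t = estimator.predict(cp_points, cp_nozzle, board_width=200, board_height=150)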


class HeuristicEstimator(Estimator):
    def __init__(self):
        super().__init__()

        self.lr = LinearRegression()
        self.pickle_file = 'model/heuristic_lr_model.pkl'
        if os.path.exists(self.pickle_file):
            with open(self.pickle_file, 'rb') as f:
                self.lr = pickle.load(f)

    def training(self, params):
        data = self.data_mgr.loader('opt/' + params.train_file)
        x_fit = [self.heuristic_genetic(cp_points, cp_nozzle) for cp_points, cp_nozzle, _, _ in data[0]]
        y_fit = np.array([data[1]]).T
        self.lr.fit(x_fit, y_fit)

        if params.save:
            if not os.path.exists('model'):
                os.mkdir('model')
            with open(self.pickle_file, 'wb') as f:
                pickle.dump(self.lr, f)

        y_predict = self.lr.predict(x_fit)
        pred_error = np.array([])
        for t1, t2 in np.nditer([y_fit, y_predict]):
            pred_error = np.append(pred_error, abs(t1 - t2) / (t2 + 1e-10) * 100)

        print('--------------------------------------')
        print(f'average prediction error for train data : {np.average(pred_error): .2f}% ')
        print(f'maximum prediction error for train data : {np.max(pred_error): .2f}% ')

    def testing(self, params):
        data = self.data_mgr.loader('opt/' + params.test_file)
        x_fit = [self.heuristic_genetic(cp_points, cp_nozzle) for cp_points, cp_nozzle, _, _ in data[0]]
        y_fit = np.array([data[1]]).T

        y_predict = self.lr.predict(x_fit)
        pred_error = np.array([])
        for t1, t2 in np.nditer([y_fit, y_predict]):
            pred_error = np.append(pred_error, abs(t1 - t2) / (t2 + 1e-10) * 100)

        print('--------------------------------------')
        print(f'average prediction error for test data : {np.average(pred_error): .2f}% ')
        print(f'maximum prediction error for test data : {np.max(pred_error): .2f}% ')

    def predict(self, cp_points, cp_nozzle, board_width=None, board_height=None):
        return self.lr.predict(np.array(self.heuristic_genetic(cp_points, cp_nozzle)).reshape(1, -1))

    def heuristic_genetic(self, cp_points, cp_nozzle):
        """Build the feature vector [nl, wl, ul]: placement points, workload, extra nozzle sets."""
        nozzle_points, nozzle_component_points = defaultdict(int), defaultdict(list)
        for idx, nozzle in cp_nozzle.items():
            if cp_points[idx] == 0:
                continue
            nozzle_points[nozzle] += cp_points[idx]

            nozzle_component_points[cp_nozzle[idx]] = [0] * len(cp_points)

        for idx, (part_index, points) in enumerate(cp_points.items()):
            nozzle_component_points[cp_nozzle[part_index]][idx] = points

        nl = sum(cp_points.values())  # number of placement points
        ul = math.ceil(len(nozzle_points) * 1.0 / max_head_index) - 1  # number of nozzle sets

        # assignment of nozzles to heads
        wl = 0  # workload
        total_heads = (1 + ul) * max_head_index - len(nozzle_points)
        nozzle_heads = defaultdict(int)
        for nozzle in nozzle_points.keys():
            if nozzle_points[nozzle] == 0:
                continue
            nozzle_heads[nozzle] = math.floor(nozzle_points[nozzle] * 1.0 / nl * total_heads)
            nozzle_heads[nozzle] += 1

        total_heads = (1 + ul) * max_head_index
        for heads in nozzle_heads.values():
            total_heads -= heads

        # hand the remaining heads to the nozzles with the highest points-per-head ratio
        while total_heads > 0:
            nozzle = max(nozzle_heads, key=lambda x: nozzle_points[x] / nozzle_heads[x])
            nozzle_heads[nozzle] += 1
            total_heads -= 1

        # evenly assign placements to heads
        heads_placement = []
        for nozzle in nozzle_heads.keys():
            points = math.floor(nozzle_points[nozzle] / nozzle_heads[nozzle])

            heads_placement += [[nozzle, points] for _ in range(nozzle_heads[nozzle])]
            nozzle_points[nozzle] -= (nozzle_heads[nozzle] * points)
            for idx in range(len(heads_placement) - 1, -1, -1):
                if nozzle_points[nozzle] <= 0:
                    break
                nozzle_points[nozzle] -= 1
                heads_placement[idx][1] += 1
        heads_placement = sorted(heads_placement, key=lambda x: x[1], reverse=True)

        # every max_head_index heads in non-increasing order are grouped together as a nozzle set
        for idx in range(len(heads_placement) // max_head_index):
            wl += heads_placement[idx][1]

        # the number of pick-up operations
        # (under the assumption that exactly one feeder is available per component type);
        # note: pl is computed for reference but is not part of the returned feature vector
        pl = 0
        heads_placement_points = [0 for _ in range(max_head_index)]
        while True:
            head_assign_point = []
            for head in range(max_head_index):
                if heads_placement_points[head] != 0 or heads_placement[head][1] == 0:
                    continue

                nozzle, points = heads_placement[head]
                max_comp_index = np.argmax(nozzle_component_points[nozzle])

                heads_placement_points[head] = min(points, nozzle_component_points[nozzle][max_comp_index])
                nozzle_component_points[nozzle][max_comp_index] -= heads_placement_points[head]

                head_assign_point.append(heads_placement_points[head])

            min_points_list = list(filter(lambda x: x > 0, heads_placement_points))
            if len(min_points_list) == 0 or len(head_assign_point) == 0:
                break

            pl += max(head_assign_point)

            for head in range(max_head_index):
                heads_placement[head][1] -= min(min_points_list)
                heads_placement_points[head] -= min(min_points_list)

        return [nl, wl, ul]
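
# Worked example for heuristic_genetic (sketch, assuming max_head_index == 6):
# cp_points = {0: 60, 1: 40} with two distinct nozzles gives nl = 100 and ul = 0
# (a single nozzle set); the six heads are split 4/2 by point share (tie broken
# by dict order), so per-head workloads are [20, 20, 15, 15, 15, 15] and
# wl = 20, the heaviest head workload of the set.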


class RegressionEstimator(Estimator):
    def __init__(self):
        super().__init__()

        self.lr = LinearRegression()
        self.pickle_file = 'model/params_lr_model.pkl'
        if os.path.exists(self.pickle_file):
            with open(self.pickle_file, 'rb') as f:
                self.lr = pickle.load(f)

    def training(self, params):
        data = self.data_mgr.loader('opt/' + params.train_file)
        x_fit = [self.heuristic_reconfig(cp_points, cp_nozzle) for cp_points, cp_nozzle, _, _ in data[0]]
        y_fit = np.array([data[1]]).T
        self.lr.fit(x_fit, y_fit)

        if params.save:
            if not os.path.exists('model'):
                os.mkdir('model')
            with open(self.pickle_file, 'wb') as f:
                pickle.dump(self.lr, f)

        y_predict = self.lr.predict(x_fit)
        pred_error = np.array([])
        for t1, t2 in np.nditer([y_fit, y_predict]):
            pred_error = np.append(pred_error, abs(t1 - t2) / (t2 + 1e-10) * 100)

        print('--------------------------------------')
        print(f'average prediction error for train data : {np.average(pred_error): .2f}% ')
        print(f'maximum prediction error for train data : {np.max(pred_error): .2f}% ')

    def testing(self, params):
        data = self.data_mgr.loader('opt/' + params.test_file)
        x_fit = [self.heuristic_reconfig(cp_points, cp_nozzle) for cp_points, cp_nozzle, _, _ in data[0]]
        y_fit = np.array([data[1]]).T

        y_predict = self.lr.predict(x_fit)
        pred_error = np.array([])
        for t1, t2 in np.nditer([y_fit, y_predict]):
            pred_error = np.append(pred_error, abs(t1 - t2) / (t2 + 1e-10) * 100)

        print('--------------------------------------')
        print(f'average prediction error for test data : {np.average(pred_error): .2f}% ')
        print(f'maximum prediction error for test data : {np.max(pred_error): .2f}% ')

    def predict(self, cp_points, cp_nozzle, board_width=None, board_height=None):
        return self.lr.predict(np.array(self.heuristic_reconfig(cp_points, cp_nozzle)).reshape(1, -1))

    def heuristic_reconfig(self, cp_points, cp_nozzle):
        """Build the feature vector [total_point_number, task_block_number]."""
        task_block_number, total_point_number = 0, sum(cp_points.values())
        nozzle_points, nozzle_heads = defaultdict(int), defaultdict(int)

        for part, points in cp_points.items():
            nozzle_points[cp_nozzle[part]] += points
            nozzle_heads[cp_nozzle[part]] = 1
        remaining_head = max_head_index - len(nozzle_heads)

        nozzle_fraction = []
        for nozzle, points in nozzle_points.items():
            val = remaining_head * points / total_point_number
            nozzle_heads[nozzle] += math.floor(val)
            nozzle_fraction.append([nozzle, val - math.floor(val)])

        # assign the remaining heads by the largest-remainder rule
        remaining_head = max_head_index - sum(nozzle_heads.values())
        nozzle_fraction.sort(key=lambda x: x[1], reverse=True)
        nozzle_fraction_index = 0
        while remaining_head > 0:
            nozzle_heads[nozzle_fraction[nozzle_fraction_index][0]] += 1
            nozzle_fraction_index = (nozzle_fraction_index + 1) % len(nozzle_fraction)
            remaining_head -= 1

        for nozzle, heads_number in nozzle_heads.items():
            task_block_number = max(task_block_number, math.ceil(nozzle_points[nozzle] / heads_number))

        return [total_point_number, task_block_number]
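
# Worked example for heuristic_reconfig (sketch, assuming max_head_index == 6):
# cp_points = {0: 60, 1: 40} with two distinct nozzles: each nozzle first gets
# one head, the remaining four are shared 2/1 by point share, and the last head
# goes to the largest fractional remainder; with a 3/3 head split the task
# block number is max(ceil(60/3), ceil(40/3)) = 20, so the features are [100, 20].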


class SVREstimator(Estimator):
    def __init__(self):
        super().__init__()

        # === symbiotic organism search (SOS) parameters ===
        self.population_size = 20   # population size of the meta-heuristic
        self.num_iteration = 100    # number of iterations
        self.w_quart = 1.5          # whisker multiplier for IQR-based outlier filtering

        # === support vector regression parameters ===
        self.kernel_func = "rbf"
        self.C_range = [0.1, 10]
        self.gamma_range = [0.01, 0.5]
        self.epsilon_range = [0.01, 0.1]
        self.benefit_factor = [1, 2]

        # five fold-level models plus one stacking model
        self.num_folds = 5
        self.svr_list = [SVR() for _ in range(self.num_folds + 1)]

        for i in range(self.num_folds + 1):
            pickle_file = 'model/svr' + str(i + 1) + '_model.pkl'
            if not os.path.exists(pickle_file):
                continue
            with open(pickle_file, 'rb') as f:
                self.svr_list[i] = pickle.load(f)

        # training runs SOS once per fold plus once for the stacking model
        self.pbar = tqdm(total=self.num_iteration * (self.num_folds + 1) * self.population_size)
        self.pbar.set_description('svr training process')

    def training(self, params):
        data = self.data_mgr.loader('opt/' + params.train_file)
        # filter samples whose labels fall outside the w_quart * IQR whiskers
        Q1, Q3 = np.percentile(np.array(data[1]), 25), np.percentile(np.array(data[1]), 75)
        indices = [i for i in range(len(data[1])) if
                   Q1 - self.w_quart * (Q3 - Q1) <= data[1][i] <= Q3 + self.w_quart * (Q3 - Q1)]
        data[0], data[1] = [data[0][i] for i in indices], [data[1][i] for i in indices]

        self.svr_list = []
        division = len(data[0]) // self.num_folds

        for cnt in range(self.num_folds):
            x_train, y_train = data[0], data[1]
            x_train = [[sum(x_train[i][0].values()), x_train[i][2], x_train[i][3]] for i in range(len(data[0])) if
                       not cnt * division <= i < (cnt + 1) * division]
            y_train = [y_train[i] for i in range(len(data[0])) if not cnt * division <= i < (cnt + 1) * division]

            self.svr_list.append(self.sos_svr_training(x_train, y_train))

        # train the stacking model on the out-of-fold predictions
        final_input, final_output = [], []
        for cnt in range(self.num_folds):
            x_valid = [[sum(data[0][i][0].values()), data[0][i][2], data[0][i][3]] for i in range(len(data[0])) if
                       cnt * division <= i < (cnt + 1) * division]

            final_input.extend([[v] for v in self.svr_list[cnt].predict(x_valid)])
            final_output.extend(
                [data[1][i] for i in range(len(data[0])) if cnt * division <= i < (cnt + 1) * division])
        self.svr_list.append(self.sos_svr_training(final_input, final_output))

        if params.save:
            for i in range(self.num_folds + 1):
                pickle_file = 'model/svr' + str(i + 1) + '_model.pkl'
                with open(pickle_file, 'wb') as f:
                    pickle.dump(self.svr_list[i], f)

        predict_x = [[sum(data[0][i][0].values()), data[0][i][2], data[0][i][3]] for i in range(len(data[0]))]
        predict_y = []
        for cnt in range(self.num_folds):
            predict_y.extend(self.svr_list[cnt].predict(predict_x))

        # average the fold-level predictions per sample before the stacking model
        # (also avoids shadowing the built-in name `input`)
        stack_input = [[v] for v in np.array(predict_y).reshape(self.num_folds, -1).mean(axis=0)]
        predict_val = self.svr_list[-1].predict(stack_input)

        pred_error = np.array([])
        for t1, t2 in np.nditer([data[1], predict_val]):
            pred_error = np.append(pred_error, abs(t1 - t2) / (t2 + 1e-10) * 100)

        print('--------------------------------------')
        print(f'average prediction error for train data : {np.average(pred_error): .2f}% ')
        print(f'maximum prediction error for train data : {np.max(pred_error): .2f}% ')

    def sos_svr_training(self, x_train, y_train):
        """Tune SVR hyper-parameters (C, gamma, epsilon) with symbiotic organism search."""
        population = []
        for _ in range(self.population_size):
            svr_param = [random.uniform(self.C_range[0], self.C_range[1]),
                         random.uniform(self.gamma_range[0], self.gamma_range[1]),
                         random.uniform(self.epsilon_range[0], self.epsilon_range[1])]
            population.append(SVR(kernel=self.kernel_func, C=svr_param[0], gamma=svr_param[1], epsilon=svr_param[2]))

        population_val = []
        for individual in population:
            population_val.append(self.svr_error(individual, x_train, y_train))

        for _ in range(self.num_iteration):
            best_svr = population[np.argmin(population_val)]
            for i in range(self.population_size):
                # === mutualism phase: organisms i and j both move toward the best organism ===
                while True:
                    j = random.randint(0, self.population_size - 1)
                    if i != j:
                        break

                Mv_C = (population[i].C + population[j].C) / 2
                Mv_gamma = (population[i].gamma + population[j].gamma) / 2
                Mv_epsilon = (population[i].epsilon + population[j].epsilon) / 2

                for idx, svr in zip([i, j], [population[i], population[j]]):
                    new_C = svr.C + random.random() * (best_svr.C - Mv_C * random.choice(self.benefit_factor))
                    new_gamma = svr.gamma + random.random() * (
                            best_svr.gamma - Mv_gamma * random.choice(self.benefit_factor))
                    new_epsilon = svr.epsilon + random.random() * (
                            best_svr.epsilon - Mv_epsilon * random.choice(self.benefit_factor))

                    if new_C >= 0 and new_gamma >= 0 and new_epsilon >= 0:
                        new_svr = SVR(kernel=self.kernel_func, C=new_C, gamma=new_gamma, epsilon=new_epsilon)
                        new_svr_val = self.svr_error(new_svr, x_train, y_train)

                        if new_svr_val < population_val[idx]:
                            population[idx], population_val[idx] = new_svr, new_svr_val

                # === commensalism phase: organism i moves relative to a random organism j ===
                while True:
                    j = random.randint(0, self.population_size - 1)
                    if i != j:
                        break

                new_C = population[i].C + random.uniform(-1, 1) * (best_svr.C - population[j].C)
                new_gamma = population[i].gamma + random.uniform(-1, 1) * (best_svr.gamma - population[j].gamma)
                new_epsilon = population[i].epsilon + random.uniform(-1, 1) * (
                        best_svr.epsilon - population[j].epsilon)

                if new_C >= 0 and new_gamma >= 0 and new_epsilon >= 0:
                    new_svr = SVR(kernel=self.kernel_func, C=new_C, gamma=new_gamma, epsilon=new_epsilon)
                    new_svr_val = self.svr_error(new_svr, x_train, y_train)

                    # the candidate is derived from organism i, so it competes with i
                    if new_svr_val < population_val[i]:
                        population[i], population_val[i] = new_svr, new_svr_val

                # === parasitism phase: a mutated copy of organism i challenges a random host j ===
                while True:
                    j = random.randint(0, self.population_size - 1)
                    if i != j:
                        break
                new_svr = copy.deepcopy(population[i])
                idx = random.randint(0, 2)
                if idx == 0:
                    new_svr.C = random.uniform(self.C_range[0], self.C_range[1])
                elif idx == 1:
                    new_svr.gamma = random.uniform(self.gamma_range[0], self.gamma_range[1])
                else:
                    new_svr.epsilon = random.uniform(self.epsilon_range[0], self.epsilon_range[1])

                new_svr_val = self.svr_error(new_svr, x_train, y_train)
                if new_svr_val < population_val[j]:
                    population[j], population_val[j] = new_svr, new_svr_val
                self.pbar.update(1)

        return population[np.argmin(population_val)]

    def testing(self, params):
        data = self.data_mgr.loader('opt/' + params.test_file)

        predict_x = [[sum(data[0][i][0].values()), data[0][i][2], data[0][i][3]] for i in range(len(data[0]))]
        predict_y = []
        for cnt in range(self.num_folds):
            predict_y.extend(self.svr_list[cnt].predict(predict_x))

        # average the fold-level predictions per sample before the stacking model
        stack_input = [[v] for v in np.array(predict_y).reshape(self.num_folds, -1).mean(axis=0)]
        predict_val = self.svr_list[-1].predict(stack_input)

        pred_error = np.array([])
        for t1, t2 in np.nditer([data[1], predict_val]):
            pred_error = np.append(pred_error, abs(t1 - t2) / (t2 + 1e-10) * 100)

        print('--------------------------------------')
        print(f'average prediction error for test data : {np.average(pred_error): .2f}% ')
        print(f'maximum prediction error for test data : {np.max(pred_error): .2f}% ')

    def predict(self, cp_points, cp_nozzle, board_width=None, board_height=None):
        pass

    def svr_error(self, svr, x_train, y_train):
        num_data = len(x_train)
        num_division = len(x_train) // self.num_folds

        pred_error = np.array([])
        for cnt in range(self.num_folds):
            x_fit = [x_train[i] for i in range(num_data) if not cnt * num_division <= i < (cnt + 1) * num_division]
            y_fit = [y_train[i] for i in range(num_data) if not cnt * num_division <= i < (cnt + 1) * num_division]
            svr.fit(x_fit, y_fit)

            x_valid = [x_train[i] for i in range(num_data) if cnt * num_division <= i < (cnt + 1) * num_division]
            y_valid = [y_train[i] for i in range(num_data) if cnt * num_division <= i < (cnt + 1) * num_division]

            for t1, t2 in np.nditer([y_valid, svr.predict(x_valid)]):
                pred_error = np.append(pred_error, abs(t1 - t2) / (t2 + 1e-10) * 100)

        return np.average(pred_error)
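
# Ensemble layout of SVREstimator (sketch): five fold-level SVRs are each
# trained on four folds and validated on the held-out fold; their out-of-fold
# predictions train a sixth, one-dimensional stacking SVR, so a final
# prediction is stacking_svr(mean of the five fold-level predictions).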


def exact_assembly_time(pcb_data, component_data):
    component_result, cycle_result, feeder_slot_result = feeder_priority_assignment(component_data, pcb_data,
                                                                                    hinter=False)
    placement_result, head_sequence_result = greedy_placement_route_generation(component_data, pcb_data,
                                                                               component_result, cycle_result,
                                                                               feeder_slot_result, hinter=False)
    info = placement_info_evaluation(component_data, pcb_data, component_result, cycle_result, feeder_slot_result,
                                     placement_result, head_sequence_result)
    # regression_info = [[info.cycle_counter, info.nozzle_change_counter, info.anc_round_counter,
    #                     info.pickup_counter, info.total_points]]
    # return self.lr.predict(regression_info)[0, 0]
    return info.total_time


if __name__ == '__main__':
    warnings.simplefilter(action='ignore', category=FutureWarning)

    def str2bool(x):
        # argparse's type=bool treats any non-empty string (including 'False') as True,
        # so boolean flags are parsed with an explicit string-to-bool conversion
        return str(x).lower() in ('true', '1', 'yes')

    parser = argparse.ArgumentParser(description='network training implementation')
    # parser.add_argument('--train', default=True, type=str2bool, help='determine whether to train the network')
    parser.add_argument('--save', default=True, type=str2bool,
                        help='determine whether to save the parameters of the network, linear regression model, etc.')
    parser.add_argument('--overwrite', default=False, type=str2bool,
                        help='determine whether to overwrite the training and testing data')
    parser.add_argument('--train_file', default='train_data - bp.txt', type=str, help='training file path')
    parser.add_argument('--test_file', default='test_data - bp.txt', type=str, help='testing file path')
    parser.add_argument('--num_epochs', default=10000, type=int, help='number of epochs for training process')
    parser.add_argument('--batch_size', default=1000, type=int, help='size of training batch')
    parser.add_argument('--lr', default=1e-5, type=float, help='learning rate for the network')
    parser.add_argument('--model', default='neural-network', help='method for assembly time estimation')
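
    # Example invocation (sketch; the script name is an assumption):
    #   python estimator.py --overwrite False --num_epochs 10000 --lr 1e-5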

    params = parser.parse_args()

    data_mgr = DataMgr()
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    if params.overwrite:
        file = {params.train_file: params.batch_size,
                params.test_file: params.batch_size // data_mgr.get_update_round() // 5}
        for file_name, file_batch_size in file.items():
            with open('opt/' + file_name, 'a') as f:
                for _ in range(int(file_batch_size)):
                    mode = file_name.split('.')[0].split('_')[0]
                    pcb_data, component_data = data_mgr.generator(mode)  # randomly generate PCB data
                    # data_mgr.remover()  # remove the last saved data
                    # data_mgr.saver('data/' + file_name, pcb_data)  # save new data

                    info = base_optimizer(1, pcb_data, component_data,
                                          feeder_data=pd.DataFrame(columns=['slot', 'part', 'arg']),
                                          method='feeder-scan', hinter=True)

                    data_mgr.recorder(f, info, pcb_data, component_data)

    estimator = NeuralEstimator()

    estimator.training(params)
    estimator.testing(params)