Modify file name attributes

2024-06-05 22:10:21 +08:00
parent 7c9a900b95
commit cbeba48da0
21 changed files with 1466 additions and 839 deletions


@@ -1,3 +1,6 @@
import copy
import random
from generator import *
from base_optimizer.optimizer_interface import *
@@ -34,117 +37,172 @@ class LSTMNet(torch.nn.Module):
class Estimator:
    def __init__(self):
        self.data_mgr = DataMgr()

    def training(self, params):
        pass

    def testing(self, params):
        pass

    def predict(self, cp_points, cp_nozzle, board_width=None, board_height=None):
        pass
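
# A minimal driver sketch (assumption: nothing beyond the three methods above is
# required, so every estimator below can be used interchangeably):
#
#   def evaluate(estimator: Estimator, params):
#       estimator.training(params)
#       estimator.testing(params)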
class NeuralEstimator(Estimator):
    def __init__(self):
        super().__init__()
        self.net = Net(input_size=self.data_mgr.get_feature(), output_size=1).to(device)
        self.net_file = 'model/net_model.pth'
        if os.path.exists(self.net_file):
            self.net.load_state_dict(torch.load(self.net_file))

    def init_weights(self):
        for m in self.net.modules():
            if isinstance(m, torch.nn.Linear):
                torch.nn.init.xavier_uniform_(m.weight)
                torch.nn.init.zeros_(m.bias)
    def training(self, params):
        self.init_weights()  # initialize network parameters
        data = data_mgr.loader('opt/' + params.train_file)
        x_train = np.array(data_mgr.neural_encode(data[0][::data_mgr.get_update_round()]))
        y_train = np.array(data[1][::data_mgr.get_update_round()])

        x_train = torch.from_numpy(x_train.reshape((-1, np.shape(x_train)[1]))).float().to(device)
        y_train = torch.from_numpy(y_train.reshape((-1, 1))).float().to(device)

        optimizer = torch.optim.Adam(self.net.parameters(), lr=params.lr)
        # scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=5000, gamma=0.1)
loss_func = torch.nn.MSELoss()
for epoch in range(params.num_epochs):
pred = self.net(x_train)
loss = loss_func(pred, y_train)
optimizer.zero_grad()
loss.backward()
optimizer.step()
# scheduler.step()
if epoch % 100 == 0:
print('Epoch: ', epoch, ', Loss: ', loss.item())
if loss.item() < 1e-4:
break
        net_predict = self.net(x_train).view(-1)
        pred_time, real_time = net_predict.cpu().detach().numpy(), y_train.view(-1).cpu().detach().numpy()
        pred_error = np.array([])
        for t1, t2 in np.nditer([pred_time, real_time]):
            pred_error = np.append(pred_error, abs(t1 - t2) / (t2 + 1e-10) * 100)

        print('--------------------------------------')
        print(f'average prediction error for train data : {np.average(pred_error): .2f}% ')
        print(f'maximum prediction error for train data : {np.max(pred_error): .2f}% ')

        mse = np.linalg.norm((net_predict - y_train.view(-1)).cpu().detach().numpy())
        print(f'mean square error for training data result : {mse: .2f} ')
        if params.save:
            if not os.path.exists('model'):
                os.mkdir('model')
            torch.save(self.net.state_dict(), self.net_file)
            # self.net.load_state_dict(torch.load(self.net_file))
    def testing(self, params):
        data = data_mgr.loader('opt/' + params.test_file)
        x_test, y_test = np.array(data_mgr.neural_encode(data[0])), np.array(data[1])

        x_test = torch.from_numpy(x_test.reshape((-1, np.shape(x_test)[1]))).float().to(device)

        self.net.eval()
        with torch.no_grad():
            pred_time = self.net(x_test).view(-1).cpu().detach().numpy()
        # x_test = x_test.cpu().detach().numpy()

        over_set = []
        pred_idx, pred_error = 0, np.array([])
        for t1, t2 in np.nditer([pred_time, y_test.reshape(-1)]):
            pred_error = np.append(pred_error, abs(t1 - t2) / (t2 + 1e-10) * 100)
            if pred_error[-1] > 5:
                over_set.append(pred_idx + 1)
                print(f'\033[0;31;31midx: {pred_idx + 1: d}, net: {t1: .3f}, real: {t2: .3f}, '
                      f'gap: {pred_error[-1]: .3f}\033[0m')
            # else:
            #     print(f'idx: {pred_idx + 1: d}, net: {t1: .3f}, real: {t2: .3f}, gap: {pred_error[-1]: .3f}')

            pred_idx += 1

        print('over:', over_set)
        print('size:', len(over_set))
        print('--------------------------------------')
        print(f'average prediction error for test data : {np.average(pred_error): .3f}% ')
        print(f'maximum prediction error for test data : {np.max(pred_error): .3f}% ')

        mse = np.linalg.norm(pred_time - y_test.reshape(-1))
        print(f'mean square error for test data result : {mse: .2f} ')

    def predict(self, cp_points, cp_nozzle, board_width=None, board_height=None):
        assert board_width is not None and board_height is not None
        encoding = np.array(self.data_mgr.encode(cp_points, cp_nozzle, board_width, board_height))
        encoding = torch.from_numpy(encoding.reshape((-1, np.shape(encoding)[0]))).float().to(device)
        return self.net(encoding)[0, 0].item()
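
# Usage sketch (assumption: model/net_model.pth has been trained; the part indices
# and nozzle names here are placeholders, not entries from a real PCB file):
#
#   estimator = NeuralEstimator()
#   cp_points = {0: 120, 1: 48}           # component index -> number of placements
#   cp_nozzle = {0: 'CN065', 1: 'CN140'}  # component index -> nozzle type
#   t = estimator.predict(cp_points, cp_nozzle, board_width=250, board_height=150)
#   print(f'estimated assembly time: {t:.2f} s')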
class HeuristicEstimator(Estimator):
    def __init__(self):
        super().__init__()

        self.lr = LinearRegression()
        self.pickle_file = 'model/heuristic_lr_model.pkl'
        if os.path.exists(self.pickle_file):
            with open(self.pickle_file, 'rb') as f:
                self.lr = pickle.load(f)

    def training(self, params):
        data = data_mgr.loader('opt/' + params.train_file)
        x_fit = [self.heuristic_genetic(cp_points, cp_nozzle) for cp_points, cp_nozzle, _, _ in data[0]]
        y_fit = np.array([data[1]]).T
        self.lr.fit(x_fit, y_fit)

        if params.save:
            if not os.path.exists('model'):
                os.mkdir('model')
            with open(self.pickle_file, 'wb') as f:
                pickle.dump(self.lr, f)
y_predict = self.lr.predict(x_fit)
pred_error = np.array([])
for t1, t2 in np.nditer([y_fit, y_predict]):
pred_error = np.append(pred_error, abs(t1 - t2) / (t2 + 1e-10) * 100)
print('--------------------------------------')
print(f'average prediction error for train data : {np.average(pred_error): .2f}% ')
print(f'maximum prediction error for train data : {np.max(pred_error): .2f}% ')
def testing(self, params):
data = data_mgr.loader('opt/' + params.test_file)
x_fit = [self.heuristic_genetic(cp_points, cp_nozzle) for cp_points, cp_nozzle, _, _ in data[0]]
y_fit = np.array([data[1]]).T
y_predict = self.lr.predict(x_fit)
pred_error = np.array([])
for t1, t2 in np.nditer([y_fit, y_predict]):
pred_error = np.append(pred_error, abs(t1 - t2) / (t2 + 1e-10) * 100)
print('--------------------------------------')
print(f'average prediction error for test data : {np.average(pred_error): .2f}% ')
print(f'maximum prediction error for test data : {np.max(pred_error): .2f}% ')
def predict(self, cp_points, cp_nozzle, board_width=None, board_height=None):
return self.lr.predict(np.array(self.heuristic_genetic(cp_points, cp_nozzle)).reshape(1, -1))
def heuristic_genetic(self, cp_points, cp_nozzle):
nozzle_points, nozzle_component_points = defaultdict(int), defaultdict(list)
@@ -158,7 +216,7 @@ class Estimator:
for idx, (part_index, points) in enumerate(cp_points.items()):
nozzle_component_points[cp_nozzle[part_index]][idx] = points
        nl = sum(cp_points.values())  # num of placement points
ul = math.ceil(len(nozzle_points) * 1.0 / max_head_index) - 1 # num of nozzle set
# assignments of nozzles to heads
@@ -168,7 +226,7 @@ class Estimator:
        total_heads = (1 + ul) * max_head_index
        for nozzle in nozzle_points.keys():
            if nozzle_points[nozzle] == 0:
                continue
            nozzle_heads[nozzle] = math.floor(nozzle_points[nozzle] * 1.0 / nl * total_heads)
            nozzle_heads[nozzle] += 1
@@ -195,6 +253,9 @@ class Estimator:
nozzle_points[nozzle] -= 1
heads_placement[idx][1] += 1
heads_placement = sorted(heads_placement, key=lambda x: x[1], reverse=True)
# every max_head_index heads in the non-decreasing order are grouped together as nozzle set
for idx in range(len(heads_placement) // max_head_index):
wl += heads_placement[idx][1]
# the number of pick-up operations
# (under the assumption of the number of feeder available for each comp. type is equal 1)
@@ -224,18 +285,342 @@ class Estimator:
heads_placement[head][1] -= min(min_points_list)
heads_placement_points[head] -= min(min_points_list)
        return [nl, wl, ul]
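
# Feature recap (as returned by heuristic_genetic above): nl is the total number of
# placement points, wl approximates the number of placement rounds after heads are
# grouped into nozzle sets, and ul lower-bounds the number of nozzle-set changes.
# The linear model fitted in training() therefore has the rough form
#
#   t ≈ w0 + w1 * nl + w2 * wl + w3 * ul
#
# with the weights learned by LinearRegression.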
class RegressionEstimator(Estimator):
def __init__(self):
super().__init__()
self.lr = LinearRegression()
self.pickle_file = 'model/params_lr_model.pkl'
if os.path.exists(self.pickle_file):
with open(self.pickle_file, 'rb') as f:
self.lr = pickle.load(f)
def training(self, params):
data = data_mgr.loader('opt/' + params.train_file)
x_fit = [self.heuristic_reconfig(cp_points, cp_nozzle) for cp_points, cp_nozzle, _, _ in data[0]]
y_fit = np.array([data[1]]).T
self.lr.fit(x_fit, y_fit)
if params.save:
if not os.path.exists('model'):
os.mkdir('model')
with open(self.pickle_file, 'wb') as f:
pickle.dump(self.lr, f)
y_predict = self.lr.predict(x_fit)
pred_error = np.array([])
for t1, t2 in np.nditer([y_fit, y_predict]):
pred_error = np.append(pred_error, abs(t1 - t2) / (t2 + 1e-10) * 100)
print('--------------------------------------')
print(f'average prediction error for train data : {np.average(pred_error): .2f}% ')
print(f'maximum prediction error for train data : {np.max(pred_error): .2f}% ')
def testing(self, params):
data = data_mgr.loader('opt/' + params.test_file)
x_fit = [self.heuristic_reconfig(cp_points, cp_nozzle) for cp_points, cp_nozzle, _, _ in data[0]]
y_fit = np.array([data[1]]).T
y_predict = self.lr.predict(x_fit)
pred_error = np.array([])
for t1, t2 in np.nditer([y_fit, y_predict]):
pred_error = np.append(pred_error, abs(t1 - t2) / (t2 + 1e-10) * 100)
print('--------------------------------------')
print(f'average prediction error for test data : {np.average(pred_error): .2f}% ')
print(f'maximum prediction error for test data : {np.max(pred_error): .2f}% ')
def predict(self, cp_points, cp_nozzle, board_width=None, board_height=None):
return self.lr.predict(np.array(self.heuristic_reconfig(cp_points, cp_nozzle)).reshape(1, -1))
def heuristic_reconfig(self, cp_points, cp_nozzle):
task_block_number, total_point_number = 0, sum(cp_points.values())
nozzle_points, nozzle_heads = defaultdict(int), defaultdict(int)
for part, points in cp_points.items():
nozzle_points[cp_nozzle[part]] += points
nozzle_heads[cp_nozzle[part]] = 1
remaining_head = max_head_index - len(nozzle_heads)
nozzle_fraction = []
for nozzle, points in nozzle_points.items():
val = remaining_head * points / total_point_number
nozzle_heads[nozzle] += math.floor(val)
nozzle_fraction.append([nozzle, val - math.floor(val)])
remaining_head = max_head_index - sum(nozzle_heads.values())
        # hand the spare heads to the nozzles with the largest fractional remainders;
        # the sorted() result was previously discarded and the index never advanced
        nozzle_fraction = sorted(nozzle_fraction, key=lambda x: x[1], reverse=True)
        nozzle_fraction_index = 0
        while remaining_head > 0:
            nozzle_heads[nozzle_fraction[nozzle_fraction_index][0]] += 1
            nozzle_fraction_index = (nozzle_fraction_index + 1) % len(nozzle_fraction)
            remaining_head -= 1
for nozzle, heads_number in nozzle_heads.items():
task_block_number = max(task_block_number, math.ceil(nozzle_points[nozzle] / heads_number))
return [total_point_number, task_block_number]
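
# Shape of the model RegressionEstimator fits (the coefficients are learned; the form
# follows from the two features returned above):
#
#   t ≈ w0 + w1 * total_point_number + w2 * task_block_number
#
# i.e. a per-point placement cost plus a per-task-block overhead, echoing the removed
# closed-form estimate (t_pick + t_place) * total_point_number + task_block_number * weight.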
class SVREstimator(Estimator):
def __init__(self):
super().__init__()
# === symbiotic organism search parameter ===
# population of meta heuristic: 20
# number of iteration: 100
self.population_size = 20
self.num_iteration = 100
self.w_quart = 1.5
# === support vector regression parameters ===
self.kernel_func = "rbf"
self.C_range = [0.1, 10]
self.gamma_range = [0.01, 0.5]
self.epsilon_range = [0.01, 0.1]
self.benefit_factor = [1, 2]
# number of folds: 5
self.num_folds = 5
self.svr_list = [SVR() for _ in range(self.num_folds + 1)]
for i in range(self.num_folds + 1):
pickle_file = 'model/svr' + str(i + 1) + '_model.pkl'
if not os.path.exists(pickle_file):
continue
with open(pickle_file, 'rb') as f:
self.svr_list[i] = pickle.load(f)
self.pbar = tqdm(total=self.num_iteration * self.num_folds * self.population_size)
self.pbar.set_description('svr training process')
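
    # Architecture of svr_list (as constructed in training() below): num_folds base
    # SVRs plus one meta SVR stacked on top. Base model k is trained with fold k held
    # out, and the meta model is fitted on the out-of-fold predictions:
    #
    #   fold-0 SVR ─┐
    #   fold-1 SVR ─┼─> out-of-fold predictions ─> meta SVR ─> final estimate
    #      ...     ─┘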
def training(self, params):
data = data_mgr.loader('opt/' + params.train_file)
        # drop outliers outside the interquartile whiskers [Q1 - w*(Q3-Q1), Q3 + w*(Q3-Q1)]
        Q1, Q3 = np.percentile(np.array(data[1]), 25), np.percentile(np.array(data[1]), 75)
        indices = [i for i in range(len(data[1]))
                   if Q1 - self.w_quart * (Q3 - Q1) <= data[1][i] <= Q3 + self.w_quart * (Q3 - Q1)]
        data[0], data[1] = [data[0][i] for i in indices], [data[1][i] for i in indices]
self.svr_list = []
division = len(data[0]) // self.num_folds
for cnt in range(self.num_folds):
x_train, y_train = data[0], data[1]
x_train = [[sum(x_train[i][0].values()), x_train[i][2], x_train[i][3]] for i in range(len(data[0])) if
not cnt * division <= i < (cnt + 1) * division]
y_train = [y_train[i] for i in range(len(data[0])) if not cnt * division <= i < (cnt + 1) * division]
self.svr_list.append(self.sos_svr_training(x_train, y_train))
final_input, final_output = [], []
for cnt in range(self.num_folds):
x_valid = [[sum(data[0][i][0].values()), data[0][i][2], data[0][i][3]] for i in range(len(data[0])) if
cnt * division <= i < (cnt + 1) * division]
final_input.extend([[v] for v in self.svr_list[cnt].predict(x_valid)])
final_output.extend(
[data[1][i] for i in range(len(data[0])) if cnt * division <= i < (cnt + 1) * division])
self.svr_list.append(self.sos_svr_training(final_input, final_output))
if params.save:
for i in range(self.num_folds + 1):
pickle_file = 'model/svr' + str(i + 1) + '_model.pkl'
with open(pickle_file, 'wb') as f:
pickle.dump(self.svr_list[i], f)
predict_x = [[sum(data[0][i][0].values()), data[0][i][2], data[0][i][3]] for i in range(len(data[0]))]
predict_y = []
for cnt in range(self.num_folds):
predict_y.extend(self.svr_list[cnt].predict(predict_x))
        # average the num_folds base models' predictions per sample
        # (predict_y is fold-major; also avoid shadowing the built-in input)
        fold_pred = np.array(predict_y).reshape(self.num_folds, -1)
        stacked_input = [[np.average(fold_pred[:, i])] for i in range(fold_pred.shape[1])]
        predict_val = self.svr_list[-1].predict(stacked_input)
pred_error = np.array([])
for t1, t2 in np.nditer([data[1], predict_val]):
pred_error = np.append(pred_error, abs(t1 - t2) / (t2 + 1e-10) * 100)
print('--------------------------------------')
print(f'average prediction error for train data : {np.average(pred_error): .2f}% ')
print(f'maximum prediction error for train data : {np.max(pred_error): .2f}% ')
def sos_svr_training(self, x_train, y_train):
population = []
for _ in range(self.population_size):
svr_param = [random.uniform(self.C_range[0], self.C_range[1]),
random.uniform(self.gamma_range[0], self.gamma_range[1]),
random.uniform(self.epsilon_range[0], self.epsilon_range[1])]
population.append(SVR(kernel=self.kernel_func, C=svr_param[0], gamma=svr_param[1], epsilon=svr_param[2]))
population_val = []
for individual in population:
population_val.append(self.svr_error(individual, x_train, y_train))
for _ in range(self.num_iteration):
best_svr = population[np.argmin(population_val)]
for i in range(self.population_size):
# === mutualism phase ===
while True:
j = random.randint(0, self.population_size - 1)
if i != j:
break
Mv_C, Mv_gamma, Mv_epsilon = (population[i].C + population[j].C) / 2, (
population[i].gamma + population[j].gamma) / 2, (
population[i].epsilon + population[j].epsilon) / 2
for idx, svr in zip([i, j], [population[i], population[j]]):
new_C = svr.C + random.random() * (best_svr.C - Mv_C * random.choice(self.benefit_factor))
new_gamma = svr.gamma + random.random() * (
best_svr.gamma - Mv_gamma * random.choice(self.benefit_factor))
new_epsilon = svr.epsilon + random.random() * (
best_svr.epsilon - Mv_epsilon * random.choice(self.benefit_factor))
if new_C >= 0 and new_gamma >= 0 and new_epsilon >= 0:
new_svr = SVR(kernel=self.kernel_func, C=new_C, gamma=new_gamma, epsilon=new_epsilon)
new_svr_val = self.svr_error(new_svr, x_train, y_train)
if new_svr_val < population_val[idx]:
population[idx], population_val[idx] = new_svr, new_svr_val
# === commensalism phase ===
while True:
j = random.randint(0, self.population_size - 1)
if i != j:
break
new_C = population[i].C + random.uniform(-1, 1) * (best_svr.C - population[j].C)
new_gamma = population[i].gamma + random.uniform(-1, 1) * (best_svr.gamma - population[j].gamma)
new_epsilon = population[i].epsilon + random.uniform(-1, 1) * (
best_svr.epsilon - population[j].epsilon)
if new_C >= 0 and new_gamma >= 0 and new_epsilon >= 0:
new_svr = SVR(kernel=self.kernel_func, C=new_C, gamma=new_gamma, epsilon=new_epsilon)
new_svr_val = self.svr_error(new_svr, x_train, y_train)
if new_svr_val < population_val[j]:
population[j], population_val[j] = new_svr, new_svr_val
# === parasitism phase ===
while True:
j = random.randint(0, self.population_size - 1)
if i != j:
break
new_svr = copy.deepcopy(population[j])
idx = random.randint(0, 2)
if idx == 0:
new_svr.C = random.uniform(self.C_range[0], self.C_range[1])
elif idx == 1:
new_svr.gamma = random.uniform(self.gamma_range[0], self.gamma_range[1])
else:
new_svr.epsilon = random.uniform(self.epsilon_range[0], self.epsilon_range[1])
new_svr_val = self.svr_error(new_svr, x_train, y_train)
if new_svr_val < population_val[j]:
population[j], population_val[j] = new_svr, new_svr_val
self.pbar.update(1)
return population[np.argmin(population_val)]
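
    # The three phases above follow the symbiotic organism search scheme: mutualism
    # moves organisms i and j toward the best one through their mutual vector
    # Mv = (x_i + x_j) / 2 scaled by a benefit factor BF in {1, 2}; commensalism
    # perturbs i along (best - x_j); parasitism replaces j with a randomly mutated
    # clone when the clone scores better. One mutualism step for a single parameter:
    #
    #   x_i_new = x_i + rand(0, 1) * (x_best - Mv * BF)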
def testing(self, params):
data = data_mgr.loader('opt/' + params.test_file)
predict_x = [[sum(data[0][i][0].values()), data[0][i][2], data[0][i][3]] for i in range(len(data[0]))]
predict_y = []
for cnt in range(self.num_folds):
predict_y.extend(self.svr_list[cnt].predict(predict_x))
        # average the num_folds base models' predictions per sample
        # (predict_y is fold-major; also avoid shadowing the built-in input)
        fold_pred = np.array(predict_y).reshape(self.num_folds, -1)
        stacked_input = [[np.average(fold_pred[:, i])] for i in range(fold_pred.shape[1])]
        predict_val = self.svr_list[-1].predict(stacked_input)
pred_error = np.array([])
for t1, t2 in np.nditer([data[1], predict_val]):
pred_error = np.append(pred_error, abs(t1 - t2) / (t2 + 1e-10) * 100)
print('--------------------------------------')
print(f'average prediction error for test data : {np.average(pred_error): .2f}% ')
print(f'maximum prediction error for test data : {np.max(pred_error): .2f}% ')
def predict(self, cp_points, cp_nozzle, board_width=None, board_height=None):
pass
def svr_error(self, svr, x_train, y_train):
num_data = len(x_train)
num_division = len(x_train) // self.num_folds
pred_error = np.array([])
for cnt in range(self.num_folds):
x_fit = [x_train[i] for i in range(num_data) if not cnt * num_division <= i < (cnt + 1) * num_division]
y_fit = [y_train[i] for i in range(num_data) if not cnt * num_division <= i < (cnt + 1) * num_division]
svr.fit(x_fit, y_fit)
x_valid = [x_train[i] for i in range(num_data) if cnt * num_division <= i < (cnt + 1) * num_division]
y_valid = [y_train[i] for i in range(num_data) if cnt * num_division <= i < (cnt + 1) * num_division]
for t1, t2 in np.nditer([y_valid, svr.predict(x_valid)]):
pred_error = np.append(pred_error, abs(t1 - t2) / (t2 + 1e-10) * 100)
return np.average(pred_error)
def exact_assembly_time(pcb_data, component_data):
component_result, cycle_result, feeder_slot_result = feeder_priority_assignment(component_data, pcb_data,
hinter=False)
placement_result, head_sequence_result = greedy_placement_route_generation(component_data, pcb_data,
component_result, cycle_result,
feeder_slot_result, hinter=False)
info = placement_info_evaluation(component_data, pcb_data, component_result, cycle_result, feeder_slot_result,
placement_result, head_sequence_result)
# regression_info = [[info.cycle_counter, info.nozzle_change_counter, info.anc_round_counter,
# info.pickup_counter, info.total_points]]
# return self.lr.predict(regression_info)[0, 0]
return info.total_time
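
# A ground-truth comparison sketch (assumption: pcb_data / component_data come from
# DataMgr.generator, as in __main__ below):
#
#   pcb_data, component_data = data_mgr.generator('train')
#   t_exact = exact_assembly_time(pcb_data, component_data)
#   # any estimator's predict() output can then be checked against t_exact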
if __name__ == '__main__':
warnings.simplefilter(action='ignore', category=FutureWarning)
parser = argparse.ArgumentParser(description='network training implementation')
# parser.add_argument('--train', default=True, type=bool, help='determine whether training the network')
parser.add_argument('--save', default=True, type=bool,
help='determine whether saving the parameters of network, linear regression model, etc.')
parser.add_argument('--overwrite', default=False, type=bool,
help='determine whether overwriting the training and testing data')
parser.add_argument('--train_file', default='train_data - bp.txt', type=str, help='training file path')
parser.add_argument('--test_file', default='test_data - bp.txt', type=str, help='testing file path')
parser.add_argument('--num_epochs', default=10000, type=int, help='number of epochs for training process')
parser.add_argument('--batch_size', default=1000, type=int, help='size of training batch')
parser.add_argument('--lr', default=1e-5, type=float, help='learning rate for the network')
parser.add_argument('--model', default='neural-network', help='method for assembly time estimation')
params = parser.parse_args()
data_mgr = DataMgr()
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
if params.overwrite:
file = {params.train_file: params.batch_size,
params.test_file: params.batch_size // data_mgr.get_update_round() // 5}
for file_name, file_batch_size in file.items():
with open('opt/' + file_name, 'a') as f:
for _ in range(int(file_batch_size)):
mode = file_name.split('.')[0].split('_')[0]
                pcb_data, component_data = data_mgr.generator(mode)  # randomly generate PCB data
# data_mgr.remover() # remove the last saved data
# data_mgr.saver('data/' + file_name, pcb_data) # save new data
info = base_optimizer(1, pcb_data, component_data,
feeder_data=pd.DataFrame(columns=['slot', 'part', 'arg']),
method='feeder-scan', hinter=True)
data_mgr.recorder(f, info, pcb_data, component_data)
estimator = NeuralEstimator()
estimator.training(params)
estimator.testing(params)
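
    # Hypothetical dispatch for the currently unused --model flag (this commit always
    # instantiates NeuralEstimator; the mapping keys here are illustrative only):
    #
    #   estimator = {'neural-network': NeuralEstimator, 'heuristic': HeuristicEstimator,
    #                'regression': RegressionEstimator, 'svr': SVREstimator}[params.model]()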