from generator import *
from base_optimizer.optimizer_interface import *

from collections import defaultdict
import math
import pickle

import numpy as np
import torch


class Net(torch.nn.Module):
    def __init__(self, input_size, hidden_size=1000, output_size=1):
        super(Net, self).__init__()
        self.fc1 = torch.nn.Linear(input_size, hidden_size)
        self.relu = torch.nn.ReLU()         # activation function
        self.fc2 = torch.nn.Linear(hidden_size, hidden_size)
        # self.relu1 = torch.nn.ReLU()      # activation function
        self.fc3 = torch.nn.Linear(hidden_size, output_size)

    def forward(self, x):
        x = self.fc1(x)
        # x = self.relu(x)
        x = self.fc2(x)
        x = self.relu(x)
        x = self.fc3(x)
        return x


class LSTMNet(torch.nn.Module):
    def __init__(self, input_size, hidden_size=256, output_size=1, num_layers=1):
        super(LSTMNet, self).__init__()
        self.lstm = torch.nn.LSTM(input_size, hidden_size, num_layers)
        self.fc = torch.nn.Linear(hidden_size, output_size)

    def forward(self, x):
        # x is the input with size (seq_len, batch_size, input_size)
        x, _ = self.lstm(x)
        x = self.fc(x)
        return x[-1, :, :]
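

# Illustrative sketch (not used by the Estimator below): the tensor shapes the two
# networks above expect. The feature width of 32, batch size of 8 and sequence
# length of 10 are made-up values for demonstration only; the real feature width
# comes from DataMgr.get_feature().
def _network_shape_demo():
    net = Net(input_size=32)
    dense_out = net(torch.rand(8, 32))          # (batch, feature) -> (batch, 1)

    lstm_net = LSTMNet(input_size=32)
    lstm_out = lstm_net(torch.rand(10, 8, 32))  # (seq_len, batch, feature) -> (batch, 1)
    return dense_out.shape, lstm_out.shape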


class Estimator:
    def __init__(self, task_block_weight=None):
        self.data_mgr = DataMgr()
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

        self.net = Net(input_size=self.data_mgr.get_feature(), output_size=1).to(self.device)
        self.net.load_state_dict(torch.load('model/net_model.pth', map_location=self.device))

        self.task_block_weight = task_block_weight

        with open('model/lr_model.pkl', 'rb') as f:
            self.lr = pickle.load(f)

    def convert(self, pcb_data, component_data, assignment_result):
        # split the placement points of each component among the machines according to
        # assignment_result, then derive the per-machine inputs of the estimators
        machine_num, component_num = len(assignment_result), len(component_data)

        component_machine_index = [0 for _ in range(component_num)]
        machine_points = [[[] for _ in range(component_num)] for _ in range(machine_num)]

        component2idx = defaultdict(int)
        for i, data in component_data.iterrows():
            component2idx[data.part] = i

        for _, data in pcb_data.iterrows():
            part_index = component2idx[data.part]
            while True:
                machine_index = component_machine_index[part_index]
                # the quota of this component on the current machine is filled: move on to the next machine
                if assignment_result[machine_index][part_index] == len(machine_points[machine_index][part_index]):
                    component_machine_index[part_index] += 1
                    machine_index += 1
                else:
                    break
            machine_points[machine_index][part_index].append([data.x, data.y])

        res = []
        for machine_index in range(machine_num):
            cp_points, cp_nozzle = defaultdict(int), defaultdict(str)
            cp_width, cp_height = defaultdict(float), defaultdict(float)
            board_right_pos, board_left_pos, board_top_pos, board_bottom_pos = None, None, None, None

            for part_index in range(component_num):
                if assignment_result[machine_index][part_index] == 0:
                    continue

                cp_points[part_index] = assignment_result[machine_index][part_index]
                cp_nozzle[part_index] = component_data.iloc[part_index]['nz']

                cp_right_pos = max(p[0] for p in machine_points[machine_index][part_index])
                cp_left_pos = min(p[0] for p in machine_points[machine_index][part_index])
                cp_top_pos = max(p[1] for p in machine_points[machine_index][part_index])
                cp_bottom_pos = min(p[1] for p in machine_points[machine_index][part_index])

                cp_width[part_index] = cp_right_pos - cp_left_pos
                cp_height[part_index] = cp_top_pos - cp_bottom_pos

                if board_right_pos is None or cp_right_pos > board_right_pos:
                    board_right_pos = cp_right_pos
                if board_left_pos is None or cp_left_pos < board_left_pos:
                    board_left_pos = cp_left_pos
                if board_top_pos is None or cp_top_pos > board_top_pos:
                    board_top_pos = cp_top_pos
                if board_bottom_pos is None or cp_bottom_pos < board_bottom_pos:
                    board_bottom_pos = cp_bottom_pos

            res.append([cp_points, cp_nozzle, cp_width, cp_height,
                        board_right_pos - board_left_pos, board_top_pos - board_bottom_pos])
        return res

    def neural_network(self, cp_points, cp_nozzle, board_width, board_height):
        encoding = np.array(self.data_mgr.encode(cp_points, cp_nozzle, board_width, board_height))
        encoding = torch.from_numpy(encoding.reshape((-1, np.shape(encoding)[0]))).float().to(self.device)
        return self.net(encoding)[0, 0].item()

    def heuristic_reconfiguration(self, cp_points, cp_nozzle):
        task_block_number, total_point_number = 0, sum(cp_points.values())
        nozzle_points, nozzle_heads = defaultdict(int), defaultdict(int)

        for part, points in cp_points.items():
            nozzle_points[cp_nozzle[part]] += points
            nozzle_heads[cp_nozzle[part]] = 1
        remaining_head = max_head_index - len(nozzle_heads)

        # distribute the remaining heads to the nozzle types in proportion to their
        # placement points (largest-remainder rule)
        nozzle_fraction = []
        for nozzle, points in nozzle_points.items():
            val = remaining_head * points / total_point_number
            nozzle_heads[nozzle] += math.floor(val)
            nozzle_fraction.append([nozzle, val - math.floor(val)])

        remaining_head = max_head_index - sum(nozzle_heads.values())
        nozzle_fraction.sort(key=lambda x: x[1], reverse=True)
        nozzle_fraction_index = 0
        while remaining_head > 0:
            nozzle_heads[nozzle_fraction[nozzle_fraction_index][0]] += 1
            nozzle_fraction_index += 1
            remaining_head -= 1

        for nozzle, heads_number in nozzle_heads.items():
            task_block_number = max(task_block_number, math.ceil(nozzle_points[nozzle] / heads_number))

        return (t_pick + t_place) * total_point_number + task_block_number * self.task_block_weight
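
    # Illustrative sketch only (not called anywhere in this module): the
    # largest-remainder rule used by heuristic_reconfiguration above to split the
    # max_head_index placement heads among the nozzle types in proportion to their
    # placement points. Assumes the number of nozzle types does not exceed
    # max_head_index.
    @staticmethod
    def _nozzle_head_allocation_sketch(cp_points, cp_nozzle):
        nozzle_points = defaultdict(int)
        for part_index, points in cp_points.items():
            nozzle_points[cp_nozzle[part_index]] += points
        total_points = sum(nozzle_points.values())

        # every nozzle type gets one head up front
        nozzle_heads = {nozzle: 1 for nozzle in nozzle_points}
        remaining = max_head_index - len(nozzle_heads)

        # integer part of the proportional share first ...
        fractions = []
        for nozzle, points in nozzle_points.items():
            share = remaining * points / total_points
            nozzle_heads[nozzle] += math.floor(share)
            fractions.append((nozzle, share - math.floor(share)))

        # ... then hand the leftover heads to the largest fractional remainders
        leftover = max_head_index - sum(nozzle_heads.values())
        for nozzle, _ in sorted(fractions, key=lambda x: x[1], reverse=True)[:leftover]:
            nozzle_heads[nozzle] += 1
        return nozzle_heads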

    def heuristic_genetic(self, cp_points, cp_nozzle):
        nozzle_points, nozzle_component_points = defaultdict(int), defaultdict(list)
        for idx, nozzle in cp_nozzle.items():
            if cp_points[idx] == 0:
                continue
            nozzle_points[nozzle] += cp_points[idx]
            nozzle_component_points[nozzle] = [0] * len(cp_points)

        for idx, (part_index, points) in enumerate(cp_points.items()):
            nozzle_component_points[cp_nozzle[part_index]][idx] = points

        total_points = sum(cp_points.values())                          # number of placement points
        ul = math.ceil(len(nozzle_points) * 1.0 / max_head_index) - 1   # number of nozzle sets

        # assignment of nozzles to heads
        wl = 0                                                          # workload
        total_heads = (1 + ul) * max_head_index - len(nozzle_points)
        nozzle_heads = defaultdict(int)
        for nozzle in nozzle_points.keys():
            if nozzle_points[nozzle] == 0:
                continue
            nozzle_heads[nozzle] = math.floor(nozzle_points[nozzle] * 1.0 / total_points * total_heads)
            nozzle_heads[nozzle] += 1

        total_heads = (1 + ul) * max_head_index
        for heads in nozzle_heads.values():
            total_heads -= heads

        # assign the remaining heads to the nozzle type with the largest points-per-head ratio
        while total_heads > 0:
            nozzle = max(nozzle_heads, key=lambda x: nozzle_points[x] / nozzle_heads[x])
            nozzle_heads[nozzle] += 1
            total_heads -= 1

        # evenly assign placements to heads
        heads_placement = []
        for nozzle in nozzle_heads.keys():
            points = math.floor(nozzle_points[nozzle] / nozzle_heads[nozzle])

            heads_placement += [[nozzle, points] for _ in range(nozzle_heads[nozzle])]
            nozzle_points[nozzle] -= (nozzle_heads[nozzle] * points)
            for idx in range(len(heads_placement) - 1, -1, -1):
                if nozzle_points[nozzle] <= 0:
                    break
                nozzle_points[nozzle] -= 1
                heads_placement[idx][1] += 1
        heads_placement = sorted(heads_placement, key=lambda x: x[1], reverse=True)

        # the number of pick-up operations
        # (under the assumption that exactly one feeder is available for each component type)
        pl = 0
        heads_placement_points = [0 for _ in range(max_head_index)]
        while True:
            head_assign_point = []
            for head in range(max_head_index):
                if heads_placement_points[head] != 0 or heads_placement[head][1] <= 0:
                    continue

                nozzle, points = heads_placement[head]
                max_comp_index = np.argmax(nozzle_component_points[nozzle])

                heads_placement_points[head] = min(points, nozzle_component_points[nozzle][max_comp_index])
                nozzle_component_points[nozzle][max_comp_index] -= heads_placement_points[head]

                head_assign_point.append(heads_placement_points[head])

            min_points_list = list(filter(lambda x: x > 0, heads_placement_points))
            if len(min_points_list) == 0 or len(head_assign_point) == 0:
                break

            pl += max(head_assign_point)

            for head in range(max_head_index):
                heads_placement[head][1] -= min(min_points_list)
                heads_placement_points[head] -= min(min_points_list)

        # heads are sorted by workload in non-increasing order; every max_head_index
        # consecutive heads are grouped together as one nozzle set
        for idx in range(len(heads_placement) // max_head_index):
            wl += heads_placement[idx][1]

        return T_pp * total_points + T_tr * wl + T_nc * ul + T_pl * pl

    def linear_regression(self, pcb_data, component_data):
        component_result, cycle_result, feeder_slot_result = feeder_priority_assignment(component_data, pcb_data,
                                                                                        hinter=False)

        info = placement_info_evaluation(component_data, pcb_data, component_result, cycle_result, feeder_slot_result)
        regression_info = [[info.cycle_counter, info.nozzle_change_counter, info.anc_round_counter,
                            info.pickup_counter, info.total_points]]
        return self.lr.predict(regression_info)[0, 0]
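

# Minimal usage sketch, assuming DataMgr can be constructed without arguments and
# that the pre-trained models exist under model/. The component indices, nozzle
# names, point counts and task_block_weight below are made-up values for
# illustration only.
if __name__ == '__main__':
    import os

    if os.path.exists('model/net_model.pth') and os.path.exists('model/lr_model.pkl'):
        estimator = Estimator(task_block_weight=5)

        cp_points = {0: 60, 1: 40}              # component index -> number of placement points
        cp_nozzle = {0: 'CN065', 1: 'CN040'}    # component index -> nozzle type

        print('heuristic (reconfiguration):', estimator.heuristic_reconfiguration(cp_points, cp_nozzle))
        print('heuristic (genetic):        ', estimator.heuristic_genetic(cp_points, cp_nozzle))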