Add hyper-heuristic production-line optimization algorithm

2024-05-17 22:52:49 +08:00
parent 6fa1f53f69
commit 7c9a900b95
13 changed files with 1731 additions and 1109 deletions

View File

@ -104,7 +104,7 @@ def optimizer_celldivision(pcb_data, component_data, hinter=True):
    point_num = len(pcb_data)
    component_cell = pd.DataFrame({'index': np.arange(len(component_data)), 'points': np.zeros(len(component_data), dtype=int)})
    for point_cnt in range(point_num):
-       part = pcb_data.loc[point_cnt, 'fdr'].split(' ', 1)[1]
+       part = pcb_data.loc[point_cnt, 'part']
        index = np.where(component_data['part'].values == part)
        component_cell.loc[index[0], 'points'] += 1
    component_cell = component_cell[~component_cell['points'].isin([0])]
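Editorial note, not part of the commit: the same per-part point count can also be obtained with a vectorized pandas expression, assuming the 'part' columns used in the hunk above; a minimal sketch:

import pandas as pd

def count_points_per_part(pcb_data, component_data):
    counts = pcb_data['part'].value_counts()                          # placement points per part name
    return component_data['part'].map(counts).fillna(0).astype(int)   # aligned with component_data rows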

View File

@ -29,7 +29,7 @@ head_nozzle = ['' for _ in range(max_head_index)]   # nozzles already assigned to each head
slotf1_pos, slotr1_pos = [-31.267, 44.], [807., 810.545]   # F1 (leftmost slot of the front base), R1 (rightmost slot of the rear base) positions
fix_camera_pos = [269.531, 694.823]     # fixed-camera position
anc_marker_pos = [336.457, 626.230]     # ANC reference point position
-stopper_pos = [635.150, 124.738]        # stopper position
+stopper_pos = [535.150, 124.738]        # stopper position

# algorithm weight parameters
e_nz_change, e_gang_pick = 4, 0.6
@ -48,6 +48,7 @@ nozzle_limit = {'CN065': 6, 'CN040': 6, 'CN220': 6, 'CN400': 6, 'CN140': 6}
# timing parameters
t_cycle = 0.3
+t_anc = 0.6
t_pick, t_place = .078, .051             # pick / place operation time
t_nozzle_put, t_nozzle_pick = 0.9, 0.75  # time to mount / dismount a nozzle
t_nozzle_change = t_nozzle_put + t_nozzle_pick
@ -59,66 +60,22 @@ T_pp, T_tr, T_nc, T_pl = 2, 5, 25, 0
class OptInfo:
    def __init__(self):
-       self.placement_time = 0
-       self.cycle_counter = 0
-       self.nozzle_change_counter = 0
-       self.pickup_counter = 0
-       self.pickup_movement = 0
-       self.placement_movement = 0
+       self.total_time = .0                 # total assembly time
+       self.total_points = .0               # total number of placement points
+       self.pickup_time = .0                # travel time during the pickup process
+       self.round_time = .0                 # travel time of trips between the feeder base and the PCB
+       self.place_time = .0                 # travel time during the placement process
+       self.operation_time = .0             # time of mechanical actions such as pick, place and nozzle change
+       self.cycle_counter = 0               # number of cycles
+       self.nozzle_change_counter = 0       # number of nozzle changes
+       self.anc_round_counter = 0           # number of trips to the ANC
+       self.pickup_counter = 0              # number of pick operations
+       self.total_distance = .0             # total travel distance
+       self.place_distance = .0             # placement travel distance
+       self.pickup_distance = .0            # pickup travel distance

-def optimization_assign_result(component_data, pcb_data, component_result, cycle_result, feeder_slot_result,
-                               nozzle_hinter=False, component_hinter=False, feeder_hinter=False):
if nozzle_hinter:
columns = ['H{}'.format(i + 1) for i in range(max_head_index)] + ['cycle']
nozzle_assign = pd.DataFrame(columns=columns)
for cycle, components in enumerate(component_result):
nozzle_assign.loc[cycle, 'cycle'] = cycle_result[cycle]
for head in range(max_head_index):
index = component_result[cycle][head]
if index == -1:
nozzle_assign.loc[cycle, 'H{}'.format(head + 1)] = ''
else:
nozzle_assign.loc[cycle, 'H{}'.format(head + 1)] = component_data.loc[index].nz
print(nozzle_assign)
print('')
if component_hinter:
columns = ['H{}'.format(i + 1) for i in range(max_head_index)] + ['cycle']
component_assign = pd.DataFrame(columns=columns)
for cycle, components in enumerate(component_result):
component_assign.loc[cycle, 'cycle'] = cycle_result[cycle]
for head in range(max_head_index):
index = component_result[cycle][head]
if index == -1:
component_assign.loc[cycle, 'H{}'.format(head + 1)] = ''
else:
component_assign.loc[cycle, 'H{}'.format(head + 1)] = component_data.loc[index].part
print(component_assign)
print('')
if feeder_hinter:
columns = ['H{}'.format(i + 1) for i in range(max_head_index)] + ['cycle']
feedr_assign = pd.DataFrame(columns=columns)
for cycle, components in enumerate(feeder_slot_result):
feedr_assign.loc[cycle, 'cycle'] = cycle_result[cycle]
for head in range(max_head_index):
slot = feeder_slot_result[cycle][head]
if slot == -1:
feedr_assign.loc[cycle, 'H{}'.format(head + 1)] = 'A'
else:
feedr_assign.loc[cycle, 'H{}'.format(head + 1)] = 'F{}'.format(
slot) if slot <= max_slot_index // 2 else 'R{}'.format(slot - max_head_index)
print(feedr_assign)
print('')
def axis_moving_time(distance, axis=0):
@ -172,8 +129,12 @@ def timer_wrapper(func):
    def measure_time(*args, **kwargs):
        start_time = time.time()
        result = func(*args, **kwargs)
-       print(f"function {func.__name__} running time : {time.time() - start_time:.3f} s")
+       hinter = True
+       for key, val in kwargs.items():
+           if key == 'hinter':
+               hinter = val
+       if hinter:
+           print(f"function {func.__name__} running time : {time.time() - start_time:.3f} s")
        return result

    return measure_time
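Editorial note, not part of the commit: because the wrapper only inspects **kwargs, timing output is suppressed only when hinter is passed as a keyword argument. A hypothetical usage sketch (dummy_step is illustrative, not a repository function):

@timer_wrapper
def dummy_step(data, hinter=True):
    return sum(data)

dummy_step([1, 2, 3], hinter=False)   # timing line suppressed
dummy_step([1, 2, 3])                 # prints the running time of dummy_step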
@ -440,7 +401,7 @@ def dynamic_programming_cycle_path(pcb_data, cycle_placement, assigned_feeder):
@timer_wrapper
-def greedy_placement_route_generation(component_data, pcb_data, component_result, cycle_result, feeder_slot_result):
+def greedy_placement_route_generation(component_data, pcb_data, component_result, cycle_result, feeder_slot_result, hinter=True):
    placement_result, head_sequence_result = [], []
    mount_point_index = [[] for _ in range(len(component_data))]
    mount_point_pos = [[] for _ in range(len(component_data))]
@ -951,7 +912,7 @@ def constraint_swap_mutation(component_points, individual, machine_number):
    offspring = individual.copy()
    idx, component_index = 0, random.randint(0, len(component_points) - 1)
-   for _, points in component_points:
+   for points in component_points.values():
        if component_index == 0:
            while True:
                index1, index2 = random.sample(range(points + machine_number - 2), 2)
@ -988,6 +949,7 @@ def random_selective(data, possibility):  # pick a random element according to its probability
    possibility = [p / sum_val for p in possibility]
    random_val = random.random()
+   idx = 0
    for idx, val in enumerate(possibility):
        random_val -= val
        if random_val <= 0:
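Editorial note, not part of the commit: a minimal self-contained sketch of the same roulette-wheel idea (names are illustrative, not from the repository):

import random

def roulette_pick(items, weights):
    total = sum(weights)
    probs = [w / total for w in weights]
    r, idx = random.random(), 0
    for idx, p in enumerate(probs):
        r -= p
        if r <= 0:
            break
    return items[idx]

print(roulette_pick(['short', 'medium', 'long'], [1, 3, 6]))   # 'long' is picked ~60% of the time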
@ -1061,17 +1023,25 @@ def get_line_config_number(machine_number, component_number):
    return div_counter

-def partial_data_convert(pcb_data, component_data, machine_assign, machine_number):
-   assignment_result = copy.deepcopy(machine_assign)
+def convert_line_assigment(pcb_data, component_data, assignment_result):
+   machine_number = len(assignment_result)
+
+   placement_points = []
    partial_pcb_data, partial_component_data = defaultdict(pd.DataFrame), defaultdict(pd.DataFrame)
    for machine_index in range(machine_number):
        partial_pcb_data[machine_index] = pd.DataFrame(columns=pcb_data.columns)
        partial_component_data[machine_index] = component_data.copy(deep=True)
+       placement_points.append(sum(assignment_result[machine_index]))
+
+   assert sum(placement_points) == len(pcb_data)

    # === averagely assign available feeder ===
    for part_index, data in component_data.iterrows():
        feeder_limit = data['feeder-limit']
-       feeder_points = [assignment_result[machine_index][part_index] for machine_index in range(max_machine_index)]
+       feeder_points = [assignment_result[machine_index][part_index] for machine_index in range(machine_number)]
+
+       for machine_index in range(machine_number):
+           partial_component_data[machine_index].loc[part_index, 'points'] = 0

        for machine_index in range(machine_number):
            if feeder_points[machine_index] == 0:
@ -1079,7 +1049,7 @@ def partial_data_convert(pcb_data, component_data, machine_assign, machine_numbe
            arg_feeder = max(math.floor(feeder_points[machine_index] / sum(feeder_points) * data['feeder-limit']), 1)
-           partial_component_data[machine_index].loc[part_index]['feeder-limit'] = arg_feeder
+           partial_component_data[machine_index].loc[part_index, 'feeder-limit'] = arg_feeder
            feeder_limit -= arg_feeder

        for machine_index in range(machine_number):
@ -1088,27 +1058,126 @@ def partial_data_convert(pcb_data, component_data, machine_assign, machine_numbe
            if feeder_points[machine_index] == 0:
                continue
-           partial_component_data[machine_index].loc[part_index]['feeder-limit'] += 1
+           partial_component_data[machine_index].loc[part_index, 'feeder-limit'] += 1
            feeder_limit -= 1

        for machine_index in range(machine_number):
            if feeder_points[machine_index] > 0:
-               assert partial_component_data[machine_index].loc[part_index]['feeder-limit'] > 0
+               assert partial_component_data[machine_index].loc[part_index, 'feeder-limit'] > 0

    # === assign placements ===
-   component_machine_index = [0 for _ in range(len(component_data))]
-   for _, data in pcb_data.iterrows():
-       part_index = component_data[component_data['part'] == data['part']].index.tolist()[0]
-       while True:
-           machine_index = component_machine_index[part_index]
-           if assignment_result[machine_index][part_index] == 0:
-               component_machine_index[part_index] += 1
-               machine_index += 1
-           else:
-               break
-       assignment_result[machine_index][part_index] -= 1
-       partial_pcb_data[machine_index] = pd.concat([partial_pcb_data[machine_index], pd.DataFrame(data).T])
+   part2idx = defaultdict(int)
+   for idx, data in component_data.iterrows():
+       part2idx[data.part] = idx
+
+   machine_average_pos = [[0, 0] for _ in range(machine_number)]
+   machine_step_counter = [0 for _ in range(machine_number)]
+   part_pcb_data = defaultdict(list)
+   for _, data in pcb_data.iterrows():
+       part_pcb_data[part2idx[data.part]].append(data)
+
+   multiple_component_index = []
+   for part_index in range(len(component_data)):
+       machine_assign_set = []
+       for machine_index in range(machine_number):
+           if assignment_result[machine_index][part_index]:
+               machine_assign_set.append(machine_index)
+
+       if len(machine_assign_set) == 1:
+           for data in part_pcb_data[part_index]:
+               machine_index = machine_assign_set[0]
+               machine_average_pos[machine_index][0] += data.x
+               machine_average_pos[machine_index][1] += data.y
+               machine_step_counter[machine_index] += 1
+               partial_component_data[machine_index].loc[part_index, 'points'] += 1
+               partial_pcb_data[machine_index] = pd.concat([partial_pcb_data[machine_index], pd.DataFrame(data).T])
+       elif len(machine_assign_set) > 1:
+           multiple_component_index.append(part_index)
+
+   for machine_index in range(machine_number):
+       if machine_step_counter[machine_index] == 0:
+           continue
+       machine_average_pos[machine_index][0] /= machine_step_counter[machine_index]
+       machine_average_pos[machine_index][1] /= machine_step_counter[machine_index]
+
+   for part_index in multiple_component_index:
+       for data in part_pcb_data[part_index]:
+           idx = -1
+           min_dist = None
+           for machine_index in range(machine_number):
+               if partial_component_data[machine_index].loc[part_index, 'points'] >= \
+                       assignment_result[machine_index][part_index]:
+                   continue
+               dist = (data.x - machine_average_pos[machine_index][0]) ** 2 + (
+                       data.y - machine_average_pos[machine_index][1]) ** 2
+               if min_dist is None or dist < min_dist:
+                   min_dist, idx = dist, machine_index
+           assert idx >= 0
+           machine_step_counter[idx] += 1
+           # incremental mean update of the machine's average mount position
+           machine_average_pos[idx][0] = (1 - 1 / machine_step_counter[idx]) * machine_average_pos[idx][0] + \
+               data.x / machine_step_counter[idx]
+           machine_average_pos[idx][1] = (1 - 1 / machine_step_counter[idx]) * machine_average_pos[idx][1] + \
+               data.y / machine_step_counter[idx]
+
+           partial_component_data[idx].loc[part_index, 'points'] += 1
+           partial_pcb_data[idx] = pd.concat([partial_pcb_data[idx], pd.DataFrame(data).T])
# === adjust the number of available feeders for single optimization separately ===
# for machine_index, data in partial_pcb_data.items():
# part_info = [] # part info list(part index, part points, available feeder-num, upper feeder-num)
# for part_index, cp_data in partial_component_data[machine_index].iterrows():
# if assignment_result[machine_index][part_index]:
# part_info.append(
# [part_index, assignment_result[machine_index][part_index], 1, cp_data['feeder-limit']])
#
# part_info = sorted(part_info, key=lambda x: x[1], reverse=True)
# start_index, end_index = 0, min(max_head_index - 1, len(part_info) - 1)
# while start_index < len(part_info):
# assign_part_point, assign_part_index = [], []
# for idx_ in range(start_index, end_index + 1):
# for _ in range(part_info[idx_][2]):
# assign_part_point.append(part_info[idx_][1] / part_info[idx_][2])
# assign_part_index.append(idx_)
#
# variance = np.std(assign_part_point)
# while start_index <= end_index:
# part_info_index = assign_part_index[np.argmax(assign_part_point)]
#
# if part_info[part_info_index][2] < part_info[part_info_index][3]:  # upper limit on the available number of feeders
# part_info[part_info_index][2] += 1
# end_index -= 1
#
# new_assign_part_point, new_assign_part_index = [], []
# for idx_ in range(start_index, end_index + 1):
# for _ in range(part_info[idx_][2]):
# new_assign_part_point.append(part_info[idx_][1] / part_info[idx_][2])
# new_assign_part_index.append(idx_)
#
# new_variance = np.std(new_assign_part_point)
# if variance < new_variance:
# part_info[part_info_index][2] -= 1
# end_index += 1
# break
#
# variance = new_variance
# assign_part_index, assign_part_point = new_assign_part_index.copy(), new_assign_part_point.copy()
# else:
# break
#
# start_index = end_index + 1
# end_index = min(start_index + max_head_index - 1, len(part_info) - 1)
#
# max_avl_feeder = max(part_info, key=lambda x: x[2])[2]
# for info in part_info:
# partial_component_data[machine_index].loc[info[0], 'feeder-limit'] = math.ceil(info[2] / max_avl_feeder)
    for machine_index in range(machine_number):
        partial_component_data[machine_index] = partial_component_data[machine_index][
            partial_component_data[machine_index]['points'] != 0].reset_index(drop=True)

    return partial_pcb_data, partial_component_data
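Editorial note, not part of the commit: the shared-part routing above sends each remaining placement to the machine whose running centroid is closest, then folds the point into that centroid; the real code additionally respects each machine's point quota. A compact self-contained sketch of the rule (illustrative names and data):

def route_point(point, centroids, counters):
    # pick the machine with the closest centroid, then update it with an incremental mean
    idx = min(range(len(centroids)),
              key=lambda m: (point[0] - centroids[m][0]) ** 2 + (point[1] - centroids[m][1]) ** 2)
    counters[idx] += 1
    n = counters[idx]
    centroids[idx][0] += (point[0] - centroids[idx][0]) / n
    centroids[idx][1] += (point[1] - centroids[idx][1]) / n
    return idx

centroids, counters = [[0.0, 0.0], [100.0, 0.0]], [1, 1]
print(route_point((90.0, 5.0), centroids, counters))   # -> 1, the second machine is closer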

View File

@ -1,25 +1,121 @@
import math
from base_optimizer.optimizer_common import *
+from base_optimizer.result_analysis import placement_info_evaluation

@timer_wrapper
-def feeder_allocate(component_data, pcb_data, feeder_data, figure=False):
+def feeder_priority_assignment(component_data, pcb_data, hinter=True):
+   feeder_allocate_val = None
+   component_result, cycle_result, feeder_slot_result = None, None, None
+   nozzle_pattern_list = feeder_nozzle_pattern(component_data)
+   pbar = tqdm(total=len(nozzle_pattern_list), desc='feeder priority process') if hinter else None
+   # step 1: determine the nozzle assignment pattern
+   for nozzle_pattern in nozzle_pattern_list:
+       feeder_data = pd.DataFrame(columns=['slot', 'part', 'arg'])
+       # step 2: allocate feeder positions
+       feeder_allocate(component_data, pcb_data, feeder_data, nozzle_pattern, figure=False)
+       # step 3: scan the feeder base to determine the component pickup order
+       component_assign, cycle_assign, feeder_slot_assign = feeder_base_scan(component_data, pcb_data, feeder_data)
+
+       info = placement_info_evaluation(component_data, pcb_data, component_assign, cycle_assign,
+                                        feeder_slot_assign, None, None, hinter=False)
+       val = 0.4 * info.cycle_counter + 2.15 * info.nozzle_change_counter + 0.11 * info.pickup_counter \
+           + 0.005 * info.anc_round_counter
+       if feeder_allocate_val is None or val < feeder_allocate_val:
+           feeder_allocate_val = val
+           component_result, cycle_result, feeder_slot_result = component_assign, cycle_assign, feeder_slot_assign
+       if pbar:
+           pbar.update(1)
+
+   return component_result, cycle_result, feeder_slot_result
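Editorial note, not part of the commit: the selection criterion above is a plain weighted sum; with hypothetical counts it reads:

cycle_counter, nozzle_change_counter, pickup_counter, anc_round_counter = 120, 8, 260, 3
val = 0.4 * cycle_counter + 2.15 * nozzle_change_counter + 0.11 * pickup_counter + 0.005 * anc_round_counter
print(round(val, 3))   # 48 + 17.2 + 28.6 + 0.015 = 93.815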
+def feeder_nozzle_pattern(component_data):
+   nozzle_pattern_list = []
+   nozzle_points = defaultdict(int)
+   for _, data in component_data.iterrows():
+       if data.points == 0:
+           continue
+       nozzle_points[data.nz] += data.points
+
+   head_assign_indexes = [int(math.ceil(max_head_index + 0.5) - 4.5 - pow(-1, h) * (math.ceil(h / 2) - 0.5)) for h in
+                          range(1, max_head_index + 1)]
+   while len(nozzle_points):
+       nozzle_heads, nozzle_indices = defaultdict(int), defaultdict(str)
+       min_points_nozzle = None
+       for idx, (nozzle, points) in enumerate(nozzle_points.items()):
+           nozzle_heads[nozzle], nozzle_indices[idx] = 1, nozzle
+           if min_points_nozzle is None or points < nozzle_points[min_points_nozzle]:
+               min_points_nozzle = nozzle
+
+       while sum(nozzle_heads.values()) != max_head_index:
+           max_cycle_nozzle = None
+           for nozzle, head_num in nozzle_heads.items():
+               if max_cycle_nozzle is None or nozzle_points[nozzle] / head_num > nozzle_points[max_cycle_nozzle] / \
+                       nozzle_heads[max_cycle_nozzle]:
+                   max_cycle_nozzle = nozzle
+               elif nozzle_points[nozzle] / head_num == nozzle_points[max_cycle_nozzle] / nozzle_heads[max_cycle_nozzle]:
+                   if head_num > nozzle_heads[max_cycle_nozzle]:
+                       max_cycle_nozzle = nozzle
+
+           assert max_cycle_nozzle is not None
+           nozzle_heads[max_cycle_nozzle] += 1
+
+       for permu in itertools.permutations(nozzle_indices.keys()):
+           nozzle_pattern_list.append([])
+           for idx in permu:
+               for _ in range(nozzle_heads[nozzle_indices[idx]]):
+                   nozzle_pattern_list[-1].append(nozzle_indices[idx])
+
+       if len(nozzle_points.keys()) > 1:
+           nozzle_average_points = []
+           for nozzle, head in nozzle_heads.items():
+               nozzle_average_points.append([nozzle, head, nozzle_points[nozzle] / head])
+
+           nozzle_average_points = sorted(nozzle_average_points, key=lambda x: -x[2])
+           idx = 0
+           nozzle_pattern_list.append(['' for _ in range(max_head_index)])
+           for nozzle, head, _ in nozzle_average_points:
+               for _ in range(head):
+                   nozzle_pattern_list[-1][head_assign_indexes[idx]] = nozzle
+                   idx += 1
+
+           idx = 1
+           nozzle_pattern_list.append(['' for _ in range(max_head_index)])
+           for nozzle, head, _ in nozzle_average_points:
+               for _ in range(head):
+                   nozzle_pattern_list[-1][head_assign_indexes[-idx]] = nozzle
+                   idx += 1
+       nozzle_points.pop(min_points_nozzle)
+   return nozzle_pattern_list
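Editorial note, not part of the commit: the head_assign_indexes expression above yields a centre-outward head order; a sketch assuming a 6-head gantry (max_head_index = 6 is an illustrative assumption):

import math

max_head_index = 6
head_assign_indexes = [int(math.ceil(max_head_index + 0.5) - 4.5 - pow(-1, h) * (math.ceil(h / 2) - 0.5))
                       for h in range(1, max_head_index + 1)]
print(head_assign_indexes)   # [3, 2, 4, 1, 5, 0]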
+def feeder_allocate(component_data, pcb_data, feeder_data, nozzle_pattern, figure=False, hinter=True):
    feeder_points, feeder_division_points = defaultdict(int), defaultdict(int)   # placement points per feeder
-   mount_center_pos = defaultdict(int)
+   mount_center_pos = defaultdict(float)
    feeder_limit, feeder_arrange = defaultdict(int), defaultdict(int)
    part_nozzle = defaultdict(str)

    feeder_base = [-2] * max_slot_index       # components installed on the feeder base (-2: unassigned, -1: occupied)
    feeder_base_points = [0] * max_slot_index  # remaining placement points on the feeder base

+   component_index = defaultdict(int)
+   for idx, data in component_data.iterrows():
+       component_index[data.part] = idx
+       feeder_limit[idx] = data['feeder-limit']
+       feeder_arrange[idx] = 0
+
    for _, data in pcb_data.iterrows():
        pos, part = data.x + stopper_pos[0], data.part
-       part_index = component_data[component_data.part == part].index.tolist()[0]
-       if part not in component_data:
-           feeder_limit[part_index] = component_data.loc[part_index]['feeder-limit']
-           feeder_arrange[part_index] = 0
+       part_index = component_index[part]

        feeder_points[part_index] += 1
        mount_center_pos[part_index] += ((pos - mount_center_pos[part_index]) / feeder_points[part_index])
@ -37,7 +133,7 @@ def feeder_allocate(component_data, pcb_data, feeder_data, figure=False):
    if feeder_data is not None:
        for _, feeder in feeder_data.iterrows():
            slot, part = feeder.slot, feeder.part
-           part_index = component_data[component_data.part == part].index.tolist()[0]
+           part_index = component_index[part]

            # feeder base slot assignment and the corresponding number of placement points
            feeder_base[slot], feeder_base_points[slot] = part_index, feeder_division_points[part_index]
@ -63,78 +159,14 @@ def feeder_allocate(component_data, pcb_data, feeder_data, figure=False):
                nozzle_component_points[nozzle].pop(index_)
                break

-   nozzle_assigned_counter = optimal_nozzle_assignment(component_data, pcb_data)
-   head_assign_indexes = list(range(max_head_index))
-   nozzle_pattern, optimal_nozzle_pattern, optimal_nozzle_points = [], None, 0
+   head_assign_indexes = [int(math.ceil(max_head_index + 0.5) - 4.5 - pow(-1, h) * (math.ceil(h / 2) - 0.5)) for h in
+                          range(1, max_head_index + 1)]
# sort first
nozzle_pattern_list = []
for nozzle, counter in nozzle_assigned_counter.items():
nozzle_pattern_list.append([nozzle, sum(nozzle_component_points[nozzle]) // counter])
nozzle_pattern_list.sort(key=lambda x: x[1], reverse=True)
# then determine the nozzle assignment pattern
upper_head, extra_head = defaultdict(int), defaultdict(int)
head_index = []
for nozzle, head in nozzle_assigned_counter.items():
# upper bound on the number of simultaneous pickups each nozzle can achieve
upper_head[nozzle] = min(len(nozzle_component[nozzle]), head)
extra_head[nozzle] = head - upper_head[nozzle]
head_counter = (sum(upper_head.values()) - 1) // 2
while head_counter >= 0:
if head_counter != (sum(upper_head.values()) - 1) - head_counter:
head_index.append((sum(upper_head.values()) - 1) - head_counter)
head_index.append(head_counter)
head_counter -= 1
nozzle_pattern = [None for _ in range(sum(upper_head.values()))]
for nozzle in upper_head.keys():
counter = upper_head[nozzle]
while counter:
nozzle_pattern[head_index[0]] = nozzle
counter -= 1
head_index.pop(0)
head = 0
while head + sum(extra_head.values()) <= len(nozzle_pattern):
extra_head_cpy = copy.deepcopy(extra_head)
increment = 0
while increment < sum(extra_head.values()):
extra_head_cpy[nozzle_pattern[head + increment]] -= 1
increment += 1
check_extra_head = True
for head_ in extra_head_cpy.values():
if head_ != 0:
check_extra_head = False   # any non-zero entry means the pattern cannot be formed
break
if check_extra_head:
increment = 0
while increment < sum(extra_head.values()):
nozzle_pattern.append(nozzle_pattern[head + increment])
increment += 1
for nozzle in extra_head.keys():
extra_head[nozzle] = 0
break
head += 1
for nozzle, head_ in extra_head.items():
while head_:
nozzle_pattern.append(nozzle)
head_ -= 1
    assert len(nozzle_pattern) == max_head_index

    while True:
        best_assign, best_assign_points = [], []
        best_assign_slot, best_assign_value = -1, -np.Inf
        best_nozzle_component, best_nozzle_component_points = None, None
        for slot in range(1, max_slot_index // 2 - (max_head_index - 1) * interval_ratio + 1):
-           nozzle_assigned_counter_cpy = copy.deepcopy(nozzle_assigned_counter)
            feeder_assign, feeder_assign_points = [], []
            tmp_feeder_limit, tmp_feeder_points = feeder_limit.copy(), feeder_points.copy()
            tmp_nozzle_component, tmp_nozzle_component_points = copy.deepcopy(nozzle_component), copy.deepcopy(
@ -144,24 +176,14 @@ def feeder_allocate(component_data, pcb_data, feeder_data, figure=False):
            for head in range(max_head_index):
                feeder_assign.append(feeder_base[slot + head * interval_ratio])

-               if scan_part := feeder_assign[-1] >= 0:
-                   nozzle = part_nozzle[scan_part]
+               if feeder_assign[-1] >= 0:
                    feeder_assign_points.append(feeder_base_points[slot + head * interval_ratio])
                    if feeder_assign_points[-1] <= 0:
                        feeder_assign[-1], feeder_assign_points[-1] = -1, 0
-                   elif nozzle in nozzle_assigned_counter_cpy.keys():
-                       nozzle_assigned_counter_cpy[nozzle] -= 1
-                       if nozzle_assigned_counter_cpy[nozzle] == 0:
-                           nozzle_assigned_counter_cpy.pop(nozzle)
                else:
                    feeder_assign_points.append(0)

-           if -2 not in feeder_assign:   # no available slot
-               if sum(feeder_assign_points) > optimal_nozzle_points:
-                   optimal_nozzle_points = sum(feeder_assign_points)
-                   optimal_nozzle_pattern = [''] * max_head_index
-                   for head in range(max_head_index):
-                       optimal_nozzle_pattern[head] = part_nozzle[feeder_assign[head]]
+           if -2 not in feeder_assign:
                continue

            assign_part_stack, assign_part_stack_points = [], []
@ -172,7 +194,7 @@ def feeder_allocate(component_data, pcb_data, feeder_data, figure=False):
                if len(nozzle_pattern) == 0:   # the nozzle pattern is empty: assign components first and derive the pattern from them
                    nozzle_assign = ''
                    max_points, max_nozzle_points = 0, 0
-                   for nozzle in nozzle_assigned_counter_cpy.keys():
+                   for nozzle in set(nozzle_pattern):
                        if len(tmp_nozzle_component[nozzle]) == 0:
                            continue

                        part = max(tmp_nozzle_component[nozzle],
@ -229,12 +251,6 @@ def feeder_allocate(component_data, pcb_data, feeder_data, figure=False):
                else:
                    part = -1   # component with a position conflict, does not occupy an available feeder

-               # update the nozzle count of the nozzle matching pattern
-               if nozzle_assign in nozzle_assigned_counter_cpy.keys():
-                   nozzle_assigned_counter_cpy[nozzle_assign] -= 1
-                   if nozzle_assigned_counter_cpy[nozzle_assign] == 0:
-                       nozzle_assigned_counter_cpy.pop(nozzle_assign)
-
                if part >= 0 and tmp_feeder_limit[part] == 0:
                    continue
@ -253,7 +269,6 @@ def feeder_allocate(component_data, pcb_data, feeder_data, figure=False):
                if feeder != -2:
                    continue

                for idx, part in enumerate(assign_part_stack):
                    feeder_type = component_data.loc[part].fdr
                    extra_width, extra_slot = feeder_width[feeder_type][0] + feeder_width[feeder_type][
                        1] - slot_interval, 1
@ -282,7 +297,8 @@ def feeder_allocate(component_data, pcb_data, feeder_data, figure=False):
                    part, points = assign_part_stack[0], assign_part_stack_points[0]

                    feeder_type = component_data.loc[part].fdr
-                   extra_width, extra_slot = feeder_width[feeder_type][0] + feeder_width[feeder_type][1] - slot_interval, 1
+                   extra_width = feeder_width[feeder_type][0] + feeder_width[feeder_type][1] - slot_interval
+                   extra_slot = 1

                    slot_overlap = False
                    while extra_width > 0:
@ -295,8 +311,8 @@ def feeder_allocate(component_data, pcb_data, feeder_data, figure=False):
                    if not slot_overlap:
                        feeder_assign[head], feeder_assign_points[head] = part, points
-                       extra_width, extra_head = feeder_width[feeder_type][0] + feeder_width[feeder_type][
-                           1] - head_interval, 1
+                       extra_width = feeder_width[feeder_type][0] + feeder_width[feeder_type][1] - head_interval
+                       extra_head = 1
                        while extra_width > 0 and head + extra_head < max_head_index:
                            feeder_assign[head + extra_head] = -1
                            extra_head += 1
@ -325,8 +341,8 @@ def feeder_allocate(component_data, pcb_data, feeder_data, figure=False):
            for head, feeder_ in enumerate(feeder_assign):
                if feeder_ < 0:
                    continue
-               average_slot.append(
-                   (mount_center_pos[feeder_] - slotf1_pos[0]) / slot_interval + 1 - head * interval_ratio)
+               average_slot.append((mount_center_pos[feeder_] - slotf1_pos[0]) / slot_interval + 1)
                if nozzle_pattern and component_data.loc[feeder_].nz != nozzle_pattern[head]:
                    nozzle_change_counter += 1
@ -346,7 +362,7 @@ def feeder_allocate(component_data, pcb_data, feeder_data, figure=False):
                    continue
                feeder_assign_points_cpy[head] -= min(points_filter)

-           assign_value -= 1e2 * e_nz_change * nozzle_change_counter + 1e-5 * abs(slot - average_slot)
+           assign_value -= (1e2 * e_nz_change * nozzle_change_counter + 1e-5 * abs(slot - average_slot))

            if assign_value >= best_assign_value and sum(feeder_assign_points) != 0:
@ -359,8 +375,6 @@ def feeder_allocate(component_data, pcb_data, feeder_data, figure=False):
        if not best_assign_points:
            break

-       if len(nozzle_pattern) == 0:
-           nozzle_pattern = [''] * max_head_index
        for idx, part in enumerate(best_assign):
            if part < 0:
                continue
@ -410,34 +424,8 @@ def feeder_allocate(component_data, pcb_data, feeder_data, figure=False):
        nozzle_component, nozzle_component_points = copy.deepcopy(best_nozzle_component), copy.deepcopy(
            best_nozzle_component_points)

-       if sum(best_assign_points) > optimal_nozzle_points:
-           optimal_nozzle_points = sum(best_assign_points)
-           optimal_nozzle_pattern = nozzle_pattern.copy()

    assert not list(filter(lambda x: x < 0, feeder_limit.values()))   # the number of assigned feeders is within the limit

-   # if all feeders are installed on the base, rescan the base to determine the optimal (ordered) nozzle pattern
-   if not optimal_nozzle_points:
-       feeder_base, feeder_base_points = [-2] * max_slot_index, [0] * max_slot_index
-       for _, feeder in feeder_data.iterrows():
-           part_index = component_data[component_data.part == feeder.part].index.tolist()[0]
-           # feeder base slot assignment and the corresponding number of placement points
-           feeder_base[feeder.slot], feeder_base_points[feeder.slot] = part_index, feeder_division_points[part_index]
-       # front base  TODO: rear base
-       for slot in range(max_slot_index // 2 - (max_head_index - 1) * interval_ratio):
-           sum_scan_points = 0
-           for head in range(max_head_index):
-               sum_scan_points += feeder_base_points[slot + head * interval_ratio]
-           if sum_scan_points > optimal_nozzle_points:
-               optimal_nozzle_pattern = ['' for _ in range(max_head_index)]
-               for head in range(max_head_index):
-                   if part := feeder_base[slot + head * interval_ratio] == -2:
-                       continue
-                   optimal_nozzle_pattern[head] = part_nozzle[part]

    # update the feeder occupancy information
    for _, data in feeder_data.iterrows():
        feeder_base[data.slot] = -1
@ -453,7 +441,7 @@ def feeder_allocate(component_data, pcb_data, feeder_data, figure=False):
    # draw the feeder layout
    for slot in range(max_slot_index // 2):
        plt.scatter(slotf1_pos[0] + slot_interval * slot, slotf1_pos[1], marker='x', s=12, color='black', alpha=0.5)
-       plt.text(slotf1_pos[0] + slot_interval * slot, slotf1_pos[1] - 45, slot + 1, ha='center', va='bottom',
+       plt.text(slotf1_pos[0] + slot_interval * slot, slotf1_pos[1] - 45, str(slot + 1), ha='center', va='bottom',
                 size=8)

    feeder_assign_range = []
@ -497,26 +485,31 @@ def feeder_allocate(component_data, pcb_data, feeder_data, figure=False):
        plt.ylim(-10, 100)
        plt.show()

-   return optimal_nozzle_pattern

-@timer_wrapper
-def feeder_base_scan(component_data, pcb_data, feeder_data, nozzle_pattern):
+def feeder_base_scan(component_data, pcb_data, feeder_data):
    feeder_assign_check = set()
    for _, feeder in feeder_data.iterrows():
        feeder_assign_check.add(feeder.part)

    component_points = [0] * len(component_data)
-   for i, data in pcb_data.iterrows():
-       part_index = component_data[component_data.part == data.part].index.tolist()[0]
-       component_points[part_index] += 1
-       nozzle_type = component_data.loc[part_index].nz
-       if nozzle_type not in nozzle_limit.keys() or nozzle_limit[nozzle_type] <= 0:
-           info = 'there is no available nozzle [' + nozzle_type + '] for the assembly process'
-           raise ValueError(info)
+   component_index = defaultdict(int)
+   for idx, data in component_data.iterrows():
+       if data.nz not in nozzle_limit.keys() or nozzle_limit[data.nz] <= 0:
+           info = 'there is no available nozzle [' + data.nz + '] for the assembly process'
+           raise ValueError(info)
+       component_points[idx] = data.points
+       component_index[data.part] = idx

    assert len(feeder_assign_check) == len(component_points) - component_points.count(0)   # all feeders have been assigned a slot

+   mount_center_slot = defaultdict(float)
+   for _, data in pcb_data.iterrows():
+       part_index = component_index[data.part]
+       mount_center_slot[part_index] += data.x        # accumulated x position, averaged below
+
+   for idx, pos in mount_center_slot.items():
+       mount_center_slot[idx] = (pos / component_points[idx] + stopper_pos[0] - slotf1_pos[0]) / slot_interval + 1

    feeder_part = [-1] * max_slot_index
    for _, data in feeder_data.iterrows():
        component_index = component_data[component_data.part == data.part].index.tolist()
@ -528,233 +521,263 @@ def feeder_base_scan(component_data, pcb_data, feeder_data, nozzle_pattern):
    component_result, cycle_result, feeder_slot_result = [], [], []   # placement point index and pickup slot results

    sum_nozzle_points, nozzle_pattern = -1, None
    for slot in range(max_slot_index // 2 - (max_head_index - 1) * interval_ratio):
        cur_nozzle_points, cur_nozzle_pattern = 0, ['' for _ in range(max_head_index)]
        for head in range(max_head_index):
            if (part := feeder_part[slot + head * interval_ratio]) == -1:
                continue
            cur_nozzle_pattern[head] = component_data.loc[part].nz
            cur_nozzle_points += component_points[part]
        if cur_nozzle_points > sum_nozzle_points:
            sum_nozzle_points = cur_nozzle_points
            nozzle_pattern = cur_nozzle_pattern

    nozzle_mode, nozzle_mode_cycle = [nozzle_pattern], [0]   # nozzle matching pattern

    value_increment_base = 0
    while True:
        # === loop within a cycle ===
        assigned_part = [-1 for _ in range(max_head_index)]   # components assigned to the heads in the current scan
        assigned_cycle = [0 for _ in range(max_head_index)]   # maximum assignment counts of the scanned components
        assigned_slot = [-1 for _ in range(max_head_index)]   # feeder slots assigned in the current scan

        best_assigned_eval_func = -float('inf')
        nozzle_insert_cycle = 0
        for cycle_index, nozzle_cycle in enumerate(nozzle_mode):
            scan_eval_func_list = []   # best solutions obtained from several scans
            # best result scanned so far under the nozzle_cycle pattern
            cur_scan_part = [-1 for _ in range(max_head_index)]
            cur_scan_cycle = [0 for _ in range(max_head_index)]
            cur_scan_slot = [-1 for _ in range(max_head_index)]
            cur_nozzle_limit = copy.deepcopy(nozzle_limit)

            while True:
                best_scan_part = [-1 for _ in range(max_head_index)]
                best_scan_cycle = [-1 for _ in range(max_head_index)]
                best_scan_slot = [-1 for _ in range(max_head_index)]
                best_scan_nozzle_limit = copy.deepcopy(cur_nozzle_limit)

                scan_eval_func, search_break = -float('inf'), True

                # scan of the front feeder base
                for slot in range(1, max_slot_index // 2 - (max_head_index - 1) * interval_ratio + 1):
                    if sum(feeder_part[slot: slot + max_head_index * interval_ratio: interval_ratio]) == -max_head_index:
                        continue

                    scan_cycle, scan_part, scan_slot = cur_scan_cycle.copy(), cur_scan_part.copy(), cur_scan_slot.copy()
                    scan_nozzle_limit = copy.deepcopy(cur_nozzle_limit)

                    # pre-scan to determine the pickup count of each component type (look-ahead)
                    preview_scan_part = defaultdict(int)
                    for head in range(max_head_index):
                        part = feeder_part[slot + head * interval_ratio]

                        # the placement head and the pickup slot satisfy the correspondence relation
                        if scan_part[head] == -1 and part != -1 and component_points[part] > 0 and scan_part.count(
                                part) < component_points[part]:
                            preview_scan_part[part] += 1

                    component_counter = 0
                    for head in range(max_head_index):
                        part = feeder_part[slot + head * interval_ratio]
                        # 1. matching condition: the placement head and the pickup slot correspond
                        if scan_part[head] == -1 and part != -1 and component_points[part] > 0 and scan_part.count(
                                part) < component_points[part]:
                            # 2. matching condition: the number of available nozzles is not exceeded
                            nozzle = component_data.loc[part].nz
                            if scan_nozzle_limit[nozzle] <= 0:
                                continue

                            # 3. increment condition: a new component type must not decrease the cost function (look-ahead)
                            if scan_cycle.count(0) == max_head_index:
                                gang_pick_change = component_points[part]
                            else:
                                prev_cycle = min(filter(lambda x: x > 0, scan_cycle))
                                # gain in simultaneous pickups
                                gang_pick_change = min(prev_cycle, component_points[part] // preview_scan_part[part])

                            # 4. pickup travel distance condition: pick neighbouring components together to shorten the path
                            # reference_slot = -1
                            # for head_, slot_ in enumerate(scan_slot):
                            #     if slot_ != -1:
                            #         reference_slot = slot_ - head_ * interval_ratio
                            # if reference_slot != -1 and abs(reference_slot - slot) > (max_head_index - 1) * interval_ratio:
                            #     continue

                            # 5. compare the gain in simultaneous pickups with the number of nozzle changes
                            prev_nozzle_change = 0
                            if cycle_index + 1 < len(nozzle_mode):
                                prev_nozzle_change = 2 * (nozzle_cycle[head] != nozzle_mode[cycle_index + 1][head])

                            # avoid low spindle occupancy in the first cycle
                            if nozzle_cycle[head] == '':
                                nozzle_change = 0
                            else:
                                nozzle_change = 2 * (nozzle != nozzle_cycle[head])

                            if cycle_index + 1 < len(nozzle_mode):
                                nozzle_change += 2 * (nozzle != nozzle_mode[cycle_index + 1][head])
                            nozzle_change -= prev_nozzle_change

                            val = e_gang_pick * gang_pick_change - e_nz_change * nozzle_change
                            if val < value_increment_base:
                                continue

                            component_counter += 1

                            scan_part[head] = part
                            scan_cycle[head] = component_points[part] // preview_scan_part[part]
                            scan_slot[head] = slot + head * interval_ratio

                            scan_nozzle_limit[nozzle] -= 1

                    nozzle_counter = 0   # number of nozzle changes
                    # previous cycle
                    for head, nozzle in enumerate(nozzle_cycle):
                        if scan_part[head] == -1:
                            continue
                        if component_data.loc[scan_part[head]].nz != nozzle and nozzle != '':
                            nozzle_counter += 2
                    # next cycle (additional nozzle changes)
                    if cycle_index + 1 < len(nozzle_mode):
                        for head, nozzle in enumerate(nozzle_mode[cycle_index + 1]):
                            if scan_part[head] == -1:
                                continue
                            prev_counter, new_counter = 0, 0
                            if nozzle_cycle[head] != nozzle and nozzle_cycle[head] != '' and nozzle != '':
                                prev_counter += 2
                            if component_data.loc[scan_part[head]].nz != nozzle and nozzle != '':
                                new_counter += 2
                            nozzle_counter += new_counter - prev_counter
                    else:
                        for head, nozzle in enumerate(nozzle_mode[0]):
                            if scan_part[head] == -1:
                                continue
                            prev_counter, new_counter = 0, 0
                            if nozzle_cycle[head] != nozzle and nozzle_cycle[head] != '' and nozzle != '':
                                prev_counter += 2
                            if component_data.loc[scan_part[head]].nz != nozzle and nozzle != '':
                                new_counter += 2
                            nozzle_counter += new_counter - prev_counter

                    if component_counter == 0:   # no component was scanned in the current situation
                        continue

                    search_break = False

                    scan_part_head = defaultdict(list)
                    for head, part in enumerate(scan_part):
                        if part == -1:
                            continue
                        scan_part_head[part].append(head)

                    for part, heads in scan_part_head.items():
                        part_cycle = component_points[part] // len(heads)
                        for head in heads:
                            scan_cycle[head] = part_cycle

                    # compute the cost function after the scan and keep the best solution
                    # short-term gain
                    cycle = min(filter(lambda x: x > 0, scan_cycle))
                    gang_pick_counter, gang_pick_slot_set = 0, set()
                    for head, pick_slot in enumerate(scan_slot):
                        gang_pick_slot_set.add(pick_slot - head * interval_ratio)
                    eval_func_short_term = e_gang_pick * (max_head_index - scan_slot.count(-1) - len(
                        gang_pick_slot_set)) * cycle - e_nz_change * nozzle_counter

                    # long-term gain
                    gang_pick_slot_dict = defaultdict(list)
                    for head, pick_slot in enumerate(scan_slot):
                        if pick_slot == -1:
                            continue
                        gang_pick_slot_dict[pick_slot - head * interval_ratio].append(scan_cycle[head])

                    eval_func_long_term = 0
                    for pick_cycle in gang_pick_slot_dict.values():
                        while pick_cycle:
                            min_cycle = min(pick_cycle)
                            eval_func_long_term += e_gang_pick * (len(pick_cycle) - 1) * min(pick_cycle)
                            pick_cycle = list(map(lambda c: c - min_cycle, pick_cycle))
                            pick_cycle = list(filter(lambda c: c > 0, pick_cycle))
                    eval_func_long_term -= e_nz_change * nozzle_counter

                    # travel path during the pickup process
                    pick_slot_set = set()
                    for head, pick_slot in enumerate(scan_slot):
                        if pick_slot == -1:
                            continue
                        pick_slot_set.add(pick_slot - head * interval_ratio)

                    slot_offset = 0
                    for head, part in enumerate(scan_part):
                        if part == -1:
                            continue
                        slot_offset += abs(scan_slot[head] - mount_center_slot[part])

                    ratio = 0.5
                    eval_func = (1 - ratio) * eval_func_short_term + ratio * eval_func_long_term - 1e-5 * (
                            max(pick_slot_set) - min(pick_slot_set)) - 1e-5 * slot_offset
                    if eval_func >= scan_eval_func:
                        scan_eval_func = eval_func
                        best_scan_part, best_scan_cycle = scan_part.copy(), scan_cycle.copy()
                        best_scan_slot = scan_slot.copy()

                        best_scan_nozzle_limit = copy.deepcopy(scan_nozzle_limit)

                if search_break:
                    break

                scan_eval_func_list.append(scan_eval_func)

                cur_scan_part = best_scan_part.copy()
                cur_scan_slot = best_scan_slot.copy()
                cur_scan_cycle = best_scan_cycle.copy()
                cur_nozzle_limit = copy.deepcopy(best_scan_nozzle_limit)

            if len(scan_eval_func_list) != 0:
                if sum(scan_eval_func_list) > best_assigned_eval_func:
                    best_assigned_eval_func = sum(scan_eval_func_list)

                    assigned_part = cur_scan_part.copy()
                    assigned_slot = cur_scan_slot.copy()
                    assigned_cycle = cur_scan_cycle.copy()

                    nozzle_insert_cycle = cycle_index

        # remove the corresponding number of placement points from the feeder base
        nonzero_cycle = [cycle for cycle in assigned_cycle if cycle > 0]
        if not nonzero_cycle:
            value_increment_base -= max_head_index
            continue

        for head, slot in enumerate(assigned_slot):
            if assigned_part[head] == -1:
                continue
            component_points[feeder_part[slot]] -= min(nonzero_cycle)

        insert_cycle = sum([nozzle_mode_cycle[c] for c in range(nozzle_insert_cycle + 1)])

        component_result.insert(insert_cycle, assigned_part)
        cycle_result.insert(insert_cycle, min(nonzero_cycle))
        feeder_slot_result.insert(insert_cycle, assigned_slot)

        # update the nozzle matching pattern
        cycle_nozzle = nozzle_mode[nozzle_insert_cycle].copy()
        for head, component in enumerate(assigned_part):
            if component == -1:
                continue
            cycle_nozzle[head] = component_data.loc[component].nz

        if cycle_nozzle == nozzle_mode[nozzle_insert_cycle]:
            nozzle_mode_cycle[nozzle_insert_cycle] += 1
        else:
            nozzle_mode.insert(nozzle_insert_cycle + 1, cycle_nozzle)
            nozzle_mode_cycle.insert(nozzle_insert_cycle + 1, 1)

        if sum(component_points) == 0:
            break

    return component_result, cycle_result, feeder_slot_result
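Editorial note, not part of the commit: the long-term term above credits e_gang_pick for every simultaneous pick that a slot's remaining cycles can still support. A standalone sketch of that loop with illustrative values (names and data are assumptions, not repository code):

from collections import defaultdict

e_gang_pick = 0.6   # weight value taken from the parameters earlier in this commit

def long_term_gain(scan_slot, scan_cycle, interval_ratio=1):
    gang_pick_slot_dict = defaultdict(list)
    for head, pick_slot in enumerate(scan_slot):
        if pick_slot == -1:
            continue
        gang_pick_slot_dict[pick_slot - head * interval_ratio].append(scan_cycle[head])

    gain = 0
    for pick_cycle in gang_pick_slot_dict.values():
        while pick_cycle:
            min_cycle = min(pick_cycle)
            gain += e_gang_pick * (len(pick_cycle) - 1) * min_cycle
            pick_cycle = [c - min_cycle for c in pick_cycle]
            pick_cycle = [c for c in pick_cycle if c > 0]
    return gain

# three heads aligned on the same pick column with 10, 10 and 4 remaining cycles:
print(long_term_gain([3, 4, 5], [10, 10, 4]))   # 0.6 * (2*4 + 1*6) = 8.4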

View File

@ -12,38 +12,31 @@ from base_optimizer.result_analysis import *
def base_optimizer(machine_index, pcb_data, component_data, feeder_data=None, method='', hinter=False):

-   if method == 'cell_division':    # genetic algorithm based on cell division
-       component_result, cycle_result, feeder_slot_result = optimizer_celldivision(pcb_data, component_data,
-                                                                                   hinter=False)
+   if method == 'cell-division':    # genetic algorithm based on cell division
+       component_result, cycle_result, feeder_slot_result = optimizer_celldivision(pcb_data, component_data)
        placement_result, head_sequence = greedy_placement_route_generation(component_data, pcb_data, component_result,
                                                                            cycle_result, feeder_slot_result)
-   elif method == 'feeder_scan':    # feeder-priority algorithm based on feeder-base scanning
-       # step 1: allocate feeder positions
-       nozzle_pattern = feeder_allocate(component_data, pcb_data, feeder_data, figure=False)
-       # step 2: scan the feeder base to determine the component pickup order
-       component_result, cycle_result, feeder_slot_result = feeder_base_scan(component_data, pcb_data, feeder_data,
-                                                                             nozzle_pattern)
-       # step 3: placement route planning
+   elif method == 'feeder-scan':    # feeder-priority algorithm based on feeder-base scanning
+       component_result, cycle_result, feeder_slot_result = feeder_priority_assignment(component_data, pcb_data)
+
        placement_result, head_sequence = greedy_placement_route_generation(component_data, pcb_data, component_result,
                                                                            cycle_result, feeder_slot_result)
        # placement_result, head_sequence = beam_search_for_route_generation(component_data, pcb_data, component_result,
        #                                                                    cycle_result, feeder_slot_result)
-   elif method == 'hybrid_genetic':   # hybrid genetic algorithm based on pickup groups
+   elif method == 'hybrid-genetic':   # hybrid genetic algorithm based on pickup groups
        component_result, cycle_result, feeder_slot_result, placement_result, head_sequence = optimizer_hybrid_genetic(
            pcb_data, component_data, hinter=False)

    elif method == 'aggregation':      # batch-level integer programming + heuristic
        component_result, cycle_result, feeder_slot_result, placement_result, head_sequence = optimizer_aggregation(
            component_data, pcb_data)
-   elif method == 'genetic_scanning':
+   elif method == 'genetic-scanning':
        component_result, cycle_result, feeder_slot_result, placement_result, head_sequence = optimizer_genetic_scanning(
            component_data, pcb_data, hinter=False)
-   elif method == 'mip_model':
+   elif method == 'mip-model':
        component_result, cycle_result, feeder_slot_result, placement_result, head_sequence = optimizer_mathmodel(
            component_data, pcb_data, hinter=True)
-   elif method == "two_phase":
+   elif method == "two-phase":
        component_result, feeder_slot_result, cycle_result = gurobi_optimizer(pcb_data, component_data, feeder_data,
                                                                              initial=True, partition=True,
                                                                              reduction=True, hinter=hinter)
@ -51,32 +44,11 @@ def base_optimizer(machine_index, pcb_data, component_data, feeder_data=None, me
        placement_result, head_sequence = scan_based_placement_route_generation(component_data, pcb_data,
                                                                                component_result, cycle_result)
    else:
-       raise 'method is not existed'
+       raise ValueError('machine optimizer method ' + method + ' does not exist')

-   info = OptInfo()
-   assigned_nozzle = ['' if idx == -1 else component_data.loc[idx]['nz'] for idx in component_result[0]]
-   info.cycle_counter = sum(cycle_result)
-   for cycle in range(len(cycle_result)):
-       pick_slot = set()
-       for head in range(max_head_index):
-           idx = component_result[cycle][head]
-           if idx == -1:
-               continue
-           nozzle = component_data.loc[idx]['nz']
-           if nozzle != assigned_nozzle[head]:
-               if assigned_nozzle[head] != '':
-                   info.nozzle_change_counter += 1
-               assigned_nozzle[head] = nozzle
-           pick_slot.add(feeder_slot_result[cycle][head] - head * interval_ratio)
-       info.pickup_counter += len(pick_slot) * cycle_result[cycle]
-       pick_slot = list(pick_slot)
-       pick_slot.sort()
-       for idx in range(len(pick_slot) - 1):
-           info.pickup_movement += abs(pick_slot[idx + 1] - pick_slot[idx])
+   # estimate the assembly time
+   info = placement_info_evaluation(component_data, pcb_data, component_result, cycle_result, feeder_slot_result,
+                                    placement_result, head_sequence, hinter=False)

    if hinter:
        optimization_assign_result(component_data, pcb_data, component_result, cycle_result, feeder_slot_result,
@ -85,12 +57,11 @@ def base_optimizer(machine_index, pcb_data, component_data, feeder_data=None, me
print('----- Placement machine ' + str(machine_index) + ' ----- ') print('----- Placement machine ' + str(machine_index) + ' ----- ')
print('-Cycle counter: {}'.format(info.cycle_counter)) print('-Cycle counter: {}'.format(info.cycle_counter))
print('-Nozzle change counter: {}'.format(info.nozzle_change_counter)) print(f'-Nozzle change counter: {info.nozzle_change_counter: d}')
print('-Pick operation counter: {}'.format(info.pickup_counter)) print(f'-ANC round: {info.anc_round_counter: d}')
print('-Pick movement: {}'.format(info.pickup_movement)) print(f'-Pick operation counter: {info.pickup_counter: d}')
print(f'-Pick time: {info.pickup_time: .3f}, distance: {info.pickup_distance: .3f}')
print(f'-Place time: {info.place_time: .3f}, distance: {info.place_distance: .3f}')
print('------------------------------ ') print('------------------------------ ')
# estimate the placement time
info.placement_time = placement_time_estimate(component_data, pcb_data, component_result, cycle_result,
feeder_slot_result, placement_result, head_sequence, hinter=False)
return info return info
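For orientation after the refactor above: base_optimizer now hands back an OptInfo record instead of a bare time value, so downstream code reads named fields rather than positional results. A minimal, hypothetical helper (not part of the commit; the field names follow the OptInfo definition added in this commit) that condenses such a record:

def summarize_opt_info(info):
    # hypothetical convenience helper: collapse an OptInfo into a plain dict
    moving_time = info.pickup_time + info.round_time + info.place_time
    return {
        'cycles': info.cycle_counter,
        'nozzle_changes': info.nozzle_change_counter,
        'anc_rounds': info.anc_round_counter,
        'pickups': info.pickup_counter,
        'moving_time_s': round(moving_time, 3),
        'estimated_total_s': round(moving_time + info.operation_time, 3),
    }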
View File
@@ -423,7 +423,7 @@ def optimization_assign_result(component_data, pcb_data, component_result, cycle
component_assign.loc[cycle, 'H{}'.format(head + 1)] = '' component_assign.loc[cycle, 'H{}'.format(head + 1)] = ''
else: else:
part = component_data.loc[index]['part'] part = component_data.loc[index]['part']
component_assign.loc[cycle, 'H{}'.format(head + 1)] = part component_assign.loc[cycle, 'H{}'.format(head + 1)] = 'C' + str(index)
print(component_assign) print(component_assign)
print('') print('')
@@ -446,32 +446,36 @@ def optimization_assign_result(component_data, pcb_data, component_result, cycle
print('') print('')
def placement_time_estimate(component_data, pcb_data, component_result, cycle_result, feeder_slot_result, def placement_info_evaluation(component_data, pcb_data, component_result, cycle_result, feeder_slot_result,
placement_result, head_sequence, hinter=True) -> float: placement_result=None, head_sequence=None, hinter=False):
# === optimization result parameters ===
info = OptInfo()
# === validation === # === validation ===
total_points = 0 info.total_points = 0
for cycle, components in enumerate(component_result): for cycle, components in enumerate(component_result):
for head, component in enumerate(components): for head, component in enumerate(components):
if component == -1: if component == -1:
continue continue
total_points += cycle_result[cycle] info.total_points += cycle_result[cycle]
if total_points != len(pcb_data): if info.total_points != len(pcb_data):
warning_info = 'the number of placement points does not match the PCB data. ' warning_info = 'the number of placement points does not match the PCB data. '
warnings.warn(warning_info, UserWarning) warnings.warn(warning_info, UserWarning)
return 0. return 0.
for placements in placement_result: if placement_result:
for placement in placements: total_points = info.total_points
if placement == -1: for placements in placement_result:
continue for placement in placements:
total_points -= 1 if placement == -1:
continue
total_points -= 1
if total_points != 0: if total_points != 0:
warnings.warn( warnings.warn(
'the component assignment result and placement result are not consistent. ', 'the component assignment result and placement result are not consistent. ',
UserWarning) UserWarning)
return 0. return 0.
feeder_arrangement = defaultdict(set) feeder_arrangement = defaultdict(set)
for cycle, feeder_slots in enumerate(feeder_slot_result): for cycle, feeder_slots in enumerate(feeder_slot_result):
@@ -486,12 +490,6 @@ def placement_time_estimate(component_data, pcb_data, component_result, cycle_re
warnings.warn(info, UserWarning) warnings.warn(info, UserWarning)
return 0. return 0.
total_pickup_time, total_round_time, total_place_time = .0, .0, 0 # pick-up time, round-trip time, placement time
total_operation_time = .0 # operation time
total_nozzle_change_counter = 0 # total number of nozzle changes
total_pick_counter = 0 # total number of pick-ups
total_mount_distance, total_pick_distance = .0, .0 # placement distance, pick-up distance
total_distance = 0 # total travel distance
cur_pos, next_pos = anc_marker_pos, [0, 0] # current position of the placement head cur_pos, next_pos = anc_marker_pos, [0, 0] # current position of the placement head
# initialize the nozzle assignment of the first cycle # initialize the nozzle assignment of the first cycle
@@ -503,7 +501,6 @@ def placement_time_estimate(component_data, pcb_data, component_result, cycle_re
continue continue
else: else:
nozzle_assigned[head] = component_data.loc[idx]['nz'] nozzle_assigned[head] = component_data.loc[idx]['nz']
break
for cycle_set, _ in enumerate(component_result): for cycle_set, _ in enumerate(component_result):
floor_cycle, ceil_cycle = sum(cycle_result[:cycle_set]), sum(cycle_result[:(cycle_set + 1)]) floor_cycle, ceil_cycle = sum(cycle_result[:cycle_set]), sum(cycle_result[:(cycle_set + 1)])
@@ -527,9 +524,9 @@ def placement_time_estimate(component_data, pcb_data, component_result, cycle_re
next_pos = anc_marker_pos next_pos = anc_marker_pos
move_time = max(axis_moving_time(cur_pos[0] - next_pos[0], 0), move_time = max(axis_moving_time(cur_pos[0] - next_pos[0], 0),
axis_moving_time(cur_pos[1] - next_pos[1], 1)) axis_moving_time(cur_pos[1] - next_pos[1], 1))
total_round_time += move_time info.round_time += move_time
info.anc_round_counter += 1
total_distance += max(abs(cur_pos[0] - next_pos[0]), abs(cur_pos[1] - next_pos[1])) info.total_distance += max(abs(cur_pos[0] - next_pos[0]), abs(cur_pos[1] - next_pos[1]))
cur_pos = next_pos cur_pos = next_pos
pick_slot = list(set(pick_slot)) pick_slot = list(set(pick_slot))
@@ -541,94 +538,95 @@ def placement_time_estimate(component_data, pcb_data, component_result, cycle_re
next_pos = [slotf1_pos[0] + slot_interval * (slot - 1), slotf1_pos[1]] next_pos = [slotf1_pos[0] + slot_interval * (slot - 1), slotf1_pos[1]]
else: else:
next_pos = [slotr1_pos[0] - slot_interval * (max_slot_index - slot - 1), slotr1_pos[1]] next_pos = [slotr1_pos[0] - slot_interval * (max_slot_index - slot - 1), slotr1_pos[1]]
total_operation_time += t_pick info.operation_time += t_pick
total_pick_counter += 1 info.pickup_counter += 1
move_time = max(axis_moving_time(cur_pos[0] - next_pos[0], 0), move_time = max(axis_moving_time(cur_pos[0] - next_pos[0], 0),
axis_moving_time(cur_pos[1] - next_pos[1], 1)) axis_moving_time(cur_pos[1] - next_pos[1], 1))
if idx == 0: if idx == 0:
total_round_time += move_time info.round_time += move_time
else: else:
total_pickup_time += move_time info.pickup_time += move_time
total_distance += max(abs(cur_pos[0] - next_pos[0]), abs(cur_pos[1] - next_pos[1])) info.total_distance += max(abs(cur_pos[0] - next_pos[0]), abs(cur_pos[1] - next_pos[1]))
if slot != pick_slot[0]: if slot != pick_slot[0]:
total_pick_distance += max(abs(cur_pos[0] - next_pos[0]), abs(cur_pos[1] - next_pos[1])) info.pickup_distance += max(abs(cur_pos[0] - next_pos[0]), abs(cur_pos[1] - next_pos[1]))
cur_pos = next_pos cur_pos = next_pos
# fixed-camera inspection # fixed-camera inspection
for head in range(max_head_index): # for head in range(max_head_index):
if component_result[cycle_set][head] == -1: # if component_result[cycle_set][head] == -1:
continue # continue
camera = component_data.loc[component_result[cycle_set][head]]['camera'] # camera = component_data.loc[component_result[cycle_set][head]]['camera']
if camera == '固定相机': # if camera == '固定相机':
next_pos = [fix_camera_pos[0] - head * head_interval, fix_camera_pos[1]] # next_pos = [fix_camera_pos[0] - head * head_interval, fix_camera_pos[1]]
move_time = max(axis_moving_time(cur_pos[0] - next_pos[0], 0), # move_time = max(axis_moving_time(cur_pos[0] - next_pos[0], 0),
axis_moving_time(cur_pos[1] - next_pos[1], 1)) # axis_moving_time(cur_pos[1] - next_pos[1], 1))
total_round_time += move_time # info.round_time += move_time
#
total_distance += max(abs(cur_pos[0] - next_pos[0]), abs(cur_pos[1] - next_pos[1])) # info.total_distance += max(abs(cur_pos[0] - next_pos[0]), abs(cur_pos[1] - next_pos[1]))
total_operation_time += t_fix_camera_check # info.operation_time += t_fix_camera_check
cur_pos = next_pos # cur_pos = next_pos
# placement path # placement path
for head in head_sequence[cycle]: if placement_result and head_sequence:
index = placement_result[cycle][head] for head in head_sequence[cycle]:
if index == -1: index = placement_result[cycle][head]
continue if index == -1:
mount_pos.append([pcb_data.iloc[index]['x'] - head * head_interval + stopper_pos[0], continue
pcb_data.iloc[index]['y'] + stopper_pos[1]]) mount_pos.append([pcb_data.iloc[index]['x'] - head * head_interval + stopper_pos[0],
mount_angle.append(pcb_data.iloc[index]['r']) pcb_data.iloc[index]['y'] + stopper_pos[1]])
mount_angle.append(pcb_data.iloc[index]['r'])
# compute the placement path separately # compute the placement path separately
for cntPoints in range(len(mount_pos) - 1): for cntPoints in range(len(mount_pos) - 1):
total_mount_distance += max(abs(mount_pos[cntPoints][0] - mount_pos[cntPoints + 1][0]), info.place_distance += max(abs(mount_pos[cntPoints][0] - mount_pos[cntPoints + 1][0]),
abs(mount_pos[cntPoints][1] - mount_pos[cntPoints + 1][1])) abs(mount_pos[cntPoints][1] - mount_pos[cntPoints + 1][1]))
# account for the extra placement time of the R-axis pre-rotation compensating the coaxial angle rotation # account for the extra placement time of the R-axis pre-rotation compensating the coaxial angle rotation
total_operation_time += head_rotary_time(mount_angle[0]) # extra placement time for the compensating rotation info.operation_time += head_rotary_time(mount_angle[0]) # extra placement time for the compensating rotation
total_operation_time += t_nozzle_put * nozzle_put_counter + t_nozzle_pick * nozzle_pick_counter info.operation_time += t_nozzle_put * nozzle_put_counter + t_nozzle_pick * nozzle_pick_counter
for idx, pos in enumerate(mount_pos): for idx, pos in enumerate(mount_pos):
total_operation_time += t_place info.operation_time += t_place
move_time = max(axis_moving_time(cur_pos[0] - pos[0], 0), axis_moving_time(cur_pos[1] - pos[1], 1)) move_time = max(axis_moving_time(cur_pos[0] - pos[0], 0), axis_moving_time(cur_pos[1] - pos[1], 1))
if idx == 0: if idx == 0:
total_round_time += move_time info.round_time += move_time
else: else:
total_place_time += move_time info.place_time += move_time
total_distance += max(abs(cur_pos[0] - pos[0]), abs(cur_pos[1] - pos[1])) info.total_distance += max(abs(cur_pos[0] - pos[0]), abs(cur_pos[1] - pos[1]))
cur_pos = pos cur_pos = pos
total_nozzle_change_counter += nozzle_put_counter + nozzle_pick_counter info.nozzle_change_counter += nozzle_put_counter + nozzle_pick_counter
total_time = total_pickup_time + total_round_time + total_place_time + total_operation_time
minutes, seconds = int(total_time // 60), int(total_time) % 60
millisecond = int((total_time - minutes * 60 - seconds) * 60)
info.total_time = info.pickup_time + info.round_time + info.place_time + info.operation_time
minutes, seconds = int(info.total_time // 60), int(info.total_time) % 60
millisecond = int((info.total_time - minutes * 60 - seconds) * 1000)
info.cycle_counter = sum(cycle_result)
if hinter: if hinter:
optimization_assign_result(component_data, pcb_data, component_result, cycle_result, feeder_slot_result, optimization_assign_result(component_data, pcb_data, component_result, cycle_result, feeder_slot_result,
nozzle_hinter=False, component_hinter=False, feeder_hinter=False) nozzle_hinter=False, component_hinter=False, feeder_hinter=False)
print('-Cycle counter: {}'.format(sum(cycle_result))) print('-Cycle counter: {}'.format(info.cycle_counter))
print('-Nozzle change counter: {}'.format(total_nozzle_change_counter // 2)) print('-Nozzle change counter: {}'.format(info.nozzle_change_counter // 2))
print('-Pick operation counter: {}'.format(total_pick_counter)) print('-Pick operation counter: {}'.format(info.pickup_counter))
print('-Expected mounting tour length: {} mm'.format(total_mount_distance)) print('-Expected mounting tour length: {} mm'.format(info.place_distance))
print('-Expected picking tour length: {} mm'.format(total_pick_distance)) print('-Expected picking tour length: {} mm'.format(info.pickup_distance))
print('-Expected total tour length: {} mm'.format(total_distance)) print('-Expected total tour length: {} mm'.format(info.total_distance))
print('-Expected total moving time: {} s with pick: {}, round: {}, place = {}'.format( print('-Expected total moving time: {} s with pick: {}, round: {}, place = {}'.format(
total_pickup_time + total_round_time + total_place_time, total_pickup_time, total_round_time, info.pickup_time + info.round_time + info.place_time, info.pickup_time, info.round_time,
total_place_time)) info.place_time))
print('-Expected total operation time: {} s'.format(total_operation_time)) print('-Expected total operation time: {} s'.format(info.operation_time))
if minutes > 0: if minutes > 0:
print('-Mounting time estimation: {:d} min {} s {:2d} ms ({:.3f}s)'.format(minutes, seconds, millisecond, print('-Mounting time estimation: {:d} min {} s {:2d} ms ({:.3f}s)'.format(minutes, seconds, millisecond,
total_time)) info.total_time))
else: else:
print('-Mounting time estimation: {} s {:2d} ms ({:.3f}s)'.format(seconds, millisecond, total_time)) print('-Mounting time estimation: {} s {:2d} ms ({:.3f}s)'.format(seconds, millisecond, info.total_time))
return total_time return info
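Every head move in the evaluation above is timed as max(axis_moving_time(dx, 0), axis_moving_time(dy, 1)): both gantry axes travel simultaneously, so the slower axis dominates, and distances are accumulated with the same Chebyshev-style max. The real axis_moving_time is defined elsewhere in the repository and is not part of this diff; a rough stand-in with a trapezoidal velocity profile and placeholder dynamics only illustrates the shape of such a model:

def axis_moving_time_sketch(distance, v_max=1500.0, acc=20000.0):
    # placeholder dynamics: v_max in mm/s, acc in mm/s^2 (not the machine's real values)
    d = abs(distance)
    d_ramp = v_max ** 2 / acc              # distance spent accelerating plus braking
    if d <= d_ramp:                        # short move, v_max never reached (triangular profile)
        return 2.0 * (d / acc) ** 0.5
    return 2.0 * v_max / acc + (d - d_ramp) / v_max   # trapezoidal profile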
View File
@@ -2,10 +2,39 @@ from base_optimizer.optimizer_common import *
def load_data(filename: str, default_feeder_limit=1, load_cp_data=True, load_feeder_data=True, cp_auto_register=False): def load_data(filename: str, default_feeder_limit=1, load_cp_data=True, load_feeder_data=True, cp_auto_register=False):
# read the PCB data # read the PCB data
filename = 'data/' + filename filename = 'data/' + filename
pcb_data = pd.DataFrame(pd.read_csv(filepath_or_buffer=filename, sep='\t', header=None)) part_content, step_content = False, False
part_start_line, step_start_line, part_end_line, step_end_line = -1, -1, -1, -1
line_counter = 0
with open(filename, 'r') as file:
line = file.readline()
while line:
if line == '[Part]\n':
part_content = True
part_start_line = line_counter
elif line == '[Step]\n':
step_content = True
step_start_line = line_counter
elif line == '\n':
if part_content:
part_content = False
part_end_line = line_counter
elif step_content:
step_content = False
step_end_line = line_counter
line_counter += 1
line = file.readline()
if part_content:
part_end_line = line_counter
elif step_content:
step_end_line = line_counter
pcb_data = pd.DataFrame(
pd.read_csv(filepath_or_buffer=filename, skiprows=step_start_line + 1, nrows=step_end_line - step_start_line + 1,
sep='\t', header=None))
if len(pcb_data.columns) <= 17: if len(pcb_data.columns) <= 17:
step_col = ["ref", "x", "y", "z", "r", "part", "desc", "fdr", "nz", "hd", "cs", "cy", "sk", "bl", "ar", step_col = ["ref", "x", "y", "z", "r", "part", "desc", "fdr", "nz", "hd", "cs", "cy", "sk", "bl", "ar",
"pl", "lv"] "pl", "lv"]
@@ -21,20 +50,25 @@ def load_data(filename: str, default_feeder_limit=1, load_cp_data=True, load_fee
# coordinate-system handling # coordinate-system handling
# pcb_data = pcb_data.sort_values(by = ['x', 'y'], ascending = True) # pcb_data = pcb_data.sort_values(by = ['x', 'y'], ascending = True)
# pcb_data["x"] = pcb_data["x"].apply(lambda x: -x) # pcb_data["x"] = pcb_data["x"].apply(lambda x: -100+x)
# registered-component check # registered-component check
part_feeder_assign = defaultdict(set) part_feeder_assign = defaultdict(set)
part_col = ["part", "desc", "fdr", "nz", 'camera', 'group', 'feeder-limit', 'points'] part_col = ["part", "fdr", "nz", 'feeder-limit']
try: try:
if load_cp_data: if part_start_line != -1:
component_data = pd.DataFrame(pd.read_csv(filepath_or_buffer='component.txt', sep='\t', header=None), component_data = pd.DataFrame(
columns=part_col) pd.read_csv(filepath_or_buffer=filename, sep='\t', header=None, skiprows=part_start_line + 1,
nrows=part_end_line - part_start_line - 1))
component_data.columns = part_col
else: else:
component_data = pd.DataFrame(columns=part_col) component_data = pd.DataFrame(columns=part_col)
except: except:
component_data = pd.DataFrame(columns=part_col) component_data = pd.DataFrame(columns=part_col)
component_data['points'] = 0
part_col.append('points')
for _, data in pcb_data.iterrows(): for _, data in pcb_data.iterrows():
part, nozzle = data.part, data.nz.split(' ')[1] part, nozzle = data.part, data.nz.split(' ')[1]
slot = data['fdr'].split(' ')[0] slot = data['fdr'].split(' ')[0]
@@ -43,10 +77,10 @@ def load_data(filename: str, default_feeder_limit=1, load_cp_data=True, load_fee
raise Exception("unregistered component: " + component_data['part'].values) raise Exception("unregistered component: " + component_data['part'].values)
else: else:
component_data = pd.concat([component_data, pd.DataFrame( component_data = pd.concat([component_data, pd.DataFrame(
[part, '', 'SM8', nozzle, '飞行相机1', 'CHIP-Rect', default_feeder_limit, 0], index=part_col).T], [part, 'SM8', nozzle, default_feeder_limit, 0], index=part_col).T],
ignore_index=True) ignore_index=True)
# warning_info = 'register component ' + part + ' with default feeder type' warning_info = 'register component ' + part + ' with default feeder type'
# warnings.warn(warning_info, UserWarning) warnings.warn(warning_info, UserWarning)
part_index = component_data[component_data['part'] == part].index.tolist()[0] part_index = component_data[component_data['part'] == part].index.tolist()[0]
part_feeder_assign[part].add(slot) part_feeder_assign[part].add(slot)
component_data.loc[part_index, 'points'] += 1 component_data.loc[part_index, 'points'] += 1
@@ -54,7 +88,8 @@ def load_data(filename: str, default_feeder_limit=1, load_cp_data=True, load_fee
if nozzle != 'A' and component_data.loc[part_index, 'nz'] != nozzle: if nozzle != 'A' and component_data.loc[part_index, 'nz'] != nozzle:
warning_info = 'the nozzle type of component ' + part + ' is not consistent with the pcb data' warning_info = 'the nozzle type of component ' + part + ' is not consistent with the pcb data'
warnings.warn(warning_info, UserWarning) warnings.warn(warning_info, UserWarning)
# drop components with zero placement points
component_data = component_data[component_data['points'] != 0].reset_index(drop=True)
for idx, data in component_data.iterrows(): for idx, data in component_data.iterrows():
if data['fdr'][0:3] == 'SME': # electric and pneumatic feeders share the same parameters if data['fdr'][0:3] == 'SME': # electric and pneumatic feeders share the same parameters
component_data.at[idx, 'fdr'] = data['fdr'][0:2] + data['fdr'][3:] component_data.at[idx, 'fdr'] = data['fdr'][0:2] + data['fdr'][3:]
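The reworked loader no longer reads a separate component.txt: both the component table and the placement steps are expected in one file, inside a '[Part]' and a '[Step]' section, each terminated by a blank line. A tiny illustrative input written from Python (all values are placeholders, and the real files may carry more columns):

import os

demo = (
    '[Part]\n'
    'C1\tSM8\tCN065\t1\n'        # columns: part, fdr, nz, feeder-limit
    'C2\tSM8\tCN140\t1\n'
    '\n'
    '[Step]\n'
    # columns: ref, x, y, z, r, part, desc, fdr, nz, hd, cs, cy, sk, bl, ar, pl, lv
    'R1\t12.5\t8.0\t0\t0\tC1\t\tA\t1-0 CN065\t1\t1\t1\t0\t1\t1\t1\tL0\n'
    'R2\t20.0\t8.0\t0\t0\tC2\t\tA\t1-0 CN140\t1\t1\t1\t0\t1\t1\t1\tL0\n'
    '\n'
)
os.makedirs('data', exist_ok=True)
with open('data/demo.txt', 'w') as f:
    f.write(demo)
# load_data('demo.txt') should then pick up both sections; its return value is outside this diff.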
241
estimator.py Normal file
View File
@@ -0,0 +1,241 @@
from generator import *
from base_optimizer.optimizer_interface import *
class Net(torch.nn.Module):
def __init__(self, input_size, hidden_size=1000, output_size=1):
super(Net, self).__init__()
self.fc1 = torch.nn.Linear(input_size, hidden_size)
self.relu = torch.nn.ReLU() # activation function
self.fc2 = torch.nn.Linear(hidden_size, hidden_size)
# self.relu1 = torch.nn.ReLU() # activation function
self.fc3 = torch.nn.Linear(hidden_size, output_size)
def forward(self, x):
x = self.fc1(x)
# x = self.relu(x)
x = self.fc2(x)
x = self.relu(x)
x = self.fc3(x)
return x
class LSTMNet(torch.nn.Module):
def __init__(self, input_size, hidden_size=256, output_size=1, num_layers=1):
super(LSTMNet, self).__init__()
self.lstm = torch.nn.LSTM(input_size, hidden_size, num_layers)
self.fc = torch.nn.Linear(hidden_size, output_size)
def forward(self, x):
x, _ = self.lstm(x) # x is input with size (seq_len, batch_size, input_size)
x = self.fc(x)
return x[-1, :, ]
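A quick shape note on LSTMNet: torch.nn.LSTM is constructed with the default batch_first=False, so forward() expects input of shape (seq_len, batch_size, input_size) and keeps only the prediction of the last time step. A hypothetical check (the feature size is arbitrary):

import torch
from estimator import LSTMNet   # assumes the class as added in estimator.py above

net = LSTMNet(input_size=16)    # 16 is a placeholder feature size
x = torch.randn(10, 4, 16)      # 10 time steps, batch of 4 samples
y = net(x)                      # only the last step is kept -> shape (4, 1)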
class Estimator:
def __init__(self, task_block_weight=None):
self.data_mgr = DataMgr()
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
self.net = Net(input_size=self.data_mgr.get_feature(), output_size=1).to(device)
self.net.load_state_dict(torch.load('model/net_model.pth'))
self.task_block_weight = task_block_weight
with open('model/lr_model.pkl', 'rb') as f:
self.lr = pickle.load(f)
def convert(self, pcb_data, component_data, assignment_result):
machine_num, component_num = len(assignment_result), len(component_data)
component_machine_index = [0 for _ in range(component_num)]
machine_points = [[[] for _ in range(component_num)] for _ in range(machine_num)]
component2idx = defaultdict(int)
for i, data in component_data.iterrows():
component2idx[data.part] = i
for i in range(len(pcb_data)):
part_index = component2idx[pcb_data.iat[i, 5]]
while True:
machine_index = component_machine_index[part_index]
if assignment_result[machine_index][part_index] == len(machine_points[machine_index][part_index]):
component_machine_index[part_index] += 1
machine_index += 1
else:
break
for _, data in pcb_data.iterrows():
part_index = component2idx[data.part]
while True:
machine_index = component_machine_index[part_index]
if assignment_result[machine_index][part_index] == len(machine_points[machine_index][part_index]):
component_machine_index[part_index] += 1
machine_index += 1
else:
break
machine_points[machine_index][part_index].append([data.x, data.y])
res = []
for machine_index in range(machine_num):
cp_points, cp_nozzle = defaultdict(int), defaultdict(str)
cp_width, cp_height = defaultdict(float), defaultdict(float)
board_right_pos, board_left_pos, board_top_pos, board_bottom_pos = None, None, None, None
for part_index in range(component_num):
if assignment_result[machine_index][part_index] == 0:
continue
cp_points[part_index] = assignment_result[machine_index][part_index]
cp_nozzle[part_index] = component_data.iloc[part_index]['nz']
cp_right_pos, cp_left_pos = max([p[0] for p in machine_points[machine_index][part_index]]), min(
[p[0] for p in machine_points[machine_index][part_index]])
cp_top_pos, cp_bottom_pos = max([p[1] for p in machine_points[machine_index][part_index]]), min(
[p[1] for p in machine_points[machine_index][part_index]])
cp_width[part_index] = cp_right_pos - cp_left_pos
cp_height[part_index] = cp_top_pos - cp_bottom_pos
if board_right_pos is None or cp_right_pos > board_right_pos:
board_right_pos = cp_right_pos
if board_left_pos is None or cp_left_pos < board_left_pos:
board_left_pos = cp_left_pos
if board_top_pos is None or cp_top_pos > board_top_pos:
board_top_pos = cp_top_pos
if board_bottom_pos is None or cp_bottom_pos < board_bottom_pos:
board_bottom_pos = cp_bottom_pos
res.append([cp_points, cp_nozzle, cp_width, cp_height, board_right_pos - board_left_pos,
board_top_pos - board_bottom_pos])
return res
def neural_network(self, cp_points, cp_nozzle, board_width, board_height):
encoding = np.array(self.data_mgr.encode(cp_points, cp_nozzle, board_width, board_height))
encoding = torch.from_numpy(encoding.reshape((-1, np.shape(encoding)[0]))).float().to("cuda")
return self.net(encoding)[0, 0].item()
def heuristic_reconfiguration(self, cp_points, cp_nozzle):
task_block_number, total_point_number = 0, sum(cp_points.values())
nozzle_points, nozzle_heads = defaultdict(int), defaultdict(int)
for part, points in cp_points.items():
nozzle_points[cp_nozzle[part]] += points
nozzle_heads[cp_nozzle[part]] = 1
remaining_head = max_head_index - len(nozzle_heads)
nozzle_fraction = []
for nozzle, points in nozzle_points.items():
val = remaining_head * points / total_point_number
nozzle_heads[nozzle] += math.floor(val)
nozzle_fraction.append([nozzle, val - math.floor(val)])
remaining_head = max_head_index - sum(nozzle_heads.values())
nozzle_fraction.sort(key=lambda x: x[1], reverse=True) # hand the leftover heads to the largest fractional remainders first
nozzle_fraction_index = 0
while remaining_head > 0:
nozzle_heads[nozzle_fraction[nozzle_fraction_index][0]] += 1
nozzle_fraction_index += 1
remaining_head -= 1
for nozzle, heads_number in nozzle_heads.items():
task_block_number = max(task_block_number, math.ceil(nozzle_points[nozzle] / heads_number))
return (t_pick + t_place) * total_point_number + task_block_number * self.task_block_weight
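The allocation above first reserves one head per nozzle type and then spreads the remaining heads proportionally to their point counts, with leftovers going to the largest fractional remainders. A small worked check of that first step, assuming a six-head gantry (max_head_index = 6):

import math
from collections import defaultdict

cp_points = {'C0': 120, 'C1': 60, 'C2': 20}
cp_nozzle = {'C0': 'CN065', 'C1': 'CN065', 'C2': 'CN140'}

nozzle_points, nozzle_heads = defaultdict(int), defaultdict(int)
for part, points in cp_points.items():
    nozzle_points[cp_nozzle[part]] += points
    nozzle_heads[cp_nozzle[part]] = 1            # every nozzle type gets one head first

remaining = 6 - len(nozzle_heads)                # 4 heads left to distribute
total = sum(cp_points.values())                  # 200 points
for nozzle, points in nozzle_points.items():
    nozzle_heads[nozzle] += math.floor(remaining * points / total)
# CN065: 1 + floor(4 * 180 / 200) = 4 heads, CN140: 1 + floor(4 * 20 / 200) = 1 head;
# the single leftover head then goes to the larger fractional remainder (CN065).
print(nozzle_heads)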
def heuristic_genetic(self, cp_points, cp_nozzle):
nozzle_points, nozzle_component_points = defaultdict(int), defaultdict(list)
for idx, nozzle in cp_nozzle.items():
if cp_points[idx] == 0:
continue
nozzle_points[nozzle] += cp_points[idx]
nozzle_component_points[cp_nozzle[idx]] = [0] * len(cp_points)
for idx, (part_index, points) in enumerate(cp_points.items()):
nozzle_component_points[cp_nozzle[part_index]][idx] = points
total_points = sum(cp_points.values()) # num of placement points
ul = math.ceil(len(nozzle_points) * 1.0 / max_head_index) - 1 # num of nozzle set
# assignments of nozzles to heads
wl = 0 # num of workload
total_heads = (1 + ul) * max_head_index - len(nozzle_points)
nozzle_heads = defaultdict(int)
for nozzle in nozzle_points.keys():
if nozzle_points[nozzle] == 0:
continue
nozzle_heads[nozzle] = math.floor(nozzle_points[nozzle] * 1.0 / total_points * total_heads)
nozzle_heads[nozzle] += 1
total_heads = (1 + ul) * max_head_index
for heads in nozzle_heads.values():
total_heads -= heads
while True:
nozzle = max(nozzle_heads, key=lambda x: nozzle_points[x] / nozzle_heads[x])
if total_heads == 0:
break
nozzle_heads[nozzle] += 1
total_heads -= 1
# averagely assign placements to heads
heads_placement = []
for nozzle in nozzle_heads.keys():
points = math.floor(nozzle_points[nozzle] / nozzle_heads[nozzle])
heads_placement += [[nozzle, points] for _ in range(nozzle_heads[nozzle])]
nozzle_points[nozzle] -= (nozzle_heads[nozzle] * points)
for idx in range(len(heads_placement) - 1, -1, -1):
if nozzle_points[nozzle] <= 0:
break
nozzle_points[nozzle] -= 1
heads_placement[idx][1] += 1
heads_placement = sorted(heads_placement, key=lambda x: x[1], reverse=True)
# the number of pick-up operations
# (under the assumption of the number of feeder available for each comp. type is equal 1)
pl = 0
heads_placement_points = [0 for _ in range(max_head_index)]
while True:
head_assign_point = []
for head in range(max_head_index):
if heads_placement_points[head] != 0 or heads_placement[head][1] == 0:
continue
nozzle, points = heads_placement[head]
max_comp_index = np.argmax(nozzle_component_points[nozzle])
heads_placement_points[head] = min(points, nozzle_component_points[nozzle][max_comp_index])
nozzle_component_points[nozzle][max_comp_index] -= heads_placement_points[head]
head_assign_point.append(heads_placement_points[head])
min_points_list = list(filter(lambda x: x > 0, heads_placement_points))
if len(min_points_list) == 0 or len(head_assign_point) == 0:
break
pl += max(head_assign_point)
for head in range(max_head_index):
heads_placement[head][1] -= min(min_points_list)
heads_placement_points[head] -= min(min_points_list)
# every max_head_index heads in the non-decreasing order are grouped together as nozzle set
for idx in range(len(heads_placement) // max_head_index):
wl += heads_placement[idx][1]
return T_pp * total_points + T_tr * wl + T_nc * ul + T_pl * pl
def linear_regression(self, pcb_data, component_data):
component_result, cycle_result, feeder_slot_result = feeder_priority_assignment(component_data, pcb_data,
hinter=False)
info = placement_info_evaluation(component_data, pcb_data, component_result, cycle_result, feeder_slot_result)
regression_info = [[info.cycle_counter, info.nozzle_change_counter, info.anc_round_counter,
info.pickup_counter, info.total_points]]
return self.lr.predict(regression_info)[0, 0]
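A rough usage sketch of the estimator class above. Both model/net_model.pth and model/lr_model.pkl are loaded in __init__ but are not created by this commit, and neural_network() sends the encoding to 'cuda', so the sketch assumes a trained model directory and a CUDA device are available:

from collections import defaultdict
from estimator import Estimator

est = Estimator(task_block_weight=5)     # the weight value here is a placeholder
cp_points = defaultdict(int, {0: 120, 1: 60, 2: 20})
cp_nozzle = defaultdict(str, {0: 'CN065', 1: 'CN065', 2: 'CN140'})

t_net = est.neural_network(cp_points, cp_nozzle, board_width=400, board_height=200)
t_gen = est.heuristic_genetic(cp_points, cp_nozzle)
print(f'network estimate: {t_net:.3f} s, genetic surrogate score: {t_gen:.1f}')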
View File
@@ -11,31 +11,31 @@ class DataMgr:
self.min_placement_points = 100 self.min_placement_points = 100
self.max_placement_points = 800 self.max_placement_points = 800
self.max_component_types = 30 self.max_component_types = 40
self.default_feeder_limit = 1 self.default_feeder_limit = 1
self.nozzle_type_list = ['CN065', 'CN140', 'CN220', 'CN040'] self.max_nozzle_types = 4
# self.x_range = [50, 100, 150, 200, 300, 400, 500] self.x_range = [50, 100, 150, 200, 300, 400, 500]
# self.y_range = [50, 100, 150, 200, 300, 400, 500] self.y_range = [50, 100, 150, 200, 300, 400, 500]
self.x_range = [400]
self.y_range = [200]
self.counter = 0 self.counter = 0
self.update = 10 self.update = 1
self.pre_file = None self.pre_file = None
self.part_col = ["part", "desc", "fdr", "nz", 'camera', 'group', 'feeder-limit', 'points'] self.part_col = ["part", "desc", "fdr", "nz", 'camera', 'group', 'feeder-limit', 'points']
self.component_data = pd.DataFrame(columns=self.part_col) # the component list update for several rounds self.component_data = pd.DataFrame(columns=self.part_col) # the component list update for several rounds
def generator(self, mode='Train'): def generator(self, mode='Train'):
boundary = [random.choice(self.x_range), random.choice(self.y_range)] boundary = [random.choice(self.x_range), random.choice(self.y_range)]
if boundary[0] < boundary[-1]:
boundary[0], boundary[-1] = boundary[-1], boundary[0]
nozzle_type_list = random.sample(['CN065', 'CN220', 'CN040', 'CN140'], self.max_nozzle_types)
# determine the nozzle type of component # determine the nozzle type of component
if self.counter % 10 == 0 or mode == 'test': if self.counter % self.get_update_round() == 0 or mode == 'test':
self.component_data = self.component_data.loc[[]] self.component_data = self.component_data.loc[[]]
total_points = random.randint(self.min_placement_points, self.max_placement_points) total_points = random.randint(self.min_placement_points, self.max_placement_points)
total_nozzles = random.randint(1, len(self.nozzle_type_list)) total_nozzles = random.randint(1, self.max_nozzle_types)
selected_nozzle = random.sample(self.nozzle_type_list, total_nozzles) selected_nozzle = random.sample(nozzle_type_list, total_nozzles)
for cp_idx in range(min(random.randint(1, self.max_component_types), total_points)): for cp_idx in range(min(random.randint(1, self.max_component_types), total_points)):
part, nozzle = 'C' + str(cp_idx), random.choice(selected_nozzle) part, nozzle = 'C' + str(cp_idx), random.choice(selected_nozzle)
self.component_data = pd.concat([self.component_data, pd.DataFrame( self.component_data = pd.concat([self.component_data, pd.DataFrame(
@@ -63,17 +63,19 @@ class DataMgr:
return pcb_data, self.component_data return pcb_data, self.component_data
def recorder(self, file_handle, info: OptInfo, pcb_data, component_data): def recorder(self, file_handle, info: OptInfo, pcb_data, component_data):
lineinfo = '{:.6f}'.format(info.placement_time) + '\t' + str(info.cycle_counter) + '\t' + str( # 7 fields: total time, cycle count, nozzle change count, ANC round-trip count, pick-up count, pick-up path length, placement path length
info.nozzle_change_counter) + '\t' + str(info.pickup_counter) + '\t' + '{:.3f}'.format( lineinfo = '{:.3f}'.format(info.total_time) + '\t' + str(info.cycle_counter) + '\t' + str(
info.pickup_movement) + '\t' + '{:.3f}'.format(info.placement_movement) info.nozzle_change_counter) + '\t' + str(info.anc_round_counter) + '\t' + str(
info.pickup_counter) + '\t' + '{:.3f}'.format(info.pickup_distance) + '\t' + '{:.3f}'.format(
info.place_distance)
# 2 fields: PCB dimensions
lineinfo += '\t' + '{:.3f}'.format(pcb_data['x'].max() - pcb_data['x'].min()) + '\t' + '{:.3f}'.format( lineinfo += '\t' + '{:.3f}'.format(pcb_data['x'].max() - pcb_data['x'].min()) + '\t' + '{:.3f}'.format(
pcb_data['y'].max() - pcb_data['y'].min()) pcb_data['y'].max() - pcb_data['y'].min())
part_xposition, part_yposition = defaultdict(list), defaultdict(list) # part_position = defaultdict(list)
for _, data in pcb_data.iterrows(): # for _, data in pcb_data.iterrows():
part_xposition[data['part']].append(data['x']) # part_position[data['part']].append([data['x'], data['y']])
part_yposition[data['part']].append(data['y'])
point_counter, component_counter = 0, 0 point_counter, component_counter = 0, 0
nozzle_type = set() nozzle_type = set()
@@ -84,17 +86,20 @@ class DataMgr:
point_counter += data.points point_counter += data.points
component_counter += 1 component_counter += 1
# 3 fields: total points, total component types, total nozzle types
lineinfo += '\t' + str(point_counter) + '\t' + str(component_counter) + '\t' + str(len(nozzle_type)) lineinfo += '\t' + str(point_counter) + '\t' + str(component_counter) + '\t' + str(len(nozzle_type))
# 5 x (number of component types) fields: part name, nozzle type, point count, layout width, layout height
for _, data in component_data.iterrows(): for _, data in component_data.iterrows():
if data.points == 0:
continue
lineinfo += '\t' + data.part + '\t' + data.nz + '\t' + str(data.points) lineinfo += '\t' + data.part + '\t' + data.nz + '\t' + str(data.points)
# lineinfo += '\t' + str( # lineinfo += '\t' + '{:.3f}'.format(np.ptp([pos[0] for pos in part_position[data.part]]))
# round((np.average(part_xposition[data.part]) + stopper_pos[0] - slotf1_pos[0]) / slot_interval)) # lineinfo += '\t' + '{:.3f}'.format(np.ptp([pos[1] for pos in part_position[data.part]]))
lineinfo += '\n' lineinfo += '\n'
file_handle.write(lineinfo) file_handle.write(lineinfo)
def saver(self, file_path: str, pcb_data): def saver(self, file_path: str, pcb_data):
lineinfo = '' lineinfo = ''
for _, data in pcb_data.iterrows(): for _, data in pcb_data.iterrows():
@@ -112,43 +117,260 @@ class DataMgr:
os.remove(self.pre_file) os.remove(self.pre_file)
self.pre_file = None self.pre_file = None
def encode(self, cp_points: defaultdict[str], cp_nozzle: defaultdict[int], width, height): def encode(self, cp_points: defaultdict[str], cp_nozzle: defaultdict[str], board_width, board_height):
cp2nz = defaultdict(int)
for idx, nozzle in enumerate(self.nozzle_type_list):
cp2nz[nozzle] = idx
# === general info === # === general info ===
total_points = sum(points for points in cp_points.values()) total_points = sum(points for points in cp_points.values())
total_component_types, total_nozzle_types = len(cp_points.keys()), len(set(cp_nozzle.values())) total_component_types, total_nozzle_types = len(cp_points.keys()), len(set(cp_nozzle.values()))
data = [total_points, total_component_types, total_nozzle_types] data = [total_points, total_component_types, total_nozzle_types]
data.extend([width, height]) data.extend([board_width, board_height])
# === heuristic info ===
cycle, nozzle_change, anc_move, pickup = self.heuristic_estimator(cp_points, cp_nozzle)
data.extend([cycle, nozzle_change, anc_move, pickup])
# === nozzle info === # === nozzle info ===
data_slice = [0 for _ in range(len(self.nozzle_type_list))] nozzle_points = defaultdict(int)
for component, points in cp_points.items(): for cp_idx, nozzle in cp_nozzle.items():
idx = cp2nz[cp_nozzle[component]] nozzle_points[cp_nozzle[cp_idx]] += cp_points[cp_idx] # points for different nozzle type
data_slice[idx] += points nozzle_items = [[nozzle, points] for nozzle, points in nozzle_points.items()]
data.extend(data_slice) nozzle_items = sorted(nozzle_items, key=lambda x: x[1], reverse=True)
nz2idx = defaultdict(int)
nozzle_slice = [0 for _ in range(self.max_nozzle_types)]
for idx, [nozzle, points] in enumerate(nozzle_items):
nz2idx[nozzle] = idx
nozzle_slice[idx] = points
data.extend(nozzle_slice)
# === component info === # === component info ===
# cp_items = [[component, points] for component, points in cp_points.items()]
# cp_items = sorted(cp_items, key=lambda x: (x[1], nz2idx[cp_nozzle[x[0]]] * 0.1 + x[1]), reverse=True)
# for component, points in cp_items:
# nozzle = cp_nozzle[component]
#
# data_slice = [0 for _ in range(self.max_nozzle_types)]
# data_slice[nz2idx[nozzle]] = points
# data.extend(data_slice)
#
# assert self.max_component_types >= total_component_types
# for _ in range(self.max_component_types - total_component_types):
# data.extend([0 for _ in range(self.max_nozzle_types)])
# === new component info ===
comp_data_slice = defaultdict(list)
for idx in range(self.max_nozzle_types):
comp_data_slice[idx] = []
cp_items = [[component, points] for component, points in cp_points.items()] cp_items = [[component, points] for component, points in cp_points.items()]
cp_items = sorted(cp_items, key=lambda x: (-x[1], x[0])) cp_items = sorted(cp_items, key=lambda x: (x[1], nz2idx[cp_nozzle[x[0]]] * 0.1 + x[1]), reverse=True)
for component, points in cp_items: for component, points in cp_items:
nozzle = cp_nozzle[component] nozzle = cp_nozzle[component]
comp_data_slice[nz2idx[nozzle]].append(points)
data_slice = [0 for _ in range(len(self.nozzle_type_list))] data_slice = [0 for _ in range(self.max_nozzle_types)]
data_slice[cp2nz[nozzle]] = points for idx in range(self.max_nozzle_types):
data.extend(data_slice) data_slice[idx] = len(comp_data_slice[idx])
data.extend(data_slice)
for _ in range(self.max_component_types - total_component_types): for idx in range(self.max_nozzle_types):
data.extend([0 for _ in range(len(self.nozzle_type_list))]) comp_data_slice[idx].extend([0 for _ in range(self.max_component_types - len(comp_data_slice[idx]))])
data.extend(comp_data_slice[idx])
return data return data
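A sanity check relating encode() to get_feature() further below: 5 general values (points, component types, nozzle types, board width, board height), 4 heuristic estimates, one nozzle-point slice of length max_nozzle_types, one per-nozzle component-count slice of the same length, and max_nozzle_types point lists padded to max_component_types, i.e. (max_component_types + 2) * max_nozzle_types + 5 + 4 entries in total. A hypothetical check (the module name is an assumption, taken from the estimator.py imports):

from collections import defaultdict
from generator import DataMgr    # estimator.py reaches DataMgr through generator

mgr = DataMgr()
cp_points = defaultdict(int, {'C0': 50, 'C1': 30, 'C2': 20})
cp_nozzle = defaultdict(str, {'C0': 'CN065', 'C1': 'CN140', 'C2': 'CN065'})
vec = mgr.encode(cp_points, cp_nozzle, board_width=400, board_height=200)
assert len(vec) == mgr.get_feature()    # (40 + 2) * 4 + 5 + 4 = 177 entries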
def heuristic_estimator(self, cp_points, cp_nozzle):
nozzle_heads, nozzle_points = defaultdict(int), defaultdict(int)
for idx, points in cp_points.items():
if points == 0:
continue
nozzle = cp_nozzle[idx]
nozzle_points[nozzle] += points
nozzle_heads[nozzle] = 1
anc_round_counter = 0
while sum(nozzle_heads.values()) != max_head_index:
max_cycle_nozzle = None
for nozzle, head_num in nozzle_heads.items():
if max_cycle_nozzle is None or nozzle_points[nozzle] / head_num > nozzle_points[max_cycle_nozzle] / \
nozzle_heads[max_cycle_nozzle]:
max_cycle_nozzle = nozzle
assert max_cycle_nozzle is not None
nozzle_heads[max_cycle_nozzle] += 1
head_nozzle_assignment, min_cost = None, None
# generate initial nozzle group
nozzle_group = []
# averagely assign for the same type of nozzles, and generate nozzle group
nozzle_points_cpy = copy.deepcopy(nozzle_points)
for nozzle, heads in nozzle_heads.items():
points = nozzle_points_cpy[nozzle] // heads
for _ in range(heads):
nozzle_group.append([nozzle, points])
nozzle_points_cpy[nozzle] -= heads * points
for idx, [nozzle, _] in enumerate(nozzle_group):
if nozzle_points_cpy[nozzle]:
nozzle_group[idx][1] += 1
nozzle_points_cpy[nozzle] -= 1
while True:
# assign nozzle group to each head
nozzle_group.sort(key=lambda x: -x[1])
tmp_head_nozzle_assignment = []
head_total_points = [0 for _ in range(max_head_index)]
for idx, nozzle_item in enumerate(nozzle_group):
if idx < max_head_index:
tmp_head_nozzle_assignment.append([nozzle_item.copy()])
head_total_points[idx] += nozzle_item[1]
else:
min_head = np.argmin(head_total_points)
tmp_head_nozzle_assignment[min_head].append(nozzle_item.copy())
head_total_points[min_head] += nozzle_item[1]
cost = t_cycle * max(head_total_points)
for head in range(max_head_index):
for cycle in range(len(tmp_head_nozzle_assignment[head])):
if cycle + 1 == len(tmp_head_nozzle_assignment[head]):
if tmp_head_nozzle_assignment[head][cycle][0] != tmp_head_nozzle_assignment[head][-1][0]:
cost += t_nozzle_change
else:
if tmp_head_nozzle_assignment[head][cycle][0] != tmp_head_nozzle_assignment[head][cycle + 1][0]:
cost += t_nozzle_change
while True:
min_head, max_head = np.argmin(head_total_points), np.argmax(head_total_points)
min_head_nozzle, max_head_nozzle = tmp_head_nozzle_assignment[min_head][-1][0], \
tmp_head_nozzle_assignment[max_head][-1][0]
if min_head_nozzle == max_head_nozzle:
break
min_head_list, max_head_list = [min_head], [max_head]
minmax_head_points = 0
for head in range(max_head_index):
if head in min_head_list or head in max_head_list:
minmax_head_points += head_total_points[head]
continue
# the max/min heads with the sum nozzle type
if tmp_head_nozzle_assignment[head][-1][0] == tmp_head_nozzle_assignment[min_head][-1][0]:
min_head_list.append(head)
minmax_head_points += head_total_points[head]
if tmp_head_nozzle_assignment[head][-1][0] == tmp_head_nozzle_assignment[max_head][-1][0]:
max_head_list.append(head)
minmax_head_points += head_total_points[head]
# todo: restriction of available nozzle
# the reduction of cycles is not offset the cost of nozzle change
average_points = minmax_head_points // (len(min_head_list) + len(max_head_list))
reminder_points = minmax_head_points % (len(min_head_list) + len(max_head_list))
max_cycle = average_points + (1 if reminder_points > 0 else 0)
for head in range(max_head_index):
if head in min_head_list or head in max_head_list:
continue
max_cycle = max(max_cycle, head_total_points[head])
nozzle_change_counter = 0
for head in min_head_list:
if tmp_head_nozzle_assignment[head][0] == tmp_head_nozzle_assignment[head][-1]:
nozzle_change_counter += 2
else:
nozzle_change_counter += 1
if t_cycle * (max(head_total_points) - max_cycle) < t_nozzle_change * nozzle_change_counter:
break
cost -= t_cycle * (max(head_total_points) - max_cycle) - t_nozzle_change * nozzle_change_counter
required_points = 0 # points of the heavier-loaded nozzle type that still need to be shared out
for head in min_head_list:
points = average_points - head_total_points[head]
tmp_head_nozzle_assignment[head].append([max_head_nozzle, points])
head_total_points[head] = average_points
required_points += points
for head in max_head_list:
tmp_head_nozzle_assignment[head][-1][1] -= required_points // len(max_head_list)
head_total_points[head] -= required_points // len(max_head_list)
required_points -= (required_points // len(max_head_list)) * len(max_head_list)
for head in max_head_list:
if required_points <= 0:
break
tmp_head_nozzle_assignment[head][-1][1] -= 1
head_total_points[head] -= 1
required_points -= 1
if min_cost is None or cost < min_cost:
min_cost = cost
head_nozzle_assignment = copy.deepcopy(tmp_head_nozzle_assignment)
else:
break
# add one more nozzle to the nozzle group
idx, nozzle = 0, nozzle_group[0][0]
for idx, [nozzle_, _] in enumerate(nozzle_group):
if nozzle_ != nozzle:
break
average_points, remainder_points = nozzle_points[nozzle] // (idx + 1), nozzle_points[nozzle] % (idx + 1)
nozzle_group.append([nozzle, 0])
for idx, [nozzle_, _] in enumerate(nozzle_group):
if nozzle_ == nozzle:
nozzle_group[idx][1] = average_points + (1 if remainder_points > 0 else 0)
remainder_points -= 1
cycle_counter, nozzle_change_counter = 0, 0
for head in range(max_head_index):
head_cycle_counter = 0
for cycle in range(len(head_nozzle_assignment[head])):
if cycle + 1 == len(head_nozzle_assignment[head]):
if head_nozzle_assignment[head][0][0] != head_nozzle_assignment[head][-1][0]:
nozzle_change_counter += 1
else:
if head_nozzle_assignment[head][cycle][0] != head_nozzle_assignment[head][cycle + 1][0]:
nozzle_change_counter += 1
head_cycle_counter += head_nozzle_assignment[head][cycle][1]
cycle_counter = max(cycle_counter, head_cycle_counter)
# === estimate of the component pick-up count ===
cp_info = []
for idx, points in cp_points.items():
if points == 0:
continue
feeder_limit = 1 # todo: for now only the single-nozzle case is considered
reminder_points = points % feeder_limit
for _ in range(feeder_limit):
cp_info.append([idx, points // feeder_limit + (1 if reminder_points > 0 else 0), cp_nozzle[idx]])
reminder_points -= 1
cp_info.sort(key=lambda x: -x[1])
nozzle_level, nozzle_counter = defaultdict(int), defaultdict(int)
level_points = defaultdict(int)
for info in cp_info:
nozzle = info[2]
if nozzle_counter[nozzle] and nozzle_counter[nozzle] % nozzle_heads[nozzle] == 0:
nozzle_level[nozzle] += 1
level = nozzle_level[nozzle]
level_points[level] = max(level_points[level], info[1])
nozzle_counter[nozzle] += 1
pickup_counter = sum(points for points in level_points.values())
return cycle_counter, nozzle_change_counter, anc_round_counter, pickup_counter
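The four values returned here become the '=== heuristic info ===' slice of encode(). For a single-nozzle workload the outcome is easy to verify by hand (hypothetical numbers; assumes a six-head machine, max_head_index = 6):

from generator import DataMgr    # module name assumed, as above

mgr = DataMgr()
cycle, nz_change, anc, pickup = mgr.heuristic_estimator(
    cp_points={'C0': 90, 'C1': 30}, cp_nozzle={'C0': 'CN065', 'C1': 'CN065'})
# one nozzle type spread over all six heads -> roughly 120 / 6 = 20 estimated cycles,
# no nozzle changes and no extra ANC round trips
print(cycle, nz_change, anc, pickup)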
def decode(self, line_info): def decode(self, line_info):
boundary = [random.choice(self.x_range), random.choice(self.y_range)]
items = line_info.split('\t') items = line_info.split('\t')
total_points, total_component_types = int(items[8]), int(items[9]) board_width, board_height = float(items[7]), float(items[8])
total_points, total_component_types = int(items[9]), int(items[10])
part_col = ["part", "desc", "fdr", "nz", 'camera', 'group', 'feeder-limit', 'points'] part_col = ["part", "desc", "fdr", "nz", 'camera', 'group', 'feeder-limit', 'points']
step_col = ["ref", "x", "y", "z", "r", "part", "desc", "fdr", "nz", "hd", "cs", "cy", "sk", "bl", "ar", "pl", step_col = ["ref", "x", "y", "z", "r", "part", "desc", "fdr", "nz", "hd", "cs", "cy", "sk", "bl", "ar", "pl",
@@ -159,53 +381,67 @@ class DataMgr:
idx = 1 idx = 1
for cp_counter in range(total_component_types): for cp_counter in range(total_component_types):
# todo: temporarily left unchanged here for debugging part, nozzle = items[12 + cp_counter * 3], items[13 + cp_counter * 3]
part, nozzle = items[11 + cp_counter * 3], items[12 + cp_counter * 3] points = int(items[14 + cp_counter * 3])
points = int(items[13 + cp_counter * 3])
pos_list = []
for _ in range(points):
pos_list.append([np.random.uniform(0, board_width), np.random.uniform(0, board_height)])
component_data = pd.concat([component_data, pd.DataFrame( component_data = pd.concat([component_data, pd.DataFrame(
[part, '', 'SM8', nozzle, '飞行相机1', 'CHIP-Rect', self.default_feeder_limit, points], index=part_col).T], [part, '', 'SM8', nozzle, '飞行相机1', 'CHIP-Rect', self.default_feeder_limit, points], index=part_col).T],
ignore_index=True) ignore_index=True)
for _ in range(points): for pos_x, pos_y in pos_list:
pos_x, pos_y = np.random.uniform(0, boundary[0]), np.random.uniform(0, boundary[1]) pcb_data = pd.concat([pcb_data, pd.DataFrame([['R' + str(idx), - pos_x, pos_y,
pcb_data = pd.concat([pcb_data, pd.DataFrame([['R' + str(idx), -pos_x, pos_y, 0.000, 0.000, part, '', 0.000, 0.000, part, '', 'A', '1-0 ' + nozzle, 1, 1, 1, 0,
'A', '1-0 ' + nozzle, 1, 1, 1, 0, 1, 1, 1, 'L0']], 1, 1, 1, 'L0']], columns=pcb_data.columns)],
columns=pcb_data.columns)], ignore_index=True) ignore_index=True)
return pcb_data, component_data return pcb_data, component_data
def loader(self, file_path): def loader(self, file_path):
train_data, time_data = [], [] train_data, time_data = [], []
cycle_data, nozzle_change_data, pickup_data, movement_data, point_data = [], [], [], [], [] cycle_data, nozzle_change_data, anc_move_data, pickup_data, movement_data, point_data = [], [], [], [], [], []
with open(file_path, 'r') as file: with open(file_path, 'r') as file:
line = file.readline() line = file.readline()
while line: while line:
items = line.split('\t') items = line.split('\t')
total_points, total_component_types = float(items[8]), float(items[9])
cp_points, cp_nozzle = defaultdict(int), defaultdict(str)
# cp_width, cp_height = defaultdict(float), defaultdict(float)
for cp_idx in range((len(items) - 12) // 3):
points = int(items[14 + cp_idx * 3])
if points == 0:
continue
component_type, nozzle_type = items[12 + cp_idx * 3], items[13 + cp_idx * 3]
cp_points[component_type], cp_nozzle[component_type] = points, nozzle_type
# cp_width[component_type], cp_height[component_type] = float(items[15 + cp_idx * 5]), float(
# items[16 + cp_idx * 5])
# if len(set(cp_nozzle.values())) > 2 or len(set(cp_nozzle.keys())) > 3:
if len(cp_points.keys()) > 30:
line = file.readline()
continue
cycle_data.append(float(items[1])) cycle_data.append(float(items[1]))
nozzle_change_data.append(float(items[2])) nozzle_change_data.append(float(items[2]))
pickup_data.append(float(items[3])) anc_move_data.append(float(items[3]))
movement_data.append(float(items[4])) pickup_data.append(float(items[4]))
point_data.append(total_points) movement_data.append(float(items[5]) + float(items[6]))
point_data.append(sum(pt for pt in cp_points.values()))
# assembly time data # assembly time data
time_data.append(float(items[0])) time_data.append(float(items[0]))
cp_points, cp_nozzle = defaultdict(int), defaultdict(str) train_data.append(self.encode(cp_points, cp_nozzle, float(items[7]), float(items[8])))
for cp_counter in range(int(total_component_types)): # train_data[-1].extend([cycle_data[-1], nozzle_change_data[-1], anc_move_data[-1], pickup_data[-1]])
component_type, nozzle_type = items[11 + cp_counter * 3], items[12 + cp_counter * 3]
points = int(items[13 + cp_counter * 3])
cp_points[component_type], cp_nozzle[component_type] = points, nozzle_type
train_data.append(self.encode(cp_points, cp_nozzle, float(items[6]), float(items[7])))
line = file.readline() line = file.readline()
return train_data, time_data, cycle_data, nozzle_change_data, pickup_data, movement_data, point_data return train_data, time_data, cycle_data, nozzle_change_data, anc_move_data, pickup_data, point_data
def get_feature(self): def get_feature(self):
return (self.max_component_types + 1) * len(self.nozzle_type_list) + 5 return (self.max_component_types + 2) * self.max_nozzle_types + 5 + 4
def get_update_round(self): def get_update_round(self):
return self.update return self.update
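The loader output above is presumably also what model/lr_model.pkl (consumed by Estimator.linear_regression) is trained from; under that assumption, a minimal fitting sketch using the five interpretable features in the order Estimator.linear_regression builds them:

import pickle
import numpy as np
from sklearn.linear_model import LinearRegression
from generator import DataMgr            # module name assumed, see estimator.py imports

mgr = DataMgr()
samples = mgr.loader('opt/train_data.txt')    # placeholder path to recorded samples
_, time_data, cycle, nz_change, anc_round, pickup, points = samples

x = np.array([cycle, nz_change, anc_round, pickup, points]).T
y = np.array(time_data).reshape(-1, 1)        # keep 2-D so lr.predict(...)[0, 0] works

lr = LinearRegression().fit(x, y)
with open('model/lr_model.pkl', 'wb') as f:
    pickle.dump(lr, f)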
View File
@@ -1,160 +1,54 @@
import random
import numpy as np
from dataloader import * from dataloader import *
from optimizer_genetic import * from optimizer_genetic import line_optimizer_genetic
from optimizer_heuristic import * from optimizer_heuristic import line_optimizer_heuristic
from optimizer_reconfiguration import * from optimizer_reconfiguration import line_optimizer_reconfiguration
from optimizer_hyperheuristic import line_optimizer_hyperheuristic
from base_optimizer.optimizer_interface import * from base_optimizer.optimizer_interface import *
def deviation(data):
assert len(data) > 0
average, variance = sum(data) / len(data), 0
for v in data:
variance += (v - average) ** 2
return variance / len(data)
def optimizer(pcb_data, component_data, line_optimizer, machine_optimizer, machine_number): def optimizer(pcb_data, component_data, line_optimizer, machine_optimizer, machine_number):
if line_optimizer == "heuristic": if machine_number > 1:
assignment_result = assemblyline_optimizer_heuristic(pcb_data, component_data, machine_number) if line_optimizer == 'hyper-heuristic':
elif line_optimizer == "genetic": assignment_result = line_optimizer_hyperheuristic(component_data, pcb_data, machine_number)
assignment_result = assemblyline_optimizer_genetic(pcb_data, component_data, machine_number) elif line_optimizer == "heuristic":
elif line_optimizer == "reconfiguration": assignment_result = line_optimizer_heuristic(component_data, machine_number)
assignment_result = reconfiguration_optimizer(pcb_data, component_data, machine_number) elif line_optimizer == "genetic":
assignment_result = line_optimizer_genetic(component_data, machine_number)
elif line_optimizer == "reconfiguration":
assignment_result = line_optimizer_reconfiguration(component_data, pcb_data, machine_number)
else:
raise ValueError('line optimizer method does not exist')
else: else:
return assignment_result = [[]]
for _, data in component_data.iterrows():
assignment_result[-1].append(data.points)
assignment_result_cpy = copy.deepcopy(assignment_result) partial_pcb_data, partial_component_data = convert_line_assigment(pcb_data, component_data, assignment_result)
placement_points, assembly_info = [], [] assembly_info = []
partial_pcb_data, partial_component_data = defaultdict(pd.DataFrame), defaultdict(pd.DataFrame)
for machine_index in range(machine_number): for machine_index in range(machine_number):
partial_pcb_data[machine_index] = pd.DataFrame(columns=pcb_data.columns) assembly_info.append(
partial_component_data[machine_index] = component_data.copy(deep=True) base_optimizer(machine_index + 1, partial_pcb_data[machine_index], partial_component_data[machine_index],
placement_points.append(sum(assignment_result[machine_index])) feeder_data=pd.DataFrame(columns=['slot', 'part', 'arg']), method=machine_optimizer,
hinter=True))
assert sum(placement_points) == len(pcb_data)
# === averagely assign available feeder ===
for part_index, data in component_data.iterrows():
feeder_limit = data['feeder-limit']
feeder_points = [assignment_result[machine_index][part_index] for machine_index in range(machine_number)]
for machine_index in range(machine_number):
if feeder_points[machine_index] == 0:
continue
arg_feeder = max(math.floor(feeder_points[machine_index] / sum(feeder_points) * data['feeder-limit']), 1)
partial_component_data[machine_index].loc[part_index, 'feeder-limit'] = arg_feeder
feeder_limit -= arg_feeder
for machine_index in range(machine_number):
if feeder_limit <= 0:
break
if feeder_points[machine_index] == 0:
continue
partial_component_data[machine_index].loc[part_index, 'feeder-limit'] += 1
feeder_limit -= 1
for machine_index in range(machine_number):
if feeder_points[machine_index] > 0:
assert partial_component_data[machine_index].loc[part_index, 'feeder-limit'] > 0
# === assign placements ===
component_machine_index = [0 for _ in range(len(component_data))]
for _, data in pcb_data.iterrows():
part_index = component_data[component_data['part'] == data['part']].index.tolist()[0]
while True:
machine_index = component_machine_index[part_index]
if assignment_result[machine_index][part_index] == 0:
component_machine_index[part_index] += 1
machine_index += 1
else:
break
assignment_result[machine_index][part_index] -= 1
partial_pcb_data[machine_index] = pd.concat([partial_pcb_data[machine_index], pd.DataFrame(data).T])
# === adjust the number of available feeders for single optimization separately ===
for machine_index, data in partial_pcb_data.items():
data = data.reset_index(drop=True)
if len(data) == 0:
continue
part_info = [] # part info list(part index, part points, available feeder-num, upper feeder-num)
for part_index, cp_data in partial_component_data[machine_index].iterrows():
if assignment_result_cpy[machine_index][part_index]:
part_info.append(
[part_index, assignment_result_cpy[machine_index][part_index], 1, cp_data['feeder-limit']])
part_info = sorted(part_info, key=lambda x: x[1], reverse=True)
start_index, end_index = 0, min(max_head_index - 1, len(part_info) - 1)
while start_index < len(part_info):
assign_part_point, assign_part_index = [], []
for idx_ in range(start_index, end_index + 1):
for _ in range(part_info[idx_][2]):
assign_part_point.append(part_info[idx_][1] / part_info[idx_][2])
assign_part_index.append(idx_)
variance = deviation(assign_part_point)
while start_index <= end_index:
part_info_index = assign_part_index[np.argmax(assign_part_point)]
if part_info[part_info_index][2] < part_info[part_info_index][3]: # 供料器数目上限的限制
part_info[part_info_index][2] += 1
end_index -= 1
new_assign_part_point, new_assign_part_index = [], []
for idx_ in range(start_index, end_index + 1):
for _ in range(part_info[idx_][2]):
new_assign_part_point.append(part_info[idx_][1] / part_info[idx_][2])
new_assign_part_index.append(idx_)
new_variance = deviation(new_assign_part_point)
if variance < new_variance:
part_info[part_info_index][2] -= 1
end_index += 1
break
variance = new_variance
assign_part_index, assign_part_point = new_assign_part_index.copy(), new_assign_part_point.copy()
else:
break
start_index = end_index + 1
end_index = min(start_index + max_head_index - 1, len(part_info) - 1)
# update available feeder number
max_avl_feeder = max(part_info, key=lambda x: x[2])[2]
for info in part_info:
partial_component_data[machine_index].loc[info[0], 'feeder-limit'] = math.ceil(info[2] / max_avl_feeder)
assembly_info.append(base_optimizer(machine_index + 1, data, partial_component_data[machine_index],
feeder_data=pd.DataFrame(columns=['slot', 'part', 'arg']),
method=machine_optimizer, hinter=True))
with open('model/lr_model.pkl', 'rb') as f:
lr = pickle.load(f)
average_time, standard_deviation_time = sum(
[assembly_info[m].placement_time for m in range(machine_number)]) / machine_number, 0
for machine_index in range(machine_number): for machine_index in range(machine_number):
total_component_types = sum(1 if pt else 0 for pt in assignment_result_cpy[machine_index]) total_component_types = sum(1 if pt else 0 for pt in assignment_result[machine_index])
placement_time = assembly_info[machine_index].placement_time total_placement_points = sum(assignment_result[machine_index])
total_time = assembly_info[machine_index].total_time
print(f'assembly time for machine {machine_index + 1: d}: {total_time: .3f} s, total placement: '
f'{total_placement_points}, total component types {total_component_types: d}', end='')
for part_index in range(len(assignment_result[machine_index])):
if assignment_result[machine_index][part_index]:
print(', ', part_index, end='')
print('')
regression_time = lr.coef_[0][0] * assembly_info[machine_index].cycle_counter + lr.coef_[0][1] * assembly_info[ print(f'final assembly time: {max(info.total_time for info in assembly_info): .3f} s, '
machine_index].nozzle_change_counter + lr.coef_[0][2] * assembly_info[machine_index].pickup_counter + \ f'standard deviation: {np.std([info.total_time for info in assembly_info]): .3f}')
lr.coef_[0][3] * assembly_info[machine_index].pickup_movement + lr.coef_[0][4] * \
placement_points[machine_index] + lr.intercept_[0]
print(f'assembly time for machine {machine_index + 1: d}: {placement_time: .3f} s, total placement: '
f'{placement_points[machine_index]}, total component types {total_component_types: d}', end=', ')
print(f'regression time: {regression_time: .3f} s')
standard_deviation_time += pow(placement_time - average_time, 2)
standard_deviation_time /= machine_number
standard_deviation_time = math.sqrt(standard_deviation_time)
print(f'final assembly time: {max(info.placement_time for info in assembly_info): .3f} s, '
f'standard deviation: {standard_deviation_time: .3f}')
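convert_line_assigment() itself is outside this diff; judging from the call site it splits the board into per-machine PCB and component tables according to assignment_result. A much-simplified sketch of that contract (assumption: the real helper also redistributes feeder limits, which is skipped here):

import pandas as pd

def convert_line_assigment_sketch(pcb_data, component_data, assignment_result):
    # split pcb_data row by row according to how many points of each part every
    # machine should place (no feeder-limit rebalancing, no error handling)
    machine_number = len(assignment_result)
    remaining = [list(machine) for machine in assignment_result]
    partial_pcb = [pcb_data.iloc[0:0].copy() for _ in range(machine_number)]
    partial_cp = [component_data.copy(deep=True) for _ in range(machine_number)]
    part_index = {part: i for i, part in enumerate(component_data['part'])}
    for _, row in pcb_data.iterrows():
        idx = part_index[row['part']]
        machine = next(m for m in range(machine_number) if remaining[m][idx] > 0)
        remaining[machine][idx] -= 1
        partial_pcb[machine] = pd.concat([partial_pcb[machine], row.to_frame().T])
    return [df.reset_index(drop=True) for df in partial_pcb], partial_cp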
@timer_wrapper @timer_wrapper
@@ -165,10 +59,10 @@ def main():
parser.add_argument('--filename', default='PCB.txt', type=str, help='load pcb data') parser.add_argument('--filename', default='PCB.txt', type=str, help='load pcb data')
parser.add_argument('--auto_register', default=1, type=int, help='register the component according the pcb data') parser.add_argument('--auto_register', default=1, type=int, help='register the component according the pcb data')
parser.add_argument('--machine_number', default=3, type=int, help='the number of machine in the assembly line') parser.add_argument('--machine_number', default=3, type=int, help='the number of machine in the assembly line')
parser.add_argument('--machine_optimizer', default='feeder_scan', type=str, help='optimizer for single machine') parser.add_argument('--machine_optimizer', default='feeder-scan', type=str, help='optimizer for single machine')
parser.add_argument('--line_optimizer', default='genetic', type=str, help='optimizer for PCB Assembly Line') parser.add_argument('--line_optimizer', default='hyper-heuristic', type=str, help='optimizer for PCB assembly line')
parser.add_argument('--feeder_limit', default=1, type=int, # parser.add_argument('--line_optimizer', default='genetic', type=str, help='optimizer for PCB assembly line')
help='the upper feeder limit for each type of component') parser.add_argument('--feeder_limit', default=1, type=int, help='the upper feeder limit for each type of component')
params = parser.parse_args() params = parser.parse_args()
# show all rows and columns in the output # show all rows and columns in the output
@ -181,6 +75,40 @@ def main():
optimizer(pcb_data, component_data, params.line_optimizer, params.machine_optimizer, params.machine_number) optimizer(pcb_data, component_data, params.line_optimizer, params.machine_optimizer, params.machine_number)
# index_list, part_list = [1, 4, 8, 9, 12, 13, 14, 18, 20, 22, 23, 25, 33, 35, 38, 39, 40], []
# for idx in index_list:
# part_list.append(component_data.iloc[idx].part)
# pcb_data = pcb_data[pcb_data['part'].isin(part_list)].reset_index(drop=True)
# component_data = component_data.iloc[index_list].reset_index(drop=True)
# optimizer(pcb_data, component_data, params.line_optimizer, params.machine_optimizer, 1)
#
# from optimizer_hyperheuristic import DataMgr, Net
# data_mgr = DataMgr()
# cp_points, cp_nozzle = defaultdict(int), defaultdict(str)
# for _, data in component_data.iterrows():
# cp_points[data.part], cp_nozzle[data.part] = data.points, data.nz
# idx = 1832
# data = data_mgr.loader(file_name)
# encoding = np.array(data[0][idx])
# device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# net = Net(input_size=data_mgr.get_feature(), output_size=1).to(device)
#
# net.load_state_dict(torch.load('model/net_model.pth'))
# board_width, board_height = pcb_data['x'].max() - pcb_data['x'].min(), pcb_data['y'].max() - pcb_data['y'].min()
# encoding = np.array(data_mgr.encode(cp_points, cp_nozzle, board_width, board_height))
# encoding = torch.from_numpy(encoding.reshape((-1, np.shape(encoding)[0]))).float().to("cuda")
# print(f'net pred time: {net(encoding)[0, 0].item():.3f}')
# with open('model/lr_model.pkl', 'rb') as f:
# lr = pickle.load(f)
#
# print('lr model train data: ', np.array(data[2:]).T[idx].reshape(1, -1))
# print('lr model pred time: ', lr.predict(np.array(data[2:]).T[idx].reshape(1, -1)))
# print('real time: ', data[-1][idx] * 3600 / data[1][idx])
if __name__ == '__main__': if __name__ == '__main__':
main() main()

View File

@ -49,7 +49,7 @@ def selective_crossover(component_points, component_feeders, mother, father, mac
one_counter, feasible_cut_line = 0, [] one_counter, feasible_cut_line = 0, []
idx = 0 idx = 0
for part_index, points in component_points: for part_index, points in component_points.items():
one_counter = 0 one_counter = 0
idx_, mother_cut_line, father_cut_line = 0, [-1], [-1] idx_, mother_cut_line, father_cut_line = 0, [-1], [-1]
@ -131,13 +131,12 @@ def selective_crossover(component_points, component_feeders, mother, father, mac
return offspring1, offspring2 return offspring1, offspring2
def cal_individual_val(component_points, component_feeders, component_nozzle, machine_number, individual, data_mgr, net): def cal_individual_val(component_points, component_nozzle, machine_number, individual, estimator):
idx, objective_val = 0, [] idx, objective_val = 0, []
machine_component_points = [[] for _ in range(machine_number)] machine_component_points = [[] for _ in range(machine_number)]
nozzle_component_points = defaultdict(list)
# decode the component allocation # decode the component allocation
for comp_idx, points in component_points: for part_index, points in component_points.items():
component_gene = individual[idx: idx + points + machine_number - 1] component_gene = individual[idx: idx + points + machine_number - 1]
machine_idx, component_counter = 0, 0 machine_idx, component_counter = 0, 0
for gene in component_gene: for gene in component_gene:
@ -150,108 +149,19 @@ def cal_individual_val(component_points, component_feeders, component_nozzle, ma
machine_component_points[-1].append(component_counter) machine_component_points[-1].append(component_counter)
idx += (points + machine_number - 1) idx += (points + machine_number - 1)
nozzle_component_points[component_nozzle[comp_idx]] = [0] * len(component_points) # initialize the per-nozzle list of component point counts objective_val = 0
# ======== newly added code: begin ========
for machine_idx in range(machine_number): for machine_idx in range(machine_number):
cp_points, cp_nozzle = defaultdict(int), defaultdict(str)
for comp_idx, _ in component_points:
if machine_component_points[machine_idx][comp_idx] == 0:
continue
cp_points['C' + str(comp_idx)] = machine_component_points[machine_idx][comp_idx]
cp_nozzle['C' + str(comp_idx)] = component_nozzle[comp_idx]
encoding = np.array(data_mgr.encode(cp_points, cp_nozzle, 45, 150))
encoding = torch.from_numpy(encoding.reshape((-1, np.shape(encoding)[0]))).float().to("cuda")
# pred_time = net(encoding)[0, 0].item()
# objective_val.append(pred_time * sum(points for points in cp_points.values()))
objective_val.append(net(encoding)[0, 0].item())
return objective_val, machine_component_points
# ======== newly added code: end (the code below is deprecated) =====
for comp_idx, points in component_points:
nozzle_component_points[component_nozzle[comp_idx]][comp_idx] = points
for machine_idx in range(machine_number):
nozzle_points = defaultdict(int)
for idx, nozzle in component_nozzle.items():
if component_points[idx] == 0:
continue
nozzle_points[nozzle] += machine_component_points[machine_idx][idx]
machine_points = sum(machine_component_points[machine_idx]) # num of placement points machine_points = sum(machine_component_points[machine_idx]) # num of placement points
if machine_points == 0: if machine_points == 0:
continue continue
ul = math.ceil(len(nozzle_points) * 1.0 / max_head_index) - 1 # num of nozzle set
# assignments of nozzles to heads cp_points, cp_nozzle = defaultdict(int), defaultdict(str)
wl = 0 # num of workload for part_index, points in enumerate(machine_component_points[machine_idx]):
total_heads = (1 + ul) * max_head_index - len(nozzle_points) if points == 0:
nozzle_heads = defaultdict(int)
for nozzle in nozzle_points.keys():
if nozzle_points[nozzle] == 0:
continue continue
nozzle_heads[nozzle] = math.floor(nozzle_points[nozzle] * 1.0 / machine_points * total_heads) cp_points[part_index], cp_nozzle[part_index] = points, component_nozzle[part_index]
nozzle_heads[nozzle] += 1 # objective_val = max(objective_val, estimator.neural_network(cp_points, cp_nozzle, 237.542, 223.088))
objective_val = max(objective_val, estimator.heuristic_genetic(cp_points, cp_nozzle))
total_heads = (1 + ul) * max_head_index
for heads in nozzle_heads.values():
total_heads -= heads
while True:
nozzle = max(nozzle_heads, key=lambda x: nozzle_points[x] / nozzle_heads[x])
if total_heads == 0:
break
nozzle_heads[nozzle] += 1
total_heads -= 1
# averagely assign placements to heads
heads_placement = []
for nozzle in nozzle_heads.keys():
points = math.floor(nozzle_points[nozzle] / nozzle_heads[nozzle])
heads_placement += [[nozzle, points] for _ in range(nozzle_heads[nozzle])]
nozzle_points[nozzle] -= (nozzle_heads[nozzle] * points)
for idx in range(len(heads_placement) - 1, -1, -1):
if nozzle_points[nozzle] <= 0:
break
nozzle_points[nozzle] -= 1
heads_placement[idx][1] += 1
heads_placement = sorted(heads_placement, key=lambda x: x[1], reverse=True)
# the number of pick-up operations
# (under the assumption of the number of feeder available for each comp. type is equal 1)
pl = 0
heads_placement_points = [0 for _ in range(max_head_index)]
while True:
head_assign_point = []
for head in range(max_head_index):
if heads_placement_points[head] != 0 or heads_placement[head] == 0:
continue
nozzle, points = heads_placement[head]
max_comp_index = np.argmax(nozzle_component_points[nozzle])
heads_placement_points[head] = min(points, nozzle_component_points[nozzle][max_comp_index])
nozzle_component_points[nozzle][max_comp_index] -= heads_placement_points[head]
head_assign_point.append(heads_placement_points[head])
min_points_list = list(filter(lambda x: x > 0, heads_placement_points))
if len(min_points_list) == 0 or len(head_assign_point) == 0:
break
pl += max(head_assign_point)
for head in range(max_head_index):
heads_placement[head][1] -= min(min_points_list)
heads_placement_points[head] -= min(min_points_list)
# every max_head_index heads in the non-decreasing order are grouped together as nozzle set
for idx in range(len(heads_placement) // max_head_index):
wl += heads_placement[idx][1]
objective_val.append(T_pp * machine_points + T_tr * wl + T_nc * ul + T_pl * pl)
return objective_val, machine_component_points return objective_val, machine_component_points
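The per-component gene block decoded above has length points + machine_number - 1, which suggests a stars-and-bars split of a component's placements across machines. A minimal decoder of that flavor, as a sketch of one plausible reading rather than the exact gene semantics used here:

def decode_component(gene, machine_number):
    # gene: 0/1 flags, with machine_number - 1 separator genes (0) splitting the
    # placement-point genes (1) into per-machine counts
    counts, counter = [], 0
    for g in gene:
        if g:
            counter += 1
        else:
            counts.append(counter)
            counter = 0
    counts.append(counter)
    assert len(counts) == machine_number
    return counts

# decode_component([1, 1, 0, 1, 0], machine_number=3) -> [2, 1, 0]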
@ -276,35 +186,25 @@ def individual_convert(component_points, individual):
return machine_component_points return machine_component_points
def assemblyline_optimizer_genetic(pcb_data, component_data, machine_number): def line_optimizer_genetic(component_data, machine_number):
# basic parameter # basic parameter
# crossover rate & mutation rate: 80% & 10% # crossover rate & mutation rate: 80% & 10%
# population size: 200 # population size: 200
# the number of generation: 500 # the number of generation: 500
crossover_rate, mutation_rate = 0.8, 0.1 crossover_rate, mutation_rate = 0.8, 0.1
population_size, n_generations = 200, 500 population_size, n_generations = 200, 500
estimator = Estimator()
# the number of placement points, the number of available feeders, and nozzle type of component respectively # the number of placement points, the number of available feeders, and nozzle type of component respectively
component_points, component_feeders, component_nozzle = defaultdict(int), defaultdict(int), defaultdict(str) cp_points, cp_feeders, cp_nozzle = defaultdict(int), defaultdict(int), defaultdict(str)
for data in pcb_data.iterrows(): for part_index, data in component_data.iterrows():
part_index = component_data[component_data['part'] == data[1]['part']].index.tolist()[0] cp_points[part_index] += data['points']
nozzle = component_data.loc[part_index]['nz'] cp_feeders[part_index] = data['feeder-limit']
cp_nozzle[part_index] = data['nz']
component_points[part_index] += 1
component_feeders[part_index] = component_data.loc[part_index]['feeder-limit']
component_nozzle[part_index] = nozzle
component_points = sorted(component_points.items(), key=lambda x: x[0]) # determines the gene ordering of the chromosome
data_mgr = DataMgr()
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
net = Net(input_size=data_mgr.get_feature(), output_size=1).to(device)
net.load_state_dict(torch.load('model/net_model.pth'))
# optimizer = torch.optim.Adam(net.parameters(), lr=0.1)
# optimizer.load_state_dict(torch.load('optimizer_state.pth'))
# population initialization # population initialization
population = selective_initialization(component_points, component_feeders, population_size, machine_number) population = selective_initialization(sorted(cp_points.items(), key=lambda x: x[0]), cp_feeders, population_size,
machine_number)
with tqdm(total=n_generations) as pbar: with tqdm(total=n_generations) as pbar:
pbar.set_description('genetic algorithm process for PCB assembly line balance') pbar.set_description('genetic algorithm process for PCB assembly line balance')
@ -313,9 +213,8 @@ def assemblyline_optimizer_genetic(pcb_data, component_data, machine_number):
# calculate fitness value # calculate fitness value
pop_val = [] pop_val = []
for individual in population: for individual in population:
val, assigned_points = cal_individual_val(component_points, component_feeders, component_nozzle, val, assigned_points = cal_individual_val(cp_points, cp_nozzle, machine_number, individual, estimator)
machine_number, individual, data_mgr, net) pop_val.append(val)
pop_val.append(max(val))
select_index = get_top_k_value(pop_val, population_size - len(new_population), reverse=False) select_index = get_top_k_value(pop_val, population_size - len(new_population), reverse=False)
population = [population[idx] for idx in select_index] population = [population[idx] for idx in select_index]
@ -323,9 +222,8 @@ def assemblyline_optimizer_genetic(pcb_data, component_data, machine_number):
population += new_population population += new_population
for individual in new_population: for individual in new_population:
val, _ = cal_individual_val(component_points, component_feeders, component_nozzle, machine_number, val, _ = cal_individual_val(cp_points, cp_nozzle, machine_number, individual, estimator)
individual, data_mgr, net) pop_val.append(val)
pop_val.append(max(val))
# min-max convert # min-max convert
max_val = max(pop_val) max_val = max(pop_val)
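The min-max convert step (continued beyond this hunk) flips the minimization objective into roulette-wheel selection weights; a worked example with made-up fitness values:

pop_val = [10.0, 12.0, 15.0]                                 # assembly-time objectives, lower is better
max_val = max(pop_val)
sel_pop_val = [max_val - v for v in pop_val]                 # [5.0, 3.0, 0.0]
sum_pop_val = sum(sel_pop_val) + 1e-10
sel_pop_val = [v / sum_pop_val + 1e-3 for v in sel_pop_val]  # the best individual gets the largest weight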
@ -343,14 +241,14 @@ def assemblyline_optimizer_genetic(pcb_data, component_data, machine_number):
if index1 != index2: if index1 != index2:
break break
offspring1, offspring2 = selective_crossover(component_points, component_feeders, offspring1, offspring2 = selective_crossover(cp_points, cp_feeders,
population[index1], population[index2], machine_number) population[index1], population[index2], machine_number)
if np.random.random() < mutation_rate: if np.random.random() < mutation_rate:
offspring1 = constraint_swap_mutation(component_points, offspring1, machine_number) offspring1 = constraint_swap_mutation(cp_points, offspring1, machine_number)
if np.random.random() < mutation_rate: if np.random.random() < mutation_rate:
offspring2 = constraint_swap_mutation(component_points, offspring2, machine_number) offspring2 = constraint_swap_mutation(cp_points, offspring2, machine_number)
new_population.append(offspring1) new_population.append(offspring1)
new_population.append(offspring2) new_population.append(offspring2)
@ -358,8 +256,7 @@ def assemblyline_optimizer_genetic(pcb_data, component_data, machine_number):
pbar.update(1) pbar.update(1)
best_individual = population[np.argmax(pop_val)] best_individual = population[np.argmax(pop_val)]
val, assignment_result = cal_individual_val(component_points, component_feeders, component_nozzle, machine_number, val, assignment_result = cal_individual_val(cp_points, cp_nozzle, machine_number, best_individual, estimator)
best_individual, data_mgr, net)
print('final value: ', val) print('final value: ', val)
# available feeder check # available feeder check

View File

@ -11,13 +11,13 @@ from base_optimizer.result_analysis import *
# TODO: nozzle tool available restriction # TODO: nozzle tool available restriction
# TODO: consider with the PCB placement topology # TODO: consider with the PCB placement topology
def assembly_time_estimator(assignment_points, component_feeders, component_nozzle): def assembly_time_estimator(assignment_points, arranged_feeders, component_data):
nozzle_heads, nozzle_points = defaultdict(int), defaultdict(int) nozzle_heads, nozzle_points = defaultdict(int), defaultdict(int)
for idx, points in enumerate(assignment_points): for idx, points in enumerate(assignment_points):
if points == 0: if points == 0:
continue continue
nozzle_points[component_nozzle[idx]] += points nozzle_points[component_data.iloc[idx]['nz']] += points
nozzle_heads[component_nozzle[idx]] = 1 nozzle_heads[component_data.iloc[idx]['nz']] = 1
while sum(nozzle_heads.values()) != max_head_index: while sum(nozzle_heads.values()) != max_head_index:
max_cycle_nozzle = None max_cycle_nozzle = None
@ -174,10 +174,11 @@ def assembly_time_estimator(assignment_points, component_feeders, component_nozz
for idx, points in enumerate(assignment_points): for idx, points in enumerate(assignment_points):
if points == 0: if points == 0:
continue continue
reminder_points = points % component_feeders[idx] feeder_limit = int(component_data.iloc[idx]['feeder-limit'])
for _ in range(component_feeders[idx]): reminder_points = points % feeder_limit
for _ in range(feeder_limit):
cp_info.append( cp_info.append(
[idx, points // component_feeders[idx] + (1 if reminder_points > 0 else 0), component_nozzle[idx]]) [idx, points // feeder_limit + (1 if reminder_points > 0 else 0), component_data.iloc[idx]['nz']])
reminder_points -= 1 reminder_points -= 1
cp_info.sort(key=lambda x: -x[1]) cp_info.sort(key=lambda x: -x[1])
@ -204,46 +205,35 @@ def assembly_time_estimator(assignment_points, component_feeders, component_nozz
t_place * placement_counter + 0.1 * pickup_movement t_place * placement_counter + 0.1 * pickup_movement
def assemblyline_optimizer_heuristic(pcb_data, component_data, machine_number): def line_optimizer_heuristic(component_data, machine_number):
# the number of placement points, the number of available feeders, and nozzle type of component respectively # the number of placement points, the number of available feeders, and nozzle type of component respectively
component_number = len(component_data) component_number = len(component_data)
component_points = [0 for _ in range(component_number)]
component_feeders = [0 for _ in range(component_number)]
component_nozzle = [0 for _ in range(component_number)]
component_part = [0 for _ in range(component_number)]
nozzle_points = defaultdict(int) # the number of placements of nozzle nozzle_points = defaultdict(int) # the number of placements of nozzle
total_points = 0
for _, data in pcb_data.iterrows(): for _, data in component_data.iterrows():
part_index = component_data[component_data['part'] == data['part']].index.tolist()[0] nozzle = data['nz']
nozzle = component_data.loc[part_index]['nz'] nozzle_points[nozzle] += data['points']
total_points += data['points']
component_points[part_index] += 1
component_feeders[part_index] = component_data.loc[part_index]['feeder-limit']
# component_feeders[part_index] = math.ceil(component_data.loc[part_index]['feeder-limit'] / max_feeder_limit)
component_nozzle[part_index] = nozzle
component_part[part_index] = data['part']
nozzle_points[nozzle] += 1
# first step: generate the initial solution with equalized workload # first step: generate the initial solution with equalized workload
assignment_result = [[0 for _ in range(len(component_points))] for _ in range(machine_number)] assignment_result = [[0 for _ in range(len(component_data))] for _ in range(machine_number)]
assignment_points = [0 for _ in range(machine_number)] assignment_points = [0 for _ in range(machine_number)]
average_points = len(pcb_data) // machine_number average_points = total_points // machine_number
weighted_points = list( weighted_points = list(
map(lambda x: x[1] + 1e-5 * nozzle_points[component_nozzle[x[0]]], enumerate(component_points))) data['points'] + 1e-5 * nozzle_points[data['nz']] for _, data in component_data.iterrows())
# for part_index in np.argsort(weighted_points)[::-1]: # for part_index in np.argsort(weighted_points)[::-1]:
for part_index in np.argsort(weighted_points)[::-1]: for part_index in np.argsort(weighted_points)[::-1]:
if (total_points := component_points[part_index]) == 0: # total placements for each component type if (total_points := component_data.iloc[part_index]['points']) == 0: # total placements for each component type
continue continue
machine_set = [] machine_set = []
# define the machine that assigning placement points (considering the feeder limitation) # define the machine that assigning placement points (considering the feeder limitation)
for machine_index in np.argsort(assignment_points): for machine_index in np.argsort(assignment_points):
if len(machine_set) >= component_points[part_index] or len(machine_set) >= component_feeders[part_index]: if len(machine_set) >= component_data.iloc[part_index]['points'] or len(machine_set) >= \
component_data.iloc[part_index]['feeder-limit']:
break break
machine_set.append(machine_index) machine_set.append(machine_index)
@ -308,7 +298,7 @@ def assemblyline_optimizer_heuristic(pcb_data, component_data, machine_number):
arranged_feeders[machine_index] = [0 for _ in range(len(component_data))] arranged_feeders[machine_index] = [0 for _ in range(len(component_data))]
for part_index in range(len(component_data)): for part_index in range(len(component_data)):
feeder_limit = component_feeders[part_index] # total number of available feeders feeder_limit = component_data.iloc[part_index]['feeder-limit'] # total number of available feeders
for machine_index in range(machine_number): for machine_index in range(machine_number):
if assignment_result[machine_index][part_index] == 0: if assignment_result[machine_index][part_index] == 0:
continue continue
@ -318,7 +308,7 @@ def assemblyline_optimizer_heuristic(pcb_data, component_data, machine_number):
assert feeder_limit >= 0 assert feeder_limit >= 0
for part_index in range(len(component_data)): for part_index in range(len(component_data)):
total_feeder_limit = component_feeders[part_index] - sum( total_feeder_limit = component_data.iloc[part_index]['feeder-limit'] - sum(
[arranged_feeders[machine_index][part_index] for machine_index in range(machine_number)]) [arranged_feeders[machine_index][part_index] for machine_index in range(machine_number)])
while total_feeder_limit > 0: while total_feeder_limit > 0:
max_ratio, max_ratio_machine = None, -1 max_ratio, max_ratio_machine = None, -1
@ -336,7 +326,7 @@ def assemblyline_optimizer_heuristic(pcb_data, component_data, machine_number):
for machine_index in range(machine_number): for machine_index in range(machine_number):
assembly_time.append( assembly_time.append(
assembly_time_estimator(assignment_result[machine_index], arranged_feeders[machine_index], assembly_time_estimator(assignment_result[machine_index], arranged_feeders[machine_index],
component_nozzle)) component_data))
chip_per_hour.append(sum(assignment_result[machine_index]) / (assembly_time[-1] + 1e-10)) chip_per_hour.append(sum(assignment_result[machine_index]) / (assembly_time[-1] + 1e-10))
max_assembly_time = max(assembly_time) max_assembly_time = max(assembly_time)
@ -351,7 +341,6 @@ def assemblyline_optimizer_heuristic(pcb_data, component_data, machine_number):
# third step: adjust the assignment results to reduce maximal assembly time among all machines # third step: adjust the assignment results to reduce maximal assembly time among all machines
# ideal averagely assigned points # ideal averagely assigned points
total_points = len(pcb_data)
average_assign_points = [round(total_points * chip_per_hour[mi] / sum(chip_per_hour)) for mi in average_assign_points = [round(total_points * chip_per_hour[mi] / sum(chip_per_hour)) for mi in
range(machine_number)] range(machine_number)]
@ -391,7 +380,7 @@ def assemblyline_optimizer_heuristic(pcb_data, component_data, machine_number):
tmp_reallocate_result[supply_mi] -= reallocate_points tmp_reallocate_result[supply_mi] -= reallocate_points
tmp_reallocate_result[demand_mi] += reallocate_points tmp_reallocate_result[demand_mi] += reallocate_points
if sum(1 for pt in tmp_reallocate_result if pt > 0) > component_feeders[part_index]: if sum(1 for pt in tmp_reallocate_result if pt > 0) > component_data.iloc[part_index]['feeder-limit']:
continue continue
assignment_result[supply_mi][part_index] -= reallocate_points assignment_result[supply_mi][part_index] -= reallocate_points
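The third step above sizes each machine's target load by its measured throughput; a quick numeric illustration of that capacity-weighted split (numbers are made up):

total_points = 900
chip_per_hour = [120.0, 100.0, 80.0]                 # hypothetical per-machine rates from step two
average_assign_points = [round(total_points * c / sum(chip_per_hour)) for c in chip_per_hour]
# -> [360, 300, 240]: faster machines are asked to absorb proportionally more placement points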

View File

@ -1,58 +1,317 @@
import os import os
import pickle import pickle
import random
import numpy as np import numpy as np
import pandas as pd
import torch.nn import torch.nn
from base_optimizer.optimizer_interface import * from base_optimizer.optimizer_interface import *
from generator import * from generator import *
from estimator import *
os.environ['KMP_DUPLICATE_LIB_OK'] = 'True' os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'
class Net(torch.nn.Module): class Heuristic:
def __init__(self, input_size, hidden_size=1024, output_size=1): @staticmethod
super(Net, self).__init__() def apply(cp_points, cp_nozzle, cp_assign):
self.fc1 = torch.nn.Linear(input_size, hidden_size) return -1
self.relu = torch.nn.ReLU() # 激活函数
self.fc2 = torch.nn.Linear(hidden_size, output_size)
def forward(self, x):
x = self.fc1(x)
x = self.relu(x)
x = self.fc2(x)
return x
class LSTMNet(torch.nn.Module): class LeastPoints(Heuristic):
def __init__(self, input_size, hidden_size=256, output_size=1, num_layers=1): @staticmethod
super(LSTMNet, self).__init__() def apply(cp_points, cp_nozzle, cp_assign):
machine_points = []
self.lstm = torch.nn.LSTM(input_size, hidden_size, num_layers) for machine_idx in range(len(cp_assign)):
self.fc = torch.nn.Linear(hidden_size, output_size) if len(cp_assign[machine_idx]) == 0:
return machine_idx
def forward(self, x): machine_points.append(sum([cp_points[cp_idx] for cp_idx in cp_assign[machine_idx]]))
x, _ = self.lstm(x) # x is input with size (seq_len, batch_size, input_size) return np.argmin(machine_points)
x = self.fc(x)
return x[-1, :, ]
def selective_initialization(component_points, population_size, machine_number): class LeastNzTypes(Heuristic):
# assignment_result = [[0 for _ in range(len(component_points))] for _ in range(machine_number)] @staticmethod
assignment_result = [] def apply(cp_points, cp_nozzle, cp_assign):
machine_nozzle = []
return assignment_result for machine_idx in range(len(cp_assign)):
if len(cp_assign[machine_idx]) == 0:
return machine_idx
machine_nozzle.append([cp_nozzle[cp_idx] for cp_idx in cp_assign[machine_idx]])
return np.argmin([len(set(nozzle)) for nozzle in machine_nozzle])
def optimizer_hyperheuristc(pcb_data, component_data, machine_number): class LeastCpTypes(Heuristic):
@staticmethod
def apply(cp_points, cp_nozzle, cp_assign):
return np.argmin([len(cp) for cp in cp_assign])
class LeastCpNzRatio(Heuristic):
@staticmethod
def apply(cp_points, cp_nozzle, cp_assign):
machine_nz_type, machine_cp_type = [], []
for machine_idx in range(len(cp_assign)):
if len(cp_assign[machine_idx]) == 0:
return machine_idx
machine_nz_type.append([cp_nozzle[cp_idx] for cp_idx in cp_assign[machine_idx]])
machine_cp_type.append(cp_assign[machine_idx])
return np.argmin(
[len(machine_cp_type[machine_idx]) / (len(machine_nz_type[machine_idx]) + 1e-5) for machine_idx in
range(len(cp_assign))])
def nozzle_assignment(cp_points, cp_nozzle, cp_assign):
nozzle_heads, nozzle_points = defaultdict(int), defaultdict(int)
for cp_idx in cp_assign:
nozzle_points[cp_nozzle[cp_idx]] += cp_points[cp_idx]
nozzle_heads[cp_nozzle[cp_idx]] = 1
while sum(nozzle_heads.values()) != max_head_index:
max_cycle_nozzle = None
for nozzle, head_num in nozzle_heads.items():
if max_cycle_nozzle is None or nozzle_points[nozzle] / head_num > nozzle_points[max_cycle_nozzle] / \
nozzle_heads[max_cycle_nozzle]:
max_cycle_nozzle = nozzle
assert max_cycle_nozzle is not None
nozzle_heads[max_cycle_nozzle] += 1
return nozzle_heads, nozzle_points
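nozzle_assignment above seeds one head per nozzle type and then repeatedly hands each spare head to the nozzle with the largest points-per-head ratio; an illustrative call with made-up data, assuming max_head_index == 6:

cp_points = {0: 300, 1: 60, 2: 140}                  # hypothetical placement counts
cp_nozzle = {0: 'CN065', 1: 'CN140', 2: 'CN065'}
nozzle_heads, nozzle_points = nozzle_assignment(cp_points, cp_nozzle, cp_assign=[0, 1, 2])
# nozzle_points -> {'CN065': 440, 'CN140': 60}
# nozzle_heads  -> {'CN065': 5, 'CN140': 1}: every spare head goes to CN065, whose
# points-per-head ratio stays the largest throughout the loop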
class LeastCycle(Heuristic):
@staticmethod
def apply(cp_points, cp_nozzle, cp_assign):
machine_cycle = []
for machine_idx, assign_component in enumerate(cp_assign):
if len(assign_component) == 0:
return machine_idx
nozzle_heads, nozzle_points = nozzle_assignment(cp_points, cp_nozzle, assign_component)
machine_cycle.append(max(nozzle_points[nozzle] / head for nozzle, head in nozzle_heads.items()))
return np.argmin(machine_cycle)
class LeastNzChange(Heuristic):
@staticmethod
def apply(cp_points, cp_nozzle, cp_assign):
machine_nozzle_change = []
for machine_idx, assign_component in enumerate(cp_assign):
if len(assign_component) == 0:
return machine_idx
heads_points = []
nozzle_heads, nozzle_points = nozzle_assignment(cp_points, cp_nozzle, assign_component)
for nozzle, head in nozzle_heads.items():
for _ in range(head):
heads_points.append(nozzle_points[nozzle] / nozzle_heads[nozzle])
machine_nozzle_change.append(np.std(heads_points))
return np.argmin(machine_nozzle_change)
class LeastPickup(Heuristic):
@staticmethod
def apply(cp_points, cp_nozzle, cp_assign):
machine_pick_up = []
for machine_idx, assign_component in enumerate(cp_assign):
if len(assign_component) == 0:
return machine_idx
nozzle_heads, nozzle_points = nozzle_assignment(cp_points, cp_nozzle, assign_component)
nozzle_level, nozzle_counter = defaultdict(int), defaultdict(int)
level_points = defaultdict(int)
for cp_idx in sorted(assign_component, key=lambda x: cp_points[x], reverse=True):
nozzle, points = cp_nozzle[cp_idx], cp_points[cp_idx]
if nozzle_counter[nozzle] and nozzle_counter[nozzle] % nozzle_heads[nozzle] == 0:
nozzle_level[nozzle] += 1
level = nozzle_level[nozzle]
level_points[level] = max(level_points[level], points)
nozzle_counter[nozzle] += 1
machine_pick_up.append(sum(points for points in level_points.values()))
return np.argmin(machine_pick_up)
def generate_pattern(heuristic_map, cp_points):
"""
Generates a random pattern.
:return: The generated pattern string.
"""
return "".join([random.choice(list(heuristic_map.keys())) for _ in range(random.randrange(1, len(cp_points)))])
def crossover(parent1, parent2):
"""
Attempt to perform crossover between two chromosomes.
:param parent1: The first parent.
:param parent2: The second parent.
:return: The two individuals after crossover has been performed.
"""
point1, point2 = random.randrange(len(parent1)), random.randrange(len(parent2))
substr1, substr2 = parent1[point1:], parent2[point2:]
offspring1, offspring2 = "".join((parent1[:point1], substr2)), "".join((parent2[:point2], substr1))
return offspring1, offspring2
def mutation(heuristic_map, cp_points, individual):
"""
Attempts to mutate the individual by replacing a random heuristic in the chromosome by a generated pattern.
:param individual: The individual to mutate.
:return: The mutated individual.
"""
pattern = list(individual)
mutation_point = random.randrange(len(pattern))
pattern[mutation_point] = generate_pattern(heuristic_map, cp_points)
return ''.join(pattern)
def population_initialization(population_size, heuristic_map, cp_points):
return [generate_pattern(heuristic_map, cp_points) for _ in range(population_size)]
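Since an individual is just a string over the heuristic keys, the variation operators above are plain string surgery. A toy illustration, reusing two of the heuristic classes defined earlier (the data values are hypothetical):

heuristic_map = {'p': LeastPoints, 'c': LeastCpTypes}    # any subset of the keys used below
cp_points = {0: 120, 1: 80, 2: 40}                       # hypothetical placement counts
parent1, parent2 = 'pcp', 'cpc'                          # chromosomes are strings of heuristic keys
child1, child2 = crossover(parent1, parent2)
# child1 = parent1[:i] + parent2[j:], child2 = parent2[:j] + parent1[i:] for random cuts i, j,
# so offspring lengths may differ from their parents
mutant = mutation(heuristic_map, cp_points, child1)      # one position replaced by a random sub-pattern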
def convert_assignment_result(heuristic_map, cp_points, cp_nozzle, component_list, individual, machine_number):
machine_cp_assign = [[] for _ in range(machine_number)]
for idx, cp_idx in enumerate(component_list):
h = individual[idx % len(individual)]
machine_idx = heuristic_map[h].apply(cp_points, cp_nozzle, machine_cp_assign)
machine_cp_assign[machine_idx].append(cp_idx)
return machine_cp_assign
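Decoding an individual is a single pass over the (shuffled) component list: each character selects a low-level heuristic, which in turn selects the machine, and the string wraps around when it is shorter than the list. A toy trace using the classes defined above with hypothetical data:

heuristic_map = {'p': LeastPoints, 'c': LeastCpTypes}
cp_points = {0: 120, 1: 80, 2: 40, 3: 10}                # hypothetical placement counts
cp_nozzle = {0: 'CN065', 1: 'CN065', 2: 'CN140', 3: 'CN220'}
assign = convert_assignment_result(heuristic_map, cp_points, cp_nozzle,
                                   component_list=[0, 1, 2, 3], individual='pc',
                                   machine_number=2)
# -> [[0, 3], [1, 2]]: 'p' fills the empty machine 0, 'c' sends component 1 to machine 1
#    (fewer component types), then the string wraps around for components 2 and 3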
def cal_individual_val(heuristic_map, cp_points, cp_nozzle, board_width, board_height, component_list,
individual, machine_number, estimator):
machine_cp_assign = convert_assignment_result(heuristic_map, cp_points, cp_nozzle, component_list,
individual, machine_number)
objective_val = []
for machine_idx in range(machine_number):
machine_cp_points, machine_cp_nozzle = defaultdict(int), defaultdict(str)
for cp_idx in machine_cp_assign[machine_idx]:
machine_cp_points[cp_idx] = cp_points[cp_idx]
machine_cp_nozzle[cp_idx] = cp_nozzle[cp_idx]
objective_val.append(estimator.neural_network(machine_cp_points, machine_cp_nozzle, board_width, board_height))
# objective_val.append(estimator.heuristic_genetic(machine_cp_points, machine_cp_nozzle))
return objective_val
def line_optimizer_hyperheuristic(component_data, pcb_data, machine_number):
heuristic_map = {
'p': LeastPoints,
'n': LeastNzTypes,
'c': LeastCpTypes,
'r': LeastCpNzRatio,
'k': LeastCycle,
'g': LeastNzChange,
'u': LeastPickup,
}
# genetic-based hyper-heuristic # genetic-based hyper-heuristic
crossover_rate, mutation_rate = 0.8, 0.1 crossover_rate, mutation_rate = 0.8, 0.1
population_size, n_generations = 200, 500 population_size, n_generations = 20, 100
n_iterations = 10
# todo: how to generate initial population (random?) estimator = Estimator()
# assignment_result = selective_initialization(component_points, population_size, machine_number)
assignment_result = [] best_val, best_component_list = None, None
best_individual = None
division_component_data = pd.DataFrame(columns=component_data.columns)
for _, data in component_data.iterrows():
feeder_limit = data['feeder-limit']
data['feeder-limit'], data['points'] = 1, int(data['points'] / data['feeder-limit'])
for _ in range(feeder_limit):
division_component_data = pd.concat([division_component_data, pd.DataFrame(data).T])
division_component_data = division_component_data.reset_index()
component_list = [idx for idx, data in division_component_data.iterrows() if data['points'] > 0]
cp_points, cp_nozzle = defaultdict(int), defaultdict(str)
for idx, data in division_component_data.iterrows():
cp_points[idx], cp_nozzle[idx] = data['points'], data['nz']
board_width, board_height = pcb_data['x'].max() - pcb_data['x'].min(), pcb_data['y'].max() - pcb_data['y'].min()
with tqdm(total=n_generations * n_iterations) as pbar:
pbar.set_description('hyper-heuristic algorithm process for PCB assembly line balance')
for _ in range(n_iterations):
random.shuffle(component_list)
new_population = []
population = population_initialization(population_size, heuristic_map, cp_points)
# calculate fitness value
pop_val = []
for individual in population:
val = cal_individual_val(heuristic_map, cp_points, cp_nozzle, board_width, board_height,
component_list, individual, machine_number, estimator)
pop_val.append(max(val))
for _ in range(n_generations):
select_index = get_top_k_value(pop_val, population_size - len(new_population), reverse=False)
population = [population[idx] for idx in select_index]
pop_val = [pop_val[idx] for idx in select_index]
population += new_population
for individual in new_population:
val = cal_individual_val(heuristic_map, cp_points, cp_nozzle, board_width, board_height,
component_list, individual, machine_number, estimator)
pop_val.append(max(val))
# min-max convert
max_val = max(pop_val)
sel_pop_val = list(map(lambda v: max_val - v, pop_val))
sum_pop_val = sum(sel_pop_val) + 1e-10
sel_pop_val = [v / sum_pop_val + 1e-3 for v in sel_pop_val]
# crossover and mutation
new_population = []
for pop in range(population_size):
if pop % 2 == 0 and np.random.random() < crossover_rate:
index1 = roulette_wheel_selection(sel_pop_val)
while True:
index2 = roulette_wheel_selection(sel_pop_val)
if index1 != index2:
break
offspring1, offspring2 = crossover(population[index1], population[index2])
if np.random.random() < mutation_rate:
offspring1 = mutation(heuristic_map, cp_points, offspring1)
if np.random.random() < mutation_rate:
offspring2 = mutation(heuristic_map, cp_points, offspring2)
new_population.append(offspring1)
new_population.append(offspring2)
pbar.update(1)
val = cal_individual_val(heuristic_map, cp_points, cp_nozzle, board_width, board_height,
component_list, population[0], machine_number, estimator)
val = max(val)
if best_val is None or val < best_val:
best_val = val
best_individual = population[0]
best_component_list = component_list.copy()
machine_cp_points = convert_assignment_result(heuristic_map, cp_points, cp_nozzle, best_component_list,
best_individual, machine_number)
val = cal_individual_val(heuristic_map, cp_points, cp_nozzle, board_width, board_height,
best_component_list, best_individual, machine_number, estimator)
print(val)
assignment_result = [[0 for _ in range(len(component_data))] for _ in range(machine_number)]
for machine_idx in range(machine_number):
for cp_idx in machine_cp_points[machine_idx]:
idx = division_component_data.iloc[cp_idx]['index']
assignment_result[machine_idx][idx] += cp_points[cp_idx]
print(assignment_result)
return assignment_result return assignment_result
@ -67,9 +326,9 @@ if __name__ == '__main__':
help='determine whether overwriting the training and testing data') help='determine whether overwriting the training and testing data')
parser.add_argument('--train_file', default='train_data.txt', type=str, help='training file path') parser.add_argument('--train_file', default='train_data.txt', type=str, help='training file path')
parser.add_argument('--test_file', default='test_data.txt', type=str, help='testing file path') parser.add_argument('--test_file', default='test_data.txt', type=str, help='testing file path')
parser.add_argument('--num_epochs', default=15000, type=int, help='number of epochs for training process') parser.add_argument('--num_epochs', default=8000, type=int, help='number of epochs for training process')
parser.add_argument('--batch_size', default=100000, type=int, help='size of training batch') parser.add_argument('--batch_size', default=10000, type=int, help='size of training batch')
parser.add_argument('--lr', default=1e-4, type=float, help='learning rate for the network') parser.add_argument('--lr', default=1e-5, type=float, help='learning rate for the network')
params = parser.parse_args() params = parser.parse_args()
@ -80,8 +339,9 @@ if __name__ == '__main__':
file = {params.train_file: params.batch_size, file = {params.train_file: params.batch_size,
params.test_file: params.batch_size // data_mgr.get_update_round() // 5} params.test_file: params.batch_size // data_mgr.get_update_round() // 5}
for file_name, file_batch_size in file.items(): for file_name, file_batch_size in file.items():
for _ in range(int(file_batch_size)): with open('opt/' + file_name, 'a') as f:
with open('opt/' + file_name, 'a') as f: for _ in range(int(file_batch_size)):
mode = file_name.split('.')[0].split('_')[0] mode = file_name.split('.')[0].split('_')[0]
pcb_data, component_data = data_mgr.generator(mode) # random generate a PCB data pcb_data, component_data = data_mgr.generator(mode) # random generate a PCB data
# data_mgr.remover() # remove the last saved data # data_mgr.remover() # remove the last saved data
@ -89,27 +349,27 @@ if __name__ == '__main__':
info = base_optimizer(1, pcb_data, component_data, info = base_optimizer(1, pcb_data, component_data,
feeder_data=pd.DataFrame(columns=['slot', 'part', 'arg']), feeder_data=pd.DataFrame(columns=['slot', 'part', 'arg']),
method='feeder_scan', method='feeder-scan', hinter=True)
hinter=True)
data_mgr.recorder(f, info, pcb_data, component_data) data_mgr.recorder(f, info, pcb_data, component_data)
f.close() f.close()
net = Net(input_size=data_mgr.get_feature(), output_size=1).to(device) net = Net(input_size=data_mgr.get_feature(), output_size=1).to(device)
data = data_mgr.loader('opt/' + params.train_file)
if params.train: if params.train:
data = data_mgr.loader('opt/' + params.train_file)
x_fit, y_fit = np.array(data[2:]).T, np.array([data[1]]).T x_fit, y_fit = np.array(data[2:]).T, np.array([data[1]]).T
lr = LinearRegression() lr = LinearRegression()
lr.fit(x_fit, y_fit) lr.fit(x_fit, y_fit)
x_train, y_train = np.array(data[0][::10]), lr.predict(x_fit[::10]) x_train = np.array(data[0][::data_mgr.get_update_round()])
# x_train, y_train = np.array(data[0]), np.array(data[2]) # y_train = lr.predict(x_fit[::data_mgr.get_update_round()])
y_train = np.array(data[1][::data_mgr.get_update_round()])
x_train = torch.from_numpy(x_train.reshape((-1, np.shape(x_train)[1]))).float().to(device) x_train = torch.from_numpy(x_train.reshape((-1, np.shape(x_train)[1]))).float().to(device)
y_train = torch.from_numpy(y_train.reshape((-1, 1))).float().to(device) y_train = torch.from_numpy(y_train.reshape((-1, 1))).float().to(device)
optimizer = torch.optim.Adam(net.parameters(), lr=params.lr) optimizer = torch.optim.Adam(net.parameters(), lr=params.lr)
# scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=6000, gamma=0.8) # scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=5000, gamma=0.1)
loss_func = torch.nn.MSELoss() loss_func = torch.nn.MSELoss()
@ -120,7 +380,7 @@ if __name__ == '__main__':
loss.backward() loss.backward()
optimizer.step() optimizer.step()
# scheduler.step() # scheduler.step()
if epoch % 50 == 0: if epoch % 100 == 0:
print('Epoch: ', epoch, ', Loss: ', loss.item()) print('Epoch: ', epoch, ', Loss: ', loss.item())
if loss.item() < 1e-4: if loss.item() < 1e-4:
break break
@ -144,35 +404,43 @@ if __name__ == '__main__':
torch.save(net.state_dict(), 'model/net_model.pth') torch.save(net.state_dict(), 'model/net_model.pth')
with open('model/lr_model.pkl', 'wb') as f: with open('model/lr_model.pkl', 'wb') as f:
pickle.dump(lr, f) pickle.dump(lr, f)
# torch.save(optimizer.state_dict(), 'model/optimizer_state.pth') torch.save(optimizer.state_dict(), 'model/optimizer_state.pth')
else: else:
with open('model/lr_model.pkl', 'rb') as f:
lr = pickle.load(f)
net.load_state_dict(torch.load('model/net_model.pth')) net.load_state_dict(torch.load('model/net_model.pth'))
# optimizer = torch.optim.Adam(net.parameters(), lr=learning_rate) # optimizer = torch.optim.Adam(net.parameters(), lr=learning_rate)
# optimizer.load_state_dict(torch.load('model/optimizer_state.pth')) # optimizer.load_state_dict(torch.load('model/optimizer_state.pth'))
data = data_mgr.loader('opt/' + params.test_file) data = data_mgr.loader('opt/' + params.test_file)
# x_test, y_test = np.array(data[0]), np.array(data[1]) x_test, y_test = np.array(data[0]), np.array(data[1])
x_test, y_test = np.array(data[0]), lr.predict(np.array(data[2:]).T) # x_test, y_test = np.array(data[0]), lr.predict(np.array(data[2:]).T)
x_test = torch.from_numpy(x_test.reshape((-1, np.shape(x_test)[1]))).float().to(device)
x_test, y_test = torch.from_numpy(x_test.reshape((-1, np.shape(x_test)[1]))).float().to(device), \
torch.from_numpy(y_test.reshape((-1, 1))).float().to(device)
net.eval() net.eval()
with torch.no_grad(): with torch.no_grad():
net_predict = net(x_test).view(-1) pred_time = net(x_test).view(-1).cpu().detach().numpy()
pred_time, real_time = net_predict.cpu().detach().numpy(), y_test.view(-1).cpu().detach().numpy() x_test = x_test.cpu().detach().numpy()
pred_error = np.array([]) over_set = []
for t1, t2 in np.nditer([pred_time, real_time]): pred_idx, pred_error = 0, np.array([])
for t1, t2 in np.nditer([pred_time, y_test.reshape(-1)]):
pred_error = np.append(pred_error, abs(t1 - t2) / (t2 + 1e-10) * 100) pred_error = np.append(pred_error, abs(t1 - t2) / (t2 + 1e-10) * 100)
print(pred_time)
print(real_time)
print('--------------------------------------')
print(f'average prediction error for test data : {np.average(pred_error): .2f}% ')
print(f'maximum prediction error for test data : {np.max(pred_error): .2f}% ')
mse = np.linalg.norm(pred_time - real_time) if pred_error[-1] > 5:
over_set.append(pred_idx + 1)
print(f'\033[0;31;31midx: {pred_idx + 1: d}, net: {t1: .3f}, real: {t2: .3f}, '
f'gap: {pred_error[-1]: .3f}\033[0m')
else:
pass
# print(f'idx: {pred_idx + 1: d}, net: {t1: .3f}, real: {t2: .3f}, gap: {pred_error[-1]: .3f}')
pred_idx += 1
print('over:', over_set)
print('size:', len(over_set))
print('--------------------------------------')
print(f'average prediction error for test data : {np.average(pred_error): .3f}% ')
print(f'maximum prediction error for test data : {np.max(pred_error): .3f}% ')
mse = np.linalg.norm(pred_time - y_test.reshape(-1))
print(f'mean square error for test data result : {mse: 2f} ') print(f'mean square error for test data result : {mse: 2f} ')

View File

@ -1,49 +1,18 @@
from base_optimizer.optimizer_common import * from base_optimizer.optimizer_common import *
from estimator import *
# no nozzle change is allowed during production; the pick-and-place of a point is related only to the feeder slot / gantry module def random_component_assignment(pcb_data, component_data, machine_number, estimator=None):
def objective_value_calculate(component_assignment, component_nozzle, task_block_weight, machine_number):
machine_assembly_time = []
for machine_index in range(max_machine_index):
task_block_number, total_point_number = 0, sum(component_assignment[machine_index])
nozzle_points, nozzle_heads = defaultdict(int), defaultdict(int)
for part, points in enumerate(component_assignment[machine_index]):
nozzle = component_nozzle[part]
nozzle_points[nozzle] += points
nozzle_heads[nozzle] = 1
remaining_head = max_head_index - len(nozzle_heads)
nozzle_fraction = []
for nozzle, points in nozzle_points.items():
val = remaining_head * points / total_point_number
nozzle_heads[nozzle] += math.floor(val)
nozzle_fraction.append([nozzle, val - math.floor(val)])
remaining_head = max_head_index - sum(nozzle_heads.values())
sorted(nozzle_fraction, key=lambda x: x[1])
nozzle_fraction_index = 0
while remaining_head > 0:
nozzle_heads[nozzle_fraction[nozzle_fraction_index][0]] += 1
remaining_head -= 1
for nozzle, heads_number in nozzle_heads.items():
task_block_number = max(task_block_weight, math.ceil(nozzle_points[nozzle] / heads_number))
machine_assembly_time.append(
(t_pick + t_place) * sum(component_assignment[machine_index]) + task_block_number * task_block_weight)
return max(machine_assembly_time)
def random_component_assignment(component_points, component_nozzle, component_feeders, task_block_weight, machine_number):
component_points_cpy = copy.deepcopy(component_points)
component_number = len(component_points_cpy)
assignment_result = [[0 for _ in range(component_number)] for _ in range(machine_number)]
# == the set of feasible component type for each nozzle type # == the set of feasible component type for each nozzle type
nozzle_part_list = defaultdict(list) nozzle_part_list = defaultdict(list)
for index, nozzle in enumerate(component_nozzle): component_points = []
nozzle_part_list[nozzle].append(index) for idx, data in component_data.iterrows():
component_points.append(data.points)
nozzle_part_list[data.nz].append(idx)
component_number = len(component_data)
assignment_result = [[0 for _ in range(component_number)] for _ in range(machine_number)]
# === ensure every nozzle types === # === ensure every nozzle types ===
selected_part = [] selected_part = []
for part_list in nozzle_part_list.values(): for part_list in nozzle_part_list.values():
@ -51,7 +20,7 @@ def random_component_assignment(component_points, component_nozzle, component_fe
machine_index = random.randint(0, machine_number - 1) machine_index = random.randint(0, machine_number - 1)
assignment_result[machine_index][part] += 1 assignment_result[machine_index][part] += 1
component_points_cpy[part] -= 1 component_points[part] -= 1
selected_part.append(part) selected_part.append(part)
# === assign one placement which has not been selected === # === assign one placement which has not been selected ===
@ -60,7 +29,7 @@ def random_component_assignment(component_points, component_nozzle, component_fe
continue continue
assignment_result[random.randint(0, machine_number - 1)][part] += 1 assignment_result[random.randint(0, machine_number - 1)][part] += 1
component_points_cpy[part] -= 1 component_points[part] -= 1
machine_assign = list(range(machine_number)) machine_assign = list(range(machine_number))
random.shuffle(machine_assign) random.shuffle(machine_assign)
@ -73,62 +42,74 @@ def random_component_assignment(component_points, component_nozzle, component_fe
if assignment_result[idx][part] > 0 or idx == machine_index: if assignment_result[idx][part] > 0 or idx == machine_index:
feeder_counter += 1 feeder_counter += 1
if component_points_cpy[part] == 0 or feeder_counter > component_feeders[part]: if component_points[part] == 0 or feeder_counter > component_data.iloc[part]['feeder-limit']:
continue continue
# feeder limit restriction # feeder limit restriction
points = random.randint(1, component_points_cpy[part]) points = random.randint(1, component_points[part])
assignment_result[machine_index][part] += points assignment_result[machine_index][part] += points
component_points_cpy[part] -= points component_points[part] -= points
if component_points_cpy[part] == 0: if component_points[part] == 0:
finished_assign_counter += 1 finished_assign_counter += 1
assert sum(component_points_cpy) == 0 assert sum(component_points) == 0
val = 0
if estimator:
cp_items = estimator.convert(pcb_data, component_data, assignment_result)
for machine_index in range(machine_number):
cp_points, cp_nozzle, cp_width, cp_height, board_width, board_height = cp_items[machine_index]
# objective_value.append(
# estimator.neural_network(cp_points, cp_nozzle, cp_width, cp_height, board_width, board_height))
val = max(val, estimator.heuristic(cp_points, cp_nozzle))
return objective_value_calculate(assignment_result, component_nozzle, task_block_weight), assignment_result return val, assignment_result
def greedy_component_assignment(component_points, component_nozzle, component_feeders, task_block_weight): def greedy_component_assignment(component_points, component_nozzle, component_feeders, task_block_weight):
pass # unclear what the original reference intends here pass # unclear what the original reference intends here
def local_search_component_assignment(component_points, component_nozzle, component_feeders, task_block_weight, def local_search_component_assignment(pcb_data, component_data, machine_number, estimator):
machine_number):
# maximum number of iterations : 5000 # maximum number of iterations : 5000
# maximum number of unsuccessful iterations: 50 # maximum number of unsuccessful iterations: 50
component_number = len(component_points) component_number = len(component_data)
iteration_counter, unsuccessful_iteration_counter = 5000, 50 iteration_counter, unsuccessful_iteration_counter = 5000, 50
optimal_val, optimal_assignment = random_component_assignment(component_points, component_nozzle, component_feeders, optimal_val, optimal_assignment = random_component_assignment(pcb_data, component_data, machine_number, estimator)
task_block_weight, machine_number)
for _ in range(iteration_counter): for _ in range(iteration_counter):
machine_index = random.randint(0, machine_number - 1) machine_idx = random.randint(0, machine_number - 1)
if sum(optimal_assignment[machine_index]) == 0: if sum(optimal_assignment[machine_idx]) == 0:
continue continue
part_set = [] part_set = []
for component_index in range(component_number): for part_idx in range(component_number):
if optimal_assignment[machine_index][component_index] != 0: if optimal_assignment[machine_idx][part_idx] != 0:
part_set.append(component_index) part_set.append(part_idx)
component_index = random.sample(part_set, 1)[0] part_idx = random.sample(part_set, 1)[0]
r = random.randint(1, optimal_assignment[machine_index][component_index]) r = random.randint(1, optimal_assignment[machine_idx][part_idx])
assignment = copy.deepcopy(optimal_assignment) assignment = copy.deepcopy(optimal_assignment)
cyclic_counter = 0 cyclic_counter = 0
swap_machine_index = None swap_machine_idx = None
while cyclic_counter <= 2 * machine_index: while cyclic_counter <= 2 * machine_idx:
cyclic_counter += 1 cyclic_counter += 1
swap_machine_index = random.randint(0, machine_number - 1) swap_machine_idx = random.randint(0, machine_number - 1)
feeder_available = 0 feeder_available = 0
for machine in range(machine_number): for machine in range(machine_number):
if optimal_assignment[machine][component_index] or machine == swap_machine_index: if optimal_assignment[machine][part_idx] or machine == swap_machine_idx:
feeder_available += 1 feeder_available += 1
if feeder_available <= component_feeders[component_index] and swap_machine_index != machine_index: if feeder_available <= component_data.iloc[part_idx]['feeder-limit'] and swap_machine_idx != machine_idx:
break break
assert swap_machine_index is not None assert swap_machine_idx is not None
assignment[machine_index][component_index] -= r assignment[machine_idx][part_idx] -= r
assignment[swap_machine_index][component_index] += r assignment[swap_machine_idx][part_idx] += r
val = objective_value_calculate(assignment, component_nozzle, task_block_weight)
val = 0
cp_items = estimator.convert(pcb_data, component_data, assignment)
for machine_index in range(machine_number):
cp_points, cp_nozzle, _, _, _, _ = cp_items[machine_index]
val = max(val, estimator.heuristic(cp_points, cp_nozzle))
if val < optimal_val: if val < optimal_val:
optimal_assignment, optimal_val = assignment, val optimal_assignment, optimal_val = assignment, val
unsuccessful_iteration_counter = 50 unsuccessful_iteration_counter = 50
@ -140,9 +121,9 @@ def local_search_component_assignment(component_points, component_nozzle, compon
return optimal_val, optimal_assignment return optimal_val, optimal_assignment
def reconfig_crossover_operation(component_points, component_feeders, parent1, parent2, machine_number): def reconfig_crossover_operation(component_data, parent1, parent2, machine_number):
offspring1, offspring2 = copy.deepcopy(parent1), copy.deepcopy(parent2) offspring1, offspring2 = copy.deepcopy(parent1), copy.deepcopy(parent2)
component_number = len(component_points) component_number = len(component_data)
# === crossover === # === crossover ===
mask_bit = [] mask_bit = []
@ -161,57 +142,57 @@ def reconfig_crossover_operation(component_points, component_feeders, parent1, p
# === balancing === # === balancing ===
# equally to reach the correct number # equally to reach the correct number
for component_index in range(component_number): for part_index in range(component_number):
for offspring in [offspring1, offspring2]: for offspring in [offspring1, offspring2]:
additional_points = sum([offspring[mt][component_index] for mt in range(machine_number)]) - \ additional_points = sum([offspring[mt][part_index] for mt in range(machine_number)]) - \
component_points[component_index] component_data.iloc[part_index]['points']
if additional_points > 0: if additional_points > 0:
# if a component type has more placements, decrease the assigned values on every head equally keeping # if a component type has more placements, decrease the assigned values on every head equally keeping
# the proportion of the number of placement among the heads # the proportion of the number of placement among the heads
points_list = [] points_list = []
for machine_index in range(machine_number): for machine_index in range(machine_number):
points = math.floor( points = math.floor(
additional_points * offspring[machine_index][component_index] / component_points[component_index]) additional_points * offspring[machine_index][part_index] / component_data[part_index]['points'])
points_list.append(points) points_list.append(points)
offspring[machine_index][component_index] -= points offspring[machine_index][part_index] -= points
additional_points -= sum(points_list) additional_points -= sum(points_list)
for machine_index in range(machine_number): for machine_index in range(machine_number):
if additional_points == 0: if additional_points == 0:
break break
if offspring[machine_index][component_index] == 0: if offspring[machine_index][part_index] == 0:
continue continue
offspring[machine_index][component_index] -= 1 offspring[machine_index][part_index] -= 1
additional_points += 1 additional_points += 1
elif additional_points < 0: elif additional_points < 0:
# otherwise, increase the assigned nonzero values equally # otherwise, increase the assigned nonzero values equally
machine_set = [] machine_set = []
for machine_index in range(machine_number): for machine_index in range(machine_number):
if offspring[machine_index][component_index] == 0: if offspring[machine_index][part_index] == 0:
continue continue
machine_set.append(machine_index) machine_set.append(machine_index)
points = -math.ceil(additional_points / len(machine_set)) points = -math.ceil(additional_points / len(machine_set))
for machine_index in machine_set: for machine_index in machine_set:
offspring[machine_index][component_index] += points offspring[machine_index][part_index] += points
additional_points += points additional_points += points
for machine_index in machine_set: for machine_index in machine_set:
if additional_points == 0: if additional_points == 0:
break break
offspring[machine_index][component_index] += 1 offspring[machine_index][part_index] += 1
additional_points -= 1 additional_points -= 1
# === result validation === # === result validation ===
for offspring in [offspring1, offspring2]: for offspring in [offspring1, offspring2]:
for part in range(component_number): for part in range(component_number):
pt = sum(offspring[mt][part] for mt in range(machine_number)) pt = sum(offspring[mt][part] for mt in range(machine_number))
assert pt == component_points[part] assert pt == component_data.iloc[part]['points']
return offspring1, offspring2 return offspring1, offspring2
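The balancing branch above trims a surplus proportionally to each machine's current share before peeling off any remainder one point at a time; a worked example with made-up numbers for a single component column:

import math

true_points, column = 10, [7, 4, 2]                   # crossover left 13 points for a 10-point component
surplus = sum(column) - true_points                   # 3
trimmed = [math.floor(surplus * c / true_points) for c in column]   # [2, 1, 0]
column = [c - t for c, t in zip(column, trimmed)]     # [5, 3, 2]
surplus -= sum(trimmed)                               # 0 left; any remainder is removed one by one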
def reconfig_mutation_operation(component_feeders, parent, machine_number): def reconfig_mutation_operation(component_data, parent, machine_number):
offspring = copy.deepcopy(parent) offspring = copy.deepcopy(parent)
swap_direction = random.randint(0, 1) swap_direction = random.randint(0, 1)
@ -228,10 +209,10 @@ def reconfig_mutation_operation(component_feeders, parent, machine_number):
swap_points = random.randint(1, offspring[swap_machine1][swap_component_index]) swap_points = random.randint(1, offspring[swap_machine1][swap_component_index])
feeder_counter = 0 feeder_counter = 0
for machine_index in range(max_machine_index): for machine_index in range(machine_number):
if offspring[swap_machine1][swap_component_index] < swap_points or machine_index == swap_machine2: if offspring[swap_machine1][swap_component_index] < swap_points or machine_index == swap_machine2:
feeder_counter += 1 feeder_counter += 1
if feeder_counter > component_feeders[swap_component_index]: if feeder_counter > component_data.iloc[swap_component_index]['feeder-limit']:
return offspring return offspring
offspring[swap_machine1][swap_component_index] -= swap_points offspring[swap_machine1][swap_component_index] -= swap_points
@ -239,7 +220,7 @@ def reconfig_mutation_operation(component_feeders, parent, machine_number):
return offspring return offspring
-def evolutionary_component_assignment(component_points, component_nozzle, component_feeders, task_block_weight, machine_number):
+def evolutionary_component_assignment(pcb_data, component_data, machine_number, estimator):
     # population size: 10
     # probability of the mutation: 0.1
     # probability of the crossover: 0.8
@@ -250,9 +231,7 @@ def evolutionary_component_assignment(component_points, component_nozzle, compon
     population = []
     for _ in range(population_size):
-        population.append(
-            random_component_assignment(component_points, component_nozzle, component_feeders, task_block_weight,
-                                        machine_number)[1])
+        population.append(random_component_assignment(pcb_data, component_data, machine_number, None)[1])

     with tqdm(total=generation_number) as pbar:
         pbar.set_description('evolutionary algorithm process for PCB assembly line balance')
@@ -262,7 +241,12 @@ def evolutionary_component_assignment(component_points, component_nozzle, compon
             # calculate fitness value
             pop_val = []
             for individual in population:
-                pop_val.append(objective_value_calculate(individual, component_nozzle, task_block_weight, machine_number))
+                val = 0
+                cp_items = estimator.convert(pcb_data, component_data, individual)
+                for machine_index in range(machine_number):
+                    cp_points, cp_nozzle, _, _, _, _ = cp_items[machine_index]
+                    val = max(val, estimator.heuristic(cp_points, cp_nozzle))
+                pop_val.append(val)

             select_index = get_top_k_value(pop_val, population_size - len(new_population), reverse=False)
             population = [population[idx] for idx in select_index]
@@ -270,7 +254,12 @@ def evolutionary_component_assignment(component_points, component_nozzle, compon
             population += new_population
             for individual in new_population:
-                pop_val.append(objective_value_calculate(individual, component_nozzle, task_block_weight, machine_number))
+                cp_items = estimator.convert(pcb_data, component_data, individual)
+                val = 0
+                for machine_index in range(machine_number):
+                    cp_points, cp_nozzle, _, _, _, _ = cp_items[machine_index]
+                    val = max(val, estimator.heuristic(cp_points, cp_nozzle))
+                pop_val.append(val)

             # min-max convert
             max_val = max(pop_val)
@@ -288,15 +277,14 @@ def evolutionary_component_assignment(component_points, component_nozzle, compon
                 if index1 != index2:
                     break

-            offspring1, offspring2 = reconfig_crossover_operation(component_points, component_feeders,
-                                                                  population[index1], population[index2],
-                                                                  machine_number)
+            offspring1, offspring2 = reconfig_crossover_operation(component_data, population[index1],
+                                                                  population[index2], machine_number)

             if np.random.random() < mutation_rate:
-                offspring1 = reconfig_mutation_operation(component_feeders, offspring1, machine_number)
+                offspring1 = reconfig_mutation_operation(component_data, offspring1, machine_number)

             if np.random.random() < mutation_rate:
-                offspring2 = reconfig_mutation_operation(component_feeders, offspring2, machine_number)
+                offspring2 = reconfig_mutation_operation(component_data, offspring2, machine_number)

             new_population.append(offspring1)
             new_population.append(offspring2)
@@ -306,47 +294,26 @@ def evolutionary_component_assignment(component_points, component_nozzle, compon
     return min(pop_val), population[np.argmin(pop_val)]
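Fitness here is the estimated time of the slowest machine, so lower is better; before parents are chosen the values are min-max converted so that smaller objective values receive larger selection weights. A small illustrative sketch of that conversion with made-up fitness values (pick_two_parents is hypothetical; the project's actual selection routine may differ):

import random

def pick_two_parents(pop_val):
    """Roulette-style pick for a minimisation objective: lower value, larger weight."""
    max_val = max(pop_val)
    weights = [max_val - val + 1e-6 for val in pop_val]   # min-max convert
    while True:
        i, j = random.choices(range(len(pop_val)), weights=weights, k=2)
        if i != j:
            return i, j

random.seed(1)
pop_val = [38.2, 41.7, 36.9, 50.3]   # hypothetical line-balance estimates (seconds)
print(pick_two_parents(pop_val))     # indices of the two selected parents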
-def reconfiguration_optimizer(pcb_data, component_data, machine_number):
-    # === data preparation ===
-    component_number = len(component_data)
-    component_points = [0 for _ in range(component_number)]
-    component_nozzle = [0 for _ in range(component_number)]
-    component_feeders = [0 for _ in range(component_number)]
-    component_part = [0 for _ in range(component_number)]
-    for _, data in pcb_data.iterrows():
-        part_index = component_data[component_data['part'] == data['part']].index.tolist()[0]
-        nozzle = component_data.loc[part_index]['nz']
-        component_points[part_index] += 1
-        component_nozzle[part_index] = nozzle
-        component_part[part_index] = data['part']
-        component_feeders[part_index] = component_data.loc[part_index]['feeder-limit']
+@timer_wrapper
+def line_optimizer_reconfiguration(component_data, pcb_data, machine_number):
     # === assignment of heads to modules is omitted ===
     optimal_assignment, optimal_val = [], None
-    task_block_weight = 5  # element from list [0, 1, 2, 5, 10] task_block ~= cycle
+    estimator = Estimator(task_block_weight=5)  # element from list [0, 1, 2, 5, 10] task_block ~= cycle

     # === assignment of components to heads
     for i in range(5):
         if i == 0:
             # random
-            val, assignment = random_component_assignment(component_points, component_nozzle, component_feeders,
-                                                          task_block_weight, machine_number)
+            val, assignment = random_component_assignment(pcb_data, component_data, machine_number, estimator)
         elif i == 1:
             # brute force
             # which is proved to be useless, since it only ran in reasonable time for the smaller test instances
             continue
         elif i == 2:
             # local search
-            val, assignment = local_search_component_assignment(component_points, component_nozzle, component_feeders,
-                                                                task_block_weight, machine_number)
+            val, assignment = local_search_component_assignment(pcb_data, component_data, machine_number, estimator)
         elif i == 3:
             # evolutionary
-            val, assignment = evolutionary_component_assignment(component_points, component_nozzle, component_feeders,
-                                                                task_block_weight, machine_number)
+            val, assignment = evolutionary_component_assignment(pcb_data, component_data, machine_number, estimator)
         else:
             # greedy: unclear description
             continue
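Each branch of the strategy loop yields a (val, assignment) pair, where val is the worst-machine estimate for that candidate; the remainder of the function, below this hunk, presumably keeps the smallest one. An illustrative comparison with made-up candidates (the values and assignments here are invented for the example):

# hypothetical (val, assignment) pairs as returned by the strategies above
candidates = [(42.0, [[10, 0], [5, 7]]), (39.5, [[8, 2], [7, 5]])]

optimal_val, optimal_assignment = None, None
for val, assignment in candidates:
    if optimal_val is None or val < optimal_val:
        optimal_val, optimal_assignment = val, assignment
print(optimal_val, optimal_assignment)    # -> 39.5 [[8, 2], [7, 5]]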