Compare commits

...

11 Commits

Author SHA1 Message Date
4fd5560650 Added the HSMO whole-line optimization method; feeder data is now included when reading input 2025-08-10 16:58:42 +08:00
045f2f394d Added pre-installed feeder support; the route-planning model now handles single points; whole-line optimization supports batch processing 2024-11-01 09:14:44 +08:00
37f4e5b02c Whole-line optimization: project snapshot for the first finalized version of the paper
Added whole-line batch testing
Modified the existing min-max model path
Revised the overall framework of the genetic algorithm
Added outlier removal to the estimator
Encapsulated the optimization result class
Modified the detection of duplicate nozzle groups in the feeder scanning algorithm
2024-06-26 09:44:08 +08:00
cbeba48da0 Modified the filename attribute 2024-06-05 22:10:21 +08:00
7c9a900b95 Added a hyper-heuristic line optimization algorithm 2024-05-17 22:52:49 +08:00
6fa1f53f69 Modified the data generation and network training methods 2024-04-06 13:44:05 +08:00
bae7e4e2c3 Adjusted the project architecture, added several algorithms, and added initial neural-network training and fitting code 2024-03-29 22:10:07 +08:00
800057e000 Added a reconfig method 2023-09-14 14:52:58 +08:00
afde7a853e Merge branch 'master' of github.com:hit-lu/assembly_line_optimizer
# Conflicts:
#	optimizer_genetic.py
2023-08-31 22:13:06 +08:00
87ddb057ca Modified the evaluation function of the whole-line algorithm 2023-08-03 19:33:43 +08:00
d5e8bbcc87 Added pick-up count calculation to the cost function 2023-08-03 00:26:40 +08:00
24 changed files with 7357 additions and 1962 deletions

.gitignore

@@ -6,3 +6,7 @@ __pycache__
Lib/
Scripts/
*.cfg
*.pkl
*.pth
*.m
opt/


@@ -1,25 +1,30 @@
import copy
import time
import math
import random
import argparse
import os
import warnings
import copy
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from functools import wraps
from collections import defaultdict
from tqdm import tqdm
from gurobipy import *
from sklearn.linear_model import LinearRegression
from sklearn.svm import SVR
# whole-line (multi-machine) parameters
max_machine_index = 3
import os
import time
import math
import random
import copy
import torch
import torch.nn
import argparse
import joblib
import pickle
import warnings
import heapq
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib
import traceback
import openpyxl
# timing parameters
T_pp, T_tr, T_nc = 2, 5, 25
matplotlib.use('TkAgg')
# machine parameters
max_head_index, max_slot_index = 6, 120
@@ -32,7 +37,7 @@ head_nozzle = ['' for _ in range(max_head_index)] # nozzles already assigned to each head
slotf1_pos, slotr1_pos = [-31.267, 44.], [807., 810.545] # positions of F1 (leftmost slot of the front base) and R1 (rightmost slot of the rear base)
fix_camera_pos = [269.531, 694.823] # fixed camera position
anc_marker_pos = [336.457, 626.230] # ANC reference point position
stopper_pos = [635.150, 124.738] # stopper position
stopper_pos = [665.150, 124.738] # stopper position
# algorithm weight parameters
e_nz_change, e_gang_pick = 4, 0.6
@@ -42,20 +47,97 @@ head_rotary_velocity = 8e-5 # R-axis rotation time of the placement head
x_max_velocity, y_max_velocity = 1.4, 1.2
x_max_acceleration, y_max_acceleration = x_max_velocity / 0.079, y_max_velocity / 0.079
# widths of different feeder types
# TODO: widths of different feeder types
# feeder_width = {'SM8': (7.25, 7.25), 'SM12': (7.25, 7.25), 'SM16': (7.25, 7.25),
# 'SM24': (7.25, 7.25), 'SM32': (7.25, 7.25)}
feeder_width = {'SM8': (7.25, 7.25), 'SM12': (7.00, 20.00), 'SM16': (7.00, 22.00),
'SM24': (7.00, 29.00), 'SM32': (7.00, 44.00)}
# limit on the number of available nozzles per type
nozzle_limit = {'CN065': 6, 'CN040': 6, 'CN220': 6, 'CN400': 6, 'CN140': 6}
nozzle_limit = {'CN065': 6, 'CN040': 6, 'CN020': 6, 'CN400': 6, 'CN140': 6}
# timing parameters
t_cycle = 0.3
t_anc = 0.6
t_pick, t_place = .078, .051 # pick / place operation time
t_nozzle_put, t_nozzle_pick = 0.9, 0.75 # nozzle mounting / dismounting time
t_nozzle_change = t_nozzle_put + t_nozzle_pick
t_fix_camera_check = 0.12 # fixed-camera inspection time
# timing parameters (whole-line related)
T_pp, T_tr, T_nc, T_pl = 2, 5, 25, 0
# timing parameters (obtained by data fitting)
# Fit_cy, Fit_nz, Fit_pu, Fit_pl, Fit_mv = 0.326, 0.870, 0.159, 0.041, 0.030
Fit_cy, Fit_nz, Fit_pu, Fit_pl, Fit_mv = 0.326, 0.870, 0.159, 0.041, 0.035
class Point:
def __init__(self, _x, _y, _r=0, _h=None):
self.x = _x
self.y = _y
self.r = _r
self.h = _h
class OptResult:
def __init__(self, cp_assign=None, cycle_assign=None, slot_assign=None, place_assign=None, sequence_assign=None):
self.component_assign = [] if cp_assign is None else cp_assign
self.cycle_assign = [] if cycle_assign is None else cycle_assign
self.feeder_slot_assign = [] if slot_assign is None else slot_assign
self.placement_assign = [] if place_assign is None else place_assign
self.head_sequence = [] if sequence_assign is None else sequence_assign
class OptInfo:
def __init__(self):
self.total_time = .0  # total assembly time
self.total_points = 0  # total number of placement points
self.total_components = 0  # total number of components
self.pickup_time = .0  # travel time during pick-up
self.round_time = .0  # travel time between the feeder base and the board
self.place_time = .0  # travel time during placement
self.operation_time = .0  # time of mechanical actions such as pick, place and nozzle change
self.cycle_counter = 0  # number of cycles
self.nozzle_change_counter = 0  # number of nozzle changes
self.anc_round_counter = 0  # number of trips to the ANC
self.pickup_counter = 0  # number of pick-ups
self.total_distance = .0  # total travel distance
self.place_distance = .0  # travel distance during placement
self.pickup_distance = .0  # travel distance during pick-up
def print(self):
print('-Cycle counter: {}'.format(self.cycle_counter))
print(f'-Nozzle change counter: {self.nozzle_change_counter: d}')
print(f'-ANC round: {self.anc_round_counter: d}')
print(f'-Pick operation counter: {self.pickup_counter: d}')
print(f'-Pick time: {self.pickup_time: .3f}, Pick distance: {self.pickup_distance: .3f}')
print(f'-Place time: {self.place_time: .3f}, Place distance: {self.place_distance: .3f}')
print(
f'-Round time: {self.total_time - self.operation_time - self.pickup_time - self.place_time: .3f}, Round distance: '
f'{self.total_distance - self.pickup_distance - self.place_distance: .3f}')
print(f'-Round & place time per cycle: {(self.total_time - self.pickup_time - self.operation_time) * 1000.0 / (self.cycle_counter + 1e-10): .3f}, ', end='')
print(f'-Round & place distance per cycle: {(self.total_distance - self.pickup_distance) / (self.cycle_counter + 1e-10): .3f}')
minutes, seconds = int(self.total_time // 60), int(self.total_time) % 60
millisecond = int((self.total_time - minutes * 60 - seconds) * 1000)
print(f'-Operation time: {self.operation_time: .3f}, ', end='')
if minutes > 0:
print(f'Total time: {minutes: d} min {seconds} s {millisecond: 2d} ms ({self.total_time: .3f}s)')
else:
print(f'Total time: {seconds} s {millisecond :2d} ms ({self.total_time :.3f}s)')
def metric(self):
return Fit_cy * self.cycle_counter + Fit_nz * self.nozzle_change_counter + Fit_pu * self.pickup_counter + \
Fit_pl * self.total_points + Fit_mv * self.pickup_distance
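The fitted coefficients above act as a linear surrogate for machine time: metric() weighs the cycle, nozzle-change, pick-up, placement-point and pick-distance counters by Fit_cy, Fit_nz, Fit_pu, Fit_pl and Fit_mv. A minimal worked example of evaluating the surrogate, using made-up counter values (not taken from the repository):

info = OptInfo()
info.cycle_counter = 120          # hypothetical values for one optimization result
info.nozzle_change_counter = 8
info.pickup_counter = 150
info.total_points = 600
info.pickup_distance = 4200.0
# 0.326*120 + 0.870*8 + 0.159*150 + 0.041*600 + 0.035*4200 ≈ 241.53
print(round(info.metric(), 3))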
def axis_moving_time(distance, axis=0):
distance = abs(distance) * 1e-3
Lamax = x_max_velocity ** 2 / x_max_acceleration if axis == 0 else y_max_velocity ** 2 / y_max_acceleration
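The hunk is truncated here, so only the computation of Lamax (the distance needed to accelerate to and then brake from the axis's maximum velocity) is visible. A minimal self-contained sketch of how such a motion-time function is typically completed, assuming a symmetric triangular/trapezoidal velocity profile; this is an illustration, not the repository's actual implementation:

import math

def axis_moving_time_sketch(distance, max_velocity, max_acceleration):
    distance = abs(distance) * 1e-3                   # mm -> m, as above
    Lamax = max_velocity ** 2 / max_acceleration      # accel + decel distance at full speed
    if distance <= Lamax:
        # triangular profile: the axis never reaches its maximum velocity
        return 2 * math.sqrt(distance / max_acceleration)
    # trapezoidal profile: accelerate, cruise at maximum velocity, decelerate
    return 2 * max_velocity / max_acceleration + (distance - Lamax) / max_velocity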
@@ -107,8 +189,12 @@ def timer_wrapper(func):
def measure_time(*args, **kwargs):
start_time = time.time()
result = func(*args, **kwargs)
print("function {} running time : {} s".format(func.__name__, time.time() - start_time))
hinter = True
for key, val in kwargs.items():
if key == 'hinter':
hinter = val
if hinter:
print(f"function {func.__name__} running time : {time.time() - start_time:.3f} s")
return result
return measure_time
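The revised decorator forwards all arguments unchanged and only suppresses its timing printout when the wrapped function is called with hinter=False. A hypothetical usage sketch (dummy_optimize is an illustrative function, not part of the repository):

@timer_wrapper
def dummy_optimize(n, hinter=True):
    # placeholder workload standing in for an optimizer call
    return sum(i * i for i in range(n))

dummy_optimize(200000)                # prints "function dummy_optimize running time : ..."
dummy_optimize(200000, hinter=False)  # runs silently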
@@ -119,12 +205,13 @@ def feeder_assignment(component_data, pcb_data, component_result, cycle_result):
feeder_slot_result, feeder_group_result = [], []
feeder_limit = defaultdict(int)
for component in range(len(component_data)):
feeder_limit[component] = component_data.loc[component]['feeder-limit']
feeder_limit[component] = component_data.loc[component].fdn
for component_cycle in component_result:
new_feeder_group = []
for component in component_cycle:
if component == -1 or feeder_limit[component] == 0 or new_feeder_group.count(component) >= feeder_limit[component]:
if component == -1 or feeder_limit[component] == 0 or new_feeder_group.count(component) >= feeder_limit[
component]:
new_feeder_group.append(-1)
else:
new_feeder_group.append(component)
@@ -298,359 +385,6 @@ def feeder_assignment(component_data, pcb_data, component_result, cycle_result):
return feeder_slot_result
def dynamic_programming_cycle_path(pcb_data, cycle_placement, assigned_feeder):
head_sequence = []
num_pos = sum([placement != -1 for placement in cycle_placement]) + 1
pos, head_set = [], []
feeder_set = set()
for head, feeder in enumerate(assigned_feeder):
if feeder == -1:
continue
head_set.append(head)
placement = cycle_placement[head]
if feeder != -1 and placement == -1:
print(assigned_feeder)
print(cycle_placement)
pos.append([pcb_data.loc[placement]['x'] - head * head_interval + stopper_pos[0],
pcb_data.loc[placement]['y'] + stopper_pos[1]])
feeder_set.add(feeder - head * interval_ratio)
pos.insert(0, [slotf1_pos[0] + ((min(list(feeder_set)) + max(list(feeder_set))) / 2 - 1) * slot_interval,
slotf1_pos[1]])
def get_distance(pos_1, pos_2):
return math.sqrt((pos_1[0] - pos_2[0]) ** 2 + (pos_1[1] - pos_2[1]) ** 2)
# pairwise distances between nodes
dist = [[get_distance(pos_1, pos_2) for pos_2 in pos] for pos_1 in pos]
min_dist = [[np.inf for _ in range(num_pos)] for s in range(1 << num_pos)]
min_path = [[[] for _ in range(num_pos)] for s in range(1 << num_pos)]
# bitmask (state-compression) DP search
for s in range(1, 1 << num_pos, 2):
# the considered node set s must contain node 0
if not (s & 1):
continue
for j in range(1, num_pos):
# the endpoint j must lie inside the considered node set s
if not (s & (1 << j)):
continue
if s == int((1 << j) | 1):
# if the set s contains only node 0 and node j, initialize the DP boundary value
# print('j:', j)
min_path[s][j] = [j]
min_dist[s][j] = dist[0][j]
# enumerate the next node i and update
for i in range(1, num_pos):
# the next node i must lie outside the considered set s
if s & (1 << i):
continue
if min_dist[s][j] + dist[j][i] < min_dist[s | (1 << i)][i]:
min_path[s | (1 << i)][i] = min_path[s][j] + [i]
min_dist[s | (1 << i)][i] = min_dist[s][j] + dist[j][i]
ans_dist = float('inf')
ans_path = []
# finally, find the shortest Hamiltonian cycle
for i in range(1, num_pos):
if min_dist[(1 << num_pos) - 1][i] + dist[i][0] < ans_dist:
# update and close the cycle
ans_path = min_path[(1 << num_pos) - 1][i]
ans_dist = min_dist[(1 << num_pos) - 1][i] + dist[i][0]
for parent in ans_path:
head_sequence.append(head_set[parent - 1])
start_head, end_head = head_sequence[0], head_sequence[-1]
if pcb_data.loc[cycle_placement[start_head]]['x'] - start_head * head_interval > \
pcb_data.loc[cycle_placement[end_head]]['x'] - end_head * head_interval:
head_sequence = list(reversed(head_sequence))
return head_sequence
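dynamic_programming_cycle_path is a Held-Karp-style bitmask ("state compression") DP over at most max_head_index + 1 nodes: node 0 is the pick-up position on the feeder base and the remaining nodes are the placement positions of the cycle. For reference, a self-contained sketch of the same technique on plain 2-D points (hypothetical helper, not part of the repository):

import math

def shortest_cycle(points):
    # Held-Karp bitmask DP: shortest cycle visiting every point, starting/ending at points[0]
    n = len(points)
    dist = [[math.dist(a, b) for b in points] for a in points]
    dp = [[math.inf] * n for _ in range(1 << n)]
    parent = [[-1] * n for _ in range(1 << n)]
    dp[1][0] = 0.0
    for mask in range(1, 1 << n):
        if not mask & 1:                       # every considered set must contain node 0
            continue
        for last in range(n):
            if not mask & (1 << last) or dp[mask][last] == math.inf:
                continue
            for nxt in range(1, n):            # extend the path by one unvisited node
                if mask & (1 << nxt):
                    continue
                cand = dp[mask][last] + dist[last][nxt]
                if cand < dp[mask | (1 << nxt)][nxt]:
                    dp[mask | (1 << nxt)][nxt] = cand
                    parent[mask | (1 << nxt)][nxt] = last
    full = (1 << n) - 1
    best = min(range(1, n), key=lambda j: dp[full][j] + dist[j][0])
    tour, mask, node = [], full, best          # reconstruct the tour backwards
    while node != -1:
        tour.append(node)
        mask, node = mask ^ (1 << node), parent[mask][node]
    return list(reversed(tour)) + [0], dp[full][best] + dist[best][0]

# e.g. shortest_cycle([(0, 0), (0, 1), (1, 1), (1, 0)])[1] == 4.0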
@timer_wrapper
def greedy_placement_route_generation(component_data, pcb_data, component_result, cycle_result, feeder_slot_result):
placement_result, head_sequence_result = [], []
mount_point_index = [[] for _ in range(len(component_data))]
mount_point_pos = [[] for _ in range(len(component_data))]
for i in range(len(pcb_data)):
part = pcb_data.loc[i]['part']
component_index = component_data[component_data['part'] == part].index.tolist()[0]
# record the placement point indices and the corresponding coordinates
mount_point_index[component_index].append(i)
mount_point_pos[component_index].append([pcb_data.loc[i]['x'], pcb_data.loc[i]['y']])
search_dir = 1 # 0: search from left to right, 1: search from right to left
for cycle_set in range(len(component_result)):
floor_cycle, ceil_cycle = sum(cycle_result[:cycle_set]), sum(cycle_result[:(cycle_set + 1)])
for cycle in range(floor_cycle, ceil_cycle):
# search_dir = 1 - search_dir
assigned_placement = [-1] * max_head_index
max_pos = [max(mount_point_pos[component_index], key=lambda x: x[0]) for component_index in
range(len(mount_point_pos)) if len(mount_point_pos[component_index]) > 0][0][0]
min_pos = [min(mount_point_pos[component_index], key=lambda x: x[0]) for component_index in
range(len(mount_point_pos)) if len(mount_point_pos[component_index]) > 0][0][0]
point2head_range = min(math.floor((max_pos - min_pos) / head_interval) + 1, max_head_index)
# nearest-neighbour determination
way_point = None
head_range = range(max_head_index - 1, -1, -1) if search_dir else range(max_head_index)
for head_counter, head in enumerate(head_range):
if component_result[cycle_set][head] == -1:
continue
component_index = component_result[cycle_set][head]
if way_point is None or head_counter % point2head_range == 0:
index = 0
if way_point is None:
if search_dir:
index = np.argmax(mount_point_pos[component_index], axis=0)[0]
else:
index = np.argmin(mount_point_pos[component_index], axis=0)[0]
else:
for next_head in head_range:
component_index = component_result[cycle_set][next_head]
if assigned_placement[next_head] == -1 and component_index != -1:
num_points = len(mount_point_pos[component_index])
index = np.argmin(
[abs(mount_point_pos[component_index][i][0] - way_point[0]) * .1 + abs(
mount_point_pos[component_index][i][1] - way_point[1]) for i in
range(num_points)])
head = next_head
break
# index = np.argmax(mount_point_pos[component_index], axis=0)[0]
assigned_placement[head] = mount_point_index[component_index][index]
# record the waypoint
way_point = mount_point_pos[component_index][index]
way_point[0] += (max_head_index - head - 1) * head_interval if search_dir else -head * head_interval
mount_point_index[component_index].pop(index)
mount_point_pos[component_index].pop(index)
else:
head_index, point_index = -1, -1
min_cheby_distance, min_euler_distance = float('inf'), float('inf')
for next_head in range(max_head_index):
if assigned_placement[next_head] != -1 or component_result[cycle_set][next_head] == -1:
continue
next_comp_index = component_result[cycle_set][next_head]
for counter in range(len(mount_point_pos[next_comp_index])):
if search_dir:
delta_x = abs(mount_point_pos[next_comp_index][counter][0] - way_point[0]
+ (max_head_index - next_head - 1) * head_interval)
else:
delta_x = abs(mount_point_pos[next_comp_index][counter][0] - way_point[0]
- next_head * head_interval)
delta_y = abs(mount_point_pos[next_comp_index][counter][1] - way_point[1])
euler_distance = pow(axis_moving_time(delta_x, 0), 2) + pow(axis_moving_time(delta_y, 1), 2)
cheby_distance = max(axis_moving_time(delta_x, 0),
axis_moving_time(delta_y, 1)) + 5e-2 * euler_distance
if cheby_distance < min_cheby_distance or (abs(cheby_distance - min_cheby_distance) < 1e-9
and euler_distance < min_euler_distance):
# if euler_distance < min_euler_distance:
min_cheby_distance, min_euler_distance = cheby_distance, euler_distance
head_index, point_index = next_head, counter
component_index = component_result[cycle_set][head_index]
assert (0 <= head_index < max_head_index)
assigned_placement[head_index] = mount_point_index[component_index][point_index]
way_point = mount_point_pos[component_index][point_index]
way_point[0] += (max_head_index - head_index - 1) * head_interval if search_dir \
else -head_index * head_interval
mount_point_index[component_index].pop(point_index)
mount_point_pos[component_index].pop(point_index)
placement_result.append(assigned_placement) # component type placed by each head
head_sequence_result.append(
dynamic_programming_cycle_path(pcb_data, assigned_placement, feeder_slot_result[cycle_set]))
return placement_result, head_sequence_result
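Both distance criteria above are expressed in axis travel time rather than geometric distance: an XY gantry finishes a move when its slower axis finishes, so candidate points are ranked by a Chebyshev-type metric in the time domain, with a small squared term added only to break ties. A self-contained illustration under a simplifying constant-velocity assumption (the default velocities reuse x_max_velocity and y_max_velocity from above; the function itself is hypothetical):

def move_score(dx, dy, vx=1.4, vy=1.2, tie_break=5e-2):
    tx, ty = abs(dx) / vx, abs(dy) / vy   # per-axis travel times under constant velocity
    return max(tx, ty) + tie_break * (tx ** 2 + ty ** 2)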
@timer_wrapper
def beam_search_for_route_generation(component_data, pcb_data, component_result, cycle_result, feeder_slot_result):
beam_width = 4 # beam width
base_points = [float('inf'), float('inf')]
mount_point_index = [[] for _ in range(len(component_data))]
mount_point_pos = [[] for _ in range(len(component_data))]
for i in range(len(pcb_data)):
part = pcb_data.loc[i]['part']
component_index = component_data[component_data['part'] == part].index.tolist()[0]
# record the placement point indices and the corresponding coordinates
mount_point_index[component_index].append(i)
mount_point_pos[component_index].append([pcb_data.loc[i]['x'], pcb_data.loc[i]['y']])
# record the bottom-left corner coordinates
if mount_point_pos[component_index][-1][0] < base_points[0]:
base_points[0] = mount_point_pos[component_index][-1][0]
if mount_point_pos[component_index][-1][1] < base_points[1]:
base_points[1] = mount_point_pos[component_index][-1][1]
beam_placement_sequence, beam_head_sequence = [], []
beam_mount_point_index, beam_mount_point_pos = [], []
for beam_counter in range(beam_width):
beam_mount_point_index.append(copy.deepcopy(mount_point_index))
beam_mount_point_pos.append(copy.deepcopy(mount_point_pos))
beam_placement_sequence.append([])
beam_head_sequence.append([])
beam_distance = [0 for _ in range(beam_width)] # accumulated path length of each beam in the current search
def argpartition(list, kth):
if kth < len(list):
return np.argpartition(list, kth)
else:
index, indexes = 0, []
while len(indexes) < kth:
indexes.append(index)
index += 1
if index >= len(list):
index = 0
return np.array(indexes)
with tqdm(total=100) as pbar:
search_dir = 0
pbar.set_description('route schedule')
for cycle_set in range(len(component_result)):
floor_cycle, ceil_cycle = sum(cycle_result[:cycle_set]), sum(cycle_result[:(cycle_set + 1)])
for cycle in range(floor_cycle, ceil_cycle):
search_dir = 1 - search_dir
beam_way_point = None
for beam_counter in range(beam_width):
beam_placement_sequence[beam_counter].append([-1 for _ in range(max_head_index)])
head_range = range(max_head_index - 1, -1, -1) if search_dir else range(max_head_index)
for head in head_range:
component_index = component_result[cycle_set][head]
if component_index == -1:
continue
if beam_way_point is None:
# for the first placement point, select the beam_width points closest to the reference point
beam_way_point = [[0, 0]] * beam_width
for beam_counter in range(beam_width):
if search_dir:
index = np.argmax(beam_mount_point_pos[beam_counter][component_index], axis=0)[0]
else:
index = np.argmin(beam_mount_point_pos[beam_counter][component_index], axis=0)[0]
beam_placement_sequence[beam_counter][-1][head] = beam_mount_point_index[beam_counter][component_index][index]
beam_way_point[beam_counter] = beam_mount_point_pos[beam_counter][component_index][index]
beam_way_point[beam_counter][0] += (max_head_index - head - 1) * head_interval if \
search_dir else -head * head_interval
beam_mount_point_index[beam_counter][component_index].pop(index)
beam_mount_point_pos[beam_counter][component_index].pop(index)
else:
# subsequent placement points
search_beam_distance = []
search_beam_index = [0] * (beam_width ** 2)
for beam_counter in range(beam_width ** 2):
search_beam_distance.append(beam_distance[beam_counter // beam_width])
for beam_counter in range(beam_width):
# the beam_width nearest points for beam beam_counter + 1
num_points = len(beam_mount_point_pos[beam_counter][component_index])
dist = []
for i in range(num_points):
if search_dir:
delta_x = axis_moving_time(
beam_mount_point_pos[beam_counter][component_index][i][0] -
beam_way_point[beam_counter][0] + (max_head_index - head - 1) * head_interval,
0)
else:
delta_x = axis_moving_time(
beam_mount_point_pos[beam_counter][component_index][i][0] -
beam_way_point[beam_counter][0] - head * head_interval, 0)
delta_y = axis_moving_time(beam_mount_point_pos[beam_counter][component_index][i][1] -
beam_way_point[beam_counter][1], 1)
dist.append(max(delta_x, delta_y))
indexes = argpartition(dist, kth=beam_width)[:beam_width]
# record intermediate information
for i, index in enumerate(indexes):
search_beam_distance[i + beam_counter * beam_width] += dist[index]
search_beam_index[i + beam_counter * beam_width] = index
indexes = np.argsort(search_beam_distance)
beam_mount_point_pos_cpy = copy.deepcopy(beam_mount_point_pos)
beam_mount_point_index_cpy = copy.deepcopy(beam_mount_point_index)
beam_placement_sequence_cpy = copy.deepcopy(beam_placement_sequence)
beam_head_sequence_cpy = copy.deepcopy(beam_head_sequence)
beam_counter = 0
assigned_placement = []
for i, index in enumerate(indexes):
# copy the original beam data
beam_mount_point_pos[beam_counter] = copy.deepcopy(beam_mount_point_pos_cpy[index // beam_width])
beam_mount_point_index[beam_counter] = copy.deepcopy(beam_mount_point_index_cpy[index // beam_width])
beam_placement_sequence[beam_counter] = copy.deepcopy(beam_placement_sequence_cpy[index // beam_width])
beam_head_sequence[beam_counter] = copy.deepcopy(beam_head_sequence_cpy[index // beam_width])
# update the most recently scanned placement point of each beam
component_index = component_result[cycle_set][head]
beam_placement_sequence[beam_counter][-1][head] = \
beam_mount_point_index[beam_counter][component_index][search_beam_index[index]]
if beam_placement_sequence[beam_counter][
-1] in assigned_placement and beam_width - beam_counter < len(indexes) - i:
continue
assigned_placement.append(beam_placement_sequence[beam_counter][-1])
# update the reference point
beam_way_point[beam_counter] = beam_mount_point_pos[beam_counter][component_index][search_beam_index[index]]
beam_way_point[beam_counter][0] += (max_head_index - head - 1) * head_interval if \
search_dir else -head * head_interval
# update each beam's placement path length and remove the placement points already assigned to each beam
beam_distance[beam_counter] = search_beam_distance[index]
beam_mount_point_pos[beam_counter][component_index].pop(search_beam_index[index])
beam_mount_point_index[beam_counter][component_index].pop(search_beam_index[index])
beam_counter += 1
if beam_counter >= beam_width:
break
assert(beam_counter >= beam_width)
# update the head placement sequence
for beam_counter in range(beam_width):
beam_head_sequence[beam_counter].append(
dynamic_programming_cycle_path(pcb_data, beam_placement_sequence[beam_counter][-1],
feeder_slot_result[cycle_set]))
pbar.update(1 / sum(cycle_result) * 100)
index = np.argmin(beam_distance)
return beam_placement_sequence[index], beam_head_sequence[index]
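The routine above carries beam_width parallel copies of the placement state; at every head assignment it expands each beam by its beam_width nearest candidate points and then keeps only the overall best beam_width continuations. A compact, generic sketch of that beam-search skeleton (expand and score are hypothetical callables, not repository functions):

import heapq

def beam_search(start, expand, score, beam_width=4, steps=10):
    # keep the beam_width lowest-cost partial solutions after every expansion step
    beam = [start]
    for _ in range(steps):
        candidates = [nxt for state in beam for nxt in expand(state)]
        if not candidates:
            break
        beam = heapq.nsmallest(beam_width, candidates, key=score)
    return min(beam, key=score)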
def optimal_nozzle_assignment(component_data, pcb_data):
# === Nozzle Assignment ===
# number of points for nozzle & number of heads for nozzle
@@ -882,18 +616,18 @@ def swap_mutation(parent):
return parent
def constraint_swap_mutation(component_points, individual):
def constraint_swap_mutation(component_points, individual, machine_number):
offspring = individual.copy()
idx, component_index = 0, random.randint(0, len(component_points) - 1)
for _, points in component_points:
for points in component_points.values():
if component_index == 0:
while True:
index1, index2 = random.sample(range(points + max_machine_index - 2), 2)
index1, index2 = random.sample(range(points + machine_number - 1), 2)
if offspring[idx + index1] != offspring[idx + index2]:
break
clip = offspring[idx: idx + points + max_machine_index - 1].copy()
clip = offspring[idx: idx + points + machine_number - 1].copy()
avl_machine = 0
for idx_, gene in enumerate(clip):
if gene == 0 and (idx_ == 0 or clip[idx_ - 1] != 0):
@@ -911,7 +645,7 @@ def constraint_swap_mutation(component_points, individual):
break
component_index -= 1
idx += (points + max_machine_index - 1)
idx += (points + machine_number - 1)
return offspring
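constraint_swap_mutation operates on a flat chromosome in which every component type owns a segment of (points + machine_number - 1) genes; the machine_number - 1 zero genes act as dividers and the genes between two dividers belong to consecutive machines, so swapping two unequal genes moves points between machines without changing the divider count. The exact gene semantics are an assumption inferred from this mutation code; a hypothetical decoding sketch, assuming each non-zero gene stands for one placement point:

def decode_segment(segment, machine_number):
    counts, machine = [0] * machine_number, 0
    for gene in segment:
        if gene == 0:
            machine += 1            # divider: switch to the next machine
        else:
            counts[machine] += 1    # one placement point for the current machine
    return counts

# e.g. decode_segment([1, 1, 0, 1, 0], machine_number=3) -> [2, 1, 0]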
@@ -923,6 +657,7 @@ def random_selective(data, possibility): # select a random index according to the given probabilities
possibility = [p / sum_val for p in possibility]
random_val = random.random()
idx = 0
for idx, val in enumerate(possibility):
random_val -= val
if random_val <= 0:
@@ -958,3 +693,186 @@ def get_top_k_value(pop_val, k: int, reverse=True):
res.append(j)
break
return res
def get_line_config_number(machine_number, component_number):
div_counter = 0
div_set = set()
for div1 in range(component_number - 2):
for div2 in range(div1 + 1, component_number - 1):
machine_div = [div1 + 1, div2 - div1, component_number - div2 - 1]
machine_div.sort()
div_str = "".join(str(s) + '|' for s in machine_div)
if div_str in div_set:
continue
div_set.add(div_str)
assign_component_counter = defaultdict(int)
for div in machine_div:
assign_component_counter[div] += 1
case_div_counter, case_comp_number = 1, component_number
for idx in range(machine_number - 1):
div = 1
while machine_div[idx]:
div *= (case_comp_number / machine_div[idx])
case_comp_number -= 1
machine_div[idx] -= 1
case_div_counter *= div
for key, val in assign_component_counter.items():
div = 1
while val:
div *= val
val -= 1
case_div_counter /= div
div_counter += case_div_counter
return div_counter
def convert_line_assigment(pcb_data, component_data, assignment_result):
machine_number = len(assignment_result)
placement_points = []
partial_pcb_data, partial_component_data = defaultdict(pd.DataFrame), defaultdict(pd.DataFrame)
for machine_index in range(machine_number):
if pcb_data is not None:
partial_pcb_data[machine_index] = pd.DataFrame(columns=pcb_data.columns)
partial_component_data[machine_index] = component_data.copy(deep=True)
placement_points.append(sum(assignment_result[machine_index]))
if pcb_data is not None:
assert sum(placement_points) == len(pcb_data)
# === averagely assign available feeder ===
for part_index, data in component_data.iterrows():
feeder_limit = data.fdn
feeder_points = [assignment_result[machine_index][part_index] for machine_index in range(machine_number)]
for machine_index in range(machine_number):
if pcb_data is None:
partial_component_data[machine_index].loc[part_index, 'points'] = assignment_result[machine_index][
part_index]
else:
partial_component_data[machine_index].loc[part_index, 'points'] = 0
for machine_index in range(machine_number):
if feeder_points[machine_index] == 0:
continue
partial_component_data[machine_index].loc[part_index].fdn = 1
feeder_limit -= 1
while feeder_limit:
assign_machine = None
for machine_index in range(machine_number):
if feeder_limit <= 0:
break
if feeder_points[machine_index] == 0:
continue
if assign_machine is None or feeder_points[machine_index] / \
partial_component_data[machine_index].loc[part_index].fdn > feeder_points[
assign_machine] / partial_component_data[assign_machine].loc[part_index].fdn:
assign_machine = machine_index
assert assign_machine is not None
partial_component_data[assign_machine].loc[part_index, 'fdn'] += 1
feeder_limit -= 1
for machine_index in range(machine_number):
if feeder_points[machine_index] > 0:
assert partial_component_data[machine_index].loc[part_index].fdn > 0
# === assign placements ===
if pcb_data is not None:
part2idx = defaultdict(int)
for idx, data in component_data.iterrows():
part2idx[data.part] = idx
machine_average_pos = [[0, 0] for _ in range(machine_number)]
machine_step_counter = [0 for _ in range(machine_number)]
part_pcb_data = defaultdict(list)
for _, data in pcb_data.iterrows():
part_pcb_data[part2idx[data.part]].append(data)
multiple_component_index = []
for part_index in range(len(component_data)):
machine_assign_set = []
for machine_index in range(machine_number):
if assignment_result[machine_index][part_index]:
machine_assign_set.append(machine_index)
if len(machine_assign_set) == 1:
for data in part_pcb_data[part_index]:
machine_index = machine_assign_set[0]
machine_average_pos[machine_index][0] += data.x
machine_average_pos[machine_index][1] += data.y
machine_step_counter[machine_index] += 1
partial_component_data[machine_index].loc[part_index, 'points'] += 1
partial_pcb_data[machine_index] = pd.concat([partial_pcb_data[machine_index], pd.DataFrame(data).T])
elif len(machine_assign_set) > 1:
multiple_component_index.append(part_index)
for machine_index in range(machine_number):
if machine_step_counter[machine_index] == 0:
continue
machine_average_pos[machine_index][0] /= machine_step_counter[machine_index]
machine_average_pos[machine_index][1] /= machine_step_counter[machine_index]
for part_index in multiple_component_index:
for data in part_pcb_data[part_index]:
idx = -1
min_dist = None
for machine_index in range(machine_number):
if partial_component_data[machine_index].loc[part_index, 'points'] >= \
assignment_result[machine_index][part_index]:
continue
dist = (data.x - machine_average_pos[machine_index][0]) ** 2 + (
data.y - machine_average_pos[machine_index][1]) ** 2
if min_dist is None or dist < min_dist:
min_dist, idx = dist, machine_index
assert idx >= 0
machine_step_counter[idx] += 1
machine_average_pos[idx][0] = (1 - 1 / machine_step_counter[idx]) * machine_average_pos[idx][0] \
+ data.x / machine_step_counter[idx]
machine_average_pos[idx][1] = (1 - 1 / machine_step_counter[idx]) * machine_average_pos[idx][1] \
+ data.y / machine_step_counter[idx]
partial_component_data[idx].loc[part_index, 'points'] += 1
partial_pcb_data[idx] = pd.concat([partial_pcb_data[idx], pd.DataFrame(data).T])
for machine_index in range(machine_number):
partial_component_data[machine_index] = partial_component_data[machine_index][
partial_component_data[machine_index]['points'] != 0].reset_index(drop=True)
return partial_pcb_data, partial_component_data
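The "averagely assign available feeder" block above first guarantees one feeder (fdn = 1) to every machine that receives placement points for a part, then hands the remaining feeders out one at a time to the machine with the highest points-per-feeder ratio. A minimal sketch of that rule on plain lists (hypothetical helper; it assumes the feeder limit is at least the number of machines with points):

def split_feeders(feeder_limit, machine_points):
    fdn = [1 if p > 0 else 0 for p in machine_points]
    remaining = feeder_limit - sum(fdn)
    while remaining > 0:
        # give the next feeder to the machine with the highest points-per-feeder ratio
        idx = max((m for m, p in enumerate(machine_points) if p > 0),
                  key=lambda m: machine_points[m] / fdn[m])
        fdn[idx] += 1
        remaining -= 1
    return fdn

# e.g. split_feeders(4, [120, 30, 0]) -> [3, 1, 0]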
def random_division(num, div):
assert num >= div
res = [1 for _ in range(div)]
while sum(res) < num:
pos = random.randint(0, div - 1)
val = random.randint(1, num - sum(res))
res[pos] += val
return res
def list_range(start, end=None):
return list(range(start)) if end is None else list(range(start, end))
def kth_indices_partition(num, kth):
if len(num) > kth:
return np.argpartition(num, kth)
else:
return np.array(range(len(num)))


@@ -1,720 +0,0 @@
from base_optimizer.optimizer_common import *
@timer_wrapper
def feeder_allocate(component_data, pcb_data, feeder_data, figure=False):
feeder_points, feeder_division_points = defaultdict(int), defaultdict(int) # placement points per feeder
mount_center_pos = defaultdict(int)
feeder_limit, feeder_arrange = defaultdict(int), defaultdict(int)
part_nozzle = defaultdict(str)
feeder_base = [-2] * max_slot_index # components installed on the feeder base (-2: unassigned, -1: slot occupied)
feeder_base_points = [0] * max_slot_index # remaining placement points on the feeder base
for data in pcb_data.iterrows():
pos, part = data[1]['x'] + stopper_pos[0], data[1]['part']
part_index = component_data[component_data['part'] == part].index.tolist()[0]
if part not in component_data:
feeder_limit[part_index] = component_data.loc[part_index]['feeder-limit']
feeder_arrange[part_index] = 0
feeder_points[part_index] += 1
mount_center_pos[part_index] += ((pos - mount_center_pos[part_index]) / feeder_points[part_index])
part_nozzle[part_index] = component_data.loc[part_index]['nz']
for part_index, points in feeder_points.items():
feeder_division_points[part_index] = max(points // feeder_limit[part_index], 1)
nozzle_component, nozzle_component_points = defaultdict(list), defaultdict(list)
for part, nozzle in part_nozzle.items():
for _ in range(feeder_limit[part]):
nozzle_component[nozzle].append(part)
nozzle_component_points[nozzle].append(feeder_points[part])
if feeder_data is not None:
for _, feeder in feeder_data.iterrows():
slot, part = feeder['slot'], feeder['part']
part_index = component_data[component_data['part'] == part].index.tolist()[0]
# assigned position on the feeder base and the corresponding number of placement points
feeder_base[slot], feeder_base_points[slot] = part_index, feeder_division_points[part_index]
feeder_type = component_data.loc[part_index]['fdr']
extra_width = feeder_width[feeder_type][0] + feeder_width[feeder_type][1] - slot_interval
while extra_width > 0:
slot += 1
feeder_base[slot] = -1
extra_width -= slot_interval
feeder_limit[part_index] -= 1
feeder_arrange[part_index] += 1
if feeder_limit[part_index] < 0:
info = 'the number of arranged feeder for [' + part + '] exceeds the quantity limit'
raise ValueError(info)
for nozzle, components in nozzle_component.items():
if part_index in components:
index_ = components.index(part_index)
nozzle_component[nozzle].pop(index_)
nozzle_component_points[nozzle].pop(index_)
break
nozzle_assigned_counter = optimal_nozzle_assignment(component_data, pcb_data)
head_assign_indexes = list(range(max_head_index))
nozzle_pattern, optimal_nozzle_pattern, optimal_nozzle_points = [], None, 0
# nozzle_pattern = ['CN220', 'CN065','CN065','CN065','CN065','CN220']
# 先排序
nozzle_pattern_list = []
for nozzle, counter in nozzle_assigned_counter.items():
nozzle_pattern_list.append([nozzle, sum(nozzle_component_points[nozzle]) // counter])
nozzle_pattern_list.sort(key=lambda x: x[1], reverse=True)
# 后确定吸嘴分配模式
head_index = [3, 2, 4, 1, 5, 0]
nozzle_pattern = [0] * max_head_index
for nozzle, _ in nozzle_pattern_list:
counter = nozzle_assigned_counter[nozzle]
while counter:
nozzle_pattern[head_index[0]] = nozzle
counter -= 1
head_index.pop(0)
while True:
best_assign, best_assign_points = [], []
best_assign_slot, best_assign_value = -1, -np.Inf
best_nozzle_component, best_nozzle_component_points = None, None
for slot in range(1, max_slot_index // 2 - (max_head_index - 1) * interval_ratio + 1):
nozzle_assigned_counter_cpy = copy.deepcopy(nozzle_assigned_counter)
feeder_assign, feeder_assign_points = [], []
tmp_feeder_limit, tmp_feeder_points = feeder_limit.copy(), feeder_points.copy()
tmp_nozzle_component, tmp_nozzle_component_points = copy.deepcopy(nozzle_component), copy.deepcopy(
nozzle_component_points)
# record the installed feeder component types found by the scan
for head in range(max_head_index):
feeder_assign.append(feeder_base[slot + head * interval_ratio])
if (scan_part := feeder_assign[-1]) >= 0:
nozzle = part_nozzle[scan_part]
feeder_assign_points.append(feeder_base_points[slot + head * interval_ratio])
if feeder_assign_points[-1] <= 0:
feeder_assign[-1], feeder_assign_points[-1] = -1, 0
elif nozzle in nozzle_assigned_counter_cpy.keys():
nozzle_assigned_counter_cpy[nozzle] -= 1
if nozzle_assigned_counter_cpy[nozzle] == 0:
nozzle_assigned_counter_cpy.pop(nozzle)
else:
feeder_assign_points.append(0)
if -2 not in feeder_assign: # no free slot available
if sum(feeder_assign_points) > optimal_nozzle_points:
optimal_nozzle_points = sum(feeder_assign_points)
optimal_nozzle_pattern = [''] * max_head_index
for head in range(max_head_index):
optimal_nozzle_pattern[head] = part_nozzle[feeder_assign[head]]
continue
assign_part_stack, assign_part_stack_points = [], []
for idx in head_assign_indexes:
if feeder_assign[idx] != -2:
continue
if len(nozzle_pattern) == 0: # the nozzle pattern is empty: assign components first, then derive the nozzle pattern from the assignment
nozzle_assign = ''
max_points, max_nozzle_points = 0, 0
for nozzle in nozzle_assigned_counter_cpy.keys():
if len(tmp_nozzle_component[nozzle]) == 0:
continue
part = max(tmp_nozzle_component[nozzle],
key=lambda x: tmp_feeder_points[x] / tmp_feeder_limit[x] if
tmp_feeder_points[x] != 0 else 0)
index_ = tmp_nozzle_component[nozzle].index(part)
if max_points < tmp_nozzle_component_points[nozzle][index_]:
max_points, nozzle_assign = tmp_nozzle_component_points[nozzle][index_], nozzle
else:
# the nozzle pattern is not empty: assign components according to the corresponding nozzle type
nozzle_assign = nozzle_pattern[idx]
if len(tmp_nozzle_component[nozzle_assign]) == 0:
# no component available for this head's nozzle type: push the planned component onto the stack
part = max(tmp_feeder_points.keys(),
key=lambda x: tmp_feeder_points[x] / tmp_feeder_limit[x] if tmp_feeder_limit[
x] != 0 else 0)
for nozzle, component_list in tmp_nozzle_component.items():
if part in component_list:
nozzle_assign = nozzle
assign_part_stack.append(part)
assign_part_stack_points.append(feeder_division_points[part])
break
else:
# a component of the matching nozzle type is available: assign it directly
index_ = tmp_nozzle_component[nozzle_assign].index(max(tmp_nozzle_component[nozzle_assign],
key=lambda x: tmp_feeder_points[x] /
tmp_feeder_limit[x] if
tmp_feeder_limit[x] != 0 else 0))
part = tmp_nozzle_component[nozzle_assign][index_]
feeder_type = component_data.loc[part]['fdr']
extra_width, extra_slot = feeder_width[feeder_type][0] + feeder_width[feeder_type][1] - slot_interval, 1
slot_overlap = False
while extra_width > 0:
slot_ = slot + idx * interval_ratio + extra_slot
if feeder_base[slot_] != -2 or slot_ > max_slot_index // 2:
slot_overlap = True
break
extra_width -= slot_interval
extra_slot += 1
# enough feeders are available and there is no slot conflict with installed feeders
if tmp_feeder_limit[part] > 0 and not slot_overlap:
feeder_assign[idx], feeder_assign_points[idx] = part, feeder_division_points[part]
extra_width, extra_head = feeder_width[feeder_type][0] + feeder_width[feeder_type][
1] - head_interval, 1
while extra_width > 0 and idx + extra_head < max_head_index:
feeder_assign[idx + extra_head] = -1
extra_head += 1
extra_width -= head_interval
else:
part = -1 # component with a position conflict: do not consume an available feeder
# update the nozzle counts of the nozzle matching pattern
if nozzle_assign in nozzle_assigned_counter_cpy.keys():
nozzle_assigned_counter_cpy[nozzle_assign] -= 1
if nozzle_assigned_counter_cpy[nozzle_assign] == 0:
nozzle_assigned_counter_cpy.pop(nozzle_assign)
if part >= 0 and tmp_feeder_limit[part] == 0:
continue
if part in tmp_nozzle_component[nozzle_assign]:
part_index = tmp_nozzle_component[nozzle_assign].index(part)
tmp_nozzle_component[nozzle_assign].pop(part_index)
tmp_nozzle_component_points[nozzle_assign].pop(part_index)
tmp_feeder_limit[part] -= 1
tmp_feeder_points[part] -= feeder_division_points[part]
# pop the component stack: first serve heads whose nozzle type matches
if nozzle_pattern:
for head, feeder in enumerate(feeder_assign):
if feeder != -2:
continue
for idx, part in enumerate(assign_part_stack):
feeder_type = component_data.loc[part]['fdr']
extra_width, extra_slot = feeder_width[feeder_type][0] + feeder_width[feeder_type][
1] - slot_interval, 1
slot_overlap = False
while extra_width > 0:
slot_ = slot + head * interval_ratio + extra_slot
if feeder_base[slot_] != -2 or slot_ > max_slot_index // 2:
slot_overlap = True
break
extra_width -= slot_interval
extra_slot += 1
if component_data.loc[part]['nz'] == nozzle_pattern[head] and not slot_overlap:
feeder_assign[head], feeder_assign_points[head] = assign_part_stack[idx], \
assign_part_stack_points[idx]
assign_part_stack.pop(idx)
assign_part_stack_points.pop(idx)
break
# then assign the remaining unassigned components left on the stack
for head in head_assign_indexes:
if feeder_assign[head] != -2 or len(assign_part_stack) == 0:
continue
part, points = assign_part_stack[0], assign_part_stack_points[0]
feeder_type = component_data.loc[part]['fdr']
extra_width, extra_slot = feeder_width[feeder_type][0] + feeder_width[feeder_type][1] - slot_interval, 1
slot_overlap = False
while extra_width > 0:
slot_ = slot + head * interval_ratio + extra_slot
if feeder_base[slot_] != -2 or slot_ > max_slot_index // 2:
slot_overlap = True
break
extra_width -= slot_interval
extra_slot += 1
if not slot_overlap:
feeder_assign[head], feeder_assign_points[head] = part, points
extra_width, extra_head = feeder_width[feeder_type][0] + feeder_width[feeder_type][
1] - head_interval, 1
while extra_width > 0 and head + extra_head < max_head_index:
feeder_assign[head + extra_head] = -1
extra_head += 1
extra_width -= head_interval
else:
# 返还由于机械限位无法分配的,压入元件堆栈中的元素
nozzle = component_data.loc[part]['nz']
tmp_nozzle_component[nozzle].insert(0, part)
tmp_nozzle_component_points[nozzle].insert(0, points)
assign_part_stack.pop(0)
assign_part_stack_points.pop(0)
# 仍然存在由于机械限位,无法进行分配的在堆栈中的元件
while assign_part_stack:
part, points = assign_part_stack[0], assign_part_stack_points[0]
nozzle = component_data.loc[part]['nz']
tmp_nozzle_component[nozzle].insert(0, part)
tmp_nozzle_component_points[nozzle].insert(0, points)
assign_part_stack.pop(0)
assign_part_stack_points.pop(0)
nozzle_change_counter, average_slot = 0, []
for head, feeder_ in enumerate(feeder_assign):
if feeder_ < 0:
continue
average_slot.append(
(mount_center_pos[feeder_] - slotf1_pos[0]) / slot_interval + 1 - head * interval_ratio)
if nozzle_pattern and component_data.loc[feeder_]['nz'] != nozzle_pattern[head]:
nozzle_change_counter += 1
if len(average_slot) == 0:
continue
average_slot = sum(average_slot) / len(average_slot)
assign_value = 0
feeder_assign_points_cpy = feeder_assign_points.copy()
while True:
points_filter = list(filter(lambda x: x > 0, feeder_assign_points_cpy))
if not points_filter:
break
assign_value += e_gang_pick * min(points_filter) * (len(points_filter) - 1)
for head, _ in enumerate(feeder_assign_points_cpy):
if feeder_assign_points_cpy[head] == 0:
continue
feeder_assign_points_cpy[head] -= min(points_filter)
assign_value -= 1e2 * e_nz_change * nozzle_change_counter + 1e-5 * abs(slot - average_slot)
if assign_value >= best_assign_value and sum(feeder_assign_points) != 0:
best_assign_value = assign_value
best_assign = feeder_assign.copy()
best_assign_points = feeder_assign_points.copy()
best_assign_slot = slot
best_nozzle_component, best_nozzle_component_points = tmp_nozzle_component, tmp_nozzle_component_points
if not best_assign_points:
break
if len(nozzle_pattern) == 0:
nozzle_pattern = [''] * max_head_index
for idx, part in enumerate(best_assign):
if part < 0:
continue
# 新安装的供料器
if feeder_base[best_assign_slot + idx * interval_ratio] != part:
# 除去分配给最大化同时拾取周期的项,保留结余项
feeder_base_points[best_assign_slot + idx * interval_ratio] += (
feeder_division_points[part] - min(filter(lambda x: x > 0, best_assign_points)))
feeder_points[part] -= feeder_division_points[part]
feeder_limit[part] -= 1
feeder_arrange[part] += 1
if feeder_limit[part] == 0:
feeder_division_points[part] = 0
for nozzle, components in nozzle_component.items():
if part in components:
index_ = components.index(part)
nozzle_component[nozzle].pop(index_)
nozzle_component_points[nozzle].pop(index_)
break
feeder_division_points[part] = 0
else:
# 已有的供料器
feeder_base_points[best_assign_slot + idx * interval_ratio] -= min(
filter(lambda x: x > 0, best_assign_points))
# 更新供料器基座信息
feeder_base[best_assign_slot + idx * interval_ratio] = part
feeder_type, extra_slot = component_data.loc[part]['fdr'], 0
extra_width = feeder_width[feeder_type][0] + feeder_width[feeder_type][1] - slot_interval
while extra_width > 0:
extra_slot += 1
if feeder_base[best_assign_slot + idx * interval_ratio + extra_slot] == -2:
feeder_base[best_assign_slot + idx * interval_ratio + extra_slot] = -1 # 标记槽位已占用
extra_width -= slot_interval
# 更新吸嘴信息
nozzle_pattern[idx] = component_data.loc[part]['nz']
# 更新头分配的先后顺序
head_assign_indexes = np.array(best_assign_points).argsort().tolist()
nozzle_component, nozzle_component_points = copy.deepcopy(best_nozzle_component), copy.deepcopy(
best_nozzle_component_points)
if sum(best_assign_points) > optimal_nozzle_points:
optimal_nozzle_points = sum(best_assign_points)
optimal_nozzle_pattern = nozzle_pattern.copy()
assert not list(filter(lambda x: x < 0, feeder_limit.values())) # 分配供料器数目在限制范围内
# if all feeders are already installed on the base, rescan the base to determine the optimal (ordered) nozzle pattern
if not optimal_nozzle_points:
feeder_base, feeder_base_points = [-2] * max_slot_index, [0] * max_slot_index
for _, feeder in feeder_data.iterrows():
slot, part = feeder['slot'], feeder['part']
part_index = component_data[component_data['part'] == part].index.tolist()[0]
# 供料器基座分配位置和对应贴装点数
feeder_base[slot], feeder_base_points[slot] = part_index, feeder_division_points[part_index]
# front base  TODO: rear base
for slot in range(max_slot_index // 2 - (max_head_index - 1) * interval_ratio):
sum_scan_points = 0
for head in range(max_head_index):
sum_scan_points += feeder_base_points[slot + head * interval_ratio]
if sum_scan_points > optimal_nozzle_points:
optimal_nozzle_pattern = ['' for _ in range(max_head_index)]
for head in range(max_head_index):
if (part := feeder_base[slot + head * interval_ratio]) == -2:
continue
optimal_nozzle_pattern[head] = part_nozzle[part]
# 更新供料器占位信息
for _, data in feeder_data.iterrows():
feeder_base[data['slot']] = -1
for slot, feeder in enumerate(feeder_base):
if feeder < 0:
continue
part = component_data.loc[feeder]['part']
feeder_data.loc[len(feeder_data.index)] = [slot, part, 0]
if figure:
# 绘制供料器位置布局
for slot in range(max_slot_index // 2):
plt.scatter(slotf1_pos[0] + slot_interval * slot, slotf1_pos[1], marker='x', s=12, color='black', alpha=0.5)
plt.text(slotf1_pos[0] + slot_interval * slot, slotf1_pos[1] - 45, slot + 1, ha='center', va='bottom',
size=8)
feeder_assign_range = []
for feeder in feeder_data.iterrows():
slot, part = feeder[1]['slot'], feeder[1]['part']
part_index = component_data[component_data['part'] == part].index.tolist()[0]
feeder_type = component_data.loc[part_index]['fdr']
width = feeder_width[feeder_type][0] + feeder_width[feeder_type][1]
start = slotf1_pos[0] + slot_interval * (slot - 1) - slot_interval / 2
end = slotf1_pos[0] + slot_interval * (slot - 1) - slot_interval / 2 + width
rec_x = [start, end, end, start]
rec_y = [slotf1_pos[1] - 40, slotf1_pos[1] - 40, slotf1_pos[1] + 10, slotf1_pos[1] + 10]
c = 'red' if feeder[1]['arg'] == 0 else 'black' # 黑色表示已分配,红色表示新分配
plt.text(slotf1_pos[0] + slot_interval * (slot - 1), slotf1_pos[1] + 12,
part + ': ' + str(feeder_points[part_index]), ha='center', size=7, rotation=90, color=c)
plt.fill(rec_x, rec_y, facecolor='yellow', alpha=0.4)
feeder_assign_range.append([start, end])
# 记录重叠区间
feeder_assign_range.sort(key=lambda x: x[0])
for i in range(1, len(feeder_assign_range)):
if feeder_assign_range[i][0] < feeder_assign_range[i - 1][1]:
start, end = feeder_assign_range[i][0], feeder_assign_range[i - 1][1]
rec_x = [start, end, end, start]
rec_y = [slotf1_pos[1] - 40, slotf1_pos[1] - 40, slotf1_pos[1] + 10, slotf1_pos[1] + 10]
plt.fill(rec_x, rec_y, facecolor='red')
plt.plot([slotf1_pos[0] - slot_interval / 2, slotf1_pos[0] + slot_interval * (max_slot_index // 2 - 1 + 0.5)],
[slotf1_pos[1] + 10, slotf1_pos[1] + 10], color='black')
plt.plot([slotf1_pos[0] - slot_interval / 2, slotf1_pos[0] + slot_interval * (max_slot_index // 2 - 1 + 0.5)],
[slotf1_pos[1] - 40, slotf1_pos[1] - 40], color='black')
for counter in range(max_slot_index // 2 + 1):
pos = slotf1_pos[0] + (counter - 0.5) * slot_interval
plt.plot([pos, pos], [slotf1_pos[1] + 10, slotf1_pos[1] - 40], color='black', linewidth=1)
plt.ylim(-10, 100)
plt.show()
return optimal_nozzle_pattern
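A recurring detail in feeder_allocate above is the handling of feeders wider than one slot pitch: once a part is placed in a slot, the neighbouring slots covered by the feeder's physical width are marked as occupied (-1) so that no other feeder can be assigned there. A minimal sketch of that rule (slot_pitch stands in for the repository's slot_interval constant, whose value is not shown in this diff):

def occupy_extra_slots(feeder_base, slot, feeder_width_total, slot_pitch):
    # mark the additional slots covered by a wide feeder as occupied (-1)
    extra_width = feeder_width_total - slot_pitch
    while extra_width > 0:
        slot += 1
        feeder_base[slot] = -1
        extra_width -= slot_pitch

# e.g. assuming a 15 mm slot pitch, a 7.00 + 29.00 mm SM24 feeder placed in slot 3
# additionally blocks slots 4 and 5:
# base = [-2] * 10; occupy_extra_slots(base, 3, 36.0, 15.0); base[4] == base[5] == -1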
@timer_wrapper
def feeder_base_scan(component_data, pcb_data, feeder_data, nozzle_pattern):
feeder_assign_check = set()
for feeder in feeder_data.iterrows():
feeder_assign_check.add(feeder[1]['part'])
component_points = [0] * len(component_data)
for step in pcb_data.iterrows():
part = step[1]['part']
part_index = component_data[component_data['part'] == part].index.tolist()[0]
component_points[part_index] += 1
nozzle_type = component_data.loc[part_index]['nz']
if nozzle_type not in nozzle_limit.keys() or nozzle_limit[nozzle_type] <= 0:
info = 'there is no available nozzle [' + nozzle_type + '] for the assembly process'
raise ValueError(info)
assert len(feeder_assign_check) == len(component_points) - component_points.count(0) # 所有供料器均已分配槽位
feeder_part = [-1] * max_slot_index
for feeder in feeder_data.iterrows():
part, slot = feeder[1]['part'], feeder[1]['slot']
part_index = component_data[component_data['part'] == part].index.tolist()
if len(part_index) != 1:
print('unregistered component: ', part, ' in slot', slot)
continue
part_index = part_index[0]
feeder_part[slot] = part_index
component_result, cycle_result, feeder_slot_result = [], [], [] # optimization results: placement point indices and pick-up slots
nozzle_mode = [nozzle_pattern] # nozzle matching pattern
with tqdm(total=len(pcb_data)) as pbar:
pbar.set_description('feeder scan process')
pbar_prev = 0
value_increment_base = 0
while True:
# === per-cycle loop ===
assigned_part = [-1 for _ in range(max_head_index)] # components currently assigned to each head by the scan
assigned_cycle = [0 for _ in range(max_head_index)] # maximum assignment count of the currently scanned components
assigned_slot = [-1 for _ in range(max_head_index)] # feeder slots currently assigned by the scan
best_assigned_eval_func = -float('inf')
nozzle_insert_cycle = 0
for cycle_index, nozzle_cycle in enumerate(nozzle_mode):
scan_eval_func_list = [] # 若干次扫描得到的最优解
# nozzle_cycle 吸嘴模式下,已扫描到的最优结果
cur_scan_part = [-1 for _ in range(max_head_index)]
cur_scan_cycle = [0 for _ in range(max_head_index)]
cur_scan_slot = [-1 for _ in range(max_head_index)]
cur_nozzle_limit = copy.deepcopy(nozzle_limit)
while True:
best_scan_part, best_scan_cycle = [-1 for _ in range(max_head_index)], [-1 for _ in
range(max_head_index)]
best_scan_slot = [-1 for _ in range(max_head_index)]
best_scan_nozzle_limit = copy.deepcopy(cur_nozzle_limit)
scan_eval_func, search_break = -float('inf'), True
# scan of the front feeder base
for slot in range(1, max_slot_index // 2 - (max_head_index - 1) * interval_ratio + 1):
scan_cycle, scan_part, scan_slot = cur_scan_cycle.copy(), cur_scan_part.copy(), cur_scan_slot.copy()
scan_nozzle_limit = copy.deepcopy(cur_nozzle_limit)
# pre-scan to determine the pick-up count of each component type (look-ahead)
preview_scan_part = defaultdict(int)
for head in range(max_head_index):
part = feeder_part[slot + head * interval_ratio]
# 贴装头和拾取槽位满足对应关系
if scan_part[head] == -1 and part != -1 and component_points[part] > 0 and scan_part.count(
part) < component_points[part]:
preview_scan_part[part] += 1
component_counter = 0
for head in range(max_head_index):
part = feeder_part[slot + head * interval_ratio]
# condition 1 (matching): the placement head and the pick-up slot correspond
if scan_part[head] == -1 and part != -1 and component_points[part] > 0 and scan_part.count(
part) < component_points[part]:
# condition 2 (matching): the available-nozzle limit is not exceeded
nozzle = component_data.loc[part]['nz']
if scan_nozzle_limit[nozzle] <= 0:
continue
# condition 3 (increment): introducing a new component type does not decrease the cost function (look-ahead)
if scan_cycle.count(0) == max_head_index:
gang_pick_change = component_points[part]
else:
prev_cycle = min(filter(lambda x: x > 0, scan_cycle))
# 同时拾取数的提升
gang_pick_change = min(prev_cycle, component_points[part] // preview_scan_part[part])
# condition 4 (pick-up travel distance): nearby components are picked simultaneously to shorten the path
# reference_slot = -1
# for head_, slot_ in enumerate(scan_slot):
# if slot_ != -1:
# reference_slot = slot_ - head_ * interval_ratio
# if reference_slot != -1 and abs(reference_slot - slot) > (max_head_index - 1) * interval_ratio:
# continue
# condition 5: compare the simultaneous-pick gain against the number of nozzle changes
prev_nozzle_change = 0
if cycle_index + 1 < len(nozzle_mode):
prev_nozzle_change = 2 * (nozzle_cycle[head] != nozzle_mode[cycle_index + 1][head])
# 避免首个周期吸杆占用率低的问题
if nozzle_cycle[head] == '':
nozzle_change = 0
else:
nozzle_change = 2 * (nozzle != nozzle_cycle[head])
if cycle_index + 1 < len(nozzle_mode):
nozzle_change += 2 * (nozzle != nozzle_mode[cycle_index + 1][head])
nozzle_change -= prev_nozzle_change
val = e_gang_pick * gang_pick_change - e_nz_change * nozzle_change
if val < value_increment_base:
continue
component_counter += 1
scan_part[head] = part
scan_cycle[head] = component_points[part] // preview_scan_part[part]
scan_slot[head] = slot + head * interval_ratio
scan_nozzle_limit[nozzle] -= 1
nozzle_counter = 0 # number of nozzle changes
# previous cycle
for head, nozzle in enumerate(nozzle_cycle):
if scan_part[head] == -1:
continue
if component_data.loc[scan_part[head]]['nz'] != nozzle and nozzle != '':
nozzle_counter += 2
# 下一周期(额外增加的吸嘴更换次数)
if cycle_index + 1 < len(nozzle_mode):
for head, nozzle in enumerate(nozzle_mode[cycle_index + 1]):
if scan_part[head] == -1:
continue
prev_counter, new_counter = 0, 0
if nozzle_cycle[head] != nozzle and nozzle_cycle[head] != '' and nozzle != '':
prev_counter += 2
if component_data.loc[scan_part[head]]['nz'] != nozzle and nozzle != '':
new_counter += 2
nozzle_counter += new_counter - prev_counter
else:
for head, nozzle in enumerate(nozzle_mode[0]):
if scan_part[head] == -1:
continue
prev_counter, new_counter = 0, 0
if nozzle_cycle[head] != nozzle and nozzle_cycle[head] != '' and nozzle != '':
prev_counter += 2
if component_data.loc[scan_part[head]]['nz'] != nozzle and nozzle != '':
new_counter += 2
nozzle_counter += new_counter - prev_counter
if component_counter == 0: # 当前情形下未扫描到任何元件
continue
search_break = False
scan_part_head = defaultdict(list)
for head, part in enumerate(scan_part):
if part == -1:
continue
scan_part_head[part].append(head)
for part, heads in scan_part_head.items():
part_cycle = component_points[part] // len(heads)
for head in heads:
scan_cycle[head] = part_cycle
# evaluate the cost function after the scan and record the best scanned solution
# short-term gain
cycle = min(filter(lambda x: x > 0, scan_cycle))
gang_pick_counter, gang_pick_slot_set = 0, set()
for head, pick_slot in enumerate(scan_slot):
gang_pick_slot_set.add(pick_slot - head * interval_ratio)
eval_func_short_term = e_gang_pick * (max_head_index - scan_slot.count(-1) - len(
gang_pick_slot_set)) * cycle - e_nz_change * nozzle_counter
# long-term gain
gang_pick_slot_dict = defaultdict(list)
for head, pick_slot in enumerate(scan_slot):
if pick_slot == -1:
continue
gang_pick_slot_dict[pick_slot - head * interval_ratio].append(scan_cycle[head])
eval_func_long_term = 0
for pick_cycle in gang_pick_slot_dict.values():
while pick_cycle:
min_cycle = min(pick_cycle)
eval_func_long_term += e_gang_pick * (len(pick_cycle) - 1) * min(pick_cycle)
pick_cycle = list(map(lambda c: c - min_cycle, pick_cycle))
pick_cycle = list(filter(lambda c: c > 0, pick_cycle))
eval_func_long_term -= e_nz_change * nozzle_counter
ratio = 0.5
eval_func = (1 - ratio) * eval_func_short_term + ratio * eval_func_long_term
if eval_func >= scan_eval_func:
scan_eval_func = eval_func
best_scan_part, best_scan_cycle = scan_part.copy(), scan_cycle.copy()
best_scan_slot = scan_slot.copy()
best_scan_nozzle_limit = copy.deepcopy(scan_nozzle_limit)
if search_break:
break
scan_eval_func_list.append(scan_eval_func)
cur_scan_part = best_scan_part.copy()
cur_scan_slot = best_scan_slot.copy()
cur_scan_cycle = best_scan_cycle.copy()
cur_nozzle_limit = copy.deepcopy(best_scan_nozzle_limit)
if len(scan_eval_func_list) != 0:
if sum(scan_eval_func_list) >= best_assigned_eval_func:
best_assigned_eval_func = sum(scan_eval_func_list)
assigned_part = cur_scan_part.copy()
assigned_slot = cur_scan_slot.copy()
assigned_cycle = cur_scan_cycle.copy()
nozzle_insert_cycle = cycle_index
# remove the corresponding number of placement points from the feeder base
nonzero_cycle = [cycle for cycle in assigned_cycle if cycle > 0]
if not nonzero_cycle:
value_increment_base -= max_head_index
continue
for head, slot in enumerate(assigned_slot):
if assigned_part[head] == -1:
continue
component_points[feeder_part[slot]] -= min(nonzero_cycle)
component_result.insert(nozzle_insert_cycle, assigned_part)
cycle_result.insert(nozzle_insert_cycle, min(nonzero_cycle))
feeder_slot_result.insert(nozzle_insert_cycle, assigned_slot)
# update the nozzle matching pattern
cycle_nozzle = nozzle_mode[nozzle_insert_cycle].copy()
for head, component in enumerate(assigned_part):
if component == -1:
continue
cycle_nozzle[head] = component_data.loc[component]['nz']
nozzle_mode.insert(nozzle_insert_cycle + 1, cycle_nozzle)
pbar.update(len(pcb_data) - sum(component_points) - pbar_prev)
pbar_prev = len(pcb_data) - sum(component_points)
if sum(component_points) == 0:
break
return component_result, cycle_result, feeder_slot_result
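The "long-term gain" part of the evaluation above repeatedly peels the minimum remaining cycle count off all heads that pick from the same aligned slot, crediting e_gang_pick once for every extra head sharing that simultaneous pick. A self-contained restatement of that loop for a single pick slot (hypothetical helper mirroring the code above):

def gang_pick_value(pick_cycles, e_gang_pick=0.6):
    # pick_cycles: remaining cycle counts of the heads picking from the same aligned slot
    value, cycles = 0.0, [c for c in pick_cycles if c > 0]
    while cycles:
        m = min(cycles)
        value += e_gang_pick * (len(cycles) - 1) * m
        cycles = [c - m for c in cycles if c - m > 0]
    return value

# e.g. gang_pick_value([10, 10, 4]) == 0.6 * (2 * 4 + 1 * 6) == 8.4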


@@ -0,0 +1,69 @@
# provides the external interface
from base_optimizer.smopt_scanbased import *
from base_optimizer.smopt_celldivision import *
from base_optimizer.smopt_hybridgenetic import *
from base_optimizer.smopt_feederpriority import *
from base_optimizer.smopt_aggregation import *
from base_optimizer.smopt_twophase import *
from base_optimizer.smopt_mathmodel import *
from base_optimizer.smtopt_route import *
from base_optimizer.result_analysis import *
def base_optimizer(machine_index, pcb_data, component_data, feeder_data, params, hinter=False):
if params.machine_optimizer == 'cell-division': # genetic algorithm based on cell division
component_result, cycle_result, feeder_slot_result = optimizer_celldivision(pcb_data, component_data)
placement_result, head_sequence = place_allocate_sequence_route_generation(component_data, pcb_data,
component_result, cycle_result,
feeder_slot_result)
elif params.machine_optimizer == 'feeder-priority': # feeder-priority algorithm based on feeder-base scanning
component_result, cycle_result, feeder_slot_result = feeder_priority_assignment(component_data, pcb_data,
feeder_data)
placement_result, head_sequence = scan_based_placement_route_generation(component_data, pcb_data,
component_result, cycle_result,
feeder_slot_result)
elif params.machine_optimizer == 'hybrid-genetic': # hybrid genetic algorithm based on pick-up groups
component_result, cycle_result, feeder_slot_result, placement_result, head_sequence = optimizer_hybrid_genetic(
pcb_data, component_data, hinter=hinter)
elif params.machine_optimizer == 'aggregation': # batch-level integer programming + heuristics
component_result, cycle_result, feeder_slot_result, placement_result, head_sequence = optimizer_aggregation(
component_data, pcb_data, hinter=hinter)
elif params.machine_optimizer == 'genetic-scanning':
component_result, cycle_result, feeder_slot_result, placement_result, head_sequence = optimizer_genetic_scanning(
component_data, pcb_data, hinter=hinter)
elif params.machine_optimizer == 'mip-model':
component_result, cycle_result, feeder_slot_result, placement_result, head_sequence = optimizer_mathmodel(
component_data, pcb_data, hinter=hinter)
elif params.machine_optimizer == "two-phase":
component_result, feeder_slot_result, cycle_result = gurobi_optimizer(pcb_data, component_data, feeder_data,
initial=True, partition=False,
reduction=True, hinter=hinter)
placement_result, head_sequence = place_allocate_sequence_route_generation(component_data, pcb_data,
component_result, cycle_result,
feeder_slot_result)
else:
raise ValueError('machine optimizer method ' + params.machine_optimizer + ' does not exist')
# print('----- Placement machine ' + str(machine_index) + ' ----- ')
opt_res = OptResult(component_result, cycle_result, feeder_slot_result, placement_result, head_sequence)
# estimate the placement time
info = placement_info_evaluation(component_data, pcb_data, opt_res, hinter=False)
if hinter:
optimization_assign_result(component_data, pcb_data, opt_res, nozzle_hinter=True, component_hinter=True,
feeder_hinter=True)
info.print()
print('------------------------------ ')
# placement_route_schematic(pcb_data, component_data, opt_res, 1)
if params.save:
output_optimize_result(
f'result/{params.filename[:-4]}-{params.line_optimizer}-M0{machine_index} {params.save_suffix}',
component_data, pcb_data, opt_res)
# output_optimize_result(f'{params.filename[:-4]}', component_data, pcb_data, opt_res)
return info
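base_optimizer only reads a handful of attributes from params: machine_optimizer selects the branch above, while filename, line_optimizer, save and save_suffix are used when writing the result file. A hypothetical sketch of a matching params object (the attribute values are placeholders, not defaults taken from the repository):

from types import SimpleNamespace

params = SimpleNamespace(
    machine_optimizer='feeder-priority',  # any of the branch names dispatched above
    line_optimizer='hyper-heuristic',     # only used to compose the output file name
    filename='demo_pcb.txt',
    save=False,
    save_suffix='',
)
# info = base_optimizer(0, pcb_data, component_data, feeder_data, params, hinter=True)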


@@ -1,33 +1,83 @@
from base_optimizer.optimizer_common import *
def convert_pcbdata_to_result(pcb_data, component_data):
opt_res = OptResult()
assigned_part = [-1 for _ in range(max_head_index)]
assigned_slot = [-1 for _ in range(max_head_index)]
assigned_point = [-1 for _ in range(max_head_index)]
assigned_sequence = []
point_num = len(pcb_data) # total mount points num
for point_cnt in range(point_num + 1):
cycle_start = 1 if point_cnt == point_num else pcb_data.loc[point_cnt, 'cs']
if (cycle_start and point_cnt != 0) or -1 not in assigned_part:
if len(opt_res.component_assign) != 0 and opt_res.component_assign[-1] == assigned_part:
opt_res.cycle_assign[-1] += 1
else:
opt_res.component_assign.append(assigned_part)
opt_res.feeder_slot_assign.append(assigned_slot)
opt_res.cycle_assign.append(1)
# assigned_sequence = list(reversed(assigned_sequence)) # Samsung pick-up order is reversed
opt_res.placement_assign.append(assigned_point)
opt_res.head_sequence.append(assigned_sequence)
assigned_part = [-1 for _ in range(max_head_index)]
assigned_slot = [-1 for _ in range(max_head_index)]
assigned_point = [-1 for _ in range(max_head_index)]
assigned_sequence = []
if point_cnt == point_num:
break
slot = pcb_data.loc[point_cnt, 'fdr'].split(' ')[0]
if slot == 'A':
slot, part = 0, pcb_data.loc[point_cnt].part
else:
slot, part = int(slot[1:]), pcb_data.loc[point_cnt].fdr.split(' ', 1)[1]
head = pcb_data.loc[point_cnt].hd - 1
part_index = component_data[component_data.part == part].index.tolist()[0]
assigned_part[head] = part_index
assigned_slot[head] = slot
assigned_point[head] = point_cnt
assigned_sequence.append(head)
return opt_res
# plot the component pick-up positions on the feeder base for each cycle
def pickup_cycle_schematic(feeder_slot_result, cycle_result):
def pickup_cycle_schematic(optimizer_result):
plt.rcParams['font.sans-serif'] = ['KaiTi']  # set the default font
plt.rcParams['axes.unicode_minus'] = False  # keep the minus sign '-' from rendering as a box in saved figures
# data
bar_width = .7
feeder_part = np.zeros(int(max_slot_index / 2), dtype=int)
for cycle in range(len(feeder_slot_result)):
for cycle in range(len(optimizer_result.feeder_slot_assign)):
label_str = '周期' + str(cycle + 1)
cur_feeder_part = np.zeros(int(max_slot_index / 2), dtype=int)
for slot in feeder_slot_result[cycle]:
for slot in optimizer_result.feeder_slot_assign[cycle]:
if slot > 0:
cur_feeder_part[slot] += cycle_result[cycle]
cur_feeder_part[slot] += optimizer_result.cycle_assign[cycle]
plt.bar(np.arange(max_slot_index / 2), cur_feeder_part, bar_width, edgecolor='black', bottom=feeder_part,
label=label_str)
for slot in feeder_slot_result[cycle]:
for slot in optimizer_result.feeder_slot_assign[cycle]:
if slot > 0:
feeder_part[slot] += cycle_result[cycle]
feeder_part[slot] += optimizer_result.cycle_assign[cycle]
plt.legend()
plt.show()
def placement_route_schematic(pcb_data, component_result, cycle_result, feeder_slot_result, placement_result,
head_sequence, cycle=-1):
def placement_route_schematic(pcb_data, component_data, optimizer_result, cycle=-1):
plt.figure('cycle {}'.format(cycle + 1))
pos_x, pos_y = [], []
@@ -37,8 +87,8 @@ def placement_route_schematic(pcb_data, component_result, cycle_result, feeder_s
# plt.text(pcb_data.loc[i]['x'], pcb_data.loc[i]['y'] + 0.1, '%d' % i, ha='center', va = 'bottom', size = 8)
mount_pos = []
for head in head_sequence[cycle]:
index = placement_result[cycle][head]
for head in optimizer_result.head_sequence[cycle]:
index = optimizer_result.placement_assign[cycle][head]
plt.text(pos_x[index], pos_y[index] + 0.1, 'HD%d' % (head + 1), ha='center', va='bottom', size=10)
plt.plot([pos_x[index], pos_x[index] - head * head_interval], [pos_y[index], pos_y[index]], linestyle='-.',
color='black', linewidth=1)
@@ -53,9 +103,9 @@ def placement_route_schematic(pcb_data, component_result, cycle_result, feeder_s
linewidth=1)
draw_x, draw_y = [], []
for c in range(cycle, len(placement_result)):
for c in range(cycle, len(optimizer_result.placement_assign)):
for h in range(max_head_index):
i = placement_result[c][h]
i = optimizer_result.placement_assign[c][h]
if i == -1:
continue
draw_x.append(pcb_data.loc[i]['x'] + stopper_pos[0])
@@ -72,18 +122,18 @@ def placement_route_schematic(pcb_data, component_result, cycle_result, feeder_s
feeder_part, feeder_counter = {}, {}
placement_cycle = 0
for cycle_, components in enumerate(component_result):
for cycle_, components in enumerate(optimizer_result.component_assign):
for head, component in enumerate(components):
if component == -1:
continue
placement = placement_result[placement_cycle][head]
slot = feeder_slot_result[cycle_][head]
feeder_part[slot] = pcb_data.loc[placement]['part']
placement = optimizer_result.placement_assign[placement_cycle][head]
slot = optimizer_result.feeder_slot_assign[cycle_][head]
feeder_part[slot] = pcb_data.loc[placement]['part'] + component_data.iloc[optimizer_result.component_assign[cycle_][head]].fdr
if slot not in feeder_counter.keys():
feeder_counter[slot] = 0
feeder_counter[slot] += cycle_result[cycle_]
placement_cycle += cycle_result[cycle_]
feeder_counter[slot] += optimizer_result.cycle_assign[cycle_]
placement_cycle += optimizer_result.cycle_assign[cycle_]
for slot, part in feeder_part.items():
plt.text(slotf1_pos[0] + slot_interval * (slot - 1), slotf1_pos[1] + 15,
@@ -101,9 +151,9 @@ def placement_route_schematic(pcb_data, component_result, cycle_result, feeder_s
# plot the pick-up path
pick_slot = []
cycle_group = 0
while sum(cycle_result[0: cycle_group + 1]) < cycle:
while sum(optimizer_result.cycle_assign[0: cycle_group + 1]) < cycle:
cycle_group += 1
for head, slot in enumerate(feeder_slot_result[cycle_group]):
for head, slot in enumerate(optimizer_result.feeder_slot_assign[cycle_group]):
if slot == -1:
continue
pick_slot.append(slot - head * interval_ratio)
@@ -112,10 +162,10 @@ def placement_route_schematic(pcb_data, component_result, cycle_result, feeder_s
next_cycle_group = 0
next_pick_slot = max_slot_index
while sum(cycle_result[0: next_cycle_group + 1]) < cycle + 1:
while sum(optimizer_result.cycle_assign[0: next_cycle_group + 1]) < cycle + 1:
next_cycle_group += 1
if next_cycle_group < len(feeder_slot_result):
for head, slot in enumerate(feeder_slot_result[cycle_group]):
if next_cycle_group < len(optimizer_result.feeder_slot_assign):
for head, slot in enumerate(optimizer_result.feeder_slot_assign[cycle_group]):
if slot == -1:
continue
next_pick_slot = min(next_pick_slot, slot - head * interval_ratio)
@@ -133,8 +183,7 @@ def placement_route_schematic(pcb_data, component_result, cycle_result, feeder_s
plt.show()
def save_placement_route_figure(file_name, pcb_data, component_result, cycle_result, feeder_slot_result,
placement_result, head_sequence):
def save_placement_route_figure(file_name, pcb_data, optimizer_result):
path = 'result/' + file_name[:file_name.find('.')]
if not os.path.exists(path):
os.mkdir(path)
@@ -147,12 +196,12 @@ def save_placement_route_figure(file_name, pcb_data, component_result, cycle_res
with tqdm(total=100) as pbar:
pbar.set_description('save figure')
for cycle in range(len(placement_result)):
for cycle in range(len(optimizer_result.placement_assign)):
plt.figure(cycle)
mount_pos = []
for head in head_sequence[cycle]:
index = placement_result[cycle][head]
for head in optimizer_result.head_sequence[cycle]:
index = optimizer_result.placement_assign[cycle][head]
plt.text(pos_x[index], pos_y[index] + 0.1, 'HD%d' % (head + 1), ha='center', va='bottom', size=10)
plt.plot([pos_x[index], pos_x[index] - head * head_interval], [pos_y[index], pos_y[index]],
linestyle='-.', color='black', linewidth=1)
@@ -165,9 +214,9 @@ def save_placement_route_figure(file_name, pcb_data, component_result, cycle_res
linewidth=1)
draw_x, draw_y = [], []
for c in range(cycle, len(placement_result)):
for c in range(cycle, len(optimizer_result.placement_assign)):
for h in range(max_head_index):
i = placement_result[c][h]
i = optimizer_result.placement_assign[c][h]
if i == -1:
continue
draw_x.append(pcb_data.loc[i]['x'] + stopper_pos[0])
@@ -183,18 +232,18 @@ def save_placement_route_figure(file_name, pcb_data, component_result, cycle_res
feeder_part, feeder_counter = {}, {}
placement_cycle = 0
for cycle_, components in enumerate(component_result):
for cycle_, components in enumerate(optimizer_result.component_assign):
for head, component in enumerate(components):
if component == -1:
continue
placement = placement_result[placement_cycle][head]
slot = feeder_slot_result[cycle_][head]
placement = optimizer_result.placement_assign[placement_cycle][head]
slot = optimizer_result.feeder_slot_assign[cycle_][head]
feeder_part[slot] = pcb_data.loc[placement]['part']
if slot not in feeder_counter.keys():
feeder_counter[slot] = 0
feeder_counter[slot] += cycle_result[cycle_]
placement_cycle += cycle_result[cycle_]
feeder_counter[slot] += optimizer_result.cycle_assign[cycle_]
placement_cycle += optimizer_result.cycle_assign[cycle_]
for slot, part in feeder_part.items():
plt.text(slotf1_pos[0] + slot_interval * (slot - 1), slotf1_pos[1] + 15,
@@ -214,9 +263,9 @@ def save_placement_route_figure(file_name, pcb_data, component_result, cycle_res
# plot the pick-up path
pick_slot = []
cycle_group = 0
while sum(cycle_result[0: cycle_group + 1]) < cycle:
while sum(optimizer_result.cycle_assign[0: cycle_group + 1]) < cycle:
cycle_group += 1
for head, slot in enumerate(feeder_slot_result[cycle_group]):
for head, slot in enumerate(optimizer_result.feeder_slot_assign[cycle_group]):
if slot == -1:
continue
pick_slot.append(slot - head * interval_ratio)
@@ -234,46 +283,31 @@ def save_placement_route_figure(file_name, pcb_data, component_result, cycle_res
plt.savefig(path + '/cycle_{}'.format(cycle + 1))
plt.close(cycle)
pbar.update(100 / len(placement_result))
pbar.update(100 / len(optimizer_result.placement_assign))
def output_optimize_result(file_name, method, component_data, pcb_data, feeder_data, component_result, cycle_result,
feeder_slot_result, placement_result, head_sequence):
assert len(component_result) == len(feeder_slot_result)
if feeder_data is None:
warning_info = 'file: ' + file_name + ' optimize result is not existed!'
warnings.warn(warning_info, UserWarning)
return
def output_optimize_result(file_path, component_data, pcb_data, optimizer_result):
assert len(optimizer_result.component_assign) == len(optimizer_result.feeder_slot_assign)
output_data = pcb_data.copy(deep=True)
# default ANC (automatic nozzle changer) parameters
anc_list = defaultdict(list)
anc_list['CN065'] = list(range(14, 25, 2))
anc_list['CN220'] = list(range(15, 26, 2))
anc_list['CN020'] = list(range(15, 26, 2))
anc_list['CN140'] = list(range(26, 37, 2))
anc_list['CN400'] = list(range(27, 38, 2))
# update the feeder group parameters
for cycle_set in range(len(cycle_result)):
for head, component in enumerate(component_result[cycle_set]):
if component == -1:
continue
if feeder_data[feeder_data['slot'] == feeder_slot_result[cycle_set][head]].index.empty:
part = component_data.loc[component]['part']
feeder_data.loc[len(feeder_data.index)] = [feeder_slot_result[cycle_set][head], part, 0]
feeder_data.sort_values('slot', inplace=True, ascending=True, ignore_index=True)
anc_list['CN040'] = list(range(27, 38, 2))
placement_index = []
assigned_nozzle, assigned_anc_hole = ['' for _ in range(max_head_index)], [-1 for _ in range(max_head_index)]
for cycle_set in range(len(cycle_result)):
floor_cycle, ceil_cycle = sum(cycle_result[:cycle_set]), sum(cycle_result[:(cycle_set + 1)])
for cycle_set in range(len(optimizer_result.cycle_assign)):
floor_cycle, ceil_cycle = sum(optimizer_result.cycle_assign[:cycle_set]), sum(optimizer_result.cycle_assign[:(cycle_set + 1)])
for cycle in range(floor_cycle, ceil_cycle):
cycle_start = True
cycle_nozzle = ['' for _ in range(max_head_index)]
head_indexes = [-1 for _ in range(max_head_index)]
for head in head_sequence[cycle]:
index_ = placement_result[cycle][head]
for head in optimizer_result.head_sequence[cycle]:
index_ = optimizer_result.placement_assign[cycle][head]
if index_ == -1:
continue
head_indexes[head] = index_
@@ -286,14 +320,14 @@ def output_optimize_result(file_name, method, component_data, pcb_data, feeder_d
cycle_start = False
# feeder information
slot = feeder_slot_result[cycle_set][head]
slot = optimizer_result.feeder_slot_assign[cycle_set][head]
fdr = 'F' + str(slot) if slot < max_slot_index // 2 else 'R' + str(slot - max_slot_index // 2)
feeder_index = feeder_data[feeder_data['slot'] == slot].index.tolist()[0]
output_data.loc[index_, 'fdr'] = fdr + ' ' + feeder_data.loc[feeder_index, 'part']
output_data.loc[index_, 'fdr'] = fdr + ' ' + component_data.loc[
optimizer_result.component_assign[cycle_set][head], 'part']
# ANC information
cycle_nozzle[head] = component_data.loc[component_result[cycle_set][head], 'nz']
cycle_nozzle[head] = component_data.loc[optimizer_result.component_assign[cycle_set][head], 'nz']
for head in range(max_head_index):
nozzle = cycle_nozzle[head]
@@ -322,51 +356,21 @@ def output_optimize_result(file_name, method, component_data, pcb_data, feeder_d
column_index = int(np.where(output_data.columns.values.reshape(-1) == 'part')[0][0])
output_data.insert(loc=column_index + 1, column='desc', value='')
if not os.path.exists('result/' + method):
os.makedirs('result/' + method)
file_name = method + '/' + file_name.split('.')[0] + '.xlsx'
output_data.to_excel('result/' + file_name, sheet_name='tb1', float_format='%.3f', na_rep='')
output_data.to_csv('result/' + file_path + '.txt', sep='\t', float_format='%.3f', header=False, index=False)
def component_assign_evaluate(component_data, component_result, cycle_result, feeder_slot_result) -> float:
nozzle_change_counter = 0
for head in range(max_head_index):
nozzle = ''
for cycle in range(len(component_result)):
component_index = component_result[cycle][head]
if component_index == -1:
continue
if cycle != 0 and nozzle != component_data.loc[component_index, 'nz']:
nozzle_change_counter += 1
nozzle = component_data.loc[component_index, 'nz']
gang_pick_counter = 0
for cycle, feeder_slot in enumerate(feeder_slot_result):
pick_slot = defaultdict(int)
for head, slot in enumerate(feeder_slot):
if slot == -1:
continue
pick_slot[slot - head * interval_ratio] += 1
for _ in pick_slot.values():
gang_pick_counter += cycle_result[cycle]
return sum(cycle_result) + e_nz_change * nozzle_change_counter + e_gang_pick * gang_pick_counter
def optimization_assign_result(component_data, pcb_data, component_result, cycle_result, feeder_slot_result,
nozzle_hinter=False, component_hinter=False, feeder_hinter=False):
def optimization_assign_result(component_data, pcb_data, optimizer_result, nozzle_hinter=False, component_hinter=False,
feeder_hinter=False, placement_hinter=False):
if nozzle_hinter:
columns = ['H{}'.format(i + 1) for i in range(max_head_index)] + ['cycle']
nozzle_assign = pd.DataFrame(columns=columns)
for cycle, components in enumerate(component_result):
for cycle, components in enumerate(optimizer_result.component_assign):
nozzle_assign_row = len(nozzle_assign)
nozzle_assign.loc[nozzle_assign_row, 'cycle'] = cycle_result[cycle]
nozzle_assign.loc[nozzle_assign_row, 'cycle'] = optimizer_result.cycle_assign[cycle]
for head in range(max_head_index):
index = component_result[cycle][head]
index = optimizer_result.component_assign[cycle][head]
if index == -1:
nozzle_assign.loc[nozzle_assign_row, 'H{}'.format(head + 1)] = ''
else:
@@ -388,15 +392,15 @@ def optimization_assign_result(component_data, pcb_data, component_result, cycle
columns = ['H{}'.format(i + 1) for i in range(max_head_index)] + ['cycle']
component_assign = pd.DataFrame(columns=columns)
for cycle, components in enumerate(component_result):
component_assign.loc[cycle, 'cycle'] = cycle_result[cycle]
for cycle, components in enumerate(optimizer_result.component_assign):
component_assign.loc[cycle, 'cycle'] = optimizer_result.cycle_assign[cycle]
for head in range(max_head_index):
index = component_result[cycle][head]
index = optimizer_result.component_assign[cycle][head]
if index == -1:
component_assign.loc[cycle, 'H{}'.format(head + 1)] = ''
else:
part = component_data.loc[index]['part']
component_assign.loc[cycle, 'H{}'.format(head + 1)] = part
# component_assign.loc[cycle, 'H{}'.format(head + 1)] = component_data.loc[index]['part']
component_assign.loc[cycle, 'H{}'.format(head + 1)] = 'C' + str(index)
print(component_assign)
print('')
@@ -405,36 +409,65 @@ def optimization_assign_result(component_data, pcb_data, component_result, cycle
columns = ['H{}'.format(i + 1) for i in range(max_head_index)] + ['cycle']
feedr_assign = pd.DataFrame(columns=columns)
for cycle, components in enumerate(feeder_slot_result):
feedr_assign.loc[cycle, 'cycle'] = cycle_result[cycle]
for cycle, components in enumerate(optimizer_result.feeder_slot_assign):
feedr_assign.loc[cycle, 'cycle'] = optimizer_result.cycle_assign[cycle]
for head in range(max_head_index):
slot = feeder_slot_result[cycle][head]
slot = optimizer_result.feeder_slot_assign[cycle][head]
if slot == -1:
feedr_assign.loc[cycle, 'H{}'.format(head + 1)] = 'A'
else:
feedr_assign.loc[cycle, 'H{}'.format(head + 1)] = 'F{}'.format(
slot) if slot <= max_slot_index // 2 else 'R{}'.format(slot - max_slot_index // 2)
print(feedr_assign)
print('')
if placement_hinter:
columns = ['H{}'.format(i + 1) for i in range(max_head_index)] + ['cycle']
def placement_time_estimate(component_data, pcb_data, component_result, cycle_result, feeder_slot_result,
placement_result, head_sequence, hinter=True) -> float:
placement_assign = pd.DataFrame(columns=columns)
for cycle, _ in enumerate(optimizer_result.placement_assign):
placement_assign.loc[cycle, 'cycle'] = 1
for head in range(max_head_index):
point = optimizer_result.placement_assign[cycle][head]
if point != -1:
placement_assign.loc[cycle, 'H{}'.format(head + 1)] = 'P{}'.format(point)
else:
placement_assign.loc[cycle, 'H{}'.format(head + 1)] = ''
headseq_assign = pd.DataFrame(columns=columns)
for cycle, headseq in enumerate(optimizer_result.head_sequence):
headseq_assign.loc[cycle, 'cycle'] = 1
for head in range(len(headseq)):
headseq_assign.loc[cycle, 'H{}'.format(head + 1)] = 'H{}'.format(headseq[head])
print(placement_assign)
print(headseq_assign)
print('')
def placement_info_evaluation(component_data, pcb_data, optimizer_result, hinter=False):
# === optimization result statistics ===
info = OptInfo()
# === consistency checks ===
total_points = 0
for cycle, components in enumerate(component_result):
info.total_points = 0
for cycle, components in enumerate(optimizer_result.component_assign):
for head, component in enumerate(components):
if component == -1:
continue
total_points += cycle_result[cycle]
info.total_points += optimizer_result.cycle_assign[cycle]
if total_points != len(pcb_data):
if info.total_points != len(pcb_data):
warning_info = 'the number of placement points does not match the PCB data. '
warnings.warn(warning_info, UserWarning)
return 0.
return OptInfo()
for placements in placement_result:
if optimizer_result.placement_assign:
total_points = info.total_points
for placements in optimizer_result.placement_assign:
for placement in placements:
if placement == -1:
continue
@@ -444,51 +477,47 @@ def placement_time_estimate(component_data, pcb_data, component_result, cycle_re
warnings.warn(
'the component assignment result and the placement result are not consistent. ',
UserWarning)
return 0.
return OptInfo()
feeder_arrangement = defaultdict(set)
for cycle, feeder_slots in enumerate(feeder_slot_result):
for cycle, feeder_slots in enumerate(optimizer_result.feeder_slot_assign):
for head, slot in enumerate(feeder_slots):
if slot == -1:
continue
feeder_arrangement[component_result[cycle][head]].add(slot)
feeder_arrangement[optimizer_result.component_assign[cycle][head]].add(slot)
info.total_components = len(feeder_arrangement.keys())
for part, data in component_data.iterrows():
if part in feeder_arrangement.keys() and data['feeder-limit'] < len(feeder_arrangement[part]):
if part in feeder_arrangement.keys() and data.fdn < len(feeder_arrangement[part]):
warning_info = 'the number of arranged feeders for [' + data['part'] + '] exceeds the quantity limit'
warnings.warn(warning_info, UserWarning)
return OptInfo()
total_pickup_time, total_round_time, total_place_time = .0, .0, 0  # pick-up time, round-trip time, placement time
total_operation_time = .0  # operation time
total_nozzle_change_counter = 0  # total number of nozzle changes
total_pick_counter = 0  # total number of pick-ups
total_mount_distance, total_pick_distance = .0, .0  # placement distance, pick-up distance
total_distance = 0  # total travel distance
cur_pos, next_pos = anc_marker_pos, [0, 0]  # current position of the placement head
# initialize the nozzle assignment of the first cycle
nozzle_assigned = ['Empty' for _ in range(max_head_index)]
for head in range(max_head_index):
for cycle in range(len(component_result)):
idx = component_result[cycle][head]
for cycle in range(len(optimizer_result.component_assign)):
idx = optimizer_result.component_assign[cycle][head]
if idx == -1:
continue
else:
nozzle_assigned[head] = component_data.loc[idx]['nz']
break
for cycle_set, _ in enumerate(component_result):
floor_cycle, ceil_cycle = sum(cycle_result[:cycle_set]), sum(cycle_result[:(cycle_set + 1)])
for cycle_set, _ in enumerate(optimizer_result.component_assign):
floor_cycle, ceil_cycle = sum(optimizer_result.cycle_assign[:cycle_set]), sum(optimizer_result.cycle_assign[:(cycle_set + 1)])
for cycle in range(floor_cycle, ceil_cycle):
if sum(optimizer_result.component_assign[cycle_set]) == -max_head_index:
continue
pick_slot, mount_pos, mount_angle = [], [], []
nozzle_pick_counter, nozzle_put_counter = 0, 0  # nozzle change counts (pick-up and put-back counted separately)
for head in range(max_head_index):
if feeder_slot_result[cycle_set][head] != -1:
pick_slot.append(feeder_slot_result[cycle_set][head] - interval_ratio * head)
if component_result[cycle_set][head] == -1:
if optimizer_result.feeder_slot_assign[cycle_set][head] != -1:
pick_slot.append(optimizer_result.feeder_slot_assign[cycle_set][head] - interval_ratio * head)
if optimizer_result.component_assign[cycle_set][head] == -1:
continue
nozzle = component_data.loc[component_result[cycle_set][head]]['nz']
nozzle = component_data.loc[optimizer_result.component_assign[cycle_set][head]]['nz']
if nozzle != nozzle_assigned[head]:
if nozzle_assigned[head] != 'Empty':
nozzle_put_counter += 1
@@ -500,9 +529,9 @@ def placement_time_estimate(component_data, pcb_data, component_result, cycle_re
next_pos = anc_marker_pos
move_time = max(axis_moving_time(cur_pos[0] - next_pos[0], 0),
axis_moving_time(cur_pos[1] - next_pos[1], 1))
total_round_time += move_time
total_distance += max(abs(cur_pos[0] - next_pos[0]), abs(cur_pos[1] - next_pos[1]))
info.round_time += move_time
info.anc_round_counter += 1
info.total_distance += max(abs(cur_pos[0] - next_pos[0]), abs(cur_pos[1] - next_pos[1]))
cur_pos = next_pos
pick_slot = list(set(pick_slot))
@@ -514,95 +543,89 @@ def placement_time_estimate(component_data, pcb_data, component_result, cycle_re
next_pos = [slotf1_pos[0] + slot_interval * (slot - 1), slotf1_pos[1]]
else:
next_pos = [slotr1_pos[0] - slot_interval * (max_slot_index - slot - 1), slotr1_pos[1]]
total_operation_time += t_pick
total_pick_counter += 1
info.operation_time += t_pick
info.pickup_counter += 1
move_time = max(axis_moving_time(cur_pos[0] - next_pos[0], 0),
axis_moving_time(cur_pos[1] - next_pos[1], 1))
if idx == 0:
total_round_time += move_time
info.round_time += move_time
else:
total_pickup_time += move_time
info.pickup_time += move_time
total_distance += max(abs(cur_pos[0] - next_pos[0]), abs(cur_pos[1] - next_pos[1]))
info.total_distance += max(abs(cur_pos[0] - next_pos[0]), abs(cur_pos[1] - next_pos[1]))
if slot != pick_slot[0]:
total_pick_distance += max(abs(cur_pos[0] - next_pos[0]), abs(cur_pos[1] - next_pos[1]))
info.pickup_distance += max(abs(cur_pos[0] - next_pos[0]), abs(cur_pos[1] - next_pos[1]))
cur_pos = next_pos
# fixed-camera inspection
for head in range(max_head_index):
if component_result[cycle_set][head] == -1:
continue
camera = component_data.loc[component_result[cycle_set][head]]['camera']
if camera == '固定相机':
next_pos = [fix_camera_pos[0] - head * head_interval, fix_camera_pos[1]]
move_time = max(axis_moving_time(cur_pos[0] - next_pos[0], 0),
axis_moving_time(cur_pos[1] - next_pos[1], 1))
total_round_time += move_time
total_distance += max(abs(cur_pos[0] - next_pos[0]), abs(cur_pos[1] - next_pos[1]))
total_operation_time += t_fix_camera_check
cur_pos = next_pos
# for head in range(max_head_index):
# if optimizer_result.component_assign[cycle_set][head] == -1:
# continue
# camera = component_data.loc[optimizer_result.component_assign[cycle_set][head]]['camera']
# if camera == '固定相机':
# next_pos = [fix_camera_pos[0] - head * head_interval, fix_camera_pos[1]]
# move_time = max(axis_moving_time(cur_pos[0] - next_pos[0], 0),
# axis_moving_time(cur_pos[1] - next_pos[1], 1))
# info.round_time += move_time
#
# info.total_distance += max(abs(cur_pos[0] - next_pos[0]), abs(cur_pos[1] - next_pos[1]))
# info.operation_time += t_fix_camera_check
# cur_pos = next_pos
# placement path
for head in head_sequence[cycle]:
index = placement_result[cycle][head]
if optimizer_result.placement_assign and optimizer_result.head_sequence:
head_angle = [0 for _ in range(max_head_index)]
for head in optimizer_result.head_sequence[cycle]:
index = optimizer_result.placement_assign[cycle][head]
if index == -1:
continue
mount_pos.append([pcb_data.loc[index]['x'] - head * head_interval + stopper_pos[0],
pcb_data.loc[index]['y'] + stopper_pos[1]])
mount_angle.append(pcb_data.loc[index]['r'])
mount_pos.append([pcb_data.iloc[index]['x'] - head * head_interval + stopper_pos[0],
pcb_data.iloc[index]['y'] + stopper_pos[1]])
head_angle[head] = pcb_data.iloc[index]['r']
# compute the placement travel distance separately
for cntPoints in range(len(mount_pos) - 1):
total_mount_distance += max(abs(mount_pos[cntPoints][0] - mount_pos[cntPoints + 1][0]),
info.place_distance += max(abs(mount_pos[cntPoints][0] - mount_pos[cntPoints + 1][0]),
abs(mount_pos[cntPoints][1] - mount_pos[cntPoints + 1][1]))
# account for R-axis pre-rotation compensating the extra placement time caused by coaxial angle rotation
total_operation_time += head_rotary_time(mount_angle[0])  # extra placement time for compensating the rotation angle
total_operation_time += t_nozzle_put * nozzle_put_counter + t_nozzle_pick * nozzle_pick_counter
for idx, pos in enumerate(mount_pos):
total_operation_time += t_place
move_time = max(axis_moving_time(cur_pos[0] - pos[0], 0), axis_moving_time(cur_pos[1] - pos[1], 1))
if idx == 0:
total_round_time += move_time
else:
total_place_time += move_time
if mount_pos[0][0] < mount_pos[-1][0]:
mount_pos = reversed(mount_pos)
total_distance += max(abs(cur_pos[0] - pos[0]), abs(cur_pos[1] - pos[1]))
# account for R-axis pre-rotation compensating the extra placement time caused by coaxial angle rotation
info.operation_time += t_nozzle_put * nozzle_put_counter + t_nozzle_pick * nozzle_pick_counter
for idx, pos in enumerate(mount_pos):
info.operation_time += t_place
if idx == 0:
move_time = max(axis_moving_time(cur_pos[0] - pos[0], 0),
axis_moving_time(cur_pos[1] - pos[1], 1))
info.round_time += move_time
else:
cur_head = optimizer_result.head_sequence[cycle][idx]
side_head = cur_head - 1 if cur_head % 2 else cur_head + 1
if optimizer_result.head_sequence[cycle][idx - 1] != side_head:
move_time = max(axis_moving_time(cur_pos[0] - pos[0], 0),
axis_moving_time(cur_pos[1] - pos[1], 1))
else:
move_time = max(axis_moving_time(cur_pos[0] - pos[0], 0),
axis_moving_time(cur_pos[1] - pos[1], 1),
head_rotary_time(head_angle[cur_head] - head_angle[side_head]))
info.place_time += move_time
info.total_distance += max(abs(cur_pos[0] - pos[0]), abs(cur_pos[1] - pos[1]))
cur_pos = pos
total_nozzle_change_counter += nozzle_put_counter + nozzle_pick_counter
total_time = total_pickup_time + total_round_time + total_place_time + total_operation_time
minutes, seconds = int(total_time // 60), int(total_time) % 60
millisecond = int((total_time - minutes * 60 - seconds) * 60)
info.nozzle_change_counter += nozzle_put_counter + nozzle_pick_counter
info.total_time = info.pickup_time + info.round_time + info.place_time + info.operation_time
info.cycle_counter = sum(optimizer_result.cycle_assign)
if hinter:
optimization_assign_result(component_data, pcb_data, component_result, cycle_result, feeder_slot_result,
nozzle_hinter=False, component_hinter=False, feeder_hinter=False)
print('-Cycle counter: {}'.format(sum(cycle_result)))
print('-Nozzle change counter: {}'.format(total_nozzle_change_counter // 2))
print('-Pick operation counter: {}'.format(total_pick_counter))
print('-Expected mounting tour length: {} mm'.format(total_mount_distance))
print('-Expected picking tour length: {} mm'.format(total_pick_distance))
print('-Expected total tour length: {} mm'.format(total_distance))
print('-Expected total moving time: {} s with pick: {}, round: {}, place = {}'.format(
total_pickup_time + total_round_time + total_place_time, total_pickup_time, total_round_time,
total_place_time))
print('-Expected total operation time: {} s'.format(total_operation_time))
if minutes > 0:
print('-Mounting time estimation: {:d} min {} s {:2d} ms ({:.3f}s)'.format(minutes, seconds, millisecond,
total_time))
else:
print('-Mounting time estimation: {} s {:2d} ms ({:.3f}s)'.format(seconds, millisecond, total_time))
return total_time
optimization_assign_result(component_data, pcb_data, optimizer_result, nozzle_hinter=False,
component_hinter=False, feeder_hinter=False)
info.print()
return info
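All travel times above come from axis_moving_time(delta, axis) in optimizer_common, which this diff does not show; a minimal sketch of the kind of model it implies, assuming a symmetric accelerate/decelerate profile under the per-axis velocity and acceleration limits (the actual implementation may differ):
import math

def axis_moving_time_sketch(distance, v_max, a_max):
    distance = abs(distance)
    if distance * a_max <= v_max ** 2:              # the axis never reaches full speed
        return 2 * math.sqrt(distance / a_max)      # triangular velocity profile
    return distance / v_max + v_max / a_max         # trapezoidal velocity profile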

View File

@@ -1,15 +1,12 @@
from base_optimizer.optimizer_common import *
from gurobipy import *
from collections import defaultdict
from base_optimizer.smtopt_route import *
def list_range(start, end=None):
return list(range(start)) if end is None else list(range(start, end))
@timer_wrapper
def optimizer_aggregation(component_data, pcb_data):
def optimizer_aggregation(component_data, pcb_data, hinter=True):
# === phase 0: data preparation ===
M = 1000 # a sufficient large number
a, b = 1, 6 # coefficient
@@ -46,7 +43,7 @@ def optimizer_aggregation(component_data, pcb_data):
# === phase 1: mathematical model solver ===
mdl = Model('SMT')
mdl.setParam('OutputFlag', 0)
mdl.setParam('OutputFlag', hinter)
# === Decision Variables ===
# the number of components of type i that are placed by nozzle type j on placement head k
@@ -107,8 +104,7 @@ def optimizer_aggregation(component_data, pcb_data):
mdl.setParam("TimeLimit", 100)
mdl.optimize()
if mdl.Status == GRB.OPTIMAL:
if mdl.Status == GRB.OPTIMAL or mdl.Status == GRB.TIME_LIMIT:
print('total cost = {}'.format(mdl.objval))
# convert cp model solution to standard output
@@ -163,25 +159,9 @@ def optimizer_aggregation(component_data, pcb_data):
feeder_slot_result = feeder_assignment(component_data, pcb_data, component_result, cycle_result)
# === phase 2: heuristic method ===
mount_point_pos = defaultdict(list)
for pcb_idx, data in pcb_data.iterrows():
part = data['part']
part_index = component_data[component_data['part'] == part].index.tolist()[0]
mount_point_pos[part_index].append([data['x'], data['y'], pcb_idx])
for index_ in mount_point_pos.keys():
mount_point_pos[index_].sort(key=lambda x: (x[1], x[0]))
for cycle_idx, _ in enumerate(cycle_result):
for _ in range(cycle_result[cycle_idx]):
placement_result.append([-1 for _ in range(max_head_index)])
for head in range(max_head_index):
if component_result[cycle_idx][head] == -1:
continue
index_ = component_result[cycle_idx][head]
placement_result[-1][head] = mount_point_pos[index_][-1][2]
mount_point_pos[index_].pop()
head_sequence.append(dynamic_programming_cycle_path(pcb_data, placement_result[-1], feeder_slot_result[cycle_idx]))
placement_result, head_sequence = greedy_level_placing_route_generation(component_data, pcb_data,
component_result, cycle_result,
feeder_slot_result)
else:
warnings.warn('No solution found!', UserWarning)
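A caveat with also accepting GRB.TIME_LIMIT above: Gurobi only exposes objval once an incumbent solution exists, so a defensive variant of that status check (a sketch reusing mdl and GRB from the code above) is:
if mdl.Status == GRB.OPTIMAL or (mdl.Status == GRB.TIME_LIMIT and mdl.SolCount > 0):
    print('total cost = {}'.format(mdl.objval))
else:
    warnings.warn('No solution found!', UserWarning)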

View File

@@ -1,5 +1,30 @@
from base_optimizer.optimizer_common import *
from result_analysis import *
def component_assign_evaluate(component_data, component_result, cycle_result, feeder_slot_result) -> float:
nozzle_change_counter = 0
for head in range(max_head_index):
nozzle = ''
for cycle in range(len(component_result)):
component_index = component_result[cycle][head]
if component_index == -1:
continue
if cycle != 0 and nozzle != component_data.loc[component_index, 'nz']:
nozzle_change_counter += 1
nozzle = component_data.loc[component_index, 'nz']
gang_pick_counter = 0
for cycle, feeder_slot in enumerate(feeder_slot_result):
pick_slot = defaultdict(int)
for head, slot in enumerate(feeder_slot):
if slot == -1:
continue
pick_slot[slot - head * interval_ratio] += 1
for _ in pick_slot.values():
gang_pick_counter += cycle_result[cycle]
return sum(cycle_result) + e_nz_change * nozzle_change_counter + e_gang_pick * gang_pick_counter
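component_assign_evaluate combines total cycles, nozzle changes and pick operations with the weights e_nz_change = 4 and e_gang_pick = 0.6 from optimizer_common; a toy calculation with invented counts:
cycles = [3, 2]                                    # two cycle groups of 3 and 2 cycles
nozzle_changes = 1
pick_operations = 2 * cycles[0] + 1 * cycles[1]    # 2 and 1 distinct pick slots per group -> 8 picks
value = sum(cycles) + e_nz_change * nozzle_changes + e_gang_pick * pick_operations
# value = 5 + 4 + 4.8 = 13.8 (lower is better)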
def convert_cell_2_result(pcb_data, component_data, component_cell, population):
@@ -8,7 +33,7 @@ def convert_cell_2_result(pcb_data, component_data, component_cell, population):
wl = [0 for _ in range(max_head_index)] # workload
e1, e2, e3 = 1, 0.5, 1. / 6
e1, e2, e3 = 1, 2, 1. / 6
component_result, cycle_result, feeder_slot_result = [], [], []
for index in population:
@@ -76,12 +101,16 @@ def optimizer_celldivision(pcb_data, component_data, hinter=True):
golden_section = 0.618
# build the component cells
point_num = len(pcb_data)
component_cell = pd.DataFrame({'index': np.arange(len(component_data)), 'points': np.zeros(len(component_data), dtype=int)})
for point_cnt in range(point_num):
part = pcb_data.loc[point_cnt, 'fdr'].split(' ', 1)[1]
index = np.where(component_data['part'].values == part)
component_cell.loc[index[0], 'points'] += 1
feeder_num = sum(component_data['fdn'])
component_cell = pd.DataFrame({'index': np.arange(feeder_num), 'points': np.zeros(feeder_num, dtype=int)})
cell_index = 0
for part_index, data in component_data.iterrows():
total_points, division_points = data.points, math.ceil(data.points / data.fdn)
for _ in range(data.fdn):
component_cell.loc[cell_index, 'index'] = part_index
component_cell.loc[cell_index, 'points'] = min(division_points, total_points)
total_points -= division_points
cell_index += 1
component_cell = component_cell[~component_cell['points'].isin([0])]
# component_cell.sort_values(by = "points" , inplace = True, ascending = False)
@@ -98,12 +127,14 @@ def optimizer_celldivision(pcb_data, component_data, hinter=True):
pop_val = []
for pop in range(population_size):
try:
component_result, cycle_result, feeder_slot_result = convert_cell_2_result(pcb_data, component_data,
component_cell,
pop_generation[pop])
pop_val.append(
component_assign_evaluate(component_data, component_result, cycle_result, feeder_slot_result))
except:
pop_val.append(1e4)
# initialize a randomly generated population
Upit = int(1.5 * np.sqrt(len(component_cell)))
@@ -143,11 +174,14 @@ def optimizer_celldivision(pcb_data, component_data, hinter=True):
# assign the component cells to the heads and compute the fitness value
for pop in range(population_size):
try:
component_result, cycle_result, feeder_slot_result = convert_cell_2_result(pcb_data, component_data,
component_cell,
pop_generation[pop])
pop_val[pop] = component_assign_evaluate(component_data, component_result, cycle_result,
feeder_slot_result)
except:
pop_val[pop] = 1e4
assert(pop_val[pop] > 0)
if min(pop_val) < min_pop_val:

View File

@@ -0,0 +1,774 @@
import copy
import itertools  # used by feeder_nozzle_pattern below
import math
from functools import reduce
from base_optimizer.optimizer_common import *
from base_optimizer.result_analysis import placement_info_evaluation
@timer_wrapper
def feeder_priority_assignment(component_data, pcb_data, feeder_data, hinter=True):
feeder_allocate_val = None
component_result, cycle_result, feeder_slot_result = None, None, None
nozzle_pattern_list = feeder_nozzle_pattern(component_data)
pbar = tqdm(total=len(nozzle_pattern_list), desc='feeder priority process') if hinter else None
# step 1: determine the nozzle assignment pattern
for nozzle_pattern in nozzle_pattern_list:
feeder_data_cpy = copy.deepcopy(feeder_data)
# step 2: allocate feeder positions
feeder_allocate(component_data, pcb_data, feeder_data_cpy, nozzle_pattern, figure=False)
# step 3: scan the feeder base to determine the component pick-up order
component_assign, cycle_assign, feeder_slot_assign = feeder_base_scan(component_data, pcb_data, feeder_data_cpy)
info = placement_info_evaluation(component_data, pcb_data, OptResult(component_assign, cycle_assign,
feeder_slot_assign), hinter=False)
val = Fit_cy * info.cycle_counter + Fit_nz * info.nozzle_change_counter + Fit_pu * info.pickup_counter\
+ Fit_mv * info.pickup_distance
if feeder_allocate_val is None or val < feeder_allocate_val:
feeder_allocate_val = val
component_result, cycle_result, feeder_slot_result = component_assign, cycle_assign, feeder_slot_assign
if pbar:
pbar.update(1)
return component_result, cycle_result, feeder_slot_result
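Each candidate nozzle pattern is scored with the fitted time weights Fit_cy, Fit_nz, Fit_pu and Fit_mv from optimizer_common (0.326, 0.870, 0.159 and 0.035 in this revision); a toy evaluation with invented counter values:
cycle_counter, nozzle_change_counter, pickup_counter, pickup_distance = 120, 6, 150, 900.0
val = Fit_cy * cycle_counter + Fit_nz * nozzle_change_counter + Fit_pu * pickup_counter + Fit_mv * pickup_distance
# 39.12 + 5.22 + 23.85 + 31.50 = 99.69; the pattern with the smallest val is kept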
def feeder_nozzle_pattern(component_data):
nozzle_pattern_list = []
nozzle_points = defaultdict(int)
for _, data in component_data.iterrows():
if data.points == 0:
continue
nozzle_points[data.nz] += data.points
head_assign_indexes = [int(math.ceil(max_head_index + 0.5) - 4.5 - pow(-1, h) * (math.ceil(h / 2) - 0.5)) for h in
range(1, max_head_index + 1)]
while len(nozzle_points):
nozzle_heads, nozzle_indices = defaultdict(int), defaultdict(str),
min_points_nozzle = None
for idx, (nozzle, points) in enumerate(nozzle_points.items()):
nozzle_heads[nozzle], nozzle_indices[idx] = 1, nozzle
if min_points_nozzle is None or points < nozzle_points[min_points_nozzle]:
min_points_nozzle = nozzle
while sum(nozzle_heads.values()) != max_head_index:
max_cycle_nozzle = None
for nozzle, head_num in nozzle_heads.items():
if max_cycle_nozzle is None or nozzle_points[nozzle] / head_num > nozzle_points[max_cycle_nozzle] / \
nozzle_heads[max_cycle_nozzle]:
max_cycle_nozzle = nozzle
elif nozzle_points[nozzle] / head_num == nozzle_points[max_cycle_nozzle] / nozzle_heads[max_cycle_nozzle]:
if head_num > nozzle_heads[max_cycle_nozzle]:
max_cycle_nozzle = nozzle
assert max_cycle_nozzle is not None
nozzle_heads[max_cycle_nozzle] += 1
num_permu = reduce(lambda x, y: x * y, range(1, len(nozzle_indices.keys()) + 1))
num_permu = num_permu // 2 if len(nozzle_indices.keys()) > 3 else num_permu
for permu in itertools.permutations(nozzle_indices.keys()):
if (num_permu := num_permu - 1) < 0:
break
nozzle_pattern_list.append([])
for idx in permu:
for _ in range(nozzle_heads[nozzle_indices[idx]]):
nozzle_pattern_list[-1].append(nozzle_indices[idx])
if len(nozzle_points.keys()) > 1:
nozzle_average_points = []
for nozzle, head in nozzle_heads.items():
nozzle_average_points.append([nozzle, head, nozzle_points[nozzle] / head])
nozzle_average_points = sorted(nozzle_average_points, key=lambda x: -x[2])
idx = 0
nozzle_pattern_list.append(['' for _ in range(max_head_index)])
for nozzle, head, _ in nozzle_average_points:
for _ in range(head):
nozzle_pattern_list[-1][head_assign_indexes[idx]] = nozzle
idx += 1
idx = 1
nozzle_pattern_list.append(['' for _ in range(max_head_index)])
for nozzle, head, _ in nozzle_average_points:
for _ in range(head):
nozzle_pattern_list[-1][head_assign_indexes[-idx]] = nozzle
idx += 1
nozzle_points.pop(min_points_nozzle)
return nozzle_pattern_list
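The head counts per nozzle type above are grown greedily toward an even points-per-head load; a toy run of that loop with invented point counts on a 6-head gantry:
nozzle_points = {'CN065': 300, 'CN140': 60}
nozzle_heads = {nz: 1 for nz in nozzle_points}     # every nozzle type starts with one head
while sum(nozzle_heads.values()) < 6:
    nz = max(nozzle_heads, key=lambda n: nozzle_points[n] / nozzle_heads[n])
    nozzle_heads[nz] += 1
# nozzle_heads == {'CN065': 5, 'CN140': 1}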
def feeder_allocate(component_data, pcb_data, feeder_data, nozzle_pattern, figure=False, hinter=True):
feeder_points, feeder_division_points = defaultdict(int), defaultdict(int)  # placement points per feeder
feeder_center_pos = defaultdict(float)
feeder_limit, feeder_arrange = defaultdict(int), defaultdict(int)
part_nozzle = defaultdict(str)
feeder_base = [-2] * max_slot_index  # components installed on the feeder base (-2: unassigned, -1: occupied)
feeder_base_points = [0] * max_slot_index  # remaining placement points on the feeder base
component_index = defaultdict(int)
for idx, data in component_data.iterrows():
component_index[data.part] = idx
feeder_limit[idx] = data.fdn
feeder_arrange[idx] = 0
for _, data in pcb_data.iterrows():
pos, part = data.x + stopper_pos[0], data.part
part_index = component_index[part]
feeder_points[part_index] += 1
feeder_center_pos[part_index] += ((pos - feeder_center_pos[part_index]) / feeder_points[part_index])
part_nozzle[part_index] = component_data.loc[part_index].nz
for part_index, points in feeder_points.items():
feeder_division_points[part_index] = points // feeder_limit[part_index]
nozzle_component, nozzle_component_points = defaultdict(list), defaultdict(list)
for part, nozzle in part_nozzle.items():
for _ in range(feeder_limit[part]):
nozzle_component[nozzle].append(part)
nozzle_component_points[nozzle].append(feeder_points[part])
if feeder_data is not None:
for _, feeder in feeder_data.iterrows():
slot, part = feeder.slot, feeder.part
part_index = component_index[part]
# assigned feeder base slot and its placement point count
feeder_base[slot], feeder_base_points[slot] = part_index, feeder_division_points[part_index]
feeder_type = component_data.loc[part_index].fdr
extra_width = feeder_width[feeder_type][0] + feeder_width[feeder_type][1] - slot_interval
while extra_width > 0:
slot += 1
feeder_base[slot] = -1
extra_width -= slot_interval
feeder_limit[part_index] -= 1
feeder_arrange[part_index] += 1
if feeder_limit[part_index] < 0:
info = 'the number of arranged feeders for [' + part + '] exceeds the quantity limit'
raise ValueError(info)
for nozzle, components in nozzle_component.items():
if part_index in components:
index_ = components.index(part_index)
nozzle_component[nozzle].pop(index_)
nozzle_component_points[nozzle].pop(index_)
break
head_assign_indexes = [int(math.ceil(max_head_index + 0.5) - 4.5 - pow(-1, h) * (math.ceil(h / 2) - 0.5)) for h in
range(1, max_head_index + 1)]
assert len(nozzle_pattern) == max_head_index
while True:
best_assign, best_assign_points = [], []
best_assign_slot, best_assign_value = -1, -np.inf
best_nozzle_component, best_nozzle_component_points = None, None
for slot in range(1, max_slot_index // 2 - (max_head_index - 1) * interval_ratio + 1):
feeder_assign, feeder_assign_points = [], []
tmp_feeder_limit, tmp_feeder_points = feeder_limit.copy(), feeder_points.copy()
tmp_nozzle_component, tmp_nozzle_component_points = copy.deepcopy(nozzle_component), copy.deepcopy(
nozzle_component_points)
# record the component types of feeders already installed in the scanned slots
for head in range(max_head_index):
feeder_assign.append(feeder_base[slot + head * interval_ratio])
if feeder_assign[-1] >= 0:
feeder_assign_points.append(feeder_base_points[slot + head * interval_ratio])
if feeder_assign_points[-1] <= 0:
feeder_assign[-1], feeder_assign_points[-1] = -1, 0
else:
feeder_assign_points.append(0)
if -2 not in feeder_assign:
continue
assign_part_stack, assign_part_stack_points = [], []
for idx in head_assign_indexes:
if feeder_assign[idx] != -2:
continue
# the nozzle pattern is non-empty: assign components according to the corresponding nozzle type
nozzle_assign = nozzle_pattern[idx]
if len(tmp_nozzle_component[nozzle_assign]) == 0:
# no component available for this head's nozzle type: push the planned component onto the stack
part = max(tmp_feeder_points.keys(),
key=lambda x: tmp_feeder_points[x] / tmp_feeder_limit[x]
if tmp_feeder_limit[x] != 0 else 0)
for nozzle, component_list in tmp_nozzle_component.items():
if part in component_list:
nozzle_assign = nozzle
assign_part_stack.append(part)
assign_part_stack_points.append(feeder_division_points[part])
break
else:
# a component of this head's nozzle type is available: assign it directly
index_ = tmp_nozzle_component[nozzle_assign].index(max(tmp_nozzle_component[nozzle_assign],
key=lambda x: tmp_feeder_points[x] /
tmp_feeder_limit[x] if
tmp_feeder_limit[x] != 0 else 0))
part = tmp_nozzle_component[nozzle_assign][index_]
feeder_type = component_data.loc[part].fdr
extra_width, extra_slot = feeder_width[feeder_type][0] + feeder_width[feeder_type][1] - slot_interval, 1
slot_overlap = False
while extra_width > 0:
slot_ = slot + idx * interval_ratio + extra_slot
if feeder_base[slot_] != -2 or slot_ > max_slot_index // 2:
slot_overlap = True
break
if idx + extra_slot // 2 < max_head_index and feeder_assign[idx + extra_slot // 2] >= 0:
slot_overlap = True
break
extra_width -= slot_interval
extra_slot += 1
# enough feeders are available and there is no slot conflict with already placed feeders
if tmp_feeder_limit[part] > 0 and not slot_overlap:
feeder_assign[idx], feeder_assign_points[idx] = part, feeder_division_points[part]
extra_width, extra_head = feeder_width[feeder_type][0] + feeder_width[feeder_type][
1] - head_interval, 1
while extra_width > 0 and idx + extra_head < max_head_index:
feeder_assign[idx + extra_head] = -1
extra_head += 1
extra_width -= head_interval
else:
part = -1  # the component conflicts with an occupied slot and does not consume an available feeder
if part >= 0 and tmp_feeder_limit[part] == 0:
continue
if part in tmp_nozzle_component[nozzle_assign]:
part_index = tmp_nozzle_component[nozzle_assign].index(part)
tmp_nozzle_component[nozzle_assign].pop(part_index)
tmp_nozzle_component_points[nozzle_assign].pop(part_index)
tmp_feeder_limit[part] -= 1
tmp_feeder_points[part] -= feeder_division_points[part]
# pop the component stack: first assign heads whose nozzle type matches
if nozzle_pattern:
for head, feeder in enumerate(feeder_assign):
if feeder != -2:
continue
for idx, part in enumerate(assign_part_stack):
feeder_type = component_data.loc[part].fdr
extra_width, extra_slot = feeder_width[feeder_type][0] + feeder_width[feeder_type][
1] - slot_interval, 1
slot_overlap = False
while extra_width > 0:
slot_ = slot + head * interval_ratio + extra_slot
if feeder_base[slot_] != -2 or slot_ > max_slot_index // 2:
slot_overlap = True
break
extra_width -= slot_interval
extra_slot += 1
if component_data.loc[part].nz == nozzle_pattern[head] and not slot_overlap:
feeder_assign[head], feeder_assign_points[head] = assign_part_stack[idx], \
assign_part_stack_points[idx]
assign_part_stack.pop(idx)
assign_part_stack_points.pop(idx)
break
# then assign the remaining unassigned components from the stack
for head in head_assign_indexes:
if feeder_assign[head] != -2 or len(assign_part_stack) == 0:
continue
part, points = assign_part_stack[0], assign_part_stack_points[0]
feeder_type = component_data.loc[part].fdr
extra_width = feeder_width[feeder_type][0] + feeder_width[feeder_type][1] - slot_interval
extra_slot = 1
slot_overlap = False
while extra_width > 0:
slot_ = slot + head * interval_ratio + extra_slot
if feeder_base[slot_] != -2 or slot_ > max_slot_index // 2:
slot_overlap = True
break
extra_width -= slot_interval
extra_slot += 1
if not slot_overlap:
feeder_assign[head], feeder_assign_points[head] = part, points
extra_width = feeder_width[feeder_type][0] + feeder_width[feeder_type][1] - head_interval
extra_head = 1
while extra_width > 0 and head + extra_head < max_head_index:
feeder_assign[head + extra_head] = -1
extra_head += 1
extra_width -= head_interval
else:
# return stacked components that cannot be assigned due to the mechanical limit
nozzle = component_data.loc[part].nz
tmp_nozzle_component[nozzle].insert(0, part)
tmp_nozzle_component_points[nozzle].insert(0, points)
assign_part_stack.pop(0)
assign_part_stack_points.pop(0)
# components still left on the stack that cannot be assigned due to the mechanical limit
while assign_part_stack:
part, points = assign_part_stack[0], assign_part_stack_points[0]
nozzle = component_data.loc[part].nz
tmp_nozzle_component[nozzle].insert(0, part)
tmp_nozzle_component_points[nozzle].insert(0, points)
assign_part_stack.pop(0)
assign_part_stack_points.pop(0)
nozzle_change_counter = 0
average_slot, average_head = [], []
for head, feeder_ in enumerate(feeder_assign):
if feeder_ < 0:
continue
average_slot.append((feeder_center_pos[feeder_] - slotf1_pos[0]) / slot_interval + 1)
average_head.append(head)
if nozzle_pattern and component_data.loc[feeder_].nz != nozzle_pattern[head]:
nozzle_change_counter += 1
if len(average_slot) == 0:
continue
average_slot = sum(average_slot) / len(average_slot) - sum(average_head) / len(average_head) * interval_ratio
assign_value = 0
feeder_assign_points_cpy = feeder_assign_points.copy()
while True:
points_filter = list(filter(lambda x: x > 0, feeder_assign_points_cpy))
if not points_filter:
break
assign_value += e_gang_pick * min(points_filter) * (len(points_filter) - 1)
for head, _ in enumerate(feeder_assign_points_cpy):
if feeder_assign_points_cpy[head] == 0:
continue
feeder_assign_points_cpy[head] -= min(points_filter)
assign_value -= (1e2 * e_nz_change * nozzle_change_counter + 1e-5 * abs(slot - average_slot))
if assign_value >= best_assign_value and sum(feeder_assign_points) != 0:
best_assign_value = assign_value
best_assign = feeder_assign.copy()
best_assign_points = feeder_assign_points.copy()
best_assign_slot = slot
best_nozzle_component, best_nozzle_component_points = \
tmp_nozzle_component.copy(), tmp_nozzle_component_points.copy()
if not best_assign_points:
break
for idx, part in enumerate(best_assign):
if part < 0:
continue
# newly installed feeder
if feeder_base[best_assign_slot + idx * interval_ratio] != part:
# subtract the points assigned to the maximized simultaneous pick-up cycles and keep the remainder
feeder_base_points[best_assign_slot + idx * interval_ratio] += (
feeder_division_points[part] - min(filter(lambda x: x > 0, best_assign_points)))
feeder_points[part] -= feeder_division_points[part]
feeder_limit[part] -= 1
feeder_arrange[part] += 1
if feeder_limit[part] == 0:
feeder_division_points[part] = 0
for nozzle, components in nozzle_component.items():
if part in components:
index_ = components.index(part)
nozzle_component[nozzle].pop(index_)
nozzle_component_points[nozzle].pop(index_)
break
feeder_division_points[part] = 0
else:
# existing feeder
feeder_base_points[best_assign_slot + idx * interval_ratio] -= min(
filter(lambda x: x > 0, best_assign_points))
# update the feeder base information
feeder_base[best_assign_slot + idx * interval_ratio] = part
feeder_type, extra_slot = component_data.loc[part].fdr, 0
extra_width = feeder_width[feeder_type][0] + feeder_width[feeder_type][1] - slot_interval
while extra_width > 0:
extra_slot += 1
if feeder_base[best_assign_slot + idx * interval_ratio + extra_slot] == -2:
feeder_base[best_assign_slot + idx * interval_ratio + extra_slot] = -1 # 标记槽位已占用
else:
raise ValueError('feeder allocation conflict')
extra_width -= slot_interval
# update the nozzle information
nozzle_pattern[idx] = component_data.loc[part].nz
# update the order in which heads are assigned
head_assign_indexes = np.array(best_assign_points).argsort().tolist()
nozzle_component, nozzle_component_points = copy.deepcopy(best_nozzle_component), copy.deepcopy(
best_nozzle_component_points)
assert not list(filter(lambda x: x < 0, feeder_limit.values()))  # the number of assigned feeders stays within the limits
# update the feeder slot occupation information
for _, data in feeder_data.iterrows():
feeder_base[data.slot] = -1
for slot, feeder in enumerate(feeder_base):
if feeder < 0:
continue
part = component_data.loc[feeder].part
feeder_data.loc[len(feeder_data.index)] = [slot, part]
if figure:
# plot the feeder position layout
for slot in range(max_slot_index // 2):
plt.scatter(slotf1_pos[0] + slot_interval * slot, slotf1_pos[1], marker='x', s=12, color='black', alpha=0.5)
plt.text(slotf1_pos[0] + slot_interval * slot, slotf1_pos[1] - 45, str(slot + 1), ha='center', va='bottom',
size=8)
feeder_assign_range = []
for _, feeder in feeder_data.iterrows():
part_index = component_data[component_data.part == feeder.part].index.tolist()[0]
feeder_type = component_data.loc[part_index].fdr
width = feeder_width[feeder_type][0] + feeder_width[feeder_type][1]
start = slotf1_pos[0] + slot_interval * (feeder.slot - 1) - slot_interval / 2
end = slotf1_pos[0] + slot_interval * (feeder.slot - 1) - slot_interval / 2 + width
rec_x = [start, end, end, start]
rec_y = [slotf1_pos[1] - 40, slotf1_pos[1] - 40, slotf1_pos[1] + 10, slotf1_pos[1] + 10]
c = 'red' if feeder.arg == 0 else 'black'  # black: already assigned, red: newly assigned
plt.text(slotf1_pos[0] + slot_interval * (feeder.slot - 1), slotf1_pos[1] + 12,
feeder.part + ': ' + str(feeder_points[part_index]), ha='center', size=7, rotation=90, color=c)
plt.fill(rec_x, rec_y, facecolor='yellow', alpha=0.4)
feeder_assign_range.append([start, end])
# record overlapping intervals
feeder_assign_range.sort(key=lambda x: x[0])
for i in range(1, len(feeder_assign_range)):
if feeder_assign_range[i][0] < feeder_assign_range[i - 1][1]:
start, end = feeder_assign_range[i][0], feeder_assign_range[i - 1][1]
rec_x = [start, end, end, start]
rec_y = [slotf1_pos[1] - 40, slotf1_pos[1] - 40, slotf1_pos[1] + 10, slotf1_pos[1] + 10]
plt.fill(rec_x, rec_y, facecolor='red')
plt.plot([slotf1_pos[0] - slot_interval / 2, slotf1_pos[0] + slot_interval * (max_slot_index // 2 - 1 + 0.5)],
[slotf1_pos[1] + 10, slotf1_pos[1] + 10], color='black')
plt.plot([slotf1_pos[0] - slot_interval / 2, slotf1_pos[0] + slot_interval * (max_slot_index // 2 - 1 + 0.5)],
[slotf1_pos[1] - 40, slotf1_pos[1] - 40], color='black')
for counter in range(max_slot_index // 2 + 1):
pos = slotf1_pos[0] + (counter - 0.5) * slot_interval
plt.plot([pos, pos], [slotf1_pos[1] + 10, slotf1_pos[1] - 40], color='black', linewidth=1)
plt.ylim(-10, 100)
plt.show()
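The multi-slot occupation rule above recurs several times in feeder_allocate: a feeder whose tape and body width exceed one slot pitch blocks the following slots with -1. A small standalone sketch of that rule (occupied_extra_slots is a hypothetical helper, not part of the file; feeder_width and slot_interval come from optimizer_common):
def occupied_extra_slots(feeder_type, start_slot):
    width = feeder_width[feeder_type][0] + feeder_width[feeder_type][1]
    extra, extra_width = 0, width - slot_interval
    while extra_width > 0:
        extra += 1
        extra_width -= slot_interval
    return [start_slot + k for k in range(1, extra + 1)]    # slots to be marked as occupied (-1)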
def feeder_base_scan(component_data, pcb_data, feeder_data):
feeder_assign_check = set()
for _, feeder in feeder_data.iterrows():
feeder_assign_check.add(feeder.part)
component_points = [0] * len(component_data)
component_index = defaultdict(int)
for idx, data in component_data.iterrows():
if data.nz not in nozzle_limit.keys() or nozzle_limit[data.nz] <= 0:
info = 'there is no available nozzle [' + data.nz + '] for the assembly process'
raise ValueError(info)
component_points[idx] = data.points
component_index[data.part] = idx
assert len(feeder_assign_check) == len(component_points) - component_points.count(0)  # all feeders have been assigned slots
mount_center_slot = defaultdict(float)
for _, data in pcb_data.iterrows():
part_index = component_index[data.part]
mount_center_slot[part_index] += (data.x - mount_center_slot[part_index])
for idx, pos in mount_center_slot.items():
mount_center_slot[idx] = (pos / component_points[idx] + stopper_pos[0] - slotf1_pos[0]) / slot_interval + 1
feeder_part = [-1] * max_slot_index
for _, data in feeder_data.iterrows():
component_index = component_data[component_data.part == data.part].index.tolist()
if len(component_index) != 1:
print('unregistered component: ', data.part, ' in slot', data.slot)
continue
component_index = component_index[0]
feeder_part[data.slot] = component_index
component_result, cycle_result, feeder_slot_result = [], [], []  # optimization results: placement point indices and pick-up slots
sum_nozzle_points, nozzle_pattern = -1, None
for slot in range(max_slot_index // 2 - (max_head_index - 1) * interval_ratio):
cur_nozzle_points, cur_nozzle_pattern = 0, ['' for _ in range(max_head_index)]
for head in range(max_head_index):
if (part := feeder_part[slot + head * interval_ratio]) == -1:
continue
cur_nozzle_pattern[head] = component_data.loc[part].nz
cur_nozzle_points += component_points[part]
if cur_nozzle_points > sum_nozzle_points:
sum_nozzle_points = cur_nozzle_points
nozzle_pattern = cur_nozzle_pattern
nozzle_mode, nozzle_mode_cycle = [nozzle_pattern], [0]  # nozzle matching patterns
value_increment_base = 0
while True:
# === inner loop over cycles ===
assigned_part = [-1 for _ in range(max_head_index)]  # components assigned to heads in the current scan
assigned_cycle = [0 for _ in range(max_head_index)]  # maximum number of cycles assigned per component in the current scan
assigned_slot = [-1 for _ in range(max_head_index)]  # feeder slots assigned in the current scan
best_assigned_eval_func = -float('inf')
nozzle_insert_cycle = 0
for cycle_index, nozzle_cycle in enumerate(nozzle_mode):
scan_eval_func_list = []  # best objective values obtained from several scans
# best result scanned so far under the nozzle_cycle pattern
cur_scan_part = [-1 for _ in range(max_head_index)]
cur_scan_cycle = [0 for _ in range(max_head_index)]
cur_scan_slot = [-1 for _ in range(max_head_index)]
cur_nozzle_limit = copy.deepcopy(nozzle_limit)
while True:
best_scan_part = [-1 for _ in range(max_head_index)]
best_scan_cycle = [0 for _ in range(max_head_index)]
best_scan_slot = [-1 for _ in range(max_head_index)]
best_scan_nozzle_limit = copy.deepcopy(cur_nozzle_limit)
scan_eval_func, search_break = -float('inf'), True
# scan the front feeder base
for slot in range(1, max_slot_index // 2 - (max_head_index - 1) * interval_ratio + 1):
if sum(feeder_part[slot: slot + max_head_index * interval_ratio: interval_ratio]) == -max_head_index:
continue
scan_cycle, scan_part, scan_slot = cur_scan_cycle.copy(), cur_scan_part.copy(), cur_scan_slot.copy()
scan_nozzle_limit = copy.deepcopy(cur_nozzle_limit)
# pre-scan to determine the pick-up count of each component type (look-ahead)
preview_scan_part = defaultdict(int)
for head in range(max_head_index):
part = feeder_part[slot + head * interval_ratio]
# the head and the pick-up slot satisfy the mapping relation
if scan_part[head] == -1 and part != -1 and component_points[part] > 0 and scan_part.count(
part) < component_points[part]:
preview_scan_part[part] += 1
component_counter = 0
for head in range(max_head_index):
part = feeder_part[slot + head * interval_ratio]
# 1. matching condition: the head and the pick-up slot satisfy the mapping relation
if scan_part[head] == -1 and part != -1 and component_points[part] > 0 and scan_part.count(
part) < component_points[part]:
# 2. matching condition: the available nozzle count limit is not exceeded
nozzle = component_data.loc[part].nz
if scan_nozzle_limit[nozzle] <= 0:
continue
# 3. increment condition: introducing a new component type does not decrease the cost function value (look-ahead)
if scan_cycle.count(0) == max_head_index:
gang_pick_change = component_points[part]
else:
prev_cycle = min(filter(lambda x: x > 0, scan_cycle))
# improvement in simultaneous pick-ups
gang_pick_change = min(prev_cycle, component_points[part] // preview_scan_part[part])
# 4. pick-up travel condition: neighbouring components are picked up together to shorten the travel path
# reference_slot = -1
# for head_, slot_ in enumerate(scan_slot):
# if slot_ != -1:
# reference_slot = slot_ - head_ * interval_ratio
# if reference_slot != -1 and abs(reference_slot - slot) > (max_head_index - 1) * interval_ratio:
# continue
# 5. compare the simultaneous pick-up gain with the nozzle change count
prev_nozzle_change = 0
if cycle_index + 1 < len(nozzle_mode):
prev_nozzle_change = 2 * (nozzle_cycle[head] != nozzle_mode[cycle_index + 1][head])
# avoid low head utilization in the first cycle
nozzle_change = 2 * (nozzle != nozzle_cycle[head])
if cycle_index + 1 < len(nozzle_mode):
nozzle_change += 2 * (nozzle != nozzle_mode[cycle_index + 1][head])
nozzle_change -= prev_nozzle_change
val = e_gang_pick * gang_pick_change - e_nz_change * nozzle_change
if val < value_increment_base:
continue
component_counter += 1
scan_part[head] = part
scan_cycle[head] = component_points[part] // preview_scan_part[part]
scan_slot[head] = slot + head * interval_ratio
scan_nozzle_limit[nozzle] -= 1
nozzle_counter = 0  # number of nozzle changes
# previous cycle
for head, nozzle in enumerate(nozzle_cycle):
if scan_part[head] == -1:
continue
if component_data.loc[scan_part[head]].nz != nozzle and nozzle != '':
nozzle_counter += 2
# next cycle (additional nozzle changes incurred)
if cycle_index + 1 < len(nozzle_mode):
for head, nozzle in enumerate(nozzle_mode[cycle_index + 1]):
if scan_part[head] == -1:
continue
prev_counter, new_counter = 0, 0
if nozzle_cycle[head] != nozzle and nozzle_cycle[head] != '' and nozzle != '':
prev_counter += 2
if component_data.loc[scan_part[head]].nz != nozzle and nozzle != '':
new_counter += 2
nozzle_counter += new_counter - prev_counter
else:
for head, nozzle in enumerate(nozzle_mode[0]):
if scan_part[head] == -1:
continue
prev_counter, new_counter = 0, 0
if nozzle_cycle[head] != nozzle and nozzle_cycle[head] != '' and nozzle != '':
prev_counter += 2
if component_data.loc[scan_part[head]].nz != nozzle and nozzle != '':
new_counter += 2
nozzle_counter += new_counter - prev_counter
if component_counter == 0:  # no component was scanned in the current situation
continue
search_break = False
scan_part_head = defaultdict(list)
for head, part in enumerate(scan_part):
if part == -1:
continue
scan_part_head[part].append(head)
for part, heads in scan_part_head.items():
part_cycle = component_points[part] // len(heads)
for head in heads:
scan_cycle[head] = part_cycle
# compute the post-scan objective and record the best scan result
# short-term gain
cycle = min(filter(lambda x: x > 0, scan_cycle))
gang_pick_counter, gang_pick_slot_set = 0, set()
for head, pick_slot in enumerate(scan_slot):
gang_pick_slot_set.add(pick_slot - head * interval_ratio)
eval_func_short_term = e_gang_pick * (max_head_index - scan_slot.count(-1) - len(
gang_pick_slot_set)) * cycle - e_nz_change * nozzle_counter
# long-term gain
gang_pick_slot_dict = defaultdict(list)
for head, pick_slot in enumerate(scan_slot):
gang_pick_slot_dict[pick_slot - head * interval_ratio].append(scan_cycle[head])
eval_func_long_term = 0
for pick_cycle in gang_pick_slot_dict.values():
while pick_cycle:
min_cycle = min(pick_cycle)
eval_func_long_term += e_gang_pick * (len(pick_cycle) - 1) * min(pick_cycle)
pick_cycle = list(map(lambda c: c - min_cycle, pick_cycle))
pick_cycle = list(filter(lambda c: c > 0, pick_cycle))
eval_func_long_term -= e_nz_change * nozzle_counter
# travel path during the pickup process
pick_slot_set = set()
for head, pick_slot in enumerate(scan_slot):
if pick_slot == -1:
continue
pick_slot_set.add(pick_slot - head * interval_ratio)
slot_offset = 0
for head, part in enumerate(scan_part):
if part == -1:
continue
slot_offset += abs(scan_slot[head] - mount_center_slot[part])
ratio = 0.5
eval_func = (1 - ratio) * eval_func_short_term + ratio * eval_func_long_term - 1e-5 * (
max(pick_slot_set) - min(pick_slot_set)) - 1e-5 * slot_offset
if eval_func >= scan_eval_func:
scan_eval_func = eval_func
best_scan_part, best_scan_cycle = scan_part.copy(), scan_cycle.copy()
best_scan_slot = scan_slot.copy()
best_scan_nozzle_limit = copy.deepcopy(scan_nozzle_limit)
if search_break:
break
scan_eval_func_list.append(scan_eval_func)
cur_scan_part = best_scan_part.copy()
cur_scan_slot = best_scan_slot.copy()
cur_scan_cycle = best_scan_cycle.copy()
cur_nozzle_limit = copy.deepcopy(best_scan_nozzle_limit)
if len(scan_eval_func_list) and sum(scan_eval_func_list) > best_assigned_eval_func:
best_assigned_eval_func = sum(scan_eval_func_list)
assigned_part = cur_scan_part.copy()
assigned_slot = cur_scan_slot.copy()
assigned_cycle = cur_scan_cycle.copy()
nozzle_insert_cycle = cycle_index
# remove the corresponding number of placement points from the feeder base
nonzero_cycle = [cycle for cycle in assigned_cycle if cycle > 0]
if not nonzero_cycle:
value_increment_base -= max_head_index
continue
for head, slot in enumerate(assigned_slot):
if assigned_part[head] == -1:
continue
component_points[feeder_part[slot]] -= min(nonzero_cycle)
insert_cycle = sum([nozzle_mode_cycle[c] for c in range(nozzle_insert_cycle + 1)])
component_result.insert(insert_cycle, assigned_part)
cycle_result.insert(insert_cycle, min(nonzero_cycle))
feeder_slot_result.insert(insert_cycle, assigned_slot)
# update the nozzle matching pattern
cycle_nozzle = nozzle_mode[nozzle_insert_cycle].copy()
for head, component in enumerate(assigned_part):
if component == -1:
continue
cycle_nozzle[head] = component_data.loc[component].nz
if cycle_nozzle == nozzle_mode[nozzle_insert_cycle]:
nozzle_mode_cycle[nozzle_insert_cycle] += 1
elif nozzle_insert_cycle + 1 < len(nozzle_mode) and cycle_nozzle == nozzle_mode[nozzle_insert_cycle + 1]:
nozzle_mode_cycle[nozzle_insert_cycle + 1] += 1
else:
nozzle_mode.insert(nozzle_insert_cycle + 1, cycle_nozzle)
nozzle_mode_cycle.insert(nozzle_insert_cycle + 1, 1)
if sum(component_points) == 0:
break
return component_result, cycle_result, feeder_slot_result
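The head-assignment decision above weighs the simultaneous-pick gain against the nozzle-change penalty through val = e_gang_pick * gang_pick_change - e_nz_change * nozzle_change, compared with the running threshold value_increment_base. A minimal numeric sketch with the weights e_gang_pick = 0.6 and e_nz_change = 4 defined in optimizer_common (illustration only):
# a candidate adding 3 simultaneous picks but forcing one nozzle change
# (counted twice, nozzle unload + load) scores poorly
print(0.6 * 3 - 4 * 2)   # -6.2
# ten additional picks with no nozzle change score positively
print(0.6 * 10 - 4 * 0)  # 6.0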

View File

@@ -1,13 +1,5 @@
import copy
import random
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from base_optimizer.optimizer_common import *
from collections import defaultdict
from base_optimizer.smtopt_route import *
def dynamic_programming_cycle_path(cycle_placement, cycle_points):
head_sequence = []
@@ -74,7 +66,7 @@ def dynamic_programming_cycle_path(cycle_placement, cycle_points):
return head_sequence
def pickup_group_combination(component_nozzle, designated_nozzle, supply, supply_cycle, demand, demand_cycle):
def pickup_group_combination(designated_nozzle, supply, supply_cycle, demand, demand_cycle):
combination, combination_cycle = demand.copy(), demand_cycle.copy()
supply_cpy = supply.copy()
@@ -109,6 +101,9 @@ def pickup_group_combination(component_nozzle, designated_nozzle, supply, supply
combination_cycle[idx + max_match_offset] = supply_cycle[idx]
supply_cpy[idx] = None
if max_match_counter == 0:
break
return combination, combination_cycle
@@ -166,7 +161,7 @@ def cal_individual_val(component_nozzle, component_point_pos, designated_nozzle,
if is_combinable:
cost = cost - t0
# combine sequenced pickup ρb and ps into ρu(union pickup)
Pu, Pu_cycle = pickup_group_combination(component_nozzle, designated_nozzle, Ps, Ps_cycle, Pd, Pd_cycle)
Pu, Pu_cycle = pickup_group_combination(designated_nozzle, Ps, Ps_cycle, Pd, Pd_cycle)
# decide the placement cluster and sequencing of pickup ρu
pickup_action_counter, place_action_counter = 0, max_head_index - Pu.count(None)
@@ -236,8 +231,8 @@ def cal_individual_val(component_nozzle, component_point_pos, designated_nozzle,
def convert_individual_2_result(component_data, component_point_pos, designated_nozzle, pickup_group,
pickup_group_cycle, pair_group, feeder_lane, individual):
component_result, cycle_result, feeder_slot_result = [], [], []
placement_result, head_sequence_result = [], []
# === record the slot positions corresponding to each component type ===
feeder_part_arrange = defaultdict(list)
@@ -278,32 +273,11 @@ def convert_individual_2_result(component_data, component_point_pos, designated_
pickup_cycle_result[idx][head] -= cycle
component_point_index = defaultdict(int)
for cycle_set in range(len(cycle_result)):
for cycle in range(cycle_result[cycle_set]):
placement_result.append([-1 for _ in range(max_head_index)])
mount_point = [[0, 0] for _ in range(max_head_index)]
for head in range(max_head_index):
part_index = component_result[cycle_set][head]
if part_index == -1:
continue
part = component_data.iloc[part_index]['part']
point_info = component_point_pos[part][component_point_index[part]]
placement_result[-1][head] = point_info[2]
mount_point[head] = point_info[0:2]
component_point_index[part] += 1
head_sequence_result.append(dynamic_programming_cycle_path(placement_result[-1], mount_point))
return component_result, cycle_result, feeder_slot_result, placement_result, head_sequence_result
return component_result, cycle_result, feeder_slot_result
@timer_wrapper
def optimizer_hybrid_genetic(pcb_data, component_data, hinter=True):
random.seed(0)
np.random.seed(0)
nozzle_assigned_counter = optimal_nozzle_assignment(component_data, pcb_data)
# nozzle assignment result:
@@ -323,7 +297,7 @@ def optimizer_hybrid_genetic(pcb_data, component_data, hinter=True):
idx = component_data[component_data['part'] == part].index.tolist()[0]
nozzle = component_data.loc[idx]['nz']
component_feeder_limit[part] = component_data.loc[idx]['feeder-limit']
component_feeder_limit[part] = component_data.loc[idx].fdn
component_points[part] += 1
if nozzle_components[nozzle].count(part) < component_feeder_limit[part]:
nozzle_components[nozzle].append(part)
@@ -517,11 +491,6 @@ def optimizer_hybrid_genetic(pcb_data, component_data, hinter=True):
pop_val.append(val) # val is related to assembly time
for _ in range(n_generations):
# idx = np.argmin(pop_val)
# if len(best_pop_val) == 0 or pop_val[idx] < best_pop_val[-1]:
# best_individual = copy.deepcopy(population[idx])
# best_pop_val.append(pop_val[idx])
# min-max convert
max_val = 1.5 * max(pop_val)
convert_pop_val = list(map(lambda v: max_val - v, pop_val))
@@ -572,6 +541,15 @@ def optimizer_hybrid_genetic(pcb_data, component_data, hinter=True):
pbar.update(1)
best_individual = population[np.argmin(pop_val)]
component_result, cycle_result, feeder_slot_result = convert_individual_2_result(component_data,
component_point_pos,
designated_nozzle, pickup_group,
pickup_group_cycle, pair_group,
feeder_lane, best_individual)
placement_result, head_sequence_result = place_cluster_greedy_route_generation(component_data, pcb_data,
component_result, cycle_result,
feeder_slot_result)
return component_result, cycle_result, feeder_slot_result, placement_result, head_sequence_result
return convert_individual_2_result(component_data, component_point_pos, designated_nozzle, pickup_group,
pickup_group_cycle, pair_group, feeder_lane, best_individual)

View File

@@ -0,0 +1,376 @@
from base_optimizer.optimizer_common import *
from base_optimizer.smtopt_route import *
def head_task_model(component_data, pcb_data, hinter=True):
mdl = Model('pick_route')
mdl.setParam('Seed', 0)
mdl.setParam('OutputFlag', hinter) # set whether output the debug information
mdl.setParam('TimeLimit', 1000)
H = max_head_index
I = len(component_data)
K = len(pcb_data)
nozzle_type, component_type = [], []
for _, data in component_data.iterrows():
if not data.nz in nozzle_type:
nozzle_type.append(data.nz)
component_type.append(data.part)
average_pos = 0
for _, data in pcb_data.iterrows():
average_pos += data.x
slot_start = int(round(average_pos / len(pcb_data) + stopper_pos[0] - slotf1_pos[0]) / slot_interval) + 1
r = 1
J = len(nozzle_type)
M = 10000
CompOfNozzle = [[0 for _ in range(J)] for _ in range(I)] # Compatibility
component_point, component_fdn = [0 for _ in range(I)], [0 for _ in range(I)]
for _, data in pcb_data.iterrows():
idx = component_data[component_data.part == data.part].index.tolist()[0]
nozzle = component_data.iloc[idx].nz
CompOfNozzle[idx][nozzle_type.index(nozzle)] = 1
component_point[idx] += 1
component_fdn[idx] = component_data.iloc[idx].fdn
S = sum(component_fdn)
# objective related
g = mdl.addVars(list_range(K), vtype=GRB.BINARY)
d = mdl.addVars(list_range(K), list_range(H), vtype=GRB.CONTINUOUS)
u = mdl.addVars(list_range(K), vtype=GRB.INTEGER)
d_plus = mdl.addVars(list_range(J), list_range(H), list_range(K), vtype=GRB.CONTINUOUS)
d_minus = mdl.addVars(list_range(J), list_range(H), list_range(K), vtype=GRB.CONTINUOUS)
e = mdl.addVars(list_range(-(H - 1) * r, S), list_range(K), vtype=GRB.BINARY)
f = mdl.addVars(list_range(S), list_range(I), vtype=GRB.BINARY, name='')
x = mdl.addVars(list_range(I), list_range(K), list_range(H), vtype=GRB.BINARY)
y = mdl.addVars(list_range(S), list_range(K), list_range(H), vtype=GRB.BINARY)
z = mdl.addVars(list_range(J), list_range(K), list_range(H), vtype=GRB.BINARY)
mdl.addConstrs(g[k] >= g[k + 1] for k in range(K - 1))
mdl.addConstrs(
quicksum(x[i, k, h] for i in range(I)) <= g[k] for k in range(K) for h in range(H))
# nozzle no more than 1 for head h and cycle k
mdl.addConstrs(quicksum(z[j, k, h] for j in range(J)) <= 1 for k in range(K) for h in range(H))
# nozzle available number constraint
mdl.addConstrs(quicksum(z[j, k, h] for h in range(H)) <= H for k in range(K) for j in range(J))
# work completion
mdl.addConstrs(quicksum(x[i, k, h] for k in range(K) for h in range(H)) == component_point[i] for i in range(I))
# nozzle change
mdl.addConstrs(
x[i, k, h] <= quicksum(CompOfNozzle[i][j] * z[j, k, h] for j in range(J)) for i in range(I) for k in range(K)
for h in range(H))
mdl.addConstrs(
z[j, k, h] - z[j, k + 1, h] == d_plus[j, h, k] - d_minus[j, h, k] for k in range(K - 1) for j in range(J) for h
in range(H))
mdl.addConstrs(
z[j, 0, h] - z[j, K - 1, h] == d_plus[j, h, K - 1] - d_minus[j, h, K - 1] for j in range(J) for h in range(H))
mdl.addConstrs(
d[k, h] == quicksum(d_plus[j, h, k] for j in range(J)) + quicksum(d_minus[j, h, k] for j in range(J)) for k
in range(K) for h in range(H))
# simultaneous pick
for s in range(-(H - 1) * r, S):
rng = list(range(max(0, -math.floor(s / r)), min(H, math.ceil((S - s) / r))))
for k in range(K):
mdl.addConstr(quicksum(y[s + h * r, k, h] for h in rng) <= M * e[s, k], name='')
mdl.addConstr(quicksum(y[s + h * r, k, h] for h in rng) >= e[s, k], name='')
# pickup movement
mdl.addConstrs(
u[k] >= s1 * e[s1, k] - s2 * e[s2, k] + M * (e[s1, k] + e[s2, k] - 2) for s1 in range(-(H - 1) * r, S) for s2 in
range(-(H - 1) * r, S) for k in range(K))
# feeder related
mdl.addConstrs(quicksum(f[s, i] for s in range(S)) <= component_fdn[i] for i in range(I))
mdl.addConstrs(quicksum(f[s, i] for i in range(I)) <= 1 for s in range(S))
mdl.addConstrs(
quicksum(x[i, k, h] * y[s, k, h] for h in range(H) for k in range(K)) >= f[s, i] for i in range(I) for s in range(S))
mdl.addConstrs(
quicksum(x[i, k, h] * y[s, k, h] for h in range(H) for k in range(K)) <= M * f[s, i] for i in range(I) for s in
range(S))
# relationship
mdl.addConstrs(
quicksum(x[i, k, h] for i in range(I)) == quicksum(y[s, k, h] for s in range(S)) for k in range(K) for h in
range(H))
# objective
mdl.setObjective(Fit_cy * quicksum(g[k] for k in range(K)) + Fit_nz * quicksum(
d[k, h] for h in range(H) for k in range(K)) + Fit_pu * quicksum(
e[s, k] for s in range(-(H - 1) * r, S) for k in range(K)) + Fit_mv * quicksum(u[k] for k in range(K)),
GRB.MINIMIZE)
mdl.optimize()
component_result, cycle_result, feeder_slot_result = [], [], []
for k in range(K):
if abs(g[k].x) < 1e-6:
continue
component_assign, feeder_slot_assign = [-1 for _ in range(H)], [-1 for _ in range(H)]
for h in range(H):
for i in range(I):
if abs(x[i, k, h].x) > 1e-6:
component_assign[h] = i
for s in range(S):
if abs(y[s, k, h].x) > 1e-6:
feeder_slot_assign[h] = slot_start + s * interval_ratio - 1
if sum(component_assign) != -H:
component_result.append(component_assign)
feeder_slot_result.append(feeder_slot_assign)
cycle_result.append(1)
if hinter:
print(component_result)
print(feeder_slot_result)
return component_result, cycle_result, feeder_slot_result
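The simultaneous-pick constraints above use the standard big-M indicator linearization: e[s, k] equals 1 exactly when at least one head picks from the aligned slot position s in cycle k. A generic sketch of the pattern, detached from the model data (illustration only):
# for binary y_1..y_H and binary e, with M >= H:
#   sum(y) <= M * e   forces e = 1 whenever some y_h = 1
#   sum(y) >= e       forces e = 0 whenever all y_h = 0
# quick check of both directions with 3 heads and M = 3
for ys in ([0, 0, 0], [1, 0, 1]):
    feasible_e = [e for e in (0, 1) if sum(ys) <= 3 * e and sum(ys) >= e]
    print(ys, '->', feasible_e)   # [0, 0, 0] -> [0], [1, 0, 1] -> [1]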
def place_route_model(component_data, pcb_data, component_result, feeder_slot_result, figure=False, hinter=True):
mdl = Model('place_route')
mdl.setParam('Seed', 0)
mdl.setParam('OutputFlag', hinter) # set whether output the debug information
mdl.setParam('TimeLimit', 1000)
component_type = []
for _, data in component_data.iterrows():
component_type.append(data.part)
pos = []
for _, data in pcb_data.iterrows():
pos.append([data.x + stopper_pos[0], data.y + stopper_pos[1]])
I, P, H = len(component_data), len(pcb_data), max_head_index
A = []
for h1 in range(H):
for h2 in range(H):
if h1 == h2:
continue
A.append([h1, h2])
K = len(component_result)
CompOfPoint = [[0 for _ in range(P)] for _ in range(I)]
for row, data in pcb_data.iterrows():
idx = component_type.index(data.part)
CompOfPoint[idx][row] = 1
d_FW, d_PL, d_BW = np.zeros([P, K, H]), np.zeros([P, P, len(A)]), np.zeros([P, K, H])
for k in range(K):
min_slot, max_slot = float('inf'), float('-inf')
for h in range(H):
if feeder_slot_result[k][h] == -1:
continue
min_slot = min(min_slot, feeder_slot_result[k][h] - h * interval_ratio)
max_slot = max(max_slot, feeder_slot_result[k][h] - h * interval_ratio)
for p in range(P):
for h in range(H):
d_FW[p, k, h] = max(
abs(slotf1_pos[0] + (max_slot - 1) * slot_interval - pos[p][0] + h * head_interval),
abs(slotf1_pos[1] - pos[p][1]))
d_BW[p, k, h] = max(
abs(slotf1_pos[0] + (min_slot - 1) * slot_interval - pos[p][0] + h * head_interval),
abs(slotf1_pos[1] - pos[p][1]))
for p in range(P):
for q in range(P):
for idx, arc in enumerate(A):
h1, h2 = arc
d_PL[p, q, idx] = max(abs(pos[p][0] - pos[q][0] - (h1 - h2) * head_interval), abs(pos[p][1] - pos[q][1]))
w = mdl.addVars(list_range(P), list_range(P), list_range(K), list_range(len(A)), vtype=GRB.BINARY)
y = mdl.addVars(list_range(P), list_range(K), list_range(H), vtype=GRB.BINARY)
z = mdl.addVars(list_range(P), list_range(K), list_range(H), vtype=GRB.BINARY)
def A_from(h):
res = []
for idx, arc in enumerate(A):
if arc[0] == h:
res.append(idx)
return res
def A_to(h):
res = []
for idx, arc in enumerate(A):
if arc[1] == h:
res.append(idx)
return res
def A_contain(h):
res = []
for idx, arc in enumerate(A):
if h in arc:
res.append(idx)
return res
# constraints on component assignment type, assigned points cannot conflict with the corresponding component type
for k in range(K):
for h in range(H):
if component_result[k][h] == -1:
# no components on the head
mdl.addConstrs(quicksum(w[p, q, k, a] for a in A_from(h) for q in range(P)) + quicksum(
w[q, p, k, a] for a in A_to(h) for q in range(P)) + y[p, k, h] + z[p, k, h] <= 0 for p in range(P))
else:
# there are components on the head
mdl.addConstrs(quicksum(w[p, q, k, a] for a in A_from(h) for q in range(P)) + quicksum(
w[q, p, k, a] for a in A_to(h) for q in range(P)) + y[p, k, h] + z[p, k, h] <= 2 *
CompOfPoint[component_result[k][h]][p] for p in range(P))
# each head corresponds to a maximum of one point in each cycle
mdl.addConstrs(
quicksum(w[p, q, k, a] for p in range(P) for q in range(P) for a in A_contain(h))
+ quicksum(y[p, k, h] + z[p, k, h] for p in range(P)) <= 2 for k in range(K) for h
in range(H))
# mdl.addConstrs(
# quicksum((y[p, k, h] + z[p, k, h]) for p in range(P)) <= 2 for k in range(K) for h in
# range(H))
# task continuity (for the same point the entering head and the leaving head should be same)
mdl.addConstrs(quicksum(w[p, q, k, a] for p in range(P) for a in A_to(h)) + y[q, k, h] == quicksum(
w[q, p, k, a] for p in range(P) for a in A_from(h)) + z[q, k, h] for k in range(K) for h in range(H) for q in
range(P))
mdl.addConstrs(
y[p, k, h] <= quicksum(w[p, q, k, a] for q in range(P) for a in A_from(h)) + z[p, k, h] for h in range(H) for p
in range(P) for k in range(K))
mdl.addConstrs(
z[p, k, h] <= quicksum(w[q, p, k, a] for q in range(P) for a in A_to(h)) + y[p, k, h] for h in range(H) for p in
range(P) for k in range(K))
# one arrival point per cycle
mdl.addConstrs(quicksum(y[p, k, h] for p in range(P) for h in range(H)) == 1 for k in range(K))
# one departure point per cycle
mdl.addConstrs(quicksum(z[p, k, h] for p in range(P) for h in range(H)) == 1 for k in range(K))
# one enter edge per point
mdl.addConstrs(quicksum(y[q, k, h] for h in range(H) for k in range(K)) + quicksum(
w[p, q, k, a] for p in range(P) for a in range(len(A)) for k in range(K)) == 1 for q in range(P))
# one leaving edge per point
mdl.addConstrs(quicksum(z[q, k, h] for h in range(H) for k in range(K)) + quicksum(
w[q, p, k, a] for p in range(P) for a in range(len(A)) for k in range(K)) == 1 for q in range(P))
# subtour eliminate constraint
n = mdl.addVars(list_range(P), vtype=GRB.CONTINUOUS)
m = mdl.addVars(list_range(P), vtype=GRB.CONTINUOUS)
v = mdl.addVars(list_range(P), list_range(P), vtype=GRB.CONTINUOUS)
mdl.addConstrs(
m[p] + quicksum(v[p, q] for q in range(P)) - n[p] - quicksum(v[q, p] for q in range(P)) == 1 for p in range(P))
mdl.addConstrs(
v[p, q] <= (P - K + 1) * quicksum(w[p, q, k, a] for a in range(len(A)) for k in range(K)) for p in range(P) for
q in range(P))
mdl.addConstrs(n[p] <= (P - K + 1) * quicksum(y[p, k, h] for h in range(H) for k in range(K)) for p in range(P))
mdl.addConstrs(m[p] <= (P - K + 1) * quicksum(z[p, k, h] for h in range(H) for k in range(K)) for p in range(P))
# objective
mdl.setObjective(
quicksum(d_FW[p, k, h] * y[p, k, h] for p in range(P) for k in range(K) for h in range(H)) + quicksum(
d_PL[p, q, a] * w[p, q, k, a] for k in range(K) for p in range(P) for q in range(P) for a in
range(len(A))) + quicksum(d_BW[p, k, h] * z[p, k, h] for p in range(P) for k in range(K) for h in range(H)),
GRB.MINIMIZE)
mdl.optimize()
if figure:
for k in range(K):
plt.scatter([p[0] for p in pos[0:8]], [p[1] for p in pos[0:8]], color='red')
plt.scatter([p[0] for p in pos[8:]], [p[1] for p in pos[8:]], color='blue')
for p in range(P):
for q in range(P):
for idx, arc in enumerate(A):
if abs(w[p, q, k, idx].x) > 1e-6:
h1, h2 = arc
plt.plot([pos[p][0] - h1 * head_interval, pos[q][0] - h2 * head_interval],
[pos[p][1], pos[q][1]], linestyle='-.', color='black', linewidth=1)
plt.text(pos[p][0] - h1 * head_interval, pos[p][1], 'H%d' % (h1 + 1), ha='center',
va='bottom', size=10)
for h in range(H):
if abs(y[p, k, h].x) > 1e-6:
plt.plot([pos[p][0] - h * head_interval, 500], [pos[p][1], 100], linestyle='-.', color='black',
linewidth=1)
plt.text(pos[p][0] - h * head_interval, pos[p][1], 'H%d' % (h + 1), ha='center', va='bottom',
size=10)
for h in range(H):
if abs(z[p, k, h].x) > 1e-6:
plt.plot([pos[p][0] - h * head_interval, 900], [pos[p][1], 100], linestyle='-.', color='black',
linewidth=1)
plt.text(pos[p][0] - h * head_interval, pos[p][1], 'H%d' % (h + 1), ha='center', va='bottom',
size=10)
plt.show()
# convert model result into standard form
placement_result, head_sequence = [[-1 for _ in range(H)] for _ in range(K)], [[] for _ in range(K)]
for k in range(K):
arc_list = []
for p in range(P):
for q in range(P):
for idx, arc in enumerate(A):
if abs(w[p, q, k, idx].x) > 1e-6:
plt.plot([pos[p][0], pos[q][0]], [pos[p][1], pos[q][1]], linestyle='-.', color='black',
linewidth=1)
placement_result[k][arc[0]], placement_result[k][arc[1]] = p, q
arc_list.append(arc)
head, idx = -1, 0
for p in range(P):
for h in range(H):
if abs(y[p, k, h].x) > 1e-6:
head = h
if placement_result[k][h] == -1:
placement_result[k][h] = p
assert placement_result[k][h] == p
if abs(z[p, k, h].x) > 1e-6:
if placement_result[k][h] == -1:
placement_result[k][h] = p
assert placement_result[k][h] == p
while idx < len(arc_list):
for i, arc in enumerate(arc_list):
if arc[0] == head:
head_sequence[k].append(head)
head = arc[1]
idx += 1
break
head_sequence[k].append(head)
return placement_result, head_sequence
@timer_wrapper
def optimizer_mathmodel(component_data, pcb_data, hinter=True):
component_result, cycle_result, feeder_slot_result = head_task_model(component_data, pcb_data, hinter)
# placement_result, head_sequence = place_route_model(component_data, pcb_data, component_result, feeder_slot_result)
placement_result, head_sequence = place_allocate_sequence_route_generation(component_data, pcb_data,
component_result, cycle_result,
feeder_slot_result)
return component_result, cycle_result, feeder_slot_result, placement_result, head_sequence
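A minimal usage sketch (the file name is a placeholder; the load_data routine shown later in this commit returns per-machine dictionaries, so machine 0 is passed to this single-machine model):
# hypothetical input file under data/; hinter=False suppresses the Gurobi log
pcb_data, component_data, feeder_data = load_data('example.txt')
component_result, cycle_result, feeder_slot_result, placement_result, head_sequence = \
    optimizer_mathmodel(component_data[0], pcb_data[0], hinter=False)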

View File

@@ -1,4 +1,3 @@
import itertools
from base_optimizer.optimizer_common import *

View File

@@ -0,0 +1,507 @@
from base_optimizer.optimizer_common import *
from base_optimizer.smtopt_route import *
from base_optimizer.result_analysis import *
def list_range(start, end=None):
return list(range(start)) if end is None else list(range(start, end))
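list_range mirrors the built-in range but returns a concrete list, which is convenient to reuse as a Gurobi index set; for example:
print(list_range(3))     # [0, 1, 2]
print(list_range(2, 5))  # [2, 3, 4]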
@timer_wrapper
def gurobi_optimizer(pcb_data, component_data, feeder_data, reduction=True, partition=True, initial=False, hinter=True):
# data preparation: convert data to index
component_list, nozzle_list = defaultdict(int), defaultdict(int)
component_feeder = defaultdict(int)
cpidx_2_part, nzidx_2_nozzle, cpidx_2_nzidx = {}, {}, {}
arg_slot_rng = None if len(feeder_data) == 0 else [feeder_data.iloc[0].slot, feeder_data.iloc[-1].slot]
for idx, data in component_data.iterrows():
part, nozzle = data.part, data.nz
cpidx_2_part[idx] = part
nz_key = [key for key, val in nzidx_2_nozzle.items() if val == nozzle]
nz_idx = len(nzidx_2_nozzle) if len(nz_key) == 0 else nz_key[0]
nzidx_2_nozzle[nz_idx] = nozzle
component_list[part] = 0
component_feeder[part] = data.fdn
cpidx_2_nzidx[idx] = nz_idx
for _, data in pcb_data.iterrows():
idx = component_data[component_data.part == data.part].index.tolist()[0]
nozzle = component_data.loc[idx].nz
nozzle_list[nozzle] += 1
component_list[data.part] += 1
part_feederbase = defaultdict(int)
if feeder_data is not None:
for _, data in feeder_data.iterrows():
idx = -1
for idx, part_ in cpidx_2_part.items():
if data.part == part_:
break
assert idx != -1
part_feederbase[idx] = data.slot # part index - slot
ratio = 1 if reduction else 2
I, J = len(cpidx_2_part.keys()), len(nzidx_2_nozzle.keys())
# === determine the hyper-parameter of L ===
# first phase: calculate the number of heads for each type of nozzle
nozzle_heads = defaultdict(int)
for nozzle in nozzle_list.keys():
nozzle_heads[nozzle] = 1
while sum(nozzle_heads.values()) != max_head_index:
max_cycle_nozzle = None
for nozzle, head_num in nozzle_heads.items():
if max_cycle_nozzle is None or nozzle_list[nozzle] / head_num > nozzle_list[max_cycle_nozzle] / \
nozzle_heads[max_cycle_nozzle]:
max_cycle_nozzle = nozzle
assert max_cycle_nozzle is not None
nozzle_heads[max_cycle_nozzle] += 1
nozzle_comp_points = defaultdict(list)
for part, points in component_list.items():
idx = component_data[component_data.part == part].index.tolist()[0]
nozzle = component_data.loc[idx].nz
nozzle_comp_points[nozzle].append([part, points])
level = 1 if len(component_list) == 1 or len(component_list) % max_head_index == 0 else 2
part_assignment, cycle_assignment = [], []
def aux_func(info):
return max(map(lambda points: max([p[1] for p in points]), info))
pre_objbst, pre_changetime = None, None
def terminate_condition(mdl, where):
if where == GRB.Callback.MIP:
objbst, objbnd = mdl.cbGet(GRB.Callback.MIP_OBJBST), mdl.cbGet(GRB.Callback.MIP_OBJBND)
changetime = mdl.cbGet(GRB.Callback.RUNTIME)
nonlocal pre_objbst, pre_changetime
# condition: value change
if abs(objbst - 1e+100) > 1:  # avoid terminating early before a feasible solution is found
if pre_objbst and abs(pre_objbst - objbst) < 1e-3:
if pre_changetime and changetime - pre_changetime > 90 * (1 - objbnd / objbst):
mdl.terminate()
else:
pre_changetime = changetime
pre_objbst = objbst
def recursive_assign(assign_points, nozzle_compo_points, cur_level, total_level) -> int:
def func(points):
return map(lambda points: max([p[1] for p in points]), points)
if cur_level > total_level and sum(func(nozzle_compo_points.values())) == 0:
return 0
elif assign_points <= 0 and cur_level == 1:
return -1 # backtrack
elif assign_points <= 0 or cur_level > total_level:
return 1 # fail
nozzle_compo_points_cpy = copy.deepcopy(nozzle_compo_points)
prev_assign = 0
for part in part_assignment[cur_level - 1]:
if part != -1:
prev_assign += 1
head_idx = 0
for nozzle, head in nozzle_heads.items():
while head:
min_idx = -1
for idx, (part, points) in enumerate(nozzle_compo_points_cpy[nozzle]):
if points >= assign_points and (
min_idx == -1 or points < nozzle_compo_points_cpy[nozzle][min_idx][1]):
min_idx = idx
part_assignment[cur_level - 1][head_idx] = -1 if min_idx == -1 else \
nozzle_compo_points_cpy[nozzle][min_idx][0]
if min_idx != -1:
nozzle_compo_points_cpy[nozzle][min_idx][1] -= assign_points
head -= 1
head_idx += 1
cycle_assignment[cur_level - 1] = assign_points
for part in part_assignment[cur_level - 1]:
if part != -1:
prev_assign -= 1
if prev_assign == 0:
res = 1
else:
points = min(len(pcb_data) // max_head_index + 1, aux_func(nozzle_compo_points_cpy.values()))
res = recursive_assign(points, nozzle_compo_points_cpy, cur_level + 1, total_level)
if res == 0:
return 0
elif res == 1:
# All cycles have been completed, but there are still points left to be allocated
return recursive_assign(assign_points - 1, nozzle_compo_points, cur_level, total_level)
# second phase: (greedy) recursive search to assign points for each cycle set and obtain an initial solution
while True:
part_assignment = [[-1 for _ in range(max_head_index)] for _ in range(level)]
cycle_assignment = [-1 for _ in range(level)]
points = min(len(pcb_data) // max_head_index + 1, max(component_list.values()))
if recursive_assign(points, nozzle_comp_points, 1, level) == 0:
break
level += 1
L = len(cycle_assignment) if partition else len(pcb_data)
S = ratio * sum(component_feeder.values()) if len(feeder_data) == 0 else arg_slot_rng[-1] - arg_slot_rng[0] + 1 # the available feeder num
M = len(pcb_data) # a sufficiently large number (number of placement points)
HC = [[0 for _ in range(J)] for _ in range(I)]
for i in range(I):
for j in range(J):
HC[i][j] = 1 if cpidx_2_nzidx[i] == j else 0
mdl = Model('SMT')
mdl.setParam('Seed', 0)
mdl.setParam('OutputFlag', hinter) # set whether output the debug information
mdl.setParam('TimeLimit', 3600)
mdl.setParam('PoolSearchMode', 2)
mdl.setParam('PoolSolutions', 100)
mdl.setParam('PoolGap', 1e-4)
# mdl.setParam('MIPFocus', 2)
mdl.setParam("Heuristics", 0.5)
# Use only if other methods, including exploring the tree with the default settings, do not yield a viable solution
# mdl.setParam("ZeroObjNodes", 100)
# === Decision Variables ===
x = mdl.addVars(list_range(I), list_range(S), list_range(max_head_index), list_range(L), vtype=GRB.BINARY, name='x')
y = mdl.addVars(list_range(I), list_range(max_head_index), list_range(L), vtype=GRB.BINARY, name='y')
v = mdl.addVars(list_range(S), list_range(max_head_index), list_range(L), vtype=GRB.BINARY, name='v')
c = mdl.addVars(list_range(I), list_range(max_head_index), list_range(L), vtype=GRB.INTEGER, name='c')
mdl.addConstrs(
c[i, h, l] <= component_list[cpidx_2_part[i]] for i in range(I) for h in range(max_head_index) for l in
range(L))
f = {}
for i in range(I):
if i not in part_feederbase.keys():
for s in range(S):
f[s, i] = mdl.addVar(vtype=GRB.BINARY, name='f_' + str(s) + '_' + str(i))
else:
for s in range(S):
f[s, i] = 1 if part_feederbase[i] == s + arg_slot_rng[0] else 0
p = mdl.addVars(list_range(-(max_head_index - 1) * ratio, S), list_range(L), vtype=GRB.BINARY, name='p')
z = mdl.addVars(list_range(J), list_range(max_head_index), list_range(L), vtype=GRB.BINARY)
d = mdl.addVars(list_range(L), list_range(max_head_index), vtype=GRB.INTEGER, name='d')
d_plus = mdl.addVars(list_range(J), list_range(max_head_index), list_range(L), vtype=GRB.INTEGER,
name='d_plus')
d_minus = mdl.addVars(list_range(J), list_range(max_head_index), list_range(L), vtype=GRB.INTEGER,
name='d_minus')
max_cycle = math.ceil(len(pcb_data) / max_head_index)
PU = mdl.addVars(list_range(-(max_head_index - 1) * ratio, S), list_range(L), vtype=GRB.INTEGER, name='PU')
WL = mdl.addVars(list_range(L), vtype=GRB.INTEGER, ub=max_cycle, name='WL')
NC = mdl.addVars(list_range(max_head_index), vtype=GRB.INTEGER, name='NC')
part_2_cpidx = defaultdict(int)
for idx, part in cpidx_2_part.items():
part_2_cpidx[part] = idx
if initial:
# initial some variables to speed up the search process
# ensure the priority of the workload assignment
cycle_index = sorted(range(len(cycle_assignment)), key=lambda k: cycle_assignment[k], reverse=True)
part_list = []
for cycle in cycle_index:
cycle_part = part_assignment[cycle]
for part in cycle_part:
if part != -1 and part not in part_list:
part_list.append(part)
slot = 0
for part in part_list:
if feeder_data is not None:
while slot in feeder_data.keys():
slot += 1 # skip assigned feeder slot
if part_2_cpidx[part] in part_feederbase.keys():
continue
part_feederbase[part_2_cpidx[part]] = slot
f[slot, part_2_cpidx[part]].Start = 1
slot += 1
for idx, cycle in enumerate(cycle_index):
WL[idx].Start = cycle_assignment[cycle]
for h in range(max_head_index):
part = part_assignment[cycle][h]
if part == -1:
continue
i = part_2_cpidx[part]
y[i, h, idx].Start = 1
v[part_feederbase[i], h, idx].Start = 1
# === Objective ===
mdl.setObjective(Fit_cy * quicksum(WL[l] for l in range(L)) + 2 * Fit_nz * quicksum(
NC[h] for h in range(max_head_index)) + Fit_pu * quicksum(
PU[s, l] for s in range(-(max_head_index - 1) * ratio, S) for l in range(L)))
# === Constraint ===
if not partition:
mdl.addConstrs(WL[l] <= 1 for l in range(L))
# work completion
mdl.addConstrs(c[i, h, l] == WL[l] * y[i, h, l] for i in range(I) for h in range(max_head_index) for l in range(L))
# mdl.addConstrs(
# c[i, h, l] <= max_cycle * y[i, h, l] for i in range(I) for h in range(max_head_index) for l in range(L))
# mdl.addConstrs(c[i, h, l] <= WL[l] for i in range(I) for h in range(max_head_index) for l in range(L))
# mdl.addConstrs(
# c[i, h, l] >= WL[l] - max_cycle * (1 - y[i, h, l]) for i in range(I) for h in range(max_head_index) for l in
# range(L))
mdl.addConstrs(
quicksum(c[i, h, l] for h in range(max_head_index) for l in range(L)) == component_list[cpidx_2_part[i]] for i
in range(I))
# variable constraint
mdl.addConstrs(quicksum(y[i, h, l] for i in range(I)) <= 1 for h in range(max_head_index) for l in range(L))
# simultaneous pick
for s in range(-(max_head_index - 1) * ratio, S):
rng = list(range(max(0, -math.floor(s / ratio)), min(max_head_index, math.ceil((S - s) / ratio))))
for l in range(L):
mdl.addConstr(quicksum(v[s + h * ratio, h, l] for h in rng) <= max_head_index * p[s, l])
mdl.addConstr(quicksum(v[s + h * ratio, h, l] for h in rng) >= p[s, l])
mdl.addConstrs(PU[s, l] == p[s, l] * WL[l] for s in range(-(max_head_index - 1) * ratio, S) for l in range(L))
# mdl.addConstrs(PU[s, l] <= max_cycle * p[s, l] for s in range(-(max_head_index - 1) * ratio, S) for l in range(L))
# mdl.addConstrs(PU[s, l] <= WL[l] for s in range(-(max_head_index - 1) * ratio, S) for l in range(L))
# mdl.addConstrs(
# PU[s, l] >= WL[l] - max_cycle * (1 - p[s, l]) for s in range(-(max_head_index - 1) * ratio, S) for l in
# range(L))
# nozzle change
mdl.addConstrs(
z[j, h, l] - z[j, h, l + 1] == d_plus[j, h, l] - d_minus[j, h, l] for l in range(L - 1) for j in range(J) for h
in range(max_head_index))
mdl.addConstrs(z[j, h, 0] - z[j, h, L - 1] == d_plus[j, h, L - 1] - d_minus[j, h, L - 1] for j in range(J) for h
in range(max_head_index))
mdl.addConstrs(
2 * d[l, h] == quicksum(d_plus[j, h, l] for j in range(J)) + quicksum(d_minus[j, h, l] for j in range(J)) for l
in range(L) for h in range(max_head_index))
mdl.addConstrs(NC[h] == quicksum(d[l, h] for l in range(L)) for h in range(max_head_index))
mdl.addConstrs(quicksum(y[i, h, l] for i in range(I) for h in range(max_head_index)) * M >= WL[l] for l in range(L))
# nozzle-component compatibility
mdl.addConstrs(
y[i, h, l] <= quicksum(HC[i][j] * z[j, h, l] for j in range(J)) for i in range(I) for h in range(max_head_index)
for l in range(L))
# available number of feeder
mdl.addConstrs(quicksum(f[s, i] for s in range(S)) <= component_feeder[cpidx_2_part[i]] for i in range(I))
# available number of nozzle
mdl.addConstrs(quicksum(z[j, h, l] for h in range(max_head_index)) <= max_head_index for j in range(J) for l in range(L))
# upper limit for occupation for feeder slot
mdl.addConstrs(quicksum(f[s, i] for i in range(I)) <= 1 for s in range(S))
mdl.addConstrs(
quicksum(v[s, h, l] for s in range(S)) >= quicksum(y[i, h, l] for i in range(I)) for h in range(max_head_index)
for l in range(L))
# others
mdl.addConstrs(quicksum(z[j, h, l] for j in range(J)) <= 1 for h in range(max_head_index) for l in range(L))
mdl.addConstrs(
quicksum(x[i, s, h, l] for h in range(max_head_index) for l in range(L)) >= f[s, i] for i in range(I)
for s in range(S))
mdl.addConstrs(
quicksum(x[i, s, h, l] for h in range(max_head_index) for l in range(L)) <= M * f[s, i] for i in
range(I) for s in range(S))
# mdl.addConstrs(
# f[s, i] >= x[i, s, h, l] for s in range(S) for i in range(I) for h in range(max_head_index) for l in range(L))
#
# mdl.addConstrs(
# quicksum(x[i, s, h, l] for h in range(max_head_index) for l in range(L)) >= f[s, i] for s in
# range(S) for i in range(I))
# the constraints to speed up the search process
mdl.addConstrs(
quicksum(x[i, s, h, l] for i in range(I) for s in range(S)) <= 1 for h in range(max_head_index) for l
in range(L))
if reduction:
mdl.addConstrs(WL[l] >= WL[l + 1] for l in range(L - 1))
mdl.addConstr(quicksum(WL[l] for l in range(L)) <= sum(cycle_assignment))
mdl.addConstr(quicksum(WL[l] for l in range(L)) >= math.ceil(len(pcb_data) / max_head_index))
mdl.addConstrs(quicksum(z[j, h, l] for j in range(J) for h in range(max_head_index)) >= quicksum(
z[j, h, l + 1] for j in range(J) for h in range(max_head_index)) for l in range(L - 1))
mdl.addConstrs(y[i, h, l] <= WL[l] for i in range(I) for h in range(max_head_index) for l in range(L))
mdl.addConstrs(v[s, h, l] <= WL[l] for s in range(S) for h in range(max_head_index) for l in range(L))
mdl.addConstrs(
x[i, s, h, l] >= y[i, h, l] + v[s, h, l] - 1 for i in range(I) for s in range(S) for h in range(max_head_index)
for l in range(L))
mdl.addConstrs(
x[i, s, h, l] <= y[i, h, l] for i in range(I) for s in range(S) for h in range(max_head_index)
for l in range(L))
mdl.addConstrs(
x[i, s, h, l] <= v[s, h, l] for i in range(I) for s in range(S) for h in range(max_head_index)
for l in range(L))
# === search process ===
mdl.update()
# mdl.write('mdl.lp')
if hinter:
print('num of constrs: ', str(len(mdl.getConstrs())), ', num of vars: ', str(len(mdl.getVars())))
mdl.optimize(terminate_condition)
# mdl.optimize()
# === result generation ===
opt_res_list = defaultdict(OptResult)
if mdl.Status == GRB.OPTIMAL or mdl.Status == GRB.INTERRUPTED or mdl.Status == GRB.TIME_LIMIT:
# === selection from solution pool ===
component_pos = defaultdict(list[Point])
for _, data in pcb_data.iterrows():
component_index = component_data[component_data.part == data.part].index.tolist()[0]
component_pos[component_index].append(Point(data.x, data.y))
for part in component_pos.keys():
component_pos[part] = sorted(component_pos[part], key=lambda pos: (pos.x, pos.y))
min_dist, solution_number = None, 0
for sol_counter in range(mdl.SolCount):
mdl.Params.SolutionNumber = sol_counter
opt_res = OptResult()
# === convert to the standard head-assignment solution ===
for l in range(L):
if abs(WL[l].Xn) <= 1e-4:
continue
opt_res.cycle_assign.append(round(WL[l].Xn))
opt_res.component_assign.append([-1] * max_head_index)
opt_res.feeder_slot_assign.append([-1] * max_head_index)
for h in range(max_head_index):
for i in range(I):
if abs(y[i, h, l].Xn) <= 1e-4:
continue
opt_res.component_assign[-1][h] = i
for s in range(S):
if abs(v[s, h, l].Xn - 1) < 1e-4 and opt_res.component_assign[-1][h] != -1:
opt_res.feeder_slot_assign[-1][h] = s
# convert feeder slot indices according to the head positions
cp_avg_head, cp_sum_cycle = defaultdict(float), defaultdict(int)
for cycle, component_assign in enumerate(opt_res.component_assign):
for head, part in enumerate(component_assign):
if part == -1:
continue
cp_avg_head[part] += opt_res.cycle_assign[cycle] * head
cp_sum_cycle[part] += opt_res.cycle_assign[cycle]
for part, head in cp_avg_head.items():
cp_avg_head[part] = head / cp_sum_cycle[part]
avg_position = sum([data.x - cp_avg_head[part_2_cpidx[data.part]] * head_interval for _, data in
pcb_data.iterrows()]) / len(pcb_data)
avg_slot = 0
D_PU, D_PL, D_BW, D_FW = 0, 0, 0, 0
for cycle, slots in enumerate(opt_res.feeder_slot_assign):
min_slot, max_slot = max_slot_index, 0
for head, slot in enumerate(slots):
if slot == -1:
continue
min_slot = min(min_slot, slot - head * ratio)
max_slot = max(max_slot, slot - head * ratio)
avg_slot += (max_slot - min_slot) * opt_res.cycle_assign[cycle]
D_PU += (max_slot - min_slot) * slot_interval * opt_res.cycle_assign[cycle]  # pickup path
avg_slot /= sum(opt_res.cycle_assign)
start_slot = round((avg_position + stopper_pos[0] - slotf1_pos[0]) / slot_interval + avg_slot / 2) + 1
for cycle in range(len(opt_res.feeder_slot_assign)):
for head in range(max_head_index):
if (slot := opt_res.feeder_slot_assign[cycle][head]) == -1:
continue
opt_res.feeder_slot_assign[cycle][head] = start_slot + slot * (2 if ratio == 1 else 1)
component_pos_counter = defaultdict(int)
cycle_place_pos = defaultdict(list[Point])
for head in range(max_head_index):
for cycle in range(len(opt_res.cycle_assign)):
if (part := opt_res.component_assign[cycle][head]) == -1:
continue
avg_place_pos = Point(0, 0, _h=head)
for counter in range(round(opt_res.cycle_assign[cycle])):
avg_place_pos.x = (1 - 1.0 / (counter + 1)) * avg_place_pos.x + (
component_pos[part][component_pos_counter[part]].x - head * head_interval) / (
counter + 1)
avg_place_pos.y = (1 - 1.0 / (counter + 1)) * avg_place_pos.y + component_pos[part][
component_pos_counter[part]].y / (counter + 1)
component_pos_counter[part] += 1
avg_place_pos.x += stopper_pos[0]
avg_place_pos.y += stopper_pos[1]
cycle_place_pos[cycle].append(avg_place_pos)
for cycle in range(len(opt_res.cycle_assign)):
min_slot, max_slot = max_slot_index, 0
for head in range(max_head_index):
if (slot := opt_res.feeder_slot_assign[cycle][head]) == -1:
continue
min_slot = min(min_slot, slot - head * interval_ratio)
max_slot = max(max_slot, slot - head * interval_ratio)
# cycle_place_pos[cycle] = sorted(cycle_place_pos[cycle], key=lambda pt: pt.x)
pick_pos = Point(slotf1_pos[0] + (min_slot + max_slot) / 2 * slot_interval, slotf1_pos[1])
_, seq = dynamic_programming_cycle_route(cycle_place_pos[cycle], pick_pos)
head_position = [Point(0, 0) for _ in range(max_head_index)]
for point in cycle_place_pos[cycle]:
head_position[point.h] = point
for idx in range(len(seq) - 1):
h1, h2 = seq[idx], seq[idx + 1]
D_PL += max(abs(head_position[h1].x - head_position[h2].x),
abs(head_position[h1].y - head_position[h2].y)) * opt_res.cycle_assign[cycle]
# opt_res.placement_assign, opt_res.head_sequence = scan_based_placement_route_generation(component_data,
# pcb_data,
# opt_res.component_assign,
# opt_res.cycle_assign,
# opt_res.feeder_slot_assign,
# hinter=False)
# info = placement_info_evaluation(component_data, pcb_data, opt_res)
# print(f'{info.place_distance + info.pickup_distance: .3f}\t{D_PL + D_PU: .3f}')
opt_res_list[sol_counter] = opt_res
solution_number = 0
mdl.Params.SolutionNumber = 0
if hinter:
print('total cost = {}'.format(mdl.objval))
print('cycle = {}, nozzle change = {}, pick up = {}'.format(quicksum(WL[l].Xn for l in range(L)), quicksum(
NC[h].Xn for h in range(max_head_index)), quicksum(
PU[s, l].Xn for s in range(-(max_head_index - 1) * ratio, S) for l in range(L))))
print('workload: ')
for l in range(L):
print(WL[l].Xn, end=', ')
print('')
print('result')
print('component assignment: ', opt_res_list[solution_number].component_assign)
print('feeder assignment: ', opt_res_list[solution_number].feeder_slot_assign)
print('cycle assignment: ', opt_res_list[solution_number].cycle_assign)
return opt_res_list[solution_number].component_assign, opt_res_list[solution_number].feeder_slot_assign, \
opt_res_list[solution_number].cycle_assign

File diff suppressed because it is too large

View File

@@ -1,83 +1,205 @@
import random
import copy
from base_optimizer.optimizer_common import *
def load_data(filename: str, default_feeder_limit=1, load_cp_data=True, load_feeder_data=True, cp_auto_register=False):
# load PCB data
def load_data(filename: str, load_feeder=False, auto_register=True):
filename = 'data/' + filename
pcb_data = pd.DataFrame(pd.read_csv(filepath_or_buffer=filename, sep='\t', header=None))
if len(pcb_data.columns) <= 17:
step_col = ["ref", "x", "y", "z", "r", "part", "desc", "fdr", "nz", "hd", "cs", "cy", "sk", "bl", "ar",
part_content, step_content, feeder_content = False, False, False
part_start_line, step_start_line, feeder_start_line = -1, -1, -1
part_end_line, step_end_line, feeder_end_line = -1, -1, -1
line_counter = 0
with open(filename, 'r') as file:
line = file.readline()
while line:
if line == '[Part]\n':
part_content, part_start_line = True, line_counter
elif line == '[Step]\n':
step_content, step_start_line = True, line_counter
elif line == '[Feeder]\n':
feeder_content, feeder_start_line = True, line_counter
elif line == '\n':
if part_content:
part_content, part_end_line = False, line_counter
elif step_content:
step_content, step_end_line = False, line_counter
elif feeder_content:
feeder_content, feeder_end_line = False, line_counter
line_counter += 1
line = file.readline()
if part_content:
part_end_line = line_counter
elif feeder_content:
feeder_end_line = line_counter
else:
step_end_line = line_counter
file_data = pd.DataFrame(
pd.read_csv(filepath_or_buffer=filename, skiprows=step_start_line + 1, nrows=step_end_line - step_start_line + 1,
sep='\t', header=None))
if len(file_data.columns) == 22:
data_col = ["machine", "bl", "ref", "x", "y", "z", "r", "part", "desc", "group", "fdr", "nz", "hd", "cs", "cy",
"sk", "ar", "fid", "pop", "pl", "lv", "pr"]
elif len(file_data.columns) <= 17:
data_col = ["ref", "x", "y", "z", "r", "part", "desc", "fdr", "nz", "hd", "cs", "cy", "sk", "bl", "ar",
"pl", "lv"]
elif len(pcb_data.columns) <= 18:
step_col = ["ref", "x", "y", "z", "r", "part", "desc", "fdr", "nz", "hd", "cs", "cy", "sk", "bl", "ar", "fid",
elif len(file_data.columns) <= 18:
data_col = ["ref", "x", "y", "z", "r", "part", "desc", "fdr", "nz", "hd", "cs", "cy", "sk", "bl", "ar", "fid",
"pl", "lv"]
else:
step_col = ["ref", "x", "y", "z", "r", "part", "desc", "fdr", "nz", "hd", "cs", "cy", "sk", "bl", "ar", "fid",
data_col = ["ref", "x", "y", "z", "r", "part", "desc", "fdr", "nz", "hd", "cs", "cy", "sk", "bl", "ar", "fid",
"", "pl", "lv"]
pcb_data.columns = step_col
pcb_data = pcb_data.dropna(axis=1)
file_data.columns = data_col
pcb_data, component_data, feeder_data = defaultdict(pd.DataFrame), defaultdict(pd.DataFrame), defaultdict(
pd.DataFrame)
# coordinate system handling
# pcb_data = pcb_data.sort_values(by = ['x', 'y'], ascending = True)
# pcb_data["x"] = pcb_data["x"].apply(lambda x: -x)
# line_data = line_data.dropna(axis=1)
step_col = ["ref", "x", "y", "z", "r", "part", "desc", "fdr", "nz", "hd", "cs", "cy", "sk", "bl", "ar", "pl", "lv"]
machine_name = defaultdict(int)
for _, data in file_data.iterrows():
if "machine" in file_data.columns:
if data['machine'] not in machine_name.keys():
machine_name[data['machine']] = len(machine_name)
# check that components are registered
part_feeder_assign = defaultdict(set)
part_col = ["part", "desc", "fdr", "nz", 'camera', 'group', 'feeder-limit', 'points']
machine_index = machine_name[data['machine']]
else:
machine_index = 0
# pcb_data[machine_index] = pcb_data[machine_index]._append(data[step_col], ignore_index=True)
pcb_data[machine_index] = pd.concat([pcb_data[machine_index], pd.DataFrame(data[step_col]).T], ignore_index=True)
part_col = ["part", "fdr", "nz", 'fdn']
try:
if load_cp_data:
component_data = pd.DataFrame(pd.read_csv(filepath_or_buffer='component.txt', sep='\t', header=None),
columns=part_col)
if part_start_line != -1:
part_data = pd.DataFrame(
pd.read_csv(filepath_or_buffer=filename, sep='\t', header=None, skiprows=part_start_line + 1,
nrows=part_end_line - part_start_line - 1))
part_data.columns = part_col
else:
component_data = pd.DataFrame(columns=part_col)
part_data = pd.DataFrame(columns=part_col)
except:
component_data = pd.DataFrame(columns=part_col)
part_data = pd.DataFrame(columns=part_col)
part_data['points'] = 0
part_col = ["part", "fdr", "nz", 'fdn', 'points']
machine_num = len(pcb_data)
for machine_index in range(machine_num):
component_data[machine_index] = pd.DataFrame(columns=part_col)
component_slot = defaultdict(set)
for idx, data in pcb_data[machine_index].iterrows():
if (pos := data.fdr.find('F')) != 0:
pcb_data[machine_index].loc[idx, 'fdr'] = data.fdr[pos:pos + 1:1] + data.fdr[pos + 2::]
if (pos := data.nz.find('F')) != -1:
pcb_data[machine_index].loc[idx, 'nz'] = data.nz[0:pos:1] + data.nz[pos + 1::]
if isinstance(data.hd, str) and (pos := data.hd.find('F')) != -1:
pcb_data[machine_index].loc[idx, 'hd'] = int(data.hd[pos + 2::])
for _, data in pcb_data.iterrows():
part, nozzle = data.part, data.nz.split(' ')[1]
slot = data['fdr'].split(' ')[0]
if part not in component_data['part'].values:
if not cp_auto_register:
raise Exception("unregistered component: " + component_data['part'].values)
if part not in component_data[machine_index]['part'].values:
if not auto_register:
raise Exception("unregistered component: " + component_data[machine_index]['part'].values)
else:
component_data = pd.concat([component_data, pd.DataFrame(
[part, '', 'SM8', nozzle, '飞行相机1', 'CHIP-Rect', default_feeder_limit, 0], index=part_col).T],
ignore_index=True)
component_data[machine_index] = pd.concat([component_data[machine_index], pd.DataFrame(
[part, 'SM8', nozzle, 0, 0], index=part_col).T], ignore_index=True)
# warning_info = 'register component ' + part + ' with default feeder type'
# warnings.warn(warning_info, UserWarning)
part_index = component_data[component_data['part'] == part].index.tolist()[0]
part_feeder_assign[part].add(slot)
component_data.loc[part_index]['points'] += 1
if nozzle != 'A' and component_data.loc[part_index]['nz'] != nozzle:
warning_info = 'the nozzle type of component ' + part + ' is not consistent with the pcb data'
part_index = component_data[machine_index][component_data[machine_index]['part'] == part].index.tolist()[0]
component_data[machine_index].loc[part_index, 'points'] += 1
if (fdr := data['fdr'].split(' ')[0]) not in component_slot[part]:
component_data[machine_index].loc[part_index, 'fdn'] += 1
component_slot[part].add(fdr)
for idx, data in part_data.iterrows():
if data.part in component_slot.keys():
part_data.loc[idx, 'fdn'] = part_data.loc[idx, 'fdn'] - len(component_slot[data.part])
assert part_data.loc[idx, 'fdn'] >= 0
for idx, data in part_data.iterrows():
for machine_index in range(machine_num):
if data.part not in component_data[machine_index].part.values:
continue
part_index = component_data[machine_index][component_data[machine_index].part == data.part].index.tolist()[
0]
if component_data[machine_index].loc[part_index].nz != data.nz:
component_data[machine_index].loc[part_index].nz = data.nz
warning_info = 'the nozzle type of component ' + data.part + ' is not consistent with the pcb data'
warnings.warn(warning_info, UserWarning)
component_data[machine_index].loc[part_index].fdr = data.fdr
for idx, data in component_data.iterrows():
if data.fdn == 0:
continue
if data.part in component_data[0].part.values:
part_index = component_data[0][component_data[0].part == data.part].index.tolist()[0]
component_data[0].loc[part_index, 'fdn'] += data.fdn
else:
component_data[0] = pd.concat([component_data[0], pd.DataFrame(data).T], ignore_index=True)
for machine_index in range(machine_num):
for idx, data in component_data[machine_index].iterrows():
if data['fdr'][0:3] == 'SME':  # electric and pneumatic feeders share the same parameters
component_data.at[idx, 'fdr'] = data['fdr'][0:2] + data['fdr'][3:]
component_data[machine_index].at[idx, 'fdr'] = data['fdr'][0:2] + data['fdr'][3:]
# pcb_data[machine_index].sort_values(by="x", ascending=False, inplace=True)
# pcb_data[machine_index].reset_index(inplace=True)
# load feeder base data
feeder_data = pd.DataFrame(columns=['slot', 'part', 'arg'])  # arg marks whether the feeder is pre-assigned, not the assigned quantity
if load_feeder_data:
for _, data in pcb_data.iterrows():
slot, part = data['fdr'].split(' ')
if slot[0] != 'F' and slot[0] != 'R':
continue
slot = int(slot[1:]) if slot[0] == 'F' else int(slot[1:]) + max_slot_index // 2
feeder_data = pd.concat([feeder_data, pd.DataFrame([slot, part, 1]).T])
feeder_columns = ['slot', 'part']
if load_feeder:
try:
if feeder_start_line != -1:
feeder_data = pd.DataFrame(
pd.read_csv(filepath_or_buffer=filename, sep='\t', header=None, skiprows=feeder_start_line + 1,
nrows=feeder_end_line - feeder_start_line - 1))
feeder_data.columns = feeder_columns
else:
feeder_data = pd.DataFrame(columns=feeder_columns)
except:
feeder_data = pd.DataFrame(columns=feeder_columns)
else:
feeder_data = pd.DataFrame(columns=feeder_columns)
feeder_data.drop_duplicates(subset='slot', inplace=True, ignore_index=True)
# randomly remove some of the installed feeders
if load_feeder_data == 2:
drop_index = random.sample(list(range(len(feeder_data))), len(feeder_data) // 2)
feeder_data.drop(index=drop_index, inplace=True)
for idx, data in feeder_data.iterrows():
feeder_data.at[idx, 'slot'] = int(data['slot'][1:])
feeder_data.sort_values(by='slot', ascending=True, inplace=True, ignore_index=True)
pcb_data = pcb_data.sort_values(by="x", ascending=False)
feeder_data_check = defaultdict(str)
for _, data in feeder_data.iterrows():
feeder_data_check[data.slot] = data.part
for machine_index in range(machine_num):
for idx, data in pcb_data[machine_index].iterrows():
slot, part = data.fdr.split(' ')
slot = int(slot[1:])
if feeder_data_check[slot] == '':
feeder_data_check[slot] = part
if feeder_data_check[slot] != part:
warning_info = f'conflict feeder registration PCB: {data.fdr}, BASE: F{slot} {feeder_data_check[slot]}'
# warnings.warn(warning_info, UserWarning)
return pcb_data, component_data, feeder_data
def merge_data(partial_pcb_data, partial_component_data):
assert len(partial_pcb_data) == len(partial_component_data)
machine_num = len(partial_pcb_data)
pcb_data, component_data = copy.deepcopy(partial_pcb_data[0]), copy.deepcopy(partial_component_data[0])
for machine_index in range(1, machine_num):
pcb_data = pd.concat([pcb_data, partial_pcb_data[machine_index]], ignore_index=True)
for _, data in partial_component_data[machine_index].iterrows():
if data.part in component_data.part.values:
part_index = component_data[component_data.part == data.part].index.tolist()[0]
component_data.loc[part_index, 'points'] += data.points
component_data.loc[part_index, 'fdn'] += data.fdn
else:
component_data = pd.concat([component_data, pd.DataFrame(data).T], ignore_index=True)
component_data = component_data[component_data['points'] != 0].reset_index(drop=True)
return pcb_data, component_data
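A minimal round-trip sketch (the file name is a placeholder): data loaded per machine by load_data can be merged back into a single line-level view, e.g. for whole-line evaluation.
partial_pcb_data, partial_component_data, feeder_data = load_data('demo.txt', load_feeder=True)
pcb_data, component_data = merge_data(partial_pcb_data, partial_component_data)
print(len(pcb_data), 'placement points,', len(component_data), 'component types')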

622
estimator.py Normal file
View File

@@ -0,0 +1,622 @@
from generator import *
from base_optimizer.optimizer_interface import *
def exact_assembly_time(pcb_data, component_data):
feeder_data = pd.DataFrame(columns=['slot', 'part'])
component_result, cycle_result, feeder_slot_result = feeder_priority_assignment(component_data, pcb_data,
feeder_data, hinter=False)
placement_result, head_sequence_result = place_allocate_sequence_route_generation(component_data, pcb_data,
component_result, cycle_result,
feeder_slot_result, hinter=False)
opt_res = OptResult(component_result, cycle_result, feeder_slot_result, placement_result, head_sequence_result)
info = placement_info_evaluation(component_data, pcb_data, opt_res)
# return info.metric()
return info.total_time
def error_info(pred_val, real_val, type='train'):
absolute_error = np.array([])
for idx, (t1, t2) in enumerate(np.nditer([pred_val, real_val])):
absolute_error = np.append(absolute_error, abs(t1 - t2) / (t2 + 1e-10) * 100)
if absolute_error[-1] > 10:
print(f'\033[0;31;31midx: {idx + 1: d}, net: {t1: .3f}, real: {t2: .3f}, '
f'gap: {absolute_error[-1]: .3f}\033[0m')
print('')
print(f'mean absolute prediction error for {type} data : {np.average(absolute_error): .2f}% ')
print(f'maximum absolute prediction error for {type} data : {np.max(absolute_error): .2f}% ')
def converter(pcb_data, component_data, assignment):
cp_items = defaultdict(list)
board_width, board_height = pcb_data['x'].max() - pcb_data['x'].min(), pcb_data['y'].max() - pcb_data['y'].min()
_, partial_component_data = convert_line_assigment(None, component_data, assignment)
for machine_index in range(len(assignment)):
cp_item_index = 0
cp_points, cp_nozzle = defaultdict(int), defaultdict(str)
for _, data in partial_component_data[machine_index].iterrows():
feeder_limit, total_points = data.fdn, data.points
surplus_points = total_points % feeder_limit
for _ in range(feeder_limit):
div_points = math.floor(total_points / feeder_limit)
if surplus_points:
div_points += 1
surplus_points -= 1
cp_points[cp_item_index], cp_nozzle[cp_item_index] = div_points, data.nz
cp_item_index += 1
cp_items[machine_index] = [cp_points, cp_nozzle, board_width, board_height]
return cp_items
class Net(torch.nn.Module):
def __init__(self, input_size, hidden_size=1000, output_size=1):
super(Net, self).__init__()
self.fc1 = torch.nn.Linear(input_size, hidden_size)
self.relu = torch.nn.ReLU()  # activation function
self.fc2 = torch.nn.Linear(hidden_size, hidden_size)
# self.relu1 = torch.nn.ReLU()  # activation function
self.fc3 = torch.nn.Linear(hidden_size, output_size)
def forward(self, x):
x = self.fc1(x)
# x = self.relu(x)
x = self.fc2(x)
x = self.relu(x)
x = self.fc3(x)
return x
class LSTMNet(torch.nn.Module):
def __init__(self, input_size, hidden_size=256, output_size=1, num_layers=1):
super(LSTMNet, self).__init__()
self.lstm = torch.nn.LSTM(input_size, hidden_size, num_layers)
self.fc = torch.nn.Linear(hidden_size, output_size)
def forward(self, x):
x, _ = self.lstm(x) # x is input with size (seq_len, batch_size, input_size)
x = self.fc(x)
return x[-1, :, ]
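A quick shape check for the two estimator networks (illustration only; the feature size 32 is an arbitrary placeholder, the real value comes from DataMgr.get_feature()):
net = Net(input_size=32)
print(net(torch.randn(4, 32)).shape)        # torch.Size([4, 1])
lstm = LSTMNet(input_size=32)
print(lstm(torch.randn(10, 4, 32)).shape)   # torch.Size([4, 1])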
class Estimator:
def __init__(self):
self.data_mgr = DataMgr()
# interface stubs, overridden by the concrete estimator subclasses
def training(self, params):
pass
def testing(self, params):
pass
def predict(self, cp_points, cp_nozzle, board_width=None, board_height=None):
pass
class NeuralEstimator(Estimator):
def __init__(self):
super().__init__()
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
self.net = Net(input_size=self.data_mgr.get_feature(), output_size=1).to(device)
self.net_file = 'model/net_model.pth'
if os.path.exists(self.net_file):
try:
self.net.load_state_dict(torch.load(self.net_file))
except:
warnings.warn('the parameters of neural net model load failed', UserWarning)
def init_weights(self):
for m in self.net.modules():
if isinstance(m, torch.nn.Linear):
torch.nn.init.xavier_uniform_(m.weight)
torch.nn.init.zeros_(m.bias)
def training(self, params):
self.init_weights()  # initialize parameters
data = data_mgr.loader('opt/' + params.train_file)
x_train = np.array(data_mgr.neural_encode(data[0][::data_mgr.get_update_round()]))
y_train = np.array(data[1][::data_mgr.get_update_round()])
x_train = torch.from_numpy(x_train.reshape((-1, np.shape(x_train)[1]))).float().to(device)
y_train = torch.from_numpy(y_train.reshape((-1, 1))).float().to(device)
optimizer = torch.optim.Adam(self.net.parameters(), lr=params.lr)
# scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=5000, gamma=0.1)
loss_func = torch.nn.MSELoss()
for epoch in range(params.num_epochs):
pred = self.net(x_train)
loss = loss_func(pred, y_train)
optimizer.zero_grad()
loss.backward()
optimizer.step()
# scheduler.step()
if epoch % 100 == 0:
print('Epoch: ', epoch, ', Loss: ', loss.item())
if loss.item() < 1e-4:
break
net_predict = self.net(x_train).view(-1)
pred_time, real_time = net_predict.cpu().detach().numpy(), y_train.view(-1).cpu().detach().numpy()
error_info(pred_time, real_time)
if params.save:
if not os.path.exists('model'):
os.mkdir('model')
torch.save(self.net.state_dict(), self.net_file)
def testing(self, params):
data = data_mgr.loader('opt/' + params.test_file)
x_test, y_test = np.array(data_mgr.neural_encode(data[0])), np.array(data[1])
x_test = torch.from_numpy(x_test.reshape((-1, np.shape(x_test)[1]))).float().to(device)
self.net.eval()
with torch.no_grad():
pred_time = self.net(x_test).view(-1).cpu().detach().numpy()
error_info(pred_time, y_test.reshape(-1), 'test')
def predict(self, cp_points, cp_nozzle, board_width=None, board_height=None):
assert board_width is not None and board_height is not None
encoding = np.array(self.data_mgr.encode(cp_points, cp_nozzle, board_width, board_height))
        encoding = torch.from_numpy(encoding.reshape((-1, np.shape(encoding)[0]))).float().to(self.device)
return self.net(encoding)[0, 0].item()
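# Hypothetical usage sketch (component names and board dimensions are illustrative assumptions):
#   estimator = NeuralEstimator()
#   t = estimator.predict({'C0': 50, 'C1': 30}, {'C0': 'CN065', 'C1': 'CN140'},
#                         board_width=200, board_height=150)
# predict() encodes the component/nozzle distribution via DataMgr.encode and returns the estimated
# assembly time as a float, in the same unit as the recorded total_time.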
class HeuristicEstimator(Estimator):
def __init__(self):
super().__init__()
self.lr = LinearRegression()
self.pickle_file = 'model/heuristic_lr_model.pkl'
if os.path.exists(self.pickle_file):
with open(self.pickle_file, 'rb') as f:
self.lr = pickle.load(f)
def training(self, params):
data = data_mgr.loader('opt/' + params.train_file)
x_fit = [self.heuristic_genetic(cp_points, cp_nozzle) for cp_points, cp_nozzle, _, _ in data[0]]
y_fit = np.array([data[1]]).T
self.lr.fit(x_fit, y_fit)
if params.save:
if not os.path.exists('model'):
os.mkdir('model')
with open(self.pickle_file, 'wb') as f:
pickle.dump(self.lr, f)
y_predict = self.lr.predict(x_fit)
error_info(y_fit, y_predict)
def testing(self, params):
data = data_mgr.loader('opt/' + params.test_file)
x_fit = [self.heuristic_genetic(cp_points, cp_nozzle) for cp_points, cp_nozzle, _, _ in data[0]]
y_fit = np.array([data[1]]).T
y_predict = self.lr.predict(x_fit)
error_info(y_fit, y_predict, 'test')
def predict(self, cp_points, cp_nozzle, board_width=None, board_height=None):
        return self.lr.predict(np.array(self.heuristic_genetic(cp_points, cp_nozzle)).reshape(1, -1))[0, 0]
def heuristic_genetic(self, cp_points, cp_nozzle):
nozzle_points, nozzle_component_points = defaultdict(int), defaultdict(list)
for idx, nozzle in cp_nozzle.items():
if cp_points[idx] == 0:
continue
nozzle_points[nozzle] += cp_points[idx]
nozzle_component_points[cp_nozzle[idx]] = [0] * len(cp_points)
for idx, (part_index, points) in enumerate(cp_points.items()):
nozzle_component_points[cp_nozzle[part_index]][idx] = points
nl = sum(cp_points.values()) # num of placement points
ul = math.ceil(len(nozzle_points) * 1.0 / max_head_index) - 1 # num of nozzle set
# assignments of nozzles to heads
wl = 0 # num of workload
total_heads = (1 + ul) * max_head_index - len(nozzle_points)
nozzle_heads = defaultdict(int)
for nozzle in nozzle_points.keys():
if nozzle_points[nozzle] == 0:
continue
nozzle_heads[nozzle] = math.floor(nozzle_points[nozzle] * 1.0 / nl * total_heads)
nozzle_heads[nozzle] += 1
total_heads = (1 + ul) * max_head_index
for heads in nozzle_heads.values():
total_heads -= heads
while True:
nozzle = max(nozzle_heads, key=lambda x: nozzle_points[x] / nozzle_heads[x])
if total_heads == 0:
break
nozzle_heads[nozzle] += 1
total_heads -= 1
# averagely assign placements to heads
heads_placement = []
for nozzle in nozzle_heads.keys():
points = math.floor(nozzle_points[nozzle] / nozzle_heads[nozzle])
heads_placement += [[nozzle, points] for _ in range(nozzle_heads[nozzle])]
nozzle_points[nozzle] -= (nozzle_heads[nozzle] * points)
for idx in range(len(heads_placement) - 1, -1, -1):
if nozzle_points[nozzle] <= 0:
break
nozzle_points[nozzle] -= 1
heads_placement[idx][1] += 1
heads_placement = sorted(heads_placement, key=lambda x: x[1], reverse=True)
# every max_head_index heads in the non-decreasing order are grouped together as nozzle set
for idx in range(len(heads_placement) // max_head_index):
wl += heads_placement[idx][1]
# the number of pick-up operations
# (under the assumption of the number of feeder available for each comp. type is equal 1)
pl = 0
heads_placement_points = [0 for _ in range(max_head_index)]
while True:
head_assign_point = []
for head in range(max_head_index):
                if heads_placement_points[head] != 0 or heads_placement[head][1] <= 0:
continue
nozzle, points = heads_placement[head]
max_comp_index = np.argmax(nozzle_component_points[nozzle])
heads_placement_points[head] = min(points, nozzle_component_points[nozzle][max_comp_index])
nozzle_component_points[nozzle][max_comp_index] -= heads_placement_points[head]
head_assign_point.append(heads_placement_points[head])
min_points_list = list(filter(lambda x: x > 0, heads_placement_points))
if len(min_points_list) == 0 or len(head_assign_point) == 0:
break
pl += max(head_assign_point)
for head in range(max_head_index):
heads_placement[head][1] -= min(min_points_list)
heads_placement_points[head] -= min(min_points_list)
return [nl, wl, ul]
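# Note (added commentary): the regression features are nl (total placement points), wl (estimated
# workload, the sum of the largest head workloads over the nozzle sets) and ul (number of extra
# nozzle sets); the pick-up estimate pl computed above is not part of the returned feature vector.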
class ReconfigEstimator(Estimator):
def __init__(self):
super().__init__()
self.lr = LinearRegression()
self.pickle_file = 'model/reconfig_model.pkl'
if os.path.exists(self.pickle_file):
with open(self.pickle_file, 'rb') as f:
self.lr = pickle.load(f)
def training(self, params):
data = data_mgr.loader('opt/' + params.train_file)
x_fit = [self.heuristic_reconfig(cp_points, cp_nozzle) for cp_points, cp_nozzle, _, _ in data[0]]
y_fit = np.array([data[1]]).T
self.lr.fit(x_fit, y_fit)
if params.save:
if not os.path.exists('model'):
os.mkdir('model')
with open(self.pickle_file, 'wb') as f:
pickle.dump(self.lr, f)
y_predict = self.lr.predict(x_fit)
error_info(y_fit, y_predict)
def testing(self, params):
data = data_mgr.loader('opt/' + params.test_file)
x_fit = [self.heuristic_reconfig(cp_points, cp_nozzle) for cp_points, cp_nozzle, _, _ in data[0]]
y_fit = np.array([data[1]]).T
y_predict = self.lr.predict(x_fit)
error_info(y_fit, y_predict, 'test')
def predict(self, cp_points, cp_nozzle, board_width=None, board_height=None):
return self.lr.predict(np.array(self.heuristic_reconfig(cp_points, cp_nozzle)).reshape(1, -1))[0, 0]
def heuristic_reconfig(self, cp_points, cp_nozzle):
task_block_number, total_point_number = 0, sum(cp_points.values())
if total_point_number == 0:
return [total_point_number, task_block_number]
nozzle_points, nozzle_heads = defaultdict(int), defaultdict(int)
for part, points in cp_points.items():
nozzle_points[cp_nozzle[part]] += points
nozzle_heads[cp_nozzle[part]] = 1
remaining_head = max_head_index - len(nozzle_heads)
nozzle_fraction = []
for nozzle, points in nozzle_points.items():
val = remaining_head * points / total_point_number
nozzle_heads[nozzle] += math.floor(val)
nozzle_fraction.append([nozzle, val - math.floor(val)])
remaining_head = max_head_index - sum(nozzle_heads.values())
        nozzle_fraction.sort(key=lambda x: x[1], reverse=True)
        nozzle_fraction_index = 0
        while remaining_head > 0:
            nozzle_heads[nozzle_fraction[nozzle_fraction_index][0]] += 1
            nozzle_fraction_index = (nozzle_fraction_index + 1) % len(nozzle_fraction)
            remaining_head -= 1
for nozzle, heads_number in nozzle_heads.items():
task_block_number = max(task_block_number, math.ceil(nozzle_points[nozzle] / heads_number))
return [total_point_number, task_block_number]
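# Note (added commentary): the two features are the total placement points and the estimated number
# of task blocks, i.e. the largest ceil(points of a nozzle type / heads assigned to it). As a worked
# example under these assumptions, 120 points on a single nozzle type served by 6 heads gives
# task_block_number = ceil(120 / 6) = 20.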
class SVREstimator(Estimator):
def __init__(self):
super().__init__()
# === symbiotic organism search parameter ===
# population of meta heuristic: 20
# number of iteration: 100
self.population_size = 20
self.num_iteration = 100
self.w_quart = 1.5
# === support vector regression parameters ===
self.kernel_func = "rbf"
self.C_range = [0.1, 10]
self.gamma_range = [0.01, 0.5]
self.epsilon_range = [0.01, 0.1]
self.benefit_factor = [1, 2]
# number of folds: 5
self.num_folds = 5
self.svr_list = [SVR() for _ in range(self.num_folds + 1)]
for i in range(self.num_folds + 1):
pickle_file = 'model/svr' + str(i + 1) + '_model.pkl'
if not os.path.exists(pickle_file):
continue
with open(pickle_file, 'rb') as f:
self.svr_list[i] = pickle.load(f)
self.pbar = tqdm(total=self.num_iteration * self.num_folds * self.population_size)
self.pbar.set_description('svr training process')
def training(self, params):
data = data_mgr.loader('opt/' + params.train_file)
Q1, Q3 = np.percentile(np.array(data[1]), 25), np.percentile(np.array(data[1]), 75)
indices = [i for i in range(len(data[1])) if Q1 - self.w_quart * (Q3 - Q1) <= data[1][i] <= Q3 + self.w_quart * (Q3 - Q1)]
data[0], data[1] = [data[0][i] for i in indices], [data[1][i] for i in indices]
self.svr_list = []
division = len(data[0]) // self.num_folds
for cnt in range(self.num_folds):
x_train, y_train = data[0], data[1]
x_train = [[sum(x_train[i][0].values()), x_train[i][2], x_train[i][3]] for i in range(len(data[0])) if
not cnt * division <= i < (cnt + 1) * division]
y_train = [y_train[i] for i in range(len(data[0])) if not cnt * division <= i < (cnt + 1) * division]
self.svr_list.append(self.sos_svr_training(x_train, y_train))
final_input, final_output = [], []
for cnt in range(self.num_folds):
x_valid = [[sum(data[0][i][0].values()), data[0][i][2], data[0][i][3]] for i in range(len(data[0])) if
cnt * division <= i < (cnt + 1) * division]
final_input.extend([[v] for v in self.svr_list[cnt].predict(x_valid)])
final_output.extend(
[data[1][i] for i in range(len(data[0])) if cnt * division <= i < (cnt + 1) * division])
self.svr_list.append(self.sos_svr_training(final_input, final_output))
if params.save:
for i in range(self.num_folds + 1):
pickle_file = 'model/svr' + str(i + 1) + '_model.pkl'
with open(pickle_file, 'wb') as f:
pickle.dump(self.svr_list[i], f)
predict_x = [[sum(data[0][i][0].values()), data[0][i][2], data[0][i][3]] for i in range(len(data[0]))]
predict_y = []
for cnt in range(self.num_folds):
predict_y.extend(self.svr_list[cnt].predict(predict_x))
        predict_y = np.array(predict_y).reshape(self.num_folds, -1)
        stack_input = [[np.average(predict_y[:, i])] for i in range(predict_y.shape[1])]
        predict_val = self.svr_list[-1].predict(stack_input)
error_info(data[1], predict_val)
def sos_svr_training(self, x_train, y_train):
population = []
for _ in range(self.population_size):
svr_param = [random.uniform(self.C_range[0], self.C_range[1]),
random.uniform(self.gamma_range[0], self.gamma_range[1]),
random.uniform(self.epsilon_range[0], self.epsilon_range[1])]
population.append(SVR(kernel=self.kernel_func, C=svr_param[0], gamma=svr_param[1], epsilon=svr_param[2]))
population_val = []
for individual in population:
population_val.append(self.svr_error(individual, x_train, y_train))
for _ in range(self.num_iteration):
best_svr = population[np.argmin(population_val)]
for i in range(self.population_size):
# === mutualism phase ===
while True:
j = random.randint(0, self.population_size - 1)
if i != j:
break
                Mv_C = (population[i].C + population[j].C) / 2
                Mv_gamma = (population[i].gamma + population[j].gamma) / 2
                Mv_epsilon = (population[i].epsilon + population[j].epsilon) / 2
for idx, svr in zip([i, j], [population[i], population[j]]):
new_C = svr.C + random.random() * (best_svr.C - Mv_C * random.choice(self.benefit_factor))
new_gamma = svr.gamma + random.random() * (
best_svr.gamma - Mv_gamma * random.choice(self.benefit_factor))
new_epsilon = svr.epsilon + random.random() * (
best_svr.epsilon - Mv_epsilon * random.choice(self.benefit_factor))
if new_C >= 0 and new_gamma >= 0 and new_epsilon >= 0:
new_svr = SVR(kernel=self.kernel_func, C=new_C, gamma=new_gamma, epsilon=new_epsilon)
new_svr_val = self.svr_error(new_svr, x_train, y_train)
if new_svr_val < population_val[idx]:
population[idx], population_val[idx] = new_svr, new_svr_val
# === commensalism phase ===
while True:
j = random.randint(0, self.population_size - 1)
if i != j:
break
new_C = population[i].C + random.uniform(-1, 1) * (best_svr.C - population[j].C)
new_gamma = population[i].gamma + random.uniform(-1, 1) * (best_svr.gamma - population[j].gamma)
new_epsilon = population[i].epsilon + random.uniform(-1, 1) * (
best_svr.epsilon - population[j].epsilon)
if new_C >= 0 and new_gamma >= 0 and new_epsilon >= 0:
new_svr = SVR(kernel=self.kernel_func, C=new_C, gamma=new_gamma, epsilon=new_epsilon)
new_svr_val = self.svr_error(new_svr, x_train, y_train)
if new_svr_val < population_val[j]:
population[j], population_val[j] = new_svr, new_svr_val
# === parasitism phase ===
while True:
j = random.randint(0, self.population_size - 1)
if i != j:
break
new_svr = copy.deepcopy(population[j])
idx = random.randint(0, 2)
if idx == 0:
new_svr.C = random.uniform(self.C_range[0], self.C_range[1])
elif idx == 1:
new_svr.gamma = random.uniform(self.gamma_range[0], self.gamma_range[1])
else:
new_svr.epsilon = random.uniform(self.epsilon_range[0], self.epsilon_range[1])
new_svr_val = self.svr_error(new_svr, x_train, y_train)
if new_svr_val < population_val[j]:
population[j], population_val[j] = new_svr, new_svr_val
self.pbar.update(1)
return population[np.argmin(population_val)]
def testing(self, params):
data = data_mgr.loader('opt/' + params.test_file)
predict_x = [[sum(data[0][i][0].values()), data[0][i][2], data[0][i][3]] for i in range(len(data[0]))]
predict_y = []
for cnt in range(self.num_folds):
predict_y.extend(self.svr_list[cnt].predict(predict_x))
        predict_y = np.array(predict_y).reshape(self.num_folds, -1)
        stack_input = [[np.average(predict_y[:, i])] for i in range(predict_y.shape[1])]
        predict_val = self.svr_list[-1].predict(stack_input)
error_info(data[1], predict_val, 'test')
def predict(self, cp_points, cp_nozzle, board_width=None, board_height=None):
pass
def svr_error(self, svr, x_train, y_train):
num_data = len(x_train)
num_division = len(x_train) // self.num_folds
pred_error = np.array([])
for cnt in range(self.num_folds):
x_fit = [x_train[i] for i in range(num_data) if not cnt * num_division <= i < (cnt + 1) * num_division]
y_fit = [y_train[i] for i in range(num_data) if not cnt * num_division <= i < (cnt + 1) * num_division]
svr.fit(x_fit, y_fit)
x_valid = [x_train[i] for i in range(num_data) if cnt * num_division <= i < (cnt + 1) * num_division]
y_valid = [y_train[i] for i in range(num_data) if cnt * num_division <= i < (cnt + 1) * num_division]
for t1, t2 in np.nditer([y_valid, svr.predict(x_valid)]):
pred_error = np.append(pred_error, abs(t1 - t2) / (t2 + 1e-10) * 100)
return np.average(pred_error)
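# Illustrative note (added commentary): sos_svr_training tunes (C, gamma, epsilon) of an RBF-kernel
# SVR with a symbiotic organism search: mutualism pulls two organisms towards the current best,
# commensalism perturbs one organism relative to the best, parasitism replaces a single
# hyper-parameter with a random value, and a candidate is kept only if its cross-validated
# percentage error (svr_error) decreases.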
class MetricEstimator(Estimator):
def __init__(self):
super().__init__()
self.lr = LinearRegression()
self.pickle_file = 'model/metric_model.pkl'
if os.path.exists(self.pickle_file):
with open(self.pickle_file, 'rb') as f:
self.lr = pickle.load(f)
def training(self, params):
x_fit, y_fit = data_mgr.metric('opt/' + params.train_file)
self.lr.fit(x_fit, y_fit)
y_predict = self.lr.predict(x_fit)
error_info(y_fit, y_predict, 'train')
print(self.lr.coef_)
def testing(self, params):
x_fit, y_fit = data_mgr.metric('opt/' + params.test_file)
y_predict = self.lr.predict(x_fit)
error_info(y_fit, y_predict, 'test')
def predict(self, cp_points, cp_nozzle, board_width=None, board_height=None):
pass
if __name__ == '__main__':
warnings.simplefilter(action='ignore', category=FutureWarning)
parser = argparse.ArgumentParser(description='network training implementation')
# parser.add_argument('--train', default=True, type=bool, help='determine whether training the network')
    parser.add_argument('--save', action='store_true',
                        help='whether to save the parameters of the network, linear regression model, etc.')
    parser.add_argument('--overwrite', action='store_true',
                        help='whether to overwrite the training and testing data')
parser.add_argument('--train_file', default='train_data.txt', type=str, help='training file path')
parser.add_argument('--test_file', default='test_data.txt', type=str, help='testing file path')
parser.add_argument('--num_epochs', default=8000, type=int, help='number of epochs for training process')
parser.add_argument('--batch_size', default=2000, type=int, help='size of training batch')
parser.add_argument('--lr', default=1e-5, type=float, help='learning rate for the network')
parser.add_argument('--model', default='neural-network', help='method for assembly time estimation')
parser.add_argument('--machine_optimizer', default='feeder-priority', type=str, help='optimizer for single machine')
params = parser.parse_args()
data_mgr = DataMgr()
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
if params.overwrite:
file = {params.train_file: params.batch_size,
params.test_file: params.batch_size // data_mgr.get_update_round() // 5}
for file_name, file_batch_size in file.items():
with open('opt/' + file_name, 'a') as f:
for _ in range(int(file_batch_size)):
mode = file_name.split('.')[0].split('_')[0]
pcb_data, component_data = data_mgr.generator(mode) # random generate a PCB data
# data_mgr.remover() # remove the last saved data
# data_mgr.saver('data/' + file_name, pcb_data) # save new data
info = base_optimizer(1, pcb_data, component_data, pd.DataFrame(columns=['slot', 'part']), params,
hinter=True)
data_mgr.recorder(f, info, pcb_data, component_data)
f.close()
estimator = NeuralEstimator()
estimator.training(params)
estimator.testing(params)
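# Hypothetical command lines (the script name and values are illustrative assumptions, not documented usage):
#   python estimator.py --overwrite --batch_size 500          # regenerate opt/train_data.txt and opt/test_data.txt
#   python estimator.py --num_epochs 8000 --lr 1e-5 --save    # train the neural estimator and store model/net_model.pth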

487
generator.py Normal file

@@ -0,0 +1,487 @@
import random
import numpy as np
import pandas as pd
from base_optimizer.optimizer_common import *
class DataMgr:
def __init__(self):
self.min_placement_points = 10
self.max_placement_points = 1000
self.max_component_types = 30
self.default_feeder_limit = 1
self.max_nozzle_types = 4
self.x_range = [50, 100, 150, 200, 300, 400, 500]
self.y_range = [50, 100, 150, 200, 300, 400, 500]
self.counter = 0
self.update = 1
self.pre_file = None
self.part_col = ["part", "fdr", "nz", 'fdn', 'points']
self.component_data = pd.DataFrame(columns=self.part_col) # the component list update for several rounds
def generator(self, mode='Train'):
boundary = [random.choice(self.x_range), random.choice(self.y_range)]
if boundary[0] < boundary[-1]:
boundary[0], boundary[-1] = boundary[-1], boundary[0]
nozzle_type_list = random.sample(['CN065', 'CN020', 'CN040', 'CN140'], self.max_nozzle_types)
# determine the nozzle type of component
if self.counter % self.get_update_round() == 0 or mode == 'test':
self.component_data = self.component_data.loc[[]]
total_points = random.randint(self.min_placement_points, self.max_placement_points)
total_nozzles = random.randint(1, self.max_nozzle_types)
selected_nozzle = random.sample(nozzle_type_list, total_nozzles)
for cp_idx in range(min(random.randint(1, self.max_component_types), total_points)):
part, nozzle = 'C' + str(cp_idx), random.choice(selected_nozzle)
self.component_data = pd.concat(
[self.component_data, pd.DataFrame([part, 'SM8', nozzle, 1, 0], index=self.part_col).T],
ignore_index=True)
random_fractions = np.random.rand(len(self.component_data))
normalized_fractions = random_fractions / random_fractions.sum()
for cp_idx, fraction in enumerate(normalized_fractions):
            self.component_data.loc[cp_idx, 'points'] = round(fraction * total_points)
step_col = ["ref", "x", "y", "z", "r", "part", "desc", "fdr", "nz", "hd", "cs", "cy", "sk", "bl", "ar", "pl", "lv"]
pcb_data = pd.DataFrame(columns=step_col)
idx = 1
for _, data in self.component_data.iterrows():
for _ in range(data.points):
part, nozzle = data.part, data.nz
pos_x, pos_y = np.random.uniform(0, boundary[0]), np.random.uniform(0, boundary[1])
pcb_data = pd.concat([pcb_data, pd.DataFrame([['R' + str(idx), -pos_x, pos_y,
0.000, 0.000, part, '', 'A', '1-0 ' + nozzle, 1, 1, 1, 0,
1, 1, 1, 'L0']], columns=pcb_data.columns)], ignore_index=True)
idx += 1
self.counter += 1
return pcb_data, self.component_data
def recorder(self, file_handle, info: OptInfo, pcb_data, component_data):
        # 7 metrics: total time, number of cycles, number of nozzle changes, number of ANC round trips,
        # number of pick-ups, pick-up travel distance, placement travel distance
lineinfo = '{:.3f}'.format(info.total_time) + '\t' + str(info.cycle_counter) + '\t' + str(
info.nozzle_change_counter) + '\t' + str(info.anc_round_counter) + '\t' + str(
info.pickup_counter) + '\t' + '{:.3f}'.format(info.pickup_distance) + '\t' + '{:.3f}'.format(
info.place_distance)
        # 2 parameters: PCB width and height
lineinfo += '\t' + '{:.3f}'.format(pcb_data['x'].max() - pcb_data['x'].min()) + '\t' + '{:.3f}'.format(
pcb_data['y'].max() - pcb_data['y'].min())
part_position = defaultdict(list)
for _, data in pcb_data.iterrows():
part_position[data['part']].append([data['x'], data['y']])
point_counter, component_counter = 0, 0
nozzle_type = set()
for _, data in component_data.iterrows():
if data.points == 0:
continue
nozzle_type.add(data.nz)
point_counter += data.points
component_counter += 1
        # 3 parameters: total placement points, total number of component types, total number of nozzle types
lineinfo += '\t' + str(point_counter) + '\t' + str(component_counter) + '\t' + str(len(nozzle_type))
        # 5 x (number of component types) parameters: part name, nozzle type, placement points, layout width, layout height
for _, data in component_data.iterrows():
if data.points == 0:
continue
lineinfo += '\t' + data.part + '\t' + data.nz + '\t' + str(data.points)
lineinfo += '\t' + '{:.3f}'.format(np.ptp([pos[0] for pos in part_position[data.part]]))
lineinfo += '\t' + '{:.3f}'.format(np.ptp([pos[1] for pos in part_position[data.part]]))
lineinfo += '\n'
file_handle.write(lineinfo)
def saver(self, file_path: str, pcb_data):
lineinfo = ''
for _, data in pcb_data.iterrows():
lineinfo += '\t' + '{:.3f}'.format(data.x) + '\t' + '{:.3f}'.format(
data.y) + '\t0.000\t0.000\t' + data.part + '\t\tA\t' + data.nz + '\t1\t1\t1\t1\t1\t1\t1\tN\tL0\n'
pos = file_path.find('.')
file_path = file_path[:pos] + '-' + str(self.counter) + file_path[pos:]
with open(file_path, 'w') as f:
f.write(lineinfo)
f.close()
self.pre_file = file_path
def remover(self):
if self.pre_file is not None:
os.remove(self.pre_file)
self.pre_file = None
def encode(self, cp_points: defaultdict[str], cp_nozzle: defaultdict[str], board_width, board_height):
assert len(cp_points.keys()) == len(cp_nozzle.keys())
assert len(set(cp_nozzle.values())) <= self.max_nozzle_types
# === general info ===
total_points = sum(points for points in cp_points.values())
total_component_types, total_nozzle_types = len(cp_points.keys()), len(set(cp_nozzle.values()))
data = [total_points, total_component_types, total_nozzle_types]
data.extend([board_width, board_height])
# === heuristic info ===
cycle, nozzle_change, anc_move, pickup = self.heuristic_objective(cp_points, cp_nozzle)
data.extend([cycle, nozzle_change, anc_move, pickup])
# === nozzle info ===
nozzle_points = defaultdict(int)
for cp_idx, nozzle in cp_nozzle.items():
nozzle_points[cp_nozzle[cp_idx]] += cp_points[cp_idx] # points for different nozzle type
nozzle_items = [[nozzle, points] for nozzle, points in nozzle_points.items()]
nozzle_items = sorted(nozzle_items, key=lambda x: x[1], reverse=True)
nz2idx = defaultdict(int)
nozzle_slice = [0 for _ in range(self.max_nozzle_types)]
for idx, [nozzle, points] in enumerate(nozzle_items):
nz2idx[nozzle] = idx
nozzle_slice[idx] = points
data.extend(nozzle_slice)
# === component info ===
comp_data_slice = defaultdict(list)
for idx in range(self.max_nozzle_types):
comp_data_slice[idx] = []
cp_items = [[component, points] for component, points in cp_points.items()]
cp_items = sorted(cp_items, key=lambda x: (x[1], nz2idx[cp_nozzle[x[0]]] * 0.1 + x[1]), reverse=True)
for component, points in cp_items:
nozzle = cp_nozzle[component]
comp_data_slice[nz2idx[nozzle]].append(points)
data_slice = [0 for _ in range(self.max_nozzle_types)]
for idx in range(self.max_nozzle_types):
data_slice[idx] = len(comp_data_slice[idx])
data.extend(data_slice)
for idx in range(self.max_nozzle_types):
if len(comp_data_slice[idx]) <= self.max_component_types:
comp_data_slice[idx].extend([0 for _ in range(self.max_component_types - len(comp_data_slice[idx]))])
else:
comp_data_slice[idx] = comp_data_slice[idx][:self.max_component_types]
data.extend(comp_data_slice[idx])
return data
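    # Note (added commentary) on the encoding layout, matching get_feature(): 3 global counters and
    # 2 board dimensions, 4 heuristic indicators (cycles, nozzle changes, ANC moves, pick-ups), one
    # points-per-nozzle slot and one component-count slot per nozzle type, and max_component_types
    # point entries per nozzle type, i.e. (max_component_types + 2) * max_nozzle_types + 5 + 4 features.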
def heuristic_objective(self, cp_points, cp_nozzle):
        if len(cp_points.keys()) == 0 or sum(cp_points.values()) == 0:
return 0, 0, 0, 0
nozzle_heads, nozzle_points = defaultdict(int), defaultdict(int)
for idx, points in cp_points.items():
if points == 0:
continue
nozzle = cp_nozzle[idx]
nozzle_points[nozzle] += points
nozzle_heads[nozzle] = 1
anc_round_counter = 0
while sum(nozzle_heads.values()) != max_head_index:
max_cycle_nozzle = None
for nozzle, head_num in nozzle_heads.items():
if max_cycle_nozzle is None or nozzle_points[nozzle] / head_num > nozzle_points[max_cycle_nozzle] / \
nozzle_heads[max_cycle_nozzle]:
max_cycle_nozzle = nozzle
assert max_cycle_nozzle is not None
nozzle_heads[max_cycle_nozzle] += 1
head_nozzle_assignment, min_cost = None, None
# generate initial nozzle group
nozzle_group = []
# averagely assign for the same type of nozzles, and generate nozzle group
nozzle_points_cpy = copy.deepcopy(nozzle_points)
for nozzle, heads in nozzle_heads.items():
points = nozzle_points_cpy[nozzle] // heads
for _ in range(heads):
nozzle_group.append([nozzle, points])
nozzle_points_cpy[nozzle] -= heads * points
for idx, [nozzle, _] in enumerate(nozzle_group):
if nozzle_points_cpy[nozzle]:
nozzle_group[idx][1] += 1
nozzle_points_cpy[nozzle] -= 1
while True:
# assign nozzle group to each head
nozzle_group.sort(key=lambda x: -x[1])
tmp_head_nozzle_assignment = []
head_total_points = [0 for _ in range(max_head_index)]
for idx, nozzle_item in enumerate(nozzle_group):
if idx < max_head_index:
tmp_head_nozzle_assignment.append([nozzle_item.copy()])
head_total_points[idx] += nozzle_item[1]
else:
min_head = np.argmin(head_total_points)
tmp_head_nozzle_assignment[min_head].append(nozzle_item.copy())
head_total_points[min_head] += nozzle_item[1]
cost = t_cycle * max(head_total_points)
for head in range(max_head_index):
for cycle in range(len(tmp_head_nozzle_assignment[head])):
if cycle + 1 == len(tmp_head_nozzle_assignment[head]):
if tmp_head_nozzle_assignment[head][cycle][0] != tmp_head_nozzle_assignment[head][-1][0]:
cost += t_nozzle_change
else:
if tmp_head_nozzle_assignment[head][cycle][0] != tmp_head_nozzle_assignment[head][cycle + 1][0]:
cost += t_nozzle_change
while True:
min_head, max_head = np.argmin(head_total_points), np.argmax(head_total_points)
min_head_nozzle, max_head_nozzle = tmp_head_nozzle_assignment[min_head][-1][0], \
tmp_head_nozzle_assignment[max_head][-1][0]
if min_head_nozzle == max_head_nozzle:
break
min_head_list, max_head_list = [min_head], [max_head]
minmax_head_points = 0
for head in range(max_head_index):
if head in min_head_list or head in max_head_list:
minmax_head_points += head_total_points[head]
continue
# the max/min heads with the sum nozzle type
if tmp_head_nozzle_assignment[head][-1][0] == tmp_head_nozzle_assignment[min_head][-1][0]:
min_head_list.append(head)
minmax_head_points += head_total_points[head]
if tmp_head_nozzle_assignment[head][-1][0] == tmp_head_nozzle_assignment[max_head][-1][0]:
max_head_list.append(head)
minmax_head_points += head_total_points[head]
# todo: restriction of available nozzle
# the reduction of cycles is not offset the cost of nozzle change
average_points = minmax_head_points // (len(min_head_list) + len(max_head_list))
reminder_points = minmax_head_points % (len(min_head_list) + len(max_head_list))
max_cycle = average_points + (1 if reminder_points > 0 else 0)
for head in range(max_head_index):
if head in min_head_list or head in max_head_list:
continue
max_cycle = max(max_cycle, head_total_points[head])
nozzle_change_counter = 0
for head in min_head_list:
if tmp_head_nozzle_assignment[head][0] == tmp_head_nozzle_assignment[head][-1]:
nozzle_change_counter += 2
else:
nozzle_change_counter += 1
if t_cycle * (max(head_total_points) - max_cycle) < t_nozzle_change * nozzle_change_counter:
break
cost -= t_cycle * (max(head_total_points) - max_cycle) - t_nozzle_change * nozzle_change_counter
                required_points = 0  # placement points still to be shared out from the nozzle type with the heavier workload
for head in min_head_list:
points = average_points - head_total_points[head]
tmp_head_nozzle_assignment[head].append([max_head_nozzle, points])
head_total_points[head] = average_points
required_points += points
for head in max_head_list:
tmp_head_nozzle_assignment[head][-1][1] -= required_points // len(max_head_list)
head_total_points[head] -= required_points // len(max_head_list)
required_points -= (required_points // len(max_head_list)) * len(max_head_list)
for head in max_head_list:
if required_points <= 0:
break
tmp_head_nozzle_assignment[head][-1][1] -= 1
head_total_points[head] -= 1
required_points -= 1
if min_cost is None or cost < min_cost:
min_cost = cost
head_nozzle_assignment = copy.deepcopy(tmp_head_nozzle_assignment)
else:
break
            # add one more nozzle of this type to the nozzle group
idx, nozzle = 0, nozzle_group[0][0]
for idx, [nozzle_, _] in enumerate(nozzle_group):
if nozzle_ != nozzle:
break
average_points, remainder_points = nozzle_points[nozzle] // (idx + 1), nozzle_points[nozzle] % (idx + 1)
nozzle_group.append([nozzle, 0])
for idx, [nozzle_, _] in enumerate(nozzle_group):
if nozzle_ == nozzle:
nozzle_group[idx][1] = average_points + (1 if remainder_points > 0 else 0)
remainder_points -= 1
cycle_counter, nozzle_change_counter = 0, 0
for head in range(max_head_index):
head_cycle_counter = 0
for cycle in range(len(head_nozzle_assignment[head])):
if cycle + 1 == len(head_nozzle_assignment[head]):
if head_nozzle_assignment[head][0][0] != head_nozzle_assignment[head][-1][0]:
nozzle_change_counter += 1
else:
if head_nozzle_assignment[head][cycle][0] != head_nozzle_assignment[head][cycle + 1][0]:
nozzle_change_counter += 1
head_cycle_counter += head_nozzle_assignment[head][cycle][1]
cycle_counter = max(cycle_counter, head_cycle_counter)
        # === estimation of the number of component pick-ups ===
cp_info = []
for idx, points in cp_points.items():
if points == 0:
continue
            feeder_limit = 1  # todo: for now, a single feeder per component type is assumed
reminder_points = points % feeder_limit
for _ in range(feeder_limit):
cp_info.append([idx, points // feeder_limit + (1 if reminder_points > 0 else 0), cp_nozzle[idx]])
reminder_points -= 1
cp_info.sort(key=lambda x: -x[1])
nozzle_level, nozzle_counter = defaultdict(int), defaultdict(int)
level_points = defaultdict(int)
for info in cp_info:
nozzle = info[2]
if nozzle_counter[nozzle] and nozzle_counter[nozzle] % nozzle_heads[nozzle] == 0:
nozzle_level[nozzle] += 1
level = nozzle_level[nozzle]
level_points[level] = max(level_points[level], info[1])
nozzle_counter[nozzle] += 1
pickup_counter = sum(points for points in level_points.values())
return cycle_counter, nozzle_change_counter, anc_round_counter, pickup_counter
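    # Note (added commentary): heuristic_objective returns rough counts of cycles, nozzle changes,
    # ANC round trips and pick-ups that encode() feeds to the network as prior features; they come
    # from the head/nozzle balancing above, not from a full placement optimization.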
def decode(self, line_info):
items = line_info.split('\t')
board_width, board_height = float(items[7]), float(items[8])
total_points, total_component_types = int(items[9]), int(items[10])
part_col = ["part", "desc", "fdr", "nz", 'camera', 'group', 'feeder-limit', 'points']
step_col = ["ref", "x", "y", "z", "r", "part", "desc", "fdr", "nz", "hd", "cs", "cy", "sk", "bl", "ar", "pl",
"lv"]
component_data = pd.DataFrame(columns=part_col)
pcb_data = pd.DataFrame(columns=step_col)
idx = 1
for cp_counter in range(total_component_types):
            part, nozzle = items[12 + cp_counter * 5], items[13 + cp_counter * 5]
            points = int(items[14 + cp_counter * 5])
pos_list = []
for _ in range(points):
pos_list.append([np.random.uniform(0, board_width), np.random.uniform(0, board_height)])
component_data = pd.concat([component_data, pd.DataFrame(
[part, '', 'SM8', nozzle, '飞行相机1', 'CHIP-Rect', self.default_feeder_limit, points], index=part_col).T],
ignore_index=True)
for pos_x, pos_y in pos_list:
pcb_data = pd.concat([pcb_data, pd.DataFrame([['R' + str(idx), - pos_x, pos_y,
0.000, 0.000, part, '', 'A', '1-0 ' + nozzle, 1, 1, 1, 0,
1, 1, 1, 'L0']], columns=pcb_data.columns)],
ignore_index=True)
return pcb_data, component_data
def loader(self, file_path, data_filter=True, hinter=False):
cp_data, point_data, time_data = [], [], []
with open(file_path, 'r') as file:
line = file.readline()
while line:
items = line.split('\t')
cp_points, cp_nozzle = defaultdict(int), defaultdict(str)
                for cp_idx in range((len(items) - 12) // 5):
                    points = int(items[14 + cp_idx * 5])
                    if points == 0:
                        continue
                    component_type, nozzle_type = items[12 + cp_idx * 5], items[13 + cp_idx * 5]
                    cp_points[component_type], cp_nozzle[component_type] = points, nozzle_type
board_width, board_height = float(items[7]), float(items[8])
cycle, nozzle_change_data, pickup_data = float(items[1]), float(items[2]), float(items[4])
                point_data.append(sum(int(items[14 + cp_idx * 5]) for cp_idx in range((len(items) - 12) // 5)))
# assembly time data
time_data.append(float(items[0]))
cp_data.append([cp_points, cp_nozzle, board_width, board_height, cycle, nozzle_change_data, pickup_data])
line = file.readline()
if data_filter:
# cph_data = [point_data[idx] / time_data[idx] * 3600 for idx in range(len(time_data))]
w_quart = 0.3
Q1, Q3 = np.percentile(np.array(time_data), 25), np.percentile(np.array(time_data), 75)
indices = [i for i in range(len(time_data)) if
Q1 - w_quart * (Q3 - Q1) <= time_data[i] <= Q3 + w_quart * (Q3 - Q1)]
filter_cp_data, filter_time_data = [], []
for idx in indices:
filter_cp_data.append(cp_data[idx])
filter_time_data.append(time_data[idx])
else:
filter_cp_data, filter_time_data = cp_data, time_data
if hinter:
print(
f"# of sample: {len(cp_data)}, outlier : {(1 - len(filter_cp_data) / len(cp_data)) * 100: .2f}%, "
f"mean: {np.average(filter_time_data): .2f}, median: {np.median(filter_time_data): .2f}, "
f"max: {np.max(filter_time_data): .2f}, min: {np.min(filter_time_data): .2f}, "
f"std. dev: {np.std(filter_time_data): .2f}")
return [filter_cp_data, filter_time_data]
def metric(self, file_path):
metric_data, time_data = [], []
with open(file_path, 'r') as file:
line = file.readline()
while line:
items = line.split('\t')
# cycle, nozzle change, anc move, pick up, pick distance, place distance, point
metric_data.append([float(items[i]) for i in list(range(1, 7))])
                metric_data[-1].extend([sum(int(items[14 + cp_idx * 5]) for cp_idx in range((len(items) - 12) // 5))])
# assembly time data
time_data.append(float(items[0]))
line = file.readline()
return [metric_data, time_data]
def neural_encode(self, input_data):
train_data = []
        for cp_points, cp_nozzle, board_width, board_height, _, _, _ in input_data:
            train_data.append(self.encode(cp_points, cp_nozzle, board_width, board_height))
return train_data
def get_feature(self):
return (self.max_component_types + 2) * self.max_nozzle_types + 5 + 4
# def neural_encode(self, input_data):
# train_data = []
# for cp_points, cp_nozzle, board_width, board_height in input_data:
# train_data.append(
# [len(cp_points.keys()), len(cp_nozzle.keys()), sum(cp_points.values()), board_width, board_height])
# return train_data
# def get_feature(self):
# return 5
def get_update_round(self):
return self.update
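# Hypothetical usage sketch of DataMgr (illustrative only; file paths follow the training script):
#   data_mgr = DataMgr()
#   pcb_data, component_data = data_mgr.generator('train')    # random board and component table
#   data = data_mgr.loader('opt/train_data.txt')              # -> [cp_data, time_data], outliers filtered
#   features = data_mgr.neural_encode(data[0])                # fixed-length vectors for the network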


@@ -1,10 +1,9 @@
# implementation of "An integrated allocation method for the PCB assembly line balancing problem with nozzle changes"
import matplotlib.pyplot as plt
from base_optimizer.optimizer_common import *
from lineopt_hyperheuristic import *
def selective_initialization(component_points, component_feeders, population_size):
def selective_initialization(component_points, component_feeders, population_size, machine_number):
population = [] # population initialization
for _ in range(population_size):
individual = []
@@ -12,14 +11,14 @@ def selective_initialization(component_points, component_feeders, population_siz
if points == 0:
continue
            # number of available machines
avl_machine_num = random.randint(1, min(max_machine_index, component_feeders[part_index], points))
avl_machine_num = random.randint(1, min(machine_number, component_feeders[part_index], points))
selective_possibility = []
for p in range(1, avl_machine_num + 1):
selective_possibility.append(pow(2, avl_machine_num - p + 1))
            sel_machine_num = random_selective([p + 1 for p in range(avl_machine_num)], selective_possibility)  # number of machines selected
sel_machine_set = random.sample([p for p in range(max_machine_index)], sel_machine_num)
sel_machine_set = random.sample([p for p in range(machine_number)], sel_machine_num)
sel_machine_points = [1 for _ in range(sel_machine_num)]
for p in range(sel_machine_num - 1):
@@ -32,7 +31,7 @@ def selective_initialization(component_points, component_feeders, population_siz
sel_machine_points[-1] += (points - sum(sel_machine_points))
# code component allocation into chromosome
for p in range(max_machine_index):
for p in range(machine_number):
if p in sel_machine_set:
individual += [0 for _ in range(sel_machine_points[0])]
sel_machine_points.pop(0)
@@ -43,35 +42,35 @@ def selective_initialization(component_points, component_feeders, population_siz
return population
def selective_crossover(component_points, component_feeders, mother, father, non_decelerating=True):
def selective_crossover(component_points, component_feeders, mother, father, machine_number, non_decelerating=True):
assert len(mother) == len(father)
offspring1, offspring2 = mother.copy(), father.copy()
one_counter, feasible_cut_line = 0, []
idx = 0
for part_index, points in component_points:
for part_index, points in component_points.items():
one_counter = 0
idx_, mother_cut_line, father_cut_line = 0, [-1], [-1]
for idx_, gene in enumerate(mother[idx: idx + points + max_machine_index - 1]):
for idx_, gene in enumerate(mother[idx: idx + points + machine_number - 1]):
if gene:
mother_cut_line.append(idx_)
mother_cut_line.append(idx_ + 1)
for idx_, gene in enumerate(father[idx: idx + points + max_machine_index - 1]):
for idx_, gene in enumerate(father[idx: idx + points + machine_number - 1]):
if gene:
father_cut_line.append(idx_)
father_cut_line.append(idx_ + 1)
for offset in range(points + max_machine_index - 1):
for offset in range(points + machine_number - 1):
if mother[idx + offset] == 1:
one_counter += 1
if father[idx + offset] == 1:
one_counter -= 1
# first constraint: the total number of '1's (the number of partitions) in the chromosome is unchanged
if one_counter != 0 or offset == 0 or offset == points + max_machine_index - 2:
if one_counter != 0 or offset == 0 or offset == points + machine_number - 2:
continue
# the selected cut-line should guarantee there are the same or a larger number unassigned machine
@@ -87,13 +86,14 @@ def selective_crossover(component_points, component_feeders, mother, father, non
n_new += 1
# second constraint: non_decelerating or accelerating crossover
# non_decelerating or accelerating means that the number of machine without workload is increased
if n_new < n_bro or (n_new == n_bro and not non_decelerating):
continue
# third constraint (customized constraint):
# no more than the maximum number of available machine for each component type
new_mother_cut_line, new_father_cut_line = [], []
for idx_ in range(max_machine_index + 1):
for idx_ in range(machine_number + 1):
if mother_cut_line[idx_] <= offset:
new_mother_cut_line.append(mother_cut_line[idx_])
else:
@@ -108,11 +108,11 @@ def selective_crossover(component_points, component_feeders, mother, father, non
sorted(new_father_cut_line, reverse=False)
n_mother_machine, n_father_machine = 0, 0
for idx_ in range(max_machine_index):
if new_mother_cut_line[idx_ + 1] - new_mother_cut_line[idx_]:
for idx_ in range(machine_number):
if new_mother_cut_line[idx_ + 1] - new_mother_cut_line[idx_] > 1:
n_mother_machine += 1
if new_father_cut_line[idx_ + 1] - new_father_cut_line[idx_]:
if new_father_cut_line[idx_ + 1] - new_father_cut_line[idx_] > 1:
n_father_machine += 1
if n_mother_machine > component_feeders[part_index] or n_father_machine > component_feeders[part_index]:
@@ -120,7 +120,7 @@ def selective_crossover(component_points, component_feeders, mother, father, non
feasible_cut_line.append(idx + offset)
idx += (points + max_machine_index - 1)
idx += (points + machine_number - 1)
if len(feasible_cut_line) == 0:
return offspring1, offspring2
@@ -131,13 +131,13 @@ def selective_crossover(component_points, component_feeders, mother, father, non
return offspring1, offspring2
def cal_individual_val(component_points, component_nozzle, individual):
def cal_individual_val(component_points, component_nozzle, machine_number, individual, estimator):
idx, objective_val = 0, []
machine_component_points = [[] for _ in range(max_machine_index)]
machine_component_points = [[] for _ in range(machine_number)]
# decode the component allocation
for _, points in component_points:
component_gene = individual[idx: idx + points + max_machine_index - 1]
for part_index, points in component_points.items():
component_gene = individual[idx: idx + points + machine_number - 1]
machine_idx, component_counter = 0, 0
for gene in component_gene:
if gene:
@@ -147,130 +147,104 @@ def cal_individual_val(component_points, component_nozzle, individual):
else:
component_counter += 1
machine_component_points[-1].append(component_counter)
idx += (points + max_machine_index - 1)
for machine_idx in range(max_machine_index):
nozzle_points = defaultdict(int)
for idx, nozzle in component_nozzle.items():
if component_points[idx] == 0:
continue
nozzle_points[nozzle] += machine_component_points[machine_idx][idx]
idx += (points + machine_number - 1)
objective_val = 0
for machine_idx in range(machine_number):
machine_points = sum(machine_component_points[machine_idx]) # num of placement points
if machine_points == 0:
continue
ul = math.ceil(len(nozzle_points) * 1.0 / max_head_index) - 1 # num of nozzle set
# assignments of nozzles to heads
wl = 0 # num of workload
total_heads = (1 + ul) * max_head_index - len(nozzle_points)
nozzle_heads = defaultdict(int)
for nozzle in nozzle_points.keys():
nozzle_heads[nozzle] = math.floor(nozzle_points[nozzle] * 1.0 / machine_points * total_heads)
nozzle_heads[nozzle] += 1
total_heads = (1 + ul) * max_head_index
for heads in nozzle_heads.values():
total_heads -= heads
for nozzle in sorted(nozzle_heads, key=lambda x: nozzle_points[x] / nozzle_heads[x], reverse=True):
if total_heads == 0:
break
nozzle_heads[nozzle] += 1
total_heads -= 1
# averagely assign placements to heads
heads_placement = []
for nozzle in nozzle_heads.keys():
points = math.floor(nozzle_points[nozzle] / nozzle_heads[nozzle])
heads_placement += [[nozzle, points] for _ in range(nozzle_heads[nozzle])]
nozzle_points[nozzle] -= (nozzle_heads[nozzle] * points)
for idx in range(len(heads_placement) - 1, -1, -1):
if nozzle_points[nozzle] <= 0:
break
nozzle_points[nozzle] -= 1
heads_placement[idx][1] += 1
heads_placement = sorted(heads_placement, key=lambda x: x[1], reverse=True)
# every max_head_index heads in the non-decreasing order are grouped together as nozzle set
for idx in range(len(heads_placement) // max_head_index):
wl += heads_placement[idx][1]
objective_val.append(T_pp * machine_points + T_tr * wl + T_nc * ul)
cp_points, cp_nozzle = defaultdict(int), defaultdict(str)
for part_index, points in enumerate(machine_component_points[machine_idx]):
if points == 0:
continue
cp_points[part_index], cp_nozzle[part_index] = points, component_nozzle[part_index]
objective_val = max(objective_val, estimator.predict(cp_points, cp_nozzle))
return objective_val, machine_component_points
def assemblyline_optimizer_genetic(pcb_data, component_data):
def individual_convert(component_points, individual):
machine_number = len(individual)
machine_component_points = [[] for _ in range(machine_number)]
idx = 0
# decode the component allocation
for comp_idx, points in component_points:
component_gene = individual[idx: idx + points + machine_number - 1]
machine_idx, component_counter = 0, 0
for gene in component_gene:
if gene:
machine_component_points[machine_idx].append(component_counter)
machine_idx += 1
component_counter = 0
else:
component_counter += 1
machine_component_points[-1].append(component_counter)
idx += (points + machine_number - 1)
return machine_component_points
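# Illustrative note (assumed example, not part of the diff): in this encoding every component
# contributes points + machine_number - 1 genes, each '1' closes one machine's share and the '0's
# in between are counted as its points. With points=5 and machine_number=3, the gene block
# 0 0 1 0 0 1 0 decodes to 2, 2 and 1 points on machines 1, 2 and 3.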
def line_optimizer_genetic(component_data, machine_number):
# basic parameter
# crossover rate & mutation rate: 80% & 10%
# population size: 200
# the number of generation: 500
np.random.seed(0)
crossover_rate, mutation_rate = 0.8, 0.1
population_size, n_generations = 200, 500
estimator = HeuristicEstimator()
# the number of placement points, the number of available feeders, and nozzle type of component respectively
component_points, component_feeders, component_nozzle = defaultdict(int), defaultdict(int), defaultdict(str)
for data in pcb_data.iterrows():
part_index = component_data[component_data['part'] == data[1]['part']].index.tolist()[0]
nozzle = component_data.loc[part_index]['nz']
component_points[part_index] += 1
component_feeders[part_index] = component_data.loc[part_index]['feeder-limit']
component_nozzle[part_index] = nozzle
    component_points = sorted(component_points.items(), key=lambda x: x[0])  # determines the ordering of genes in the chromosome
cp_points, cp_feeders, cp_nozzle = defaultdict(int), defaultdict(int), defaultdict(int)
for part_index, data in component_data.iterrows():
cp_points[part_index] += data.points
cp_feeders[part_index], cp_nozzle[part_index] = data.fdn, data.nz
# population initialization
best_popval = []
population = selective_initialization(component_points, component_feeders, population_size)
population = selective_initialization(sorted(cp_points.items(), key=lambda x: x[0]), cp_feeders, population_size,
machine_number)
# calculate fitness value
pop_val = [cal_individual_val(cp_points, cp_nozzle, machine_number, individual, estimator)[0] for individual in
population]
with tqdm(total=n_generations) as pbar:
pbar.set_description('genetic algorithm process for PCB assembly line balance')
new_population, new_pop_val = [], []
new_population = []
for _ in range(n_generations):
# calculate fitness value
pop_val = []
for individual in population:
val, assigned_points = cal_individual_val(component_points, component_nozzle, individual)
pop_val.append(max(val))
population += new_population
for individual in new_population:
val, _ = cal_individual_val(cp_points, cp_nozzle, machine_number, individual, estimator)
pop_val.append(val)
best_popval.append(min(pop_val))
select_index = get_top_k_value(pop_val, population_size - len(new_pop_val), reverse=False)
select_index = get_top_k_value(pop_val, population_size, reverse=False)
population = [population[idx] for idx in select_index]
pop_val = [pop_val[idx] for idx in select_index]
population += new_population
for individual in new_population:
val, _ = cal_individual_val(component_points, component_nozzle, individual)
pop_val.append(max(val))
# min-max convert
max_val = max(pop_val)
pop_val = list(map(lambda v: max_val - v, pop_val))
sum_pop_val = sum(pop_val) + 1e-10
pop_val = [v / sum_pop_val + 1e-3 for v in pop_val]
sel_pop_val = list(map(lambda v: max_val - v, pop_val))
sum_pop_val = sum(sel_pop_val) + 1e-10
sel_pop_val = [v / sum_pop_val + 1e-3 for v in sel_pop_val]
# crossover and mutation
new_population = []
for pop in range(population_size):
if pop % 2 == 0 and np.random.random() < crossover_rate:
index1 = roulette_wheel_selection(pop_val)
index1 = roulette_wheel_selection(sel_pop_val)
while True:
index2 = roulette_wheel_selection(pop_val)
index2 = roulette_wheel_selection(sel_pop_val)
if index1 != index2:
break
offspring1, offspring2 = selective_crossover(component_points, component_feeders,
population[index1], population[index2])
offspring1, offspring2 = selective_crossover(cp_points, cp_feeders,
population[index1], population[index2], machine_number)
if np.random.random() < mutation_rate:
offspring1 = constraint_swap_mutation(component_points, offspring1)
offspring1 = constraint_swap_mutation(cp_points, offspring1, machine_number)
if np.random.random() < mutation_rate:
offspring1 = constraint_swap_mutation(component_points, offspring1)
offspring2 = constraint_swap_mutation(cp_points, offspring2, machine_number)
new_population.append(offspring1)
new_population.append(offspring2)
@@ -278,12 +252,13 @@ def assemblyline_optimizer_genetic(pcb_data, component_data):
pbar.update(1)
best_individual = population[np.argmax(pop_val)]
_, assignment_result = cal_individual_val(component_points, component_nozzle, best_individual)
val, assignment_result = cal_individual_val(cp_points, cp_nozzle, machine_number, best_individual, estimator)
print('final value: ', val)
# available feeder check
for part_index, data in component_data.iterrows():
feeder_limit = data['feeder-limit']
for machine_index in range(max_machine_index):
feeder_limit = data.fdn
for machine_index in range(machine_number):
if assignment_result[machine_index][part_index]:
feeder_limit -= 1
assert feeder_limit >= 0

429
lineopt_heuristic.py Normal file

@@ -0,0 +1,429 @@
import copy
import math
import random
import numpy as np
from base_optimizer.optimizer_common import *
from base_optimizer.smopt_feederpriority import *
from base_optimizer.result_analysis import *
# TODO: nozzle tool available restriction
# TODO: consider with the PCB placement topology
def assembly_time_estimator(assignment_points, arranged_feeders, component_data):
nozzle_heads, nozzle_points = defaultdict(int), defaultdict(int)
for idx, points in enumerate(assignment_points):
if points == 0:
continue
nozzle_points[component_data.iloc[idx]['nz']] += points
nozzle_heads[component_data.iloc[idx]['nz']] = 1
while sum(nozzle_heads.values()) != max_head_index:
max_cycle_nozzle = None
for nozzle, head_num in nozzle_heads.items():
if max_cycle_nozzle is None or nozzle_points[nozzle] / head_num > nozzle_points[max_cycle_nozzle] / \
nozzle_heads[max_cycle_nozzle]:
max_cycle_nozzle = nozzle
assert max_cycle_nozzle is not None
nozzle_heads[max_cycle_nozzle] += 1
head_nozzle_assignment, min_cost = None, None
# generate initial nozzle group
nozzle_group = []
# averagely assign for the same type of nozzles, and generate nozzle group
nozzle_points_cpy = copy.deepcopy(nozzle_points)
for nozzle, heads in nozzle_heads.items():
points = nozzle_points_cpy[nozzle] // heads
for _ in range(heads):
nozzle_group.append([nozzle, points])
nozzle_points_cpy[nozzle] -= heads * points
for idx, [nozzle, _] in enumerate(nozzle_group):
if nozzle_points_cpy[nozzle]:
nozzle_group[idx][1] += 1
nozzle_points_cpy[nozzle] -= 1
while True:
# assign nozzle group to each head
nozzle_group.sort(key=lambda x: -x[1])
tmp_head_nozzle_assignment = []
head_total_points = [0 for _ in range(max_head_index)]
for idx, nozzle_item in enumerate(nozzle_group):
if idx < max_head_index:
tmp_head_nozzle_assignment.append([nozzle_item.copy()])
head_total_points[idx] += nozzle_item[1]
else:
min_head = np.argmin(head_total_points)
tmp_head_nozzle_assignment[min_head].append(nozzle_item.copy())
head_total_points[min_head] += nozzle_item[1]
cost = t_cycle * max(head_total_points)
for head in range(max_head_index):
for cycle in range(len(tmp_head_nozzle_assignment[head])):
if cycle + 1 == len(tmp_head_nozzle_assignment[head]):
if tmp_head_nozzle_assignment[head][cycle][0] != tmp_head_nozzle_assignment[head][-1][0]:
cost += t_nozzle_change
else:
if tmp_head_nozzle_assignment[head][cycle][0] != tmp_head_nozzle_assignment[head][cycle + 1][0]:
cost += t_nozzle_change
while True:
min_head, max_head = np.argmin(head_total_points), np.argmax(head_total_points)
min_head_nozzle, max_head_nozzle = tmp_head_nozzle_assignment[min_head][-1][0], \
tmp_head_nozzle_assignment[max_head][-1][0]
if min_head_nozzle == max_head_nozzle:
break
min_head_list, max_head_list = [min_head], [max_head]
minmax_head_points = 0
for head in range(max_head_index):
if head in min_head_list or head in max_head_list:
minmax_head_points += head_total_points[head]
continue
# the max/min heads with the sum nozzle type
if tmp_head_nozzle_assignment[head][-1][0] == tmp_head_nozzle_assignment[min_head][-1][0]:
min_head_list.append(head)
minmax_head_points += head_total_points[head]
if tmp_head_nozzle_assignment[head][-1][0] == tmp_head_nozzle_assignment[max_head][-1][0]:
max_head_list.append(head)
minmax_head_points += head_total_points[head]
# todo: restriction of available nozzle
# the reduction of cycles is not offset the cost of nozzle change
average_points = minmax_head_points // (len(min_head_list) + len(max_head_list))
reminder_points = minmax_head_points % (len(min_head_list) + len(max_head_list))
max_cycle = average_points + (1 if reminder_points > 0 else 0)
for head in range(max_head_index):
if head in min_head_list or head in max_head_list:
continue
max_cycle = max(max_cycle, head_total_points[head])
nozzle_change_counter = 0
for head in min_head_list:
if tmp_head_nozzle_assignment[head][0] == tmp_head_nozzle_assignment[head][-1]:
nozzle_change_counter += 2
else:
nozzle_change_counter += 1
if t_cycle * (max(head_total_points) - max_cycle) < t_nozzle_change * nozzle_change_counter:
break
cost -= t_cycle * (max(head_total_points) - max_cycle) - t_nozzle_change * nozzle_change_counter
            required_points = 0  # placement points still to be shared out from the nozzle type with the heavier workload
for head in min_head_list:
points = average_points - head_total_points[head]
tmp_head_nozzle_assignment[head].append([max_head_nozzle, points])
head_total_points[head] = average_points
required_points += points
for head in max_head_list:
tmp_head_nozzle_assignment[head][-1][1] -= required_points // len(max_head_list)
head_total_points[head] -= required_points // len(max_head_list)
required_points -= (required_points // len(max_head_list)) * len(max_head_list)
for head in max_head_list:
if required_points <= 0:
break
tmp_head_nozzle_assignment[head][-1][1] -= 1
head_total_points[head] -= 1
required_points -= 1
if min_cost is None or cost < min_cost:
min_cost = cost
head_nozzle_assignment = copy.deepcopy(tmp_head_nozzle_assignment)
else:
break
        # add one more nozzle of this type to the nozzle group
idx, nozzle = 0, nozzle_group[0][0]
for idx, [nozzle_, _] in enumerate(nozzle_group):
if nozzle_ != nozzle:
break
average_points, remainder_points = nozzle_points[nozzle] // (idx + 1), nozzle_points[nozzle] % (idx + 1)
nozzle_group.append([nozzle, 0])
for idx, [nozzle_, _] in enumerate(nozzle_group):
if nozzle_ == nozzle:
nozzle_group[idx][1] = average_points + (1 if remainder_points > 0 else 0)
remainder_points -= 1
cycle_counter, nozzle_change_counter = 0, 0
for head in range(max_head_index):
head_cycle_counter = 0
for cycle in range(len(head_nozzle_assignment[head])):
if cycle + 1 == len(head_nozzle_assignment[head]):
if head_nozzle_assignment[head][0][0] != head_nozzle_assignment[head][-1][0]:
nozzle_change_counter += 1
else:
if head_nozzle_assignment[head][cycle][0] != head_nozzle_assignment[head][cycle + 1][0]:
nozzle_change_counter += 1
head_cycle_counter += head_nozzle_assignment[head][cycle][1]
cycle_counter = max(cycle_counter, head_cycle_counter)
    # === estimation of the number of component pick-ups ===
cp_info = []
for idx, points in enumerate(assignment_points):
if points == 0:
continue
feeder_limit = int(component_data.iloc[idx].fdn)
reminder_points = points % feeder_limit
for _ in range(feeder_limit):
cp_info.append(
[idx, points // feeder_limit + (1 if reminder_points > 0 else 0), component_data.iloc[idx].nz])
reminder_points -= 1
cp_info.sort(key=lambda x: -x[1])
nozzle_level, nozzle_counter = defaultdict(int), defaultdict(int)
level_points = defaultdict(int)
for info in cp_info:
nozzle = info[2]
if nozzle_counter[nozzle] and nozzle_counter[nozzle] % nozzle_heads[nozzle] == 0:
nozzle_level[nozzle] += 1
level = nozzle_level[nozzle]
level_points[level] = max(level_points[level], info[1])
nozzle_counter[nozzle] += 1
pickup_counter = sum(points for points in level_points.values())
placement_counter = sum(assignment_points)
pickup_movement = 0
for points in assignment_points:
if points:
pickup_movement += 1
    # return the weighted estimated assembly time
return t_cycle * cycle_counter + t_nozzle_change * nozzle_change_counter + t_pick * pickup_counter + \
t_place * placement_counter + 0.1 * pickup_movement
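# Illustrative note (added commentary): the estimate weights each predicted event with the timing
# constants from optimizer_common (t_cycle per cycle, t_nozzle_change per change, t_pick per pick-up,
# t_place per placement) plus 0.1 per component type with assigned points as a rough pick-up movement
# penalty; e.g. 100 cycles, 2 nozzle changes, 40 pick-ups, 200 placements and 5 occupied feeders give
# roughly 0.3*100 + 1.65*2 + 0.078*40 + 0.051*200 + 0.1*5 = 47.12 under these constants.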
def line_optimizer_heuristic(component_data, machine_number):
# the number of placement points, the number of available feeders, and nozzle type of component respectively
component_number = len(component_data)
nozzle_points = defaultdict(int) # the number of placements of nozzle
total_points = 0
for _, data in component_data.iterrows():
nozzle = data['nz']
nozzle_points[nozzle] += data['points']
        total_points += data['points']
# first step: generate the initial solution with equalized workload
assignment_result = [[0 for _ in range(component_number)] for _ in range(machine_number)]
assignment_points = [0 for _ in range(machine_number)]
average_points = total_points // machine_number
    weighted_points = [data['points'] + 1e-5 * nozzle_points[data['nz']] for _, data in component_data.iterrows()]
for part_index in np.argsort(weighted_points)[::-1]:
if (total_points := component_data.iloc[part_index]['points']) == 0: # total placements for each component type
continue
machine_set = []
# define the machine that assigning placement points (considering the feeder limitation)
for machine_index in np.argsort(assignment_points):
if len(machine_set) >= component_data.iloc[part_index].points or len(machine_set) >= \
component_data.iloc[part_index].fdn:
break
machine_set.append(machine_index)
if weighted_points[part_index] + assignment_points[machine_index] < average_points:
break
# Allocation of mounting points to available machines according to the principle of equality
while total_points:
assign_machine = list(filter(lambda x: assignment_points[x] == min(assignment_points), machine_set))
if len(assign_machine) == len(machine_set):
# averagely assign point to all available machines
points = total_points // len(assign_machine)
for machine_index in machine_set:
assignment_points[machine_index] += points
assignment_result[machine_index][part_index] += points
total_points -= points * len(assign_machine)
for machine_index in machine_set:
if total_points == 0:
break
assignment_points[machine_index] += 1
assignment_result[machine_index][part_index] += 1
total_points -= 1
else:
# assigning placements to make up for the gap between the least and the second least
second_least_machine, second_least_machine_points = -1, max(assignment_points) + 1
for idx in machine_set:
if assignment_points[idx] < second_least_machine_points and assignment_points[idx] != min(
assignment_points):
second_least_machine_points = assignment_points[idx]
second_least_machine = idx
assert second_least_machine != -1
if len(assign_machine) * (second_least_machine_points - min(assignment_points)) < total_points:
min_points = min(assignment_points)
total_points -= len(assign_machine) * (second_least_machine_points - min_points)
for machine_index in assign_machine:
assignment_points[machine_index] += (second_least_machine_points - min_points)
assignment_result[machine_index][part_index] += (
second_least_machine_points - min_points)
else:
points = total_points // len(assign_machine)
for machine_index in assign_machine:
assignment_points[machine_index] += points
assignment_result[machine_index][part_index] += points
total_points -= points * len(assign_machine)
for machine_index in assign_machine:
if total_points == 0:
break
assignment_points[machine_index] += 1
assignment_result[machine_index][part_index] += 1
total_points -= 1
prev_max_assembly_time, prev_assignment_result = None, None
while True:
# second step: estimate the assembly time for each machine
arranged_feeders = defaultdict(list)
for machine_index in range(machine_number):
arranged_feeders[machine_index] = [0 for _ in range(component_number)]
for part_index in range(component_number):
feeder_limit = component_data.iloc[part_index].fdn # total number of available feeders for this component
for machine_index in range(machine_number):
if assignment_result[machine_index][part_index] == 0:
continue
feeder_limit -= 1
# every machine with assigned placements of this component installs at least one feeder
arranged_feeders[machine_index][part_index] = 1
assert feeder_limit >= 0
for part_index in range(component_number):
total_feeder_limit = component_data.iloc[part_index].fdn - sum(
[arranged_feeders[machine_index][part_index] for machine_index in range(machine_number)])
while total_feeder_limit > 0:
max_ratio, max_ratio_machine = None, -1
for machine_index in range(machine_number):
if assignment_result[machine_index][part_index] == 0:
continue
ratio = assignment_result[machine_index][part_index] / arranged_feeders[machine_index][part_index]
if max_ratio is None or ratio > max_ratio:
max_ratio, max_ratio_machine = ratio, machine_index
assert max_ratio_machine is not None
arranged_feeders[max_ratio_machine][part_index] += 1
total_feeder_limit -= 1
assembly_time, chip_per_hour = [], []
for machine_index in range(machine_number):
assembly_time.append(
assembly_time_estimator(assignment_result[machine_index], arranged_feeders[machine_index],
component_data))
chip_per_hour.append(sum(assignment_result[machine_index]) / (assembly_time[-1] + 1e-10))
max_assembly_time = max(assembly_time)
if prev_max_assembly_time and (prev_max_assembly_time < max_assembly_time or abs(
max_assembly_time - prev_max_assembly_time) < 1e-10):
if prev_max_assembly_time < max_assembly_time:
assignment_result = copy.deepcopy(prev_assignment_result)
break
else:
prev_max_assembly_time = max_assembly_time
prev_assignment_result = copy.deepcopy(assignment_result)
# third step: adjust the assignment results to reduce the maximal assembly time among all machines
# note: total_points was consumed by the initial allocation loop above, so recompute it here
total_points = sum(data['points'] for _, data in component_data.iterrows())
# ideal number of points assigned to each machine, proportional to its estimated throughput
average_assign_points = [round(total_points * chip_per_hour[mi] / sum(chip_per_hour)) for mi in
range(machine_number)]
machine_index = 0
while total_points != sum(average_assign_points):
if total_points > sum(average_assign_points):
average_assign_points[machine_index] += 1
else:
average_assign_points[machine_index] -= 1
machine_index += 1
if machine_index >= machine_number:
machine_index = 0
# the placement points that need to be re-allocated
machine_reallocate_points = [sum(assignment_result[mi]) - average_assign_points[mi] for mi in
range(machine_number)]
# workload balance
# 1. balance the number of placements of the same type between different machines.
for demand_mi in range(machine_number):
if machine_reallocate_points[demand_mi] >= 0:
continue
supply_machine_list = [mi for mi in range(machine_number) if machine_reallocate_points[mi] > 0]
supply_machine_list.sort(key=lambda mi: -machine_reallocate_points[mi])
for supply_mi in supply_machine_list:
for part_index in range(component_number):
if assignment_result[supply_mi][part_index] <= 0:
continue
reallocate_points = min(assignment_result[supply_mi][part_index],
-machine_reallocate_points[demand_mi])
# upper available feeder restrictions
tmp_reallocate_result = [assignment_result[mi][part_index] for mi in range(machine_number)]
tmp_reallocate_result[supply_mi] -= reallocate_points
tmp_reallocate_result[demand_mi] += reallocate_points
if sum(1 for pt in tmp_reallocate_result if pt > 0) > component_data.iloc[part_index].fdn:
continue
assignment_result[supply_mi][part_index] -= reallocate_points
machine_reallocate_points[supply_mi] -= reallocate_points
assignment_result[demand_mi][part_index] += reallocate_points
machine_reallocate_points[demand_mi] += reallocate_points
if machine_reallocate_points[demand_mi] <= 0:
break
# 2. balance the number of placements of the different type between different machines.
cp_info = []
for part_index in range(component_number):
for machine_index in range(machine_number):
if assignment_result[machine_index][part_index] == 0:
continue
cp_info.append([machine_index, part_index, assignment_result[machine_index][part_index]])
for machine_index in range(machine_number):
if machine_reallocate_points[machine_index] >= 0:
continue
filter_cp_info = [info for info in cp_info if
info[0] != machine_index and machine_reallocate_points[info[0]] > 0]
while True:
if len(filter_cp_info) == 0 or machine_reallocate_points[machine_index] >= 0:
break
# todo: consider the impact on the number of simultaneous pick-ups
filter_cp_info.sort(key=lambda x: x[2] + machine_reallocate_points[machine_index])
info = filter_cp_info[0]
filter_cp_info.remove(info)
if abs(machine_reallocate_points[machine_index]) + abs(machine_reallocate_points[info[0]]) < abs(
machine_reallocate_points[machine_index] + info[2]) + abs(
machine_reallocate_points[info[0]] - info[2]):
continue
cp_info.remove(info)
assignment_result[info[0]][info[1]] = 0
assignment_result[machine_index][info[1]] += info[2]
machine_reallocate_points[info[0]] -= info[2]
machine_reallocate_points[machine_index] += info[2]
return assignment_result

423
lineopt_hyperheuristic.py Normal file
View File

@@ -0,0 +1,423 @@
from base_optimizer.optimizer_interface import *
from generator import *
from estimator import *
os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'
class Heuristic:
@staticmethod
def apply(cp_points, cp_nozzle, cp_assign, machine_assign):
return -1
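# each subclass below implements one low-level machine-selection rule: given the current partial
# assignment (cp_assign) and the candidate machines (machine_assign), apply() returns the index of
# the machine that should receive the next component according to that rule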
class LeastPoints(Heuristic):
@staticmethod
def apply(cp_points, cp_nozzle, cp_assign, machine_assign):
machine_points = []
for machine_idx in machine_assign:
if len(cp_assign[machine_idx]) == 0:
return machine_idx
machine_points.append(sum([cp_points[cp_idx] for cp_idx in cp_assign[machine_idx]]))
return machine_assign[np.argmin(machine_points)]
class LeastNzTypes(Heuristic):
@staticmethod
def apply(cp_points, cp_nozzle, cp_assign, machine_assign):
machine_nozzle = []
for machine_idx in machine_assign:
if len(cp_assign[machine_idx]) == 0:
return machine_idx
machine_nozzle.append([cp_nozzle[cp_idx] for cp_idx in cp_assign[machine_idx]])
index = np.argmin(
[len(set(nozzle)) + 1e-5 * sum(cp_points[c] for c in cp_assign[machine_idx]) for machine_idx, nozzle in
enumerate(machine_nozzle)])
return machine_assign[index]
class LeastCpTypes(Heuristic):
@staticmethod
def apply(cp_points, cp_nozzle, cp_assign, machine_assign):
machine_types = []
for machine_idx in machine_assign:
machine_types.append(
len(cp_assign[machine_idx]) + 1e-5 * sum(cp_points[cp] for cp in cp_assign[machine_idx]))
return machine_assign[np.argmin(machine_types)]
class LeastCpNzRatio(Heuristic):
@staticmethod
def apply(cp_points, cp_nozzle, cp_assign, machine_assign):
machine_nz_type, machine_cp_type = [], []
for machine_idx in machine_assign:
if len(cp_assign[machine_idx]) == 0:
return machine_idx
machine_nz_type.append(set(cp_nozzle[cp_idx] for cp_idx in cp_assign[machine_idx]))
machine_cp_type.append(len(cp_assign[machine_idx]))
min_idx = np.argmin([(machine_cp_type[idx] + 1e-5 * sum(
cp_points[c] for c in cp_assign[machine_assign[idx]])) / (len(machine_nz_type[idx]) + 1e-5) for idx in
range(len(machine_assign))])
return machine_assign[min_idx]
def nozzle_assignment(cp_points, cp_nozzle, cp_assign):
nozzle_heads, nozzle_points = defaultdict(int), defaultdict(int)
for cp_idx in cp_assign:
nozzle_points[cp_nozzle[cp_idx]] += cp_points[cp_idx]
nozzle_heads[cp_nozzle[cp_idx]] = 1
while sum(nozzle_heads.values()) != max_head_index:
max_cycle_nozzle = None
for nozzle, head_num in nozzle_heads.items():
if max_cycle_nozzle is None or nozzle_points[nozzle] / head_num > nozzle_points[max_cycle_nozzle] / \
nozzle_heads[max_cycle_nozzle]:
max_cycle_nozzle = nozzle
assert max_cycle_nozzle is not None
nozzle_heads[max_cycle_nozzle] += 1
return nozzle_heads, nozzle_points
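# nozzle_assignment greedily gives every required nozzle type one head first, then repeatedly adds a
# head to the nozzle type with the largest points-per-head ratio until all heads are used; the result
# approximates the per-head workload used by the cycle, nozzle-change and pick-up heuristics below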
class LeastCycle(Heuristic):
@staticmethod
def apply(cp_points, cp_nozzle, cp_assign, machine_assign):
machine_cycle = []
for machine_idx in machine_assign:
assign_component = cp_assign[machine_idx]
if len(assign_component) == 0:
return machine_idx
nozzle_heads, nozzle_points = nozzle_assignment(cp_points, cp_nozzle, assign_component)
machine_cycle.append(
max(nozzle_points[nozzle] / head for nozzle, head in nozzle_heads.items()) + 1e-5 * sum(
cp_points[c] for c in cp_assign[machine_idx]))
return machine_assign[np.argmin(machine_cycle)]
class LeastNzChange(Heuristic):
@staticmethod
def apply(cp_points, cp_nozzle, cp_assign, machine_assign):
machine_nozzle_change = []
for machine_idx in machine_assign:
assign_component = cp_assign[machine_idx]
if len(assign_component) == 0:
return machine_idx
heads_points = []
nozzle_heads, nozzle_points = nozzle_assignment(cp_points, cp_nozzle, assign_component)
for nozzle, head in nozzle_heads.items():
for _ in range(head):
heads_points.append(nozzle_points[nozzle] / nozzle_heads[nozzle])
machine_nozzle_change.append(np.std(heads_points) + 1e-5 * sum(cp_points[c] for c in cp_assign[machine_idx]))
return machine_assign[np.argmin(machine_nozzle_change)]
class LeastPickup(Heuristic):
@staticmethod
def apply(cp_points, cp_nozzle, cp_assign, machine_assign):
machine_pick_up = []
for machine_idx in machine_assign:
assign_component = cp_assign[machine_idx]
if len(assign_component) == 0:
return machine_idx
nozzle_heads, nozzle_points = nozzle_assignment(cp_points, cp_nozzle, assign_component)
nozzle_level, nozzle_counter = defaultdict(int), defaultdict(int)
level_points = defaultdict(int)
for cp_idx in sorted(assign_component, key=lambda x: cp_points[x], reverse=True):
nozzle, points = cp_nozzle[cp_idx], cp_points[cp_idx]
if nozzle_counter[nozzle] and nozzle_counter[nozzle] % nozzle_heads[nozzle] == 0:
nozzle_level[nozzle] += 1
level = nozzle_level[nozzle]
level_points[level] = max(level_points[level], points)
nozzle_counter[nozzle] += 1
machine_pick_up.append(sum(points for points in level_points.values()) + 1e-5 * sum(
cp_points[idx] for idx in cp_assign[machine_idx]))
return machine_assign[np.argmin(machine_pick_up)]
def generate_pattern(heuristic_map, cp_points):
"""
Generates a random pattern.
:return: The generated pattern string.
"""
return "".join([random.choice(list(heuristic_map.keys())) for _ in range(random.randrange(1, len(cp_points)))])
def crossover(cp_points, parent1, parent2):
"""
Attempt to perform crossover between two chromosomes.
:param parent1: The first parent.
:param parent2: The second parent.
:return: The two individuals after crossover has been performed.
"""
point1, point2 = random.randrange(len(parent1)), random.randrange(len(parent2))
substr1, substr2 = parent1[point1:], parent2[point2:]
offspring1, offspring2 = "".join((parent1[:point1], substr2)), "".join((parent2[:point2], substr1))
return offspring1[:len(cp_points)], offspring2[:len(cp_points)]
def mutation(heuristic_map, cp_points, individual):
"""
Attempts to mutate the individual by replacing a random heuristic in the chromosome by a generated pattern.
:param individual: The individual to mutate.
:return: The mutated individual.
"""
pattern = list(individual)
mutation_point = random.randrange(len(pattern))
pattern[mutation_point] = generate_pattern(heuristic_map, cp_points)
return ''.join(pattern)[:len(cp_points)]
def population_initialization(population_size, heuristic_map, cp_points):
return [generate_pattern(heuristic_map, cp_points) for _ in range(population_size)]
def convert_assignment_result(heuristic_map, cp_index, cp_points, cp_nozzle, cp_feeders, component_list, individual,
machine_number):
component_number = len(cp_feeders.keys())
cp_assign = [[] for _ in range(machine_number)]
component_machine_assign = [[0 for _ in range(machine_number)] for _ in range(component_number)]
machine_assign_counter = [0 for _ in range(machine_number)]
data_mgr = DataMgr()
for idx, div_cp_idx in enumerate(component_list):
h = individual[idx % len(individual)]
cp_idx = cp_index[div_cp_idx]
machine_assign = [] # indices of machines that this component may still be assigned to
if sum(component_machine_assign[cp_idx][:]) < cp_feeders[cp_idx]:
for machine_idx in range(machine_number):
if component_machine_assign[cp_idx][machine_idx] or machine_assign_counter[
machine_idx] < data_mgr.max_component_types:
machine_assign.append(machine_idx)
machine_idx = heuristic_map[h].apply(cp_points, cp_nozzle, cp_assign, machine_assign)
else:
for machine_idx in range(machine_number):
if component_machine_assign[cp_idx][machine_idx]:
machine_assign.append(machine_idx)
machine_idx = heuristic_map[h].apply(cp_points, cp_nozzle, cp_assign, machine_assign)
cp_assign[machine_idx].append(div_cp_idx)
if component_machine_assign[cp_idx][machine_idx] == 0:
machine_assign_counter[machine_idx] += 1
component_machine_assign[cp_idx][machine_idx] = 1
return cp_assign
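# convert_assignment_result decodes a chromosome: each character selects a heuristic that picks the
# machine for the next divided component, while respecting the per-component feeder budget
# (cp_feeders) and the maximum number of component types a machine can hold (data_mgr.max_component_types)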
def cal_individual_val(heuristic_map, cp_index, cp_points, cp_nozzle, cp_feeders, board_width, board_height,
component_list, individual, machine_number, estimator):
machine_cp_assign = convert_assignment_result(heuristic_map, cp_index, cp_points, cp_nozzle, cp_feeders, component_list,
individual, machine_number)
component_number = len(cp_feeders)
machine_cp_points = [[0 for _ in range(component_number)] for _ in range(machine_number)]
for machine_idx in range(machine_number):
for idx in machine_cp_assign[machine_idx]:
machine_cp_points[machine_idx][cp_index[idx]] += cp_points[idx]
machine_cp_feeders = [[0 for _ in range(component_number)] for _ in range(machine_number)]
for cp_idx in range(component_number):
feeder_nums = cp_feeders[cp_idx]
for machine_idx in range(machine_number):
if machine_cp_points[machine_idx][cp_idx]:
machine_cp_feeders[machine_idx][cp_idx] = 1
feeder_nums -= 1
while feeder_nums > 0:
assign_machine = None
for machine_idx in range(machine_number):
if machine_cp_points[machine_idx][cp_idx] == 0:
continue
if assign_machine is None:
assign_machine = machine_idx
continue
if machine_cp_points[assign_machine][cp_idx] / machine_cp_feeders[assign_machine][cp_idx] \
< machine_cp_points[machine_idx][cp_idx] / machine_cp_feeders[machine_idx][cp_idx]:
assign_machine = machine_idx
machine_cp_feeders[assign_machine][cp_idx] += 1
feeder_nums -= 1
nozzle_type = defaultdict(str)
for idx, cp_idx in cp_index.items():
nozzle_type[cp_idx] = cp_nozzle[idx]
objective_val = []
for machine_idx in range(machine_number):
div_cp_points, div_cp_nozzle = defaultdict(int), defaultdict(str)
idx = 0
for cp_idx in range(component_number):
total_points = machine_cp_points[machine_idx][cp_idx]
if total_points == 0:
continue
div_index = 0
div_points = [total_points // machine_cp_feeders[machine_idx][cp_idx] for _ in
range(machine_cp_feeders[machine_idx][cp_idx])]
while sum(div_points) < total_points:
div_points[div_index] += 1
div_index += 1
for points in div_points:
div_cp_points[idx] = points
div_cp_nozzle[idx] = nozzle_type[cp_idx]
idx += 1
objective_val.append(estimator.predict(div_cp_points, div_cp_nozzle, board_width, board_height))
return objective_val
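# the fitness of an individual is the vector of per-machine assembly times predicted by the neural
# estimator; the genetic loop below minimizes the maximum entry of this vector (the line bottleneck)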
def line_optimizer_hyperheuristic(component_data, pcb_data, machine_number):
heuristic_map = {
'p': LeastPoints,
'n': LeastNzTypes,
'c': LeastCpTypes,
'r': LeastCpNzRatio,
'k': LeastCycle,
'g': LeastNzChange,
'u': LeastPickup,
}
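# a chromosome is a string over the alphabet {p, n, c, r, k, g, u}; for instance the (hypothetical)
# individual 'kup' applies LeastCycle, LeastPickup and LeastPoints in turn when distributing components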
division_part = []
for _, data in component_data.iterrows():
division_part.extend([data.points / data.fdn for _ in range(data.fdn)])
division_points = sum(division_part) / len(division_part)
# genetic-based hyper-heuristic
crossover_rate, mutation_rate = 0.6, 0.1
population_size, total_generation = 20, 50
group_size = 10
estimator = NeuralEstimator()
best_val = None
best_heuristic_list, best_component_list = None, None
cp_feeders, cp_nozzle = defaultdict(int), defaultdict(str)
cp_points, cp_index = defaultdict(int), defaultdict(int)
division_component_data = pd.DataFrame(columns=component_data.columns)
idx = 0
for cp_idx, data in component_data.iterrows():
cp_feeders[cp_idx] = data.fdn
division_data = copy.deepcopy(data)
feeder_limit, total_points = division_data.fdn, division_data.points
if feeder_limit != 1:
feeder_limit = round(min(max(total_points // division_points * 1.5, feeder_limit), total_points))
# feeder_limit = total_points # enable for small-scale instances
surplus_points = total_points % feeder_limit
for _ in range(feeder_limit):
division_data.fdn, division_data.points = 1, math.floor(total_points / feeder_limit)
if surplus_points:
division_data.points += 1
surplus_points -= 1
cp_points[idx], cp_nozzle[idx] = division_data.points, division_data.nz
cp_index[idx] = cp_idx
idx += 1
division_component_data = pd.concat([division_component_data, pd.DataFrame(division_data).T])
division_component_data = division_component_data.reset_index()
component_list = [idx for idx, data in division_component_data.iterrows() if data.points > 0]
board_width, board_height = pcb_data['x'].max() - pcb_data['x'].min(), pcb_data['y'].max() - pcb_data['y'].min()
with tqdm(total=total_generation * group_size) as pbar:
pbar.set_description('hyper-heuristic algorithm process for PCB assembly line balance')
for _ in range(group_size):
random.shuffle(component_list)
new_population = []
population = population_initialization(population_size, heuristic_map, cp_points)
# calculate fitness value
pop_val = []
for individual in population:
val = cal_individual_val(heuristic_map, cp_index, cp_points, cp_nozzle, cp_feeders, board_width,
board_height, component_list, individual, machine_number, estimator)
pop_val.append(max(val))
for _ in range(total_generation):
population += new_population
for individual in new_population:
val = cal_individual_val(heuristic_map, cp_index, cp_points, cp_nozzle, cp_feeders, board_width,
board_height, component_list, individual, machine_number, estimator)
pop_val.append(max(val))
select_index = get_top_k_value(pop_val, population_size, reverse=False)
population = [population[idx] for idx in select_index]
pop_val = [pop_val[idx] for idx in select_index]
# min-max convert
max_val = max(pop_val)
sel_pop_val = list(map(lambda v: max_val - v, pop_val))
sum_pop_val = sum(sel_pop_val) + 1e-10
sel_pop_val = [v / sum_pop_val + 1e-3 for v in sel_pop_val]
# crossover and mutation
new_population = []
for pop in range(population_size):
if pop % 2 == 0 and np.random.random() < crossover_rate:
index1 = roulette_wheel_selection(sel_pop_val)
while True:
index2 = roulette_wheel_selection(sel_pop_val)
if index1 != index2:
break
offspring1, offspring2 = crossover(cp_points, population[index1], population[index2])
if np.random.random() < mutation_rate:
offspring1 = mutation(heuristic_map, cp_points, offspring1)
if np.random.random() < mutation_rate:
offspring2 = mutation(heuristic_map, cp_points, offspring2)
new_population.append(offspring1)
new_population.append(offspring2)
pbar.update(1)
val = cal_individual_val(heuristic_map, cp_index, cp_points, cp_nozzle, cp_feeders, board_width,
board_height, component_list, population[0], machine_number, estimator)
machine_assign = convert_assignment_result(heuristic_map, cp_index, cp_points, cp_nozzle, cp_feeders,
component_list, population[0], machine_number)
assignment_result = [[0 for _ in range(len(component_data))] for _ in range(machine_number)]
for machine_idx in range(machine_number):
for idx in machine_assign[machine_idx]:
assignment_result[machine_idx][cp_index[idx]] += cp_points[idx]
partial_pcb_data, partial_component_data = convert_line_assigment(pcb_data, component_data,
assignment_result)
max_machine_idx = np.argmax(val)
val = exact_assembly_time(partial_pcb_data[max_machine_idx], partial_component_data[max_machine_idx])
if best_val is None or val < best_val:
for machine_idx in range(machine_number):
if machine_idx == max_machine_idx:
continue
val = max(val,
exact_assembly_time(partial_pcb_data[machine_idx], partial_component_data[machine_idx]))
if best_val is not None and val > best_val:
break
if best_val is None or val < best_val:
best_val = val
best_heuristic_list = population[0]
best_component_list = component_list.copy()
machine_cp_points = convert_assignment_result(heuristic_map, cp_index, cp_points, cp_nozzle, cp_feeders,
best_component_list, best_heuristic_list, machine_number)
assignment_result = [[0 for _ in range(len(component_data))] for _ in range(machine_number)]
for machine_idx in range(machine_number):
for idx in machine_cp_points[machine_idx]:
assignment_result[machine_idx][cp_index[idx]] += cp_points[idx]
return assignment_result

195
lineopt_model.py Normal file
View File

@@ -0,0 +1,195 @@
from base_optimizer.optimizer_common import *
from base_optimizer.result_analysis import *
from base_optimizer.smtopt_route import *
def line_optimizer_model(component_data, pcb_data, machine_num, hinter=True):
mdl = Model('pcb assembly line optimizer')
mdl.setParam('Seed', 0)
mdl.setParam('OutputFlag', hinter) # set whether output the debug information
mdl.setParam('TimeLimit', 1000)
nozzle_type, component_type = [], []
for _, data in component_data.iterrows():
if data.nz not in nozzle_type:
nozzle_type.append(data.nz)
component_type.append(data.part)
ratio = 1
J = len(nozzle_type)
N = 10000
M = machine_num
H = max_head_index
I = len(component_data)
S = sum([data.fdn * ratio for _, data in component_data.iterrows()])
K = math.ceil(len(pcb_data) * 1.0 / M) + 1
# K = len(pcb_data)
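# index sets of the MILP: I component types, J nozzle types, M machines, H heads per machine,
# S candidate feeder slots, K upper bound on the number of pick-and-place cycles per machine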
CompOfNozzle = [[0 for _ in range(J)] for _ in range(I)] # Compatibility
component_point = [0 for _ in range(I)]
for idx, data in component_data.iterrows():
nozzle = component_data.iloc[idx].nz
CompOfNozzle[idx][nozzle_type.index(nozzle)] = 1
component_point[idx] = data.points
# objective related
g = mdl.addVars(list_range(K), list_range(M), vtype=GRB.BINARY)
d = mdl.addVars(list_range(K), list_range(H), list_range(M), vtype=GRB.INTEGER)
u = mdl.addVars(list_range(I), list_range(K), list_range(H), list_range(M), vtype=GRB.BINARY)
v = mdl.addVars(list_range(S), list_range(K), list_range(H), list_range(M), vtype=GRB.BINARY)
w = mdl.addVars(list_range(J), list_range(K), list_range(H), list_range(M), vtype=GRB.BINARY)
d_plus = mdl.addVars(list_range(J), list_range(K), list_range(H), list_range(M), vtype=GRB.CONTINUOUS)
d_minus = mdl.addVars(list_range(J), list_range(K), list_range(H), list_range(M), vtype=GRB.CONTINUOUS)
z = mdl.addVars(list_range(K), list_range(M), vtype=GRB.INTEGER)
e = mdl.addVars(list_range(-(H - 1) * ratio, S), list_range(K), list_range(M), vtype=GRB.BINARY)
f = mdl.addVars(list_range(S), list_range(I), list_range(M), vtype=GRB.BINARY, name='')
t = mdl.addVars(list_range(M), lb=0, ub=N, vtype=GRB.CONTINUOUS)
obj = mdl.addVar(lb=0, ub=N, vtype=GRB.CONTINUOUS)
mdl.addConstrs(g[k, m] >= g[k + 1, m] for k in range(K - 1) for m in range(M))
mdl.addConstrs(
quicksum(u[i, k, h, m] for i in range(I)) <= g[k, m] for k in range(K) for h in range(H) for m in range(M))
# nozzle no more than 1 for head h and cycle k
mdl.addConstrs(quicksum(w[j, k, h, m] for j in range(J)) <= 1 for k in range(K) for h in range(H) for m in range(M))
# work completion
mdl.addConstrs(
quicksum(u[i, k, h, m] for k in range(K) for h in range(H) for m in range(M)) == component_point[i] for i in
range(I))
# nozzle change
mdl.addConstrs(
u[i, k, h, m] <= quicksum(CompOfNozzle[i][j] * w[j, k, h, m] for j in range(J)) for i in range(I) for k in
range(K) for h in range(H) for m in range(M))
mdl.addConstrs(w[j, k, h, m] - w[j, k + 1, h, m] == d_plus[j, k, h, m] - d_minus[j, k, h, m] for k in
range(K - 1) for j in range(J) for h in range(H) for m in range(M))
mdl.addConstrs(w[j, 0, h, m] - w[j, K - 1, h, m] == d_plus[j, K - 1, h, m] - d_minus[j, K - 1, h, m]
for j in range(J) for h in range(H) for m in range(M))
mdl.addConstrs(d[k, h, m] == quicksum(d_plus[j, k, h, m] for j in range(J)) + quicksum(
d_minus[j, k, h, m] for j in range(J)) for k in range(K) for h in range(H) for m in range(M))
# simultaneous pick
for s in range(-(H - 1) * ratio, S):
rng = list(range(max(0, -math.floor(s / ratio)), min(H, math.ceil((S - s) / ratio))))
for k in range(K):
mdl.addConstrs(
quicksum(u[i, k, h, m] * v[s + h * ratio, k, h, m] for h in rng for i in range(I)) <= N * e[s, k, m] for
m in range(M))
mdl.addConstrs(
quicksum(u[i, k, h, m] * v[s + h * ratio, k, h, m] for h in rng for i in range(I)) >= e[s, k, m] for m
in range(M))
# head - feeder slot relationship
mdl.addConstrs(
quicksum(v[s, k, h, m] for s in range(S)) == quicksum(u[i, k, h, m] for i in range(I)) for h in range(H) for k
in range(K) for m in range(M))
# feeder related
mdl.addConstrs(
quicksum(f[s, i, m] for s in range(S) for m in range(M)) <= component_data.iloc[i].fdn for i in range(I))
mdl.addConstrs(quicksum(f[s, i, m] for i in range(I)) <= 1 for s in range(S) for m in range(M))
mdl.addConstrs(
quicksum(u[i, k, h, m] * v[s, k, h, m] for h in range(H) for k in range(K)) >= f[s, i, m] for i in range(I) for
s in range(S) for m in range(M))
mdl.addConstrs(
quicksum(u[i, k, h, m] * v[s, k, h, m] for h in range(H) for k in range(K)) <= N * f[s, i, m] for i in range(I)
for s in range(S) for m in range(M))
# pickup movement
mdl.addConstrs(z[k, m] >= s1 * e[s1, k, m] - s2 * e[s2, k, m] + N * (e[s1, k, m] + e[s2, k, m] - 2) for s1 in
range(-(H - 1) * ratio, S) for s2 in range(-(H - 1) * ratio, S) for k in range(K) for m in range(M))
# objective
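# each machine time t[m] follows the fitted linear model
# t = Fit_cy * cycles + Fit_nz * nozzle changes + Fit_pu * pick-ups + Fit_pl * placements + Fit_mv * pick movement,
# and the objective minimizes the largest t[m] across the line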
mdl.addConstrs(t[m] == Fit_cy * quicksum(g[k, m] for k in range(K)) + Fit_nz * quicksum(
d[k, h, m] for h in range(H) for k in range(K)) + Fit_pu * quicksum(
e[s, k, m] for s in range(-(H - 1) * ratio, S) for k in range(K)) + Fit_pl * quicksum(
u[i, k, h, m] for i in range(I) for k in range(K) for h in range(H)) + Fit_mv * quicksum(
z[k, m] for k in range(K)) for m in range(M))
for m in range(M - 1):
mdl.addConstr(t[m] >= t[m + 1])
mdl.addConstrs(obj >= t[m] for m in range(M))
mdl.setObjective(obj, GRB.MINIMIZE)
mdl.optimize()
for m in range(M):
print(f'machine {m} : cycle : {sum(g[k, m].x for k in range(K))}, '
f'nozzle change : {sum(d[k, h, m].x for h in range(H) for k in range(K))}, '
f'pick up : {sum(e[s, k, m].x for s in range(-(H - 1) * ratio, S) for k in range(K))}, '
f'placement : {sum(u[i, k, h, m].x for i in range(I) for k in range(K) for h in range(H))}, '
f'pick movement : {sum(z[k, m].x for k in range(K))}')
pcb_part_indices = defaultdict(list)
for idx, data in pcb_data.iterrows():
pcb_part_indices[data.part].append(idx)
assembly_info = []
for m in range(M):
partial_component_data, partial_pcb_data = copy.deepcopy(component_data), pd.DataFrame(columns=pcb_data.columns)
partial_component_data['points'] = 0
component_result, cycle_result, feeder_slot_result = [], [], []
head_place_pos = []
for k in range(K):
if abs(g[k, m].x) < 1e-3:
continue
component_result.append([-1 for _ in range(H)])
cycle_result.append(1)
feeder_slot_result.append([-1 for _ in range(H)])
for h in range(H):
for i in range(I):
if abs(u[i, k, h, m].x) < 1e-3:
continue
component_result[-1][h] = i
idx = pcb_part_indices[component_data.iloc[i].part][0]
partial_pcb_data = pd.concat([partial_pcb_data, pd.DataFrame(pcb_data.iloc[idx]).T])
head_place_pos.append(pcb_data.iloc[idx].x - h * head_interval)
pcb_part_indices[component_data.iloc[i].part].pop(0)
partial_component_data.loc[i, 'points'] += 1
for s in range(S):
if abs(v[s, k, h, m].x) < 1e-3:
continue
feeder_slot_result[-1][h] = s
if sum(component_result[-1]) == -max_head_index:
component_result.pop(-1)
cycle_result.pop(-1)
feeder_slot_result.pop(-1)
average_pos = round(
(sum(head_place_pos) / len(head_place_pos) + stopper_pos[0] - slotf1_pos[0] + 1) / slot_interval)
for k in range(len(feeder_slot_result)):
for h in range(H):
if feeder_slot_result[k][h] == -1:
continue
feeder_slot_result[k][h] = feeder_slot_result[k][h] * 2 + average_pos
placement_result, head_sequence = place_allocate_sequence_route_generation(partial_component_data,
partial_pcb_data,
component_result, cycle_result,
feeder_slot_result, hinter=False)
opt_res = OptResult(component_result, cycle_result, feeder_slot_result, placement_result, head_sequence)
info = placement_info_evaluation(partial_component_data, partial_pcb_data, opt_res, hinter=hinter)
if hinter:
print('----- Placement machine ' + str(m + 1) + ' ----- ')
optimization_assign_result(partial_component_data, partial_pcb_data, opt_res, nozzle_hinter=True,
component_hinter=True, feeder_hinter=True)
info.print()
assembly_info.append(info)
print('------------------------------ ')
return assembly_info

650
lineopt_reconfiguration.py Normal file
View File

@@ -0,0 +1,650 @@
from base_optimizer.optimizer_common import *
from estimator import *
def random_component_assignment(pcb_data, component_data, machine_number, estimator=None):
# === the set of feasible component types for each nozzle type ===
nozzle_part_list = defaultdict(list)
component_points = []
for idx, data in component_data.iterrows():
component_points.append(data.points)
nozzle_part_list[data.nz].append(idx)
component_number = len(component_data)
assignment_result = [[0 for _ in range(component_number)] for _ in range(machine_number)]
# === ensure every nozzle type is covered ===
selected_part = []
for part_list in nozzle_part_list.values():
part = random.sample(part_list, 1)[0]
machine_index = random.randint(0, machine_number - 1)
assignment_result[machine_index][part] += 1
component_points[part] -= 1
selected_part.append(part)
# === assign one placement for every component that has not been selected yet ===
for part in range(component_number):
if part in selected_part:
continue
assignment_result[random.randint(0, machine_number - 1)][part] += 1
component_points[part] -= 1
machine_assign = list(range(machine_number))
random.shuffle(machine_assign)
finished_assign_counter = component_points.count(0)
while finished_assign_counter < component_number:
for machine_index in machine_assign:
part = random.randint(0, component_number - 1)
feeder_counter = 0
for idx in range(machine_number):
if assignment_result[idx][part] > 0 or idx == machine_index:
feeder_counter += 1
if component_points[part] == 0 or feeder_counter > component_data.iloc[part].fdn:
continue
# feeder limit restriction
points = random.randint(1, component_points[part])
assignment_result[machine_index][part] += points
component_points[part] -= points
if component_points[part] == 0:
finished_assign_counter += 1
assert sum(component_points) == 0
objective_value = 0
cp_items = converter(pcb_data, component_data, assignment_result)
for machine_index in range(machine_number):
cp_points, cp_nozzle, board_width, board_height = cp_items[machine_index]
objective_value = max(objective_value, estimator.predict(cp_points, cp_nozzle, board_width, board_height))
return objective_value, assignment_result
def greedy_component_assignment(component_points, component_nozzle, component_feeders, task_block_weight):
pass # unclear what the original paper intended here; left unimplemented
def local_search_component_assignment(pcb_data, component_data, machine_number, estimator):
# maximum number of iterations : 5000
# maximum number of unsuccessful iterations: 50
component_number = len(component_data)
iteration_counter, unsuccessful_iteration_counter = 5000, 50
optimal_val, optimal_assignment = random_component_assignment(pcb_data, component_data, machine_number, estimator)
for _ in range(iteration_counter):
machine_idx = random.randint(0, machine_number - 1)
if sum(optimal_assignment[machine_idx]) == 0:
continue
part_set = []
for part_idx in range(component_number):
if optimal_assignment[machine_idx][part_idx] != 0:
part_set.append(part_idx)
part_idx = random.sample(part_set, 1)[0]
r = random.randint(1, optimal_assignment[machine_idx][part_idx])
assignment = copy.deepcopy(optimal_assignment)
cyclic_counter = 0
swap_machine_idx = None
swap_available = False
while cyclic_counter <= 2 * machine_number:
cyclic_counter += 1
swap_machine_idx = random.randint(0, machine_number - 1)
feeder_available = 0
for machine in range(machine_number):
if optimal_assignment[machine][part_idx] or machine == swap_machine_idx:
feeder_available += 1
if feeder_available <= component_data.iloc[part_idx].fdn and swap_machine_idx != machine_idx:
swap_available = True
break
assert swap_machine_idx is not None
if swap_available:
assignment[machine_idx][part_idx] -= r
assignment[swap_machine_idx][part_idx] += r
val = 0
cp_items = converter(pcb_data, component_data, assignment)
for machine_index in range(machine_number):
cp_points, cp_nozzle, board_width, board_height = cp_items[machine_index]
val = max(val, estimator.predict(cp_points, cp_nozzle, board_width, board_height))
if val < optimal_val:
optimal_assignment, optimal_val = assignment, val
unsuccessful_iteration_counter = 50
else:
unsuccessful_iteration_counter -= 1
if unsuccessful_iteration_counter <= 0:
break
return optimal_val, optimal_assignment
def reconfig_crossover_operation(component_data, parent1, parent2, machine_number):
offspring1, offspring2 = copy.deepcopy(parent1), copy.deepcopy(parent2)
component_number = len(component_data)
# === crossover ===
mask_bit = []
for _ in range(machine_number):
mask_bit.append(random.randint(0, 1))
if sum(mask_bit) == 0 or sum(mask_bit) == machine_number:
return offspring1, offspring2
for machine_index in range(machine_number):
if mask_bit[machine_index]:
offspring1[machine_index] = copy.deepcopy(parent1[machine_index])
offspring2[machine_index] = copy.deepcopy(parent2[machine_index])
else:
offspring1[machine_index] = copy.deepcopy(parent2[machine_index])
offspring2[machine_index] = copy.deepcopy(parent1[machine_index])
# === balancing ===
# equally to reach the correct number
for part_index in range(component_number):
for offspring in [offspring1, offspring2]:
additional_points = sum([offspring[mt][part_index] for mt in range(machine_number)]) - \
component_data.iloc[part_index]['points']
if additional_points > 0:
# if a component type has more placements, decrease the assigned values on every head equally keeping
# the proportion of the number of placement among the heads
points_list = []
for machine_index in range(machine_number):
points = math.floor(
additional_points * offspring[machine_index][part_index] / component_data.iloc[part_index]['points'])
points_list.append(points)
offspring[machine_index][part_index] -= points
additional_points -= sum(points_list)
for machine_index in range(machine_number):
if additional_points == 0:
break
if offspring[machine_index][part_index] == 0:
continue
offspring[machine_index][part_index] -= 1
additional_points += 1
elif additional_points < 0:
# otherwise, increase the assigned nonzero values equally
machine_set = []
for machine_index in range(machine_number):
if offspring[machine_index][part_index] == 0:
continue
machine_set.append(machine_index)
points = -math.ceil(additional_points / len(machine_set))
for machine_index in machine_set:
offspring[machine_index][part_index] += points
additional_points += points
for machine_index in machine_set:
if additional_points == 0:
break
offspring[machine_index][part_index] += 1
additional_points -= 1
return offspring1, offspring2
def reconfig_mutation_operation(component_data, parent, machine_number):
offspring = copy.deepcopy(parent)
swap_direction = random.randint(0, 1)
if swap_direction:
swap_machine1, swap_machine2 = random.sample(list(range(machine_number)), 2)
else:
swap_machine2, swap_machine1 = random.sample(list(range(machine_number)), 2)
component_list = []
for component_index, points in enumerate(offspring[swap_machine1]):
if points:
component_list.append(component_index)
if len(component_list) == 0:
return offspring
swap_component_index = random.sample(component_list, 1)[0]
swap_points = random.randint(1, offspring[swap_machine1][swap_component_index])
offspring[swap_machine1][swap_component_index] -= swap_points
offspring[swap_machine2][swap_component_index] += swap_points
feeder_counter = 0
for machine_index in range(machine_number):
if offspring[machine_index][swap_component_index]:
feeder_counter += 1
if feeder_counter > component_data.iloc[swap_component_index].fdn:
return parent
return offspring
def evolutionary_component_assignment(pcb_data, component_data, machine_number, estimator):
# population size: 10
# probability of the mutation: 0.1
# probability of the crossover: 0.8
# number of generation: 100
population_size = 10
generation_number = 100
mutation_rate, crossover_rate = 0.1, 0.8
population, pop_val = [], []
for _ in range(population_size):
population.append(random_component_assignment(pcb_data, component_data, machine_number, estimator)[1])
cp_items = converter(pcb_data, component_data, population[-1])
val = 0
for machine_index in range(machine_number):
cp_points, cp_nozzle, board_width, board_height = cp_items[machine_index]
val = max(val, estimator.predict(cp_points, cp_nozzle, board_width, board_height))
pop_val.append(val)
with tqdm(total=generation_number) as pbar:
pbar.set_description('evolutionary algorithm process for PCB assembly line balance')
new_population = []
for _ in range(generation_number):
population += new_population
# calculate fitness value
for individual in new_population:
val = 0
cp_items = converter(pcb_data, component_data, individual)
for machine_index in range(machine_number):
cp_points, cp_nozzle, board_width, board_height = cp_items[machine_index]
val = max(val, estimator.predict(cp_points, cp_nozzle, board_width, board_height))
pop_val.append(val)
select_index = get_top_k_value(pop_val, population_size, reverse=False)
population = [population[idx] for idx in select_index]
pop_val = [pop_val[idx] for idx in select_index]
# min-max convert
max_val = max(pop_val)
pop_val_sel = list(map(lambda v: max_val - v, pop_val))
sum_pop_val = sum(pop_val_sel) + 1e-10
pop_val_sel = [v / sum_pop_val + 1e-3 for v in pop_val_sel]
# crossover and mutation
new_population = []
for pop in range(population_size):
if pop % 2 == 0 and np.random.random() < crossover_rate:
index1 = roulette_wheel_selection(pop_val_sel)
while True:
index2 = roulette_wheel_selection(pop_val_sel)
if index1 != index2:
break
offspring1, offspring2 = reconfig_crossover_operation(component_data, population[index1],
population[index2], machine_number)
if np.random.random() < mutation_rate:
offspring1 = reconfig_mutation_operation(component_data, offspring1, machine_number)
if np.random.random() < mutation_rate:
offspring2 = reconfig_mutation_operation(component_data, offspring2, machine_number)
new_population.append(offspring1)
new_population.append(offspring2)
pbar.update(1)
return min(pop_val), population[np.argmin(pop_val)]
class SpiderMonkeyOpt:
def __init__(self, pop_size, pcb_data, component_data, machine_number, estimator):
self.PcbData = pcb_data
self.ComponentData = component_data
self.Estimator = estimator
self.PopSize = pop_size
self.LocalLimit = pop_size
self.GlobalLimit = pop_size
self.MachineNum = machine_number
self.GroupSize = 0
# self.Dim = sum(data.fdn for _, data in component_data.iterrows()) + machine_number
self.CpPoints = defaultdict(int)
self.CpIndex = defaultdict(int)
self.CpNozzle = defaultdict(str)
self.Dim = 0
for cp_idx, data in component_data.iterrows():
# cp_feeders[cp_idx] = data.fdn
division_data = copy.deepcopy(data)
feeder_limit, total_points = division_data.fdn, division_data.points
surplus_points = total_points % feeder_limit
for _ in range(feeder_limit):
division_data.fdn, division_data.points = 1, math.floor(total_points / feeder_limit)
if surplus_points:
division_data.points += 1
surplus_points -= 1
self.CpPoints[self.Dim], self.CpNozzle[self.Dim] = division_data.points, division_data.nz
self.CpIndex[self.Dim] = cp_idx
self.Dim += 1
self.Dim += machine_number
component_list = list(range(len(self.CpPoints)))
self.GenPosition = []
self.GenPopVal = []
for _ in range(self.PopSize):
random.shuffle(component_list)
self.GenPosition.append(component_list.copy())
idx, prev = 0, 0
div = random.sample(range(len(component_list)), self.MachineNum - 1)
div.append(len(component_list))
div.sort(reverse=False)
for _ in range(1, self.MachineNum + 1):
self.GenPosition[-1].append(div[idx] - prev)
prev = div[idx]
idx += 1
self.GenPopVal.append(self.CalIndividualVal(self.GenPosition[-1]))
self.GroupPoint = [[0 for _ in range(2)] for _ in range(self.PopSize)]
self.GroupPart = 1
self.GenProb = None
self.GlobalMin = self.GenPopVal[0]
self.GlobalLeaderPosition = self.GenPosition[0].copy()
self.GlobalLimitCount = 0
self.LocalMin = np.ones(self.PopSize) * 1e10
self.LocalLeaderPosition = [[0 for _ in range(self.Dim)] for _ in range(self.PopSize)]
self.LocalLimitCount = [0 for _ in range(self.PopSize)]
for k in range(self.GroupSize):
self.LocalMin[k] = self.GenPopVal[int(self.GroupPoint[k][0])]
self.LocalLeaderPosition[k] = self.GenPosition[int(self.GroupPoint[k][0])].copy()
self.CrossoverRatio = 0.1
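# gene encoding: a permutation of the divided component indices followed by MachineNum counters that
# partition the permutation into consecutive segments, one segment of components per machine
# (see CalIndividualVal, which walks the counters to rebuild the assignment matrix)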
def GlobalLearning(self):
GlobalTrial = self.GlobalMin
for i in range(self.PopSize):
if self.GenPopVal[i] < self.GlobalMin:
self.GlobalMin = self.GenPopVal[i]
self.GlobalLeaderPosition = self.GenPosition[i].copy()
if math.fabs(GlobalTrial - self.GlobalMin) < 1e-5:
self.GlobalLimitCount = self.GlobalLimitCount + 1
else:
self.GlobalLimitCount = 0
def LocalLearning(self):
OldMin = np.zeros(self.PopSize)
for k in range(self.GroupSize):
OldMin[k] = self.LocalMin[k]
for k in range(self.GroupSize):
i = int(self.GroupPoint[k][0])
while i <= int(self.GroupPoint[k][1]):
if self.GenPopVal[i] < self.LocalMin[k]:
self.LocalMin[k] = self.GenPopVal[i]
self.LocalLeaderPosition[k] = self.GenPosition[i].copy()
i = i + 1
for k in range(self.GroupSize):
if math.fabs(OldMin[k] - self.LocalMin[k]) < 1e-5:
self.LocalLimitCount[k] = self.LocalLimitCount[k] + 1
else:
self.LocalLimitCount[k] = 0
def CalculateProbabilities(self):
self.GenProb = [0 for _ in range(self.PopSize)]
MaxVal = self.GenPopVal[0]
i = 1
while i < self.PopSize:
if self.GenPopVal[i] > MaxVal:
MaxVal = self.GenPopVal[i]
i += 1
for i in range(self.PopSize):
self.GenProb[i] = (0.9 * (self.GenPopVal[i] / MaxVal)) + 0.1
def LocalLeaderPhase(self, k):
lo = int(self.GroupPoint[k][0])
hi = int(self.GroupPoint[k][1])
i = lo
while i <= hi:
NewGene1, NewGene2 = self.CrossoverOperation(self.GenPosition[i], self.LocalLeaderPosition[k])
NewGeneVal1, NewGeneVal2 = self.CalIndividualVal(NewGene1), self.CalIndividualVal(NewGene2)
if NewGeneVal1 < self.GenPopVal[i]:
self.GenPosition[i] = NewGene1
self.GenPopVal[i] = NewGeneVal1
if NewGeneVal2 < self.GenPopVal[i]:
self.GenPosition[i] = NewGene2
self.GenPopVal[i] = NewGeneVal2
i += 1
def GlobalLeaderPhase(self, k):
lo = int(self.GroupPoint[k][0])
hi = int(self.GroupPoint[k][1])
i = lo
l = lo
while l < hi:
if random.random() < self.GenProb[i]:
l += 1
NewGene1, NewGene2 = self.CrossoverOperation(self.GenPosition[i], self.GlobalLeaderPosition)
NewGeneVal1, NewGeneVal2 = self.CalIndividualVal(NewGene1), self.CalIndividualVal(NewGene2)
if NewGeneVal1 < self.GenPopVal[i]:
self.GenPosition[i] = NewGene1
self.GenPopVal[i] = NewGeneVal1
if NewGeneVal2 < self.GenPopVal[i]:
self.GenPosition[i] = NewGene2
self.GenPopVal[i] = NewGeneVal2
i += 1
if i == hi:
i = lo
def LocalLeaderDecision(self):
for k in range(self.GroupSize):
if self.LocalLimitCount[k] > self.LocalLimit:
i = self.GroupPoint[k][0]
while i <= int(self.GroupPoint[k][1]):
if random.random() >= self.CrossoverRatio:
NewGenPosition = list(range(self.Dim - self.MachineNum))
random.shuffle(NewGenPosition)
idx, prev = 0, 0
div = random.sample(range(len(NewGenPosition)), self.MachineNum - 1)
div.append(len(NewGenPosition))
div.sort(reverse=False)
for _ in range(1, self.MachineNum + 1):
NewGenPosition.append(div[idx] - prev)
prev = div[idx]
idx += 1
NewGenVal = self.CalIndividualVal(NewGenPosition)
if NewGenVal < self.GenPopVal[i]:
self.GenPosition[i] = NewGenPosition.copy()
self.GenPopVal[i] = NewGenVal
else:
NewGene1, NewGene2 = self.CrossoverOperation(self.GenPosition[i], self.GlobalLeaderPosition)
NewGeneVal1, NewGeneVal2 = self.CalIndividualVal(NewGene1), self.CalIndividualVal(NewGene2)
if NewGeneVal1 < self.GenPopVal[i]:
self.GenPosition[i] = NewGene1.copy()
self.GenPopVal[i] = NewGeneVal1
if NewGeneVal2 < self.GenPopVal[i]:
self.GenPosition[i] = NewGene2.copy()
self.GenPopVal[i] = NewGeneVal2
i += 1
self.LocalLimitCount[k] = 0
def GlobalLeaderDecision(self):
if self.GlobalLimitCount > self.GlobalLimit:
self.GroupPart += 1
self.GlobalLimitCount = 0
self.CreateGroup()
self.LocalLearning()
def CreateGroup(self):
g = 0
lo = 0
while lo < self.PopSize:
hi = lo + int(self.PopSize / self.GroupPart)
self.GroupPoint[g][0] = lo
self.GroupPoint[g][1] = hi
if self.PopSize - hi < int(self.PopSize / self.GroupPart):
self.GroupPoint[g][1] = (self.PopSize - 1)
g = g + 1
lo = hi + 1
self.GroupSize = g
def CrossoverOperation(self, gene1, gene2):
len_ = len(gene1)
sub1, sub2 = partially_mapped_crossover(gene1[0: len_ - self.MachineNum], gene2[0: len_ - self.MachineNum])
pos1, pos2 = random.randint(0, self.MachineNum - 1), random.randint(0, self.MachineNum - 1)
machine_assign1, machine_assign2 = gene1[len_ - self.MachineNum:], gene2[len_ - self.MachineNum:]
machine_assign1[pos1], machine_assign2[pos2] = machine_assign2[pos2], machine_assign1[pos1]
while sum(machine_assign1) != len_ - self.MachineNum:
machine_idx = random.randint(0, self.MachineNum - 1)
if machine_assign1[machine_idx] == 0:
continue
if sum(machine_assign1) > len_ - self.MachineNum:
machine_assign1[machine_idx] -= 1
else:
machine_assign1[machine_idx] += 1
while sum(machine_assign2) != len_ - self.MachineNum:
machine_idx = random.randint(0, self.MachineNum - 1)
if machine_assign2[machine_idx] == 0:
continue
if sum(machine_assign2) > len_ - self.MachineNum:
machine_assign2[machine_idx] -= 1
else:
machine_assign2[machine_idx] += 1
sub1.extend(machine_assign1)
sub2.extend(machine_assign2)
return sub1, sub2
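# CrossoverOperation applies partially-mapped crossover (PMX) to the permutation part of both genes
# and swaps one machine counter; the repair loops afterwards adjust the counters so that they again
# sum to the number of divided components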
def CalIndividualVal(self, gene):
ComponentNum = len(self.ComponentData)
assignment_result = [[0 for _ in range(ComponentNum)] for _ in range(self.MachineNum)]
idx, machine_index = 0, 0
for num in range(self.Dim - self.MachineNum, self.Dim):
for _ in range(gene[num]):
assignment_result[machine_index][self.CpIndex[gene[idx]]] += self.CpPoints[gene[idx]]
idx += 1
machine_index += 1
val = 0
cp_items = converter(self.PcbData, self.ComponentData, assignment_result)
for machine_index in range(self.MachineNum):
cp_points, cp_nozzle, board_width, board_height = cp_items[machine_index]
val = max(val, self.Estimator.predict(cp_points, cp_nozzle, board_width, board_height))
return val
def spider_monkey_component_assignment(pcb_data, component_data, machine_number, estimator):
population_size, iteration_number = 20, 50
smo = SpiderMonkeyOpt(population_size, pcb_data, component_data, machine_number, estimator)
# ========================== Calling: GlobalLearning() ======================== #
smo.GlobalLearning()
# ========================== Calling: create_group() ========================== #
smo.CreateGroup()
# ========================= Calling: LocalLearning() ========================== #
smo.LocalLearning()
# ================================= Looping ================================== #
with tqdm(total=iteration_number) as pbar:
pbar.set_description('spider monkey algorithm process for PCB assembly line balance')
for _ in range(iteration_number):
for k in range(smo.GroupSize):
# ==================== Calling: LocalLeaderPhase() =================== #
smo.LocalLeaderPhase(k)
# =================== Calling: CalculateProbabilities() ================== #
smo.CalculateProbabilities()
for k in range(smo.GroupSize):
# ==================== Calling: GlobalLeaderPhase() ================== #
smo.GlobalLeaderPhase(k)
# ======================= Calling: GlobalLearning() ====================== #
smo.GlobalLearning()
# ======================= Calling: LocalLearning() ======================= #
smo.LocalLearning()
# ================== Calling: LocalLeaderDecision() ====================== #
smo.LocalLeaderDecision()
# ===================== Calling: GlobalLeaderDecision() ================== #
smo.GlobalLeaderDecision()
pbar.update(1)
assignment_result = [[0 for _ in range(len(component_data))] for _ in range(machine_number)]
idx, machine_index = 0, 0
for num in range(smo.Dim - machine_number, smo.Dim):
for _ in range(smo.GlobalLeaderPosition[num]):
assignment_result[machine_index][smo.CpIndex[smo.GlobalLeaderPosition[idx]]] += \
smo.CpPoints[smo.GlobalLeaderPosition[idx]]
idx += 1
machine_index += 1
return smo.GlobalMin, assignment_result
@timer_wrapper
def line_optimizer_reconfiguration(component_data, pcb_data, machine_number):
# === assignment of heads to modules is omitted ===
optimal_assignment, optimal_val = [], None
estimator = ReconfigEstimator() # element from list [0, 1, 2, 5, 10] task_block ~= cycle
# === assignment of components to heads
for i in range(5):
if i == 0:
# random
print('random component allocation algorithm process for PCB assembly line balance')
val, assignment = random_component_assignment(pcb_data, component_data, machine_number, estimator)
elif i == 1:
# spider monkey
val, assignment = spider_monkey_component_assignment(pcb_data, component_data, machine_number, estimator)
elif i == 2:
# local search
print('local search component allocation algorithm process for PCB assembly line balance')
val, assignment = local_search_component_assignment(pcb_data, component_data, machine_number, estimator)
elif i == 3:
# evolutionary
val, assignment = evolutionary_component_assignment(pcb_data, component_data, machine_number, estimator)
else:
# brute force
# proved impractical: it only runs in reasonable time for the smaller test instances
continue
if optimal_val is None or val < optimal_val:
optimal_val, optimal_assignment = val, assignment.copy()
if optimal_val is None:
raise Exception('no feasible solution! ')
return optimal_assignment

View File

@@ -1,250 +1,233 @@
import copy
import math
import matplotlib.pyplot as plt
import pandas as pd
from base_optimizer.optimizer_scanbased import *
from base_optimizer.optimizer_celldivision import *
from base_optimizer.optimizer_hybridgenetic import *
from base_optimizer.optimizer_feederpriority import *
from dataloader import *
from lineopt_genetic import line_optimizer_genetic
from lineopt_heuristic import line_optimizer_heuristic
from lineopt_reconfiguration import line_optimizer_reconfiguration
from lineopt_hyperheuristic import line_optimizer_hyperheuristic
from lineopt_model import line_optimizer_model
from optimizer_genetic import *
from optimizer_heuristic import *
from base_optimizer.optimizer_interface import *
def deviation(data):
assert len(data) > 0
average, variance = sum(data) / len(data), 0
for v in data:
variance += (v - average) ** 2
return variance / len(data)
def optimizer(pcb_data, component_data, feeder_data, params, hinter=True):
if params.machine_number == 1:
assembly_info = [base_optimizer(1, pcb_data, component_data, feeder_data, params, hinter=hinter)]
return assembly_info
def optimizer(pcb_data, component_data, assembly_line_optimizer, single_machine_optimizer):
# todo: due to the nozzle-change factor, the genetic algorithm performs better on the PCB8 data
# because it takes this factor into account during workload balancing
# assignment_result = assemblyline_optimizer_heuristic(pcb_data, component_data)
assignment_result = assemblyline_optimizer_genetic(pcb_data, component_data)
print(assignment_result)
assignment_result_cpy = copy.deepcopy(assignment_result)
placement_points, placement_time = [], []
partial_pcb_data, partial_component_data = defaultdict(pd.DataFrame), defaultdict(pd.DataFrame)
for machine_index in range(max_machine_index):
partial_pcb_data[machine_index] = pd.DataFrame(columns=pcb_data.columns)
partial_component_data[machine_index] = component_data.copy(deep=True)
placement_points.append(sum(assignment_result[machine_index]))
assert sum(placement_points) == len(pcb_data)
# === averagely assign available feeder ===
for part_index, data in component_data.iterrows():
feeder_limit = data['feeder-limit']
feeder_points = [assignment_result[machine_index][part_index] for machine_index in range(max_machine_index)]
for machine_index in range(max_machine_index):
if feeder_points[machine_index] == 0:
continue
arg_feeder = max(math.floor(feeder_points[machine_index] / sum(feeder_points) * data['feeder-limit']), 1)
partial_component_data[machine_index].loc[part_index]['feeder-limit'] = arg_feeder
feeder_limit -= arg_feeder
for machine_index in range(max_machine_index):
if feeder_limit <= 0:
break
if feeder_points[machine_index] == 0:
continue
partial_component_data[machine_index].loc[part_index]['feeder-limit'] += 1
feeder_limit -= 1
for machine_index in range(max_machine_index):
if feeder_points[machine_index] > 0:
assert partial_component_data[machine_index].loc[part_index]['feeder-limit'] > 0
# === assign placements ===
component_machine_index = [0 for _ in range(len(component_data))]
for _, data in pcb_data.iterrows():
part_index = component_data[component_data['part'] == data['part']].index.tolist()[0]
while True:
machine_index = component_machine_index[part_index]
if assignment_result[machine_index][part_index] == 0:
component_machine_index[part_index] += 1
machine_index += 1
if params.line_optimizer == 'hyper-heuristic' or params.line_optimizer == 'heuristic' or params.line_optimizer \
== 'genetic' or params.line_optimizer == 'reconfiguration':
if params.line_optimizer == 'hyper-heuristic':
assignment_result = line_optimizer_hyperheuristic(component_data, pcb_data, params.machine_number)
elif params.line_optimizer == "heuristic":
assignment_result = line_optimizer_heuristic(component_data, params.machine_number)
elif params.line_optimizer == "genetic":
assignment_result = line_optimizer_genetic(component_data, params.machine_number)
else:
break
assignment_result[machine_index][part_index] -= 1
partial_pcb_data[machine_index] = pd.concat([partial_pcb_data[machine_index], pd.DataFrame(data).T])
assignment_result = line_optimizer_reconfiguration(component_data, pcb_data, params.machine_number)
# === adjust the number of available feeders for single optimization separately ===
for machine_index, data in partial_pcb_data.items():
data = data.reset_index(drop=True)
if len(data) == 0:
continue
part_info = [] # part info list: (part index, part points, assigned feeder num, upper feeder num)
for part_index, cp_data in partial_component_data[machine_index].iterrows():
if assignment_result_cpy[machine_index][part_index]:
part_info.append(
[part_index, assignment_result_cpy[machine_index][part_index], 1, cp_data['feeder-limit']])
part_info = sorted(part_info, key=lambda x: x[1], reverse=True)
start_index, end_index = 0, min(max_head_index - 1, len(part_info) - 1)
while start_index < len(part_info):
assign_part_point, assign_part_index = [], []
for idx_ in range(start_index, end_index + 1):
for _ in range(part_info[idx_][2]):
assign_part_point.append(part_info[idx_][1] / part_info[idx_][2])
assign_part_index.append(idx_)
variance = deviation(assign_part_point)
while start_index != end_index:
part_info_index = assign_part_index[np.argmax(assign_part_point)]
if part_info[part_info_index][2] < part_info[part_info_index][3]: # restricted by the upper limit on the feeder count
part_info[part_info_index][2] += 1
end_index -= 1
new_assign_part_point, new_assign_part_index = [], []
for idx_ in range(start_index, end_index + 1):
for _ in range(part_info[idx_][2]):
new_assign_part_point.append(part_info[idx_][1] / part_info[idx_][2])
new_assign_part_index.append(idx_)
new_variance = deviation(new_assign_part_point)
if variance < new_variance:
part_info[part_info_index][2] -= 1
end_index += 1
break
variance = new_variance
assign_part_index, assign_part_point = new_assign_part_index, new_assign_part_point
partial_pcb_data, partial_component_data = convert_line_assigment(pcb_data, component_data, assignment_result)
assembly_info = []
for machine_index in range(params.machine_number):
assembly_info.append(base_optimizer(machine_index + 1, partial_pcb_data[machine_index],
partial_component_data[machine_index],
pd.DataFrame(columns=['slot', 'part']), params, hinter=hinter))
elif params.line_optimizer == 'mip-model':
assembly_info = line_optimizer_model(component_data, pcb_data, params.machine_number)
else:
break
raise ValueError('line optimizer method does not exist')
start_index = end_index + 1
end_index = min(start_index + max_head_index - 1, len(part_info) - 1)
# update available feeder number
max_avl_feeder = max(part_info, key=lambda x: x[2])[2]
for info in part_info:
partial_component_data[machine_index].loc[info[0]]['feeder-limit'] = math.ceil(info[2] / max_avl_feeder)
placement_time.append(base_optimizer(machine_index + 1, data, partial_component_data[machine_index],
feeder_data=pd.DataFrame(columns=['slot', 'part', 'arg']),
method=single_machine_optimizer, hinter=True))
average_time, standard_deviation_time = sum(placement_time) / max_machine_index, 0
for machine_index in range(max_machine_index):
print('assembly time for machine ' + str(machine_index + 1) + ': ' + str(
placement_time[machine_index]) + ' s, ' + 'total placements: ' + str(placement_points[machine_index]))
standard_deviation_time += pow(placement_time[machine_index] - average_time, 2)
standard_deviation_time /= max_machine_index
standard_deviation_time = math.sqrt(standard_deviation_time)
print('finial assembly time: ' + str(max(placement_time)) + 's, standard deviation: ' + str(standard_deviation_time))
# todo: 不同类型元件的组装时间差异
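# NOTE: the feeder-balancing loop above calls deviation(); the helper itself is assumed to be defined
# in the common optimizer module. A minimal, self-contained sketch of one possible implementation
# (population variance of the per-feeder workloads) is given below for reference only.
def _deviation_sketch(points):
    # a lower value means a more even split of placement points across the allocated feeders
    if not points:
        return 0.
    mean = sum(points) / len(points)
    return sum((p - mean) ** 2 for p in points) / len(points)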
def base_optimizer(machine_index, pcb_data, component_data, feeder_data=None, method='', hinter=False):
    if method == 'cell_division':  # genetic algorithm based on cell division
component_result, cycle_result, feeder_slot_result = optimizer_celldivision(pcb_data, component_data,
hinter=False)
placement_result, head_sequence = greedy_placement_route_generation(component_data, pcb_data, component_result,
cycle_result, feeder_slot_result)
    elif method == 'feeder_scan':  # feeder-priority algorithm based on base scanning
        # step 1: allocate feeder slots
        nozzle_pattern = feeder_allocate(component_data, pcb_data, feeder_data, figure=False)
        # step 2: scan the feeder base to determine the component pickup order
        component_result, cycle_result, feeder_slot_result = feeder_base_scan(component_data, pcb_data, feeder_data,
                                                                              nozzle_pattern)
        # step 3: placement route planning
placement_result, head_sequence = greedy_placement_route_generation(component_data, pcb_data, component_result,
cycle_result, feeder_slot_result)
# placement_result, head_sequence = beam_search_for_route_generation(component_data, pcb_data, component_result,
# cycle_result, feeder_slot_result)
    elif method == 'hybrid_genetic':  # pickup-group-based hybrid genetic algorithm
component_result, cycle_result, feeder_slot_result, placement_result, head_sequence = optimizer_hybrid_genetic(
pcb_data, component_data, hinter=False)
    elif method == 'aggregation':  # batch-level integer programming + heuristic algorithm
component_result, cycle_result, feeder_slot_result, placement_result, head_sequence = optimizer_aggregation(
component_data, pcb_data)
elif method == 'genetic_scanning':
component_result, cycle_result, feeder_slot_result, placement_result, head_sequence = optimizer_genetic_scanning(
component_data, pcb_data, hinter=False)
    else:
        raise ValueError('the optimizer method does not exist')
if hinter:
optimization_assign_result(component_data, pcb_data, component_result, cycle_result, feeder_slot_result,
nozzle_hinter=True, component_hinter=False, feeder_hinter=False)
print('----- Placement machine ' + str(machine_index) + ' ----- ')
print('-Cycle counter: {}'.format(sum(cycle_result)))
total_nozzle_change_counter, total_pick_counter = 0, 0
total_pick_movement = 0
assigned_nozzle = ['' if idx == -1 else component_data.loc[idx]['nz'] for idx in component_result[0]]
for cycle in range(len(cycle_result)):
pick_slot = set()
for head in range(max_head_index):
if (idx := component_result[cycle][head]) == -1:
continue
nozzle = component_data.loc[idx]['nz']
if nozzle != assigned_nozzle[head]:
if assigned_nozzle[head] != '':
total_nozzle_change_counter += 1
assigned_nozzle[head] = nozzle
pick_slot.add(feeder_slot_result[cycle][head] - head * interval_ratio)
total_pick_counter += len(pick_slot) * cycle_result[cycle]
pick_slot = list(pick_slot)
pick_slot.sort()
for idx in range(len(pick_slot) - 1):
total_pick_movement += abs(pick_slot[idx+1] - pick_slot[idx])
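            # example: sorted pick slots [10, 14, 20] in one cycle group give a movement of
            # |14 - 10| + |20 - 14| = 10 slots for that group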
print('-Nozzle change counter: {}'.format(total_nozzle_change_counter))
print('-Pick operation counter: {}'.format(total_pick_counter))
print('-Pick movement: {}'.format(total_pick_movement))
print('------------------------------ ')
    # estimate the placement time
return placement_time_estimate(component_data, pcb_data, component_result, cycle_result, feeder_slot_result,
placement_result, head_sequence, hinter=False)
return assembly_info
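# Usage sketch (hypothetical file name): run the feeder-scan pipeline on the first machine's data
# and keep the estimated assembly time returned through placement_time_estimate().
# partial_pcb_data, partial_component_data, _ = load_data('PCB.txt', load_feeder=False)
# estimated_time = base_optimizer(1, partial_pcb_data[0], partial_component_data[0],
#                                 feeder_data=pd.DataFrame(columns=['slot', 'part']),
#                                 method='feeder_scan', hinter=True)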
@timer_wrapper
def main():
# warnings.simplefilter('ignore')
    # parse command-line arguments
parser = argparse.ArgumentParser(description='assembly line optimizer implementation')
    parser.add_argument('--mode', default=1, type=int, help='mode: 0 -directly load pcb data without optimization '
                                                            'for data analysis, 1 -optimize pcb data, 2 -batch test')
    parser.add_argument('--filename', default='PCB.txt', type=str, help='load pcb data')
    parser.add_argument('--comp_register', default=1, type=int, help='register the component according to the pcb data')
    parser.add_argument('--machine_number', default=3, type=int, help='the number of machines in the assembly line')
    parser.add_argument('--machine_optimizer', default='feeder-priority', type=str, help='optimizer for single machine')
    parser.add_argument('--line_optimizer', default='hyper-heuristic', type=str, help='optimizer for PCB assembly line')
    parser.add_argument('--feeder_limit', default=1, type=int, help='the upper feeder limit for each type of component')
    parser.add_argument('--save', default=0, type=int, help='save the optimization result')
    parser.add_argument('--save_suffix', default='(1)', type=str, help='suffix appended to saved result file names')
    params = parser.parse_args()
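    # Example invocations (the entry-file name below is a placeholder):
    #   python optimizer.py --mode 1 --filename PCB.txt --machine_number 3 --line_optimizer hyper-heuristic
    #   python optimizer.py --mode 2   # batch test over the files under data/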
    # display all rows and columns in the printed results
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
if params.mode == 0:
partial_pcb_data, partial_component_data, _ = load_data(params.filename, load_feeder=False)
assembly_info = []
for machine_index in range(len(partial_pcb_data)):
opt_res = convert_pcbdata_to_result(partial_pcb_data[machine_index], partial_component_data[machine_index])
info = placement_info_evaluation(partial_component_data[machine_index], partial_pcb_data[machine_index],
opt_res)
assembly_info.append(info)
optimization_assign_result(partial_component_data[machine_index], partial_pcb_data[machine_index], opt_res,
nozzle_hinter=True, component_hinter=True, feeder_hinter=True)
info.print()
if params.save:
output_optimize_result(f'result/{params.filename[:-4]}-T-Solution-M0{machine_index + 1}',
partial_component_data[machine_index], partial_pcb_data[machine_index], opt_res)
print('------------------------------ ')
for machine_idx, info in enumerate(assembly_info):
print(f'assembly time for machine {machine_idx + 1: d}: {info.total_time: .3f} s, total placement: '
f'{info.total_points}, total component types {info.total_components: d}')
        print(f'final assembly time: {max(info.total_time for info in assembly_info): .3f} s, '
f'standard deviation: {np.std([info.total_time for info in assembly_info]): .3f}')
    elif params.mode == 1:
        # load PCB data
        partial_pcb_data, partial_component_data, feeder_data = load_data(params.filename, load_feeder=True)
        pcb_data, component_data = merge_data(partial_pcb_data, partial_component_data)

        start_time = time.time()
        assembly_info = optimizer(pcb_data, component_data, feeder_data, params)
        # sys.stdout = sys.__stdout__
        print(f'optimizer running time: {time.time() - start_time: .3f}')

        for machine_idx, info in enumerate(assembly_info):
            print(f'assembly time for machine {machine_idx + 1: d}: {info.total_time: .3f} s, total placement: '
                  f'{info.total_points}, total component types {info.total_components: d}')

        print(f'assembly metric evaluation {max(info.metric() for info in assembly_info)}')
        print(f'final assembly time: {max(info.total_time for info in assembly_info): .3f} s, '
              f'standard deviation: {np.std([info.total_time for info in assembly_info]): .3f}')
elif params.mode == 2:
# sys.stdout = open(f'record/dissertation-experiment.txt', 'w')
machine_optimizer = ['two-phase', 'hybrid-genetic', 'cell-division', 'feeder-priority', 'aggregation']
running_round = 10
opt_columns = ['Cycle', 'Pick', 'Nozzle-Change', 'Running-Time']
opt_result, opt_runtime = defaultdict(pd.DataFrame), defaultdict(pd.DataFrame)
for opt in machine_optimizer:
opt_result[opt] = pd.DataFrame(columns=opt_columns)
opt_result[opt].index.name = 'file'
for _, file in enumerate(os.listdir('data/')):
if file[-3:] != 'txt':
continue
partial_pcb_data, partial_component_data, feeder_data = load_data(file)
pcb_data, component_data = merge_data(partial_pcb_data, partial_component_data)
for opt in machine_optimizer:
for round_idx in range(running_round):
print(f'--- file {file}, round {round_idx + 1}, optimizer {opt} --- ')
params = parser.parse_args(['--machine_optimizer', opt, '--machine_number', str(1), '--filename', file])
start_time = time.time()
assembly_info = optimizer(pcb_data, component_data, feeder_data, params, hinter=False)
opt_result[opt].loc[file + str(round_idx + 1), 'Cycle'] = assembly_info[0].cycle_counter
opt_result[opt].loc[file + str(round_idx + 1), 'Pick'] = assembly_info[0].pickup_counter
opt_result[opt].loc[file + str(round_idx + 1), 'Nozzle-Change'] = assembly_info[0].nozzle_change_counter
opt_result[opt].loc[file + str(round_idx + 1), 'Running-Time'] = time.time() - start_time
with pd.ExcelWriter('result/machine_optimizer.xlsx', engine='openpyxl') as writer:
for opt, result in opt_result.items():
result.to_excel(writer, sheet_name=opt, float_format='%.3f', na_rep='')
else:
line_optimizer = ['T-Solution', 'hyper-heuristic', 'genetic', 'reconfiguration']
file_dirs = ['L01', 'L02', 'L03']
running_round = 10
line_opt_result, line_opt_runtime = defaultdict(pd.DataFrame), defaultdict(pd.DataFrame)
opt_columns = []
for line_opt in line_optimizer:
if line_opt == 'T-Solution':
opt_columns.append(line_opt)
else:
opt_columns.extend([line_opt + str(idx + 1) for idx in range(running_round)])
for file_dir in file_dirs:
line_opt_result[file_dir] = pd.DataFrame(columns=opt_columns)
line_opt_runtime[file_dir] = pd.DataFrame(columns=opt_columns)
line_opt_result[file_dir].index.name, line_opt_runtime[file_dir].index.name = 'file', 'file'
for file_index, file in enumerate(os.listdir('data/' + file_dir)):
sys.stdout = sys.__stdout__
print(f'--- {file_dir} : ({file_index + 1}) file {file} --- ')
try:
partial_pcb_data, partial_component_data, _ = load_data(file_dir + '/' + file, load_feeder=False)
                except Exception:
traceback.print_exc()
warning_info = f'file: {file_dir}/{file}: an unexpected error occurs for data loader'
warnings.warn(warning_info, SyntaxWarning)
continue
machine_number = len(partial_pcb_data)
if not os.path.exists(f'record/{file_dir}'):
os.makedirs(f'record/{file_dir}')
merge_pcb_data, merge_component_data = merge_data(partial_pcb_data, partial_component_data)
for line_opt in line_optimizer:
assembly_info = []
if line_opt == 'T-Solution':
sys.stdout = open(f'record/{file_dir}/{file[:-4]}-{line_opt}.txt', 'w')
for machine_index in range(machine_number):
opt_res = convert_pcbdata_to_result(partial_pcb_data[machine_index],
partial_component_data[machine_index])
print('----- Placement machine ' + str(machine_index + 1) + ' ----- ')
info = placement_info_evaluation(partial_component_data[machine_index],
partial_pcb_data[machine_index], opt_res, hinter=True)
print('------------------------------ ')
assembly_info.append(info)
if params.save:
output_optimize_result(f'result/{file_dir}/{file[:-4]}-T-Solution-M0{machine_index + 1}',
partial_component_data[machine_index],
partial_pcb_data[machine_index], opt_res)
line_opt_result[file_dir].loc[file, line_opt] = max(info.total_time for info in assembly_info)
for machine_idx, info in enumerate(assembly_info):
print(
f'assembly time for machine {machine_idx + 1: d}: {info.total_time: .3f} s, total placement: '
f'{info.total_points}, total component types {info.total_components: d}')
                        print(f'final assembly time: {max(info.total_time for info in assembly_info): .3f} s, '
f'standard deviation: {np.std([info.total_time for info in assembly_info]): .3f}')
else:
for round_idx in range(running_round):
sys.stdout = open(f'record/{file_dir}/{file[:-4]}-{line_opt} ({round_idx + 1}).txt', 'w')
params = parser.parse_args(
['--filename', file_dir + '/' + file, '--machine_number', str(machine_number),
'--line_optimizer', line_opt, '--save_suffix', f'({round_idx + 1})'])
start_time = time.time()
assembly_info = optimizer(merge_pcb_data, merge_component_data, None, params)
line_opt_result[file_dir].loc[file, line_opt + str(round_idx + 1)] = max(
info.total_time for info in assembly_info)
line_opt_runtime[file_dir].loc[file, line_opt + str(round_idx + 1)] = time.time() - start_time
for machine_idx, info in enumerate(assembly_info):
print(
f'assembly time for machine {machine_idx + 1: d}: {info.total_time: .3f} s, '
f'total placement: {info.total_points}, '
f'total component types {info.total_components: d}')
                            print(f'final assembly time: {max(info.total_time for info in assembly_info): .3f} s, '
f'standard deviation: {np.std([info.total_time for info in assembly_info]): .3f}')
with pd.ExcelWriter('result/line_optimizer.xlsx', engine='openpyxl') as writer:
for file_dir, result in line_opt_result.items():
result.to_excel(writer, sheet_name='result-' + file_dir, float_format='%.3f', na_rep='')
for file_dir, running_time in line_opt_runtime.items():
running_time.to_excel(writer, sheet_name='running_time-' + file_dir, float_format='%.3f', na_rep='')
if __name__ == '__main__':
    main()

View File

@@ -1,146 +0,0 @@
import math
import numpy as np
from base_optimizer.optimizer_common import *
# TODO: take the PCB placement topology into account
def assembly_time_estimator(component_points, component_feeders, component_nozzle, assignment_points):
# todo: how to deal with nozzle change
n_cycle, n_nz_change, n_gang_pick = 0, 0, 0
nozzle_heads, nozzle_points = defaultdict(int), defaultdict(int)
for idx, points in enumerate(assignment_points):
if points == 0:
continue
nozzle_points[component_nozzle[idx]] += points
nozzle_heads[component_nozzle[idx]] = 1
while sum(nozzle_heads.values()) != max_head_index:
max_cycle_nozzle = None
for nozzle, head_num in nozzle_heads.items():
if max_cycle_nozzle is None or nozzle_points[nozzle] / head_num > nozzle_points[max_cycle_nozzle] / \
nozzle_heads[max_cycle_nozzle]:
max_cycle_nozzle = nozzle
assert max_cycle_nozzle is not None
nozzle_heads[max_cycle_nozzle] += 1
n_cycle = max(map(lambda x: math.ceil(nozzle_points[x[0]] / x[1]), nozzle_heads.items()))
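    # worked example (hypothetical numbers): with max_head_index = 6 and
    # nozzle_points = {'CN065': 90, 'CN040': 30}, the greedy loop above assigns 4 heads to CN065
    # and 2 heads to CN040, so n_cycle = max(ceil(90 / 4), ceil(30 / 2)) = 23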
# calculate the number of simultaneous pickup
head_index, nozzle_cycle = 0, [[] for _ in range(max_head_index)]
for nozzle, heads in nozzle_heads.items():
head_index_cpy, points = head_index, nozzle_points[nozzle]
for _ in range(heads):
nozzle_cycle[head_index].append([nozzle, points // heads])
head_index += 1
points %= heads
while points:
            nozzle_cycle[head_index_cpy][-1][1] += 1  # distribute the remainder placements one per head
points -= 1
head_index_cpy += 1
# nozzle_cycle_index = [0 for _ in range(max_head_index)]
return n_cycle, n_nz_change, n_gang_pick
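# A hedged sketch of how the three counters could be folded into a single time estimate; the
# weights below are illustrative placeholders, not the coefficients fitted in this repository.
# def estimate_assembly_time(n_cycle, n_nz_change, n_gang_pick,
#                            w_cycle=0.3, w_nozzle=0.9, w_pick=0.15):
#     return w_cycle * n_cycle + w_nozzle * n_nz_change + w_pick * n_gang_pick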
def assemblyline_optimizer_heuristic(pcb_data, component_data):
# the number of placement points, the number of available feeders, and nozzle type of component respectively
component_number = len(component_data)
component_points = [0 for _ in range(component_number)]
component_feeders = [0 for _ in range(component_number)]
component_nozzle = [0 for _ in range(component_number)]
component_part = [0 for _ in range(component_number)]
nozzle_points = defaultdict(int) # the number of placements of nozzle
for _, data in pcb_data.iterrows():
part_index = component_data[component_data['part'] == data['part']].index.tolist()[0]
nozzle = component_data.loc[part_index]['nz']
component_points[part_index] += 1
component_feeders[part_index] = component_data.loc[part_index]['feeder-limit']
# component_feeders[part_index] = math.ceil(component_data.loc[part_index]['feeder-limit'] / max_feeder_limit)
component_nozzle[part_index] = nozzle
component_part[part_index] = data['part']
nozzle_points[nozzle] += 1
# first step: generate the initial solution with equalized workload
assignment_result = [[0 for _ in range(len(component_points))] for _ in range(max_machine_index)]
assignment_points = [0 for _ in range(max_machine_index)]
weighted_points = list(
map(lambda x: x[1] + 1e-5 * nozzle_points[component_nozzle[x[0]]], enumerate(component_points)))
for part_index in np.argsort(weighted_points):
if (total_points := component_points[part_index]) == 0: # total placements for each component type
continue
machine_set = []
        # determine the machines to which placement points can be assigned (considering the feeder limitation)
for machine_index in np.argsort(assignment_points):
if len(machine_set) >= component_points[part_index] or len(machine_set) >= component_feeders[part_index]:
break
machine_set.append(machine_index)
        # allocate the mounting points to the available machines as evenly as possible
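        # worked example: with assignment_points = [10, 10, 14] and total_points = 12 for a part
        # available on all three machines, the loop below first raises the two least-loaded machines
        # to 14 (consuming 8 points), then spreads the remaining 4 evenly, ending at [16, 15, 15]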
while total_points:
assign_machine = list(filter(lambda x: assignment_points[x] == min(assignment_points), machine_set))
if len(assign_machine) == len(machine_set):
                # assign the points evenly across all available machines
points = total_points // len(assign_machine)
for machine_index in machine_set:
assignment_points[machine_index] += points
assignment_result[machine_index][part_index] += points
total_points -= points * len(assign_machine)
for machine_index in machine_set:
if total_points == 0:
break
assignment_points[machine_index] += 1
assignment_result[machine_index][part_index] += 1
total_points -= 1
else:
                # assign placements to close the gap between the least-loaded and the second-least-loaded machine
second_least_machine, second_least_machine_points = -1, max(assignment_points) + 1
for idx in machine_set:
if assignment_points[idx] < second_least_machine_points and assignment_points[idx] != min(
assignment_points):
second_least_machine_points = assignment_points[idx]
second_least_machine = idx
assert second_least_machine != -1
if len(assign_machine) * (second_least_machine_points - min(assignment_points)) < total_points:
min_points = min(assignment_points)
total_points -= len(assign_machine) * (second_least_machine_points - min_points)
for machine_index in assign_machine:
assignment_points[machine_index] += (second_least_machine_points - min_points)
assignment_result[machine_index][part_index] += (
second_least_machine_points - min_points)
else:
points = total_points // len(assign_machine)
for machine_index in assign_machine:
assignment_points[machine_index] += points
assignment_result[machine_index][part_index] += points
total_points -= points * len(assign_machine)
for machine_index in assign_machine:
if total_points == 0:
break
assignment_points[machine_index] += 1
assignment_result[machine_index][part_index] += 1
total_points -= 1
# todo: implementation
# second step: estimate the assembly time for each machine
# third step: adjust the assignment results to reduce maximal assembly time among all machines
return assignment_result
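# Usage sketch (hypothetical frames): pcb_data / component_data are the DataFrames produced by the
# data loader; the result is a machine-by-component matrix of assigned placement points.
# assignment = assemblyline_optimizer_heuristic(pcb_data, component_data)
# for machine_index, row in enumerate(assignment):
#     print(f'machine {machine_index + 1}: {sum(row)} placement points')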

View File

@@ -1,11 +0,0 @@
# implementation of
# "Hybrid spider monkey optimisation algorithm for multi-level planning and scheduling problems of assembly lines"
def assemblyline_optimizer_spidermonkey(pcb_data, component_data):
# number of swarms: 10
# maximum number of groups: 5
# number of loops: 100
# food source population: 50
# mutation rate: 0.1
# crossover rate: 0.9
# computation time(s): 200
pass