# smt-optimizer/optimizer.py

# stdlib / third-party imports used directly in this file (some may also be re-exported by the wildcard imports below)
import argparse, copy, math, pickle, warnings
from collections import defaultdict

import numpy as np
import pandas as pd

from dataloader import *
from optimizer_genetic import *
from optimizer_heuristic import *
from optimizer_reconfiguration import *
from base_optimizer.optimizer_interface import *


def deviation(data):
    # NOTE: despite its name, this returns the population variance of `data`;
    # it is used as a measure of how evenly points are spread across feeders
    assert len(data) > 0
    average, variance = sum(data) / len(data), 0
    for v in data:
        variance += (v - average) ** 2
    return variance / len(data)
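

# optimizer(): top-level assembly-line optimization flow:
#   1. dispatch to the selected line-level optimizer (heuristic / genetic / reconfiguration) to get
#      a per-machine placement-point assignment for every component type
#   2. split the PCB data and component data into per-machine subsets
#   3. distribute each component's available feeders across machines and re-balance the feeder
#      limits for every machine's single-machine optimization
#   4. run the single-machine base optimizer per machine and report the measured assembly times
#      together with a regression-based estimate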
def optimizer(pcb_data, component_data, line_optimizer, machine_optimizer, machine_number):
    # dispatch to the selected assembly-line (multi-machine) optimizer
    if line_optimizer == "heuristic":
        assignment_result = assemblyline_optimizer_heuristic(pcb_data, component_data, machine_number)
    elif line_optimizer == "genetic":
        assignment_result = assemblyline_optimizer_genetic(pcb_data, component_data, machine_number)
    elif line_optimizer == "reconfiguration":
        assignment_result = reconfiguration_optimizer(pcb_data, component_data, machine_number)
    else:
        return

    assignment_result_cpy = copy.deepcopy(assignment_result)
    placement_points, assembly_info = [], []
    partial_pcb_data, partial_component_data = defaultdict(pd.DataFrame), defaultdict(pd.DataFrame)
    for machine_index in range(machine_number):
        partial_pcb_data[machine_index] = pd.DataFrame(columns=pcb_data.columns)
        partial_component_data[machine_index] = component_data.copy(deep=True)
        placement_points.append(sum(assignment_result[machine_index]))

    assert sum(placement_points) == len(pcb_data)

    # === evenly assign the available feeders across machines ===
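    # proportional split: each machine with nonzero points gets floor(points share * feeder-limit)
    # feeders (at least 1); any remainder is then handed out one feeder per machine in index order.
    # e.g. (hypothetical numbers) feeder-limit 4 with per-machine points [30, 10, 0] -> split [3, 1, 0]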
    for part_index, data in component_data.iterrows():
        feeder_limit = data['feeder-limit']
        feeder_points = [assignment_result[machine_index][part_index] for machine_index in range(machine_number)]

        for machine_index in range(machine_number):
            if feeder_points[machine_index] == 0:
                continue

            arg_feeder = max(math.floor(feeder_points[machine_index] / sum(feeder_points) * data['feeder-limit']), 1)
            partial_component_data[machine_index].loc[part_index, 'feeder-limit'] = arg_feeder
            feeder_limit -= arg_feeder

        for machine_index in range(machine_number):
            if feeder_limit <= 0:
                break
            if feeder_points[machine_index] == 0:
                continue
            partial_component_data[machine_index].loc[part_index, 'feeder-limit'] += 1
            feeder_limit -= 1

        for machine_index in range(machine_number):
            if feeder_points[machine_index] > 0:
                assert partial_component_data[machine_index].loc[part_index, 'feeder-limit'] > 0

    # === assign placements ===
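    # route every placement record to the lowest-indexed machine that still has an unconsumed
    # point quota for its part, consuming assignment_result as we go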
    component_machine_index = [0 for _ in range(len(component_data))]
    for _, data in pcb_data.iterrows():
        part_index = component_data[component_data['part'] == data['part']].index.tolist()[0]
        while True:
            machine_index = component_machine_index[part_index]
            if assignment_result[machine_index][part_index] == 0:
                component_machine_index[part_index] += 1
                machine_index += 1
            else:
                break
        assignment_result[machine_index][part_index] -= 1
        partial_pcb_data[machine_index] = pd.concat([partial_pcb_data[machine_index], pd.DataFrame(data).T])

    # === adjust the number of available feeders for each machine's single-machine optimization ===
    for machine_index, data in partial_pcb_data.items():
        data = data.reset_index(drop=True)
        if len(data) == 0:
            continue

        part_info = []  # each entry: [part index, part points, assigned feeder num, upper feeder num]
        for part_index, cp_data in partial_component_data[machine_index].iterrows():
            if assignment_result_cpy[machine_index][part_index]:
                part_info.append(
                    [part_index, assignment_result_cpy[machine_index][part_index], 1, cp_data['feeder-limit']])

        part_info = sorted(part_info, key=lambda x: x[1], reverse=True)
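        # balance feeders within head-sized windows: the sorted parts are processed in windows of
        # at most max_head_index entries; one extra feeder at a time goes to the part with the
        # largest points-per-feeder ratio as long as that keeps lowering the variance of the
        # ratios, and each extra feeder shrinks the window by one entry so a window never occupies
        # more than max_head_index feeder slots in total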
        start_index, end_index = 0, min(max_head_index - 1, len(part_info) - 1)
        while start_index < len(part_info):
            assign_part_point, assign_part_index = [], []
            for idx_ in range(start_index, end_index + 1):
                for _ in range(part_info[idx_][2]):
                    assign_part_point.append(part_info[idx_][1] / part_info[idx_][2])
                    assign_part_index.append(idx_)

            variance = deviation(assign_part_point)
            while start_index <= end_index:
                part_info_index = assign_part_index[np.argmax(assign_part_point)]
                if part_info[part_info_index][2] < part_info[part_info_index][3]:  # upper bound on the feeder number
                    part_info[part_info_index][2] += 1
                    end_index -= 1

                    new_assign_part_point, new_assign_part_index = [], []
                    for idx_ in range(start_index, end_index + 1):
                        for _ in range(part_info[idx_][2]):
                            new_assign_part_point.append(part_info[idx_][1] / part_info[idx_][2])
                            new_assign_part_index.append(idx_)

                    new_variance = deviation(new_assign_part_point)
                    if variance < new_variance:
                        part_info[part_info_index][2] -= 1
                        end_index += 1
                        break

                    variance = new_variance
                    assign_part_index, assign_part_point = new_assign_part_index.copy(), new_assign_part_point.copy()
                else:
                    break

            start_index = end_index + 1
            end_index = min(start_index + max_head_index - 1, len(part_info) - 1)

        # update the available feeder number for this machine
        max_avl_feeder = max(part_info, key=lambda x: x[2])[2]
        for info in part_info:
            partial_component_data[machine_index].loc[info[0], 'feeder-limit'] = math.ceil(info[2] / max_avl_feeder)

        assembly_info.append(base_optimizer(machine_index + 1, data, partial_component_data[machine_index],
                                            feeder_data=pd.DataFrame(columns=['slot', 'part', 'arg']),
                                            method=machine_optimizer, hinter=True))

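    # estimate each machine's assembly time with a pre-trained linear-regression model whose
    # features are cycle count, nozzle changes, pickup count, pickup movement and placement points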
    with open('model/lr_model.pkl', 'rb') as f:
        lr = pickle.load(f)

    average_time, standard_deviation_time = sum(
        [assembly_info[m].placement_time for m in range(machine_number)]) / machine_number, 0
    for machine_index in range(machine_number):
        total_component_types = sum(1 if pt else 0 for pt in assignment_result_cpy[machine_index])
        placement_time = assembly_info[machine_index].placement_time
        regression_time = (lr.coef_[0][0] * assembly_info[machine_index].cycle_counter
                           + lr.coef_[0][1] * assembly_info[machine_index].nozzle_change_counter
                           + lr.coef_[0][2] * assembly_info[machine_index].pickup_counter
                           + lr.coef_[0][3] * assembly_info[machine_index].pickup_movement
                           + lr.coef_[0][4] * placement_points[machine_index] + lr.intercept_[0])

        print(f'assembly time for machine {machine_index + 1: d}: {placement_time: .3f} s, total placement: '
              f'{placement_points[machine_index]}, total component types {total_component_types: d}', end=', ')
        print(f'regression time: {regression_time: .3f} s')

        standard_deviation_time += pow(placement_time - average_time, 2)

    standard_deviation_time /= machine_number
    standard_deviation_time = math.sqrt(standard_deviation_time)

    print(f'final assembly time: {max(info.placement_time for info in assembly_info): .3f} s, '
          f'standard deviation: {standard_deviation_time: .3f}')


@timer_wrapper
def main():
    warnings.simplefilter(action='ignore', category=FutureWarning)

    # parse command-line arguments
    parser = argparse.ArgumentParser(description='assembly line optimizer implementation')
    parser.add_argument('--filename', default='PCB.txt', type=str, help='load pcb data')
    parser.add_argument('--auto_register', default=1, type=int, help='register components according to the pcb data')
    parser.add_argument('--machine_number', default=3, type=int, help='the number of machines in the assembly line')
    parser.add_argument('--machine_optimizer', default='feeder_scan', type=str, help='optimizer for a single machine')
    parser.add_argument('--line_optimizer', default='genetic', type=str, help='optimizer for the PCB assembly line')
    parser.add_argument('--feeder_limit', default=1, type=int,
                        help='the upper feeder limit for each type of component')
    params = parser.parse_args()

    # show all rows and columns when printing the results
    pd.set_option('display.max_columns', None)
    pd.set_option('display.max_rows', None)

    # load PCB data
    pcb_data, component_data, _ = load_data(params.filename, default_feeder_limit=params.feeder_limit,
                                            cp_auto_register=params.auto_register, load_feeder_data=False)

    optimizer(pcb_data, component_data, params.line_optimizer, params.machine_optimizer, params.machine_number)
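
# example invocation using the defaults declared above (assumed to be run from the project root):
#   python optimizer.py --filename PCB.txt --machine_number 3 --line_optimizer genetic --machine_optimizer feeder_scan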
if __name__ == '__main__':
    main()