From 34c411caeb0136c6448a10b9eec7ee806b6d3b8e Mon Sep 17 00:00:00 2001
From: hit_lu
Date: Mon, 24 Nov 2025 13:50:00 +0800
Subject: [PATCH] Runtime environment adaptation
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 base_optimizer/optimizer_common.py     |  3 ++-
 base_optimizer/smopt_feederpriority.py |  2 +-
 estimator.py                           | 25 ++++++++++++++-----------
 lineopt_hyperheuristic.py              |  2 +-
 optimizer.py                           |  2 +-
 5 files changed, 19 insertions(+), 15 deletions(-)

diff --git a/base_optimizer/optimizer_common.py b/base_optimizer/optimizer_common.py
index 365d67c..2a25aa4 100644
--- a/base_optimizer/optimizer_common.py
+++ b/base_optimizer/optimizer_common.py
@@ -1,7 +1,7 @@
 from functools import wraps
 from collections import defaultdict
 from tqdm import tqdm
-from gurobipy import *
+
 from sklearn.linear_model import LinearRegression
 from sklearn.svm import SVR
 
@@ -23,6 +23,7 @@ import matplotlib.pyplot as plt
 import matplotlib
 import traceback
 import openpyxl
+import itertools
 
 matplotlib.use('TkAgg')
 
diff --git a/base_optimizer/smopt_feederpriority.py b/base_optimizer/smopt_feederpriority.py
index e9f5288..3b9e4ac 100644
--- a/base_optimizer/smopt_feederpriority.py
+++ b/base_optimizer/smopt_feederpriority.py
@@ -168,7 +168,7 @@ def feeder_allocate(component_data, pcb_data, feeder_data, nozzle_pattern, figur
     assert len(nozzle_pattern) == max_head_index
     while True:
         best_assign, best_assign_points = [], []
-        best_assign_slot, best_assign_value = -1, -np.Inf
+        best_assign_slot, best_assign_value = -1, -np.inf
         best_nozzle_component, best_nozzle_component_points = None, None
         for slot in range(1, max_slot_index // 2 - (max_head_index - 1) * interval_ratio + 1):
             feeder_assign, feeder_assign_points = [], []
diff --git a/estimator.py b/estimator.py
index 9128238..c4f2ea8 100644
--- a/estimator.py
+++ b/estimator.py
@@ -105,15 +105,14 @@ class Estimator:
 class NeuralEstimator(Estimator):
     def __init__(self):
         super().__init__()
-        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 
-        self.net = Net(input_size=self.data_mgr.get_feature(), output_size=1).to(device)
+        self.net = Net(input_size=self.data_mgr.get_feature(), output_size=1).to(self.device)
         self.net_file = 'model/net_model.pth'
-        if os.path.exists(self.net_file):
-            try:
-                self.net.load_state_dict(torch.load(self.net_file))
-            except:
-                warnings.warn('the parameters of neural net model load failed', UserWarning)
+        try:
+            self.net.load_state_dict(torch.load(self.net_file, map_location=self.device))
+        except:
+            warnings.warn('the parameters of neural net model load failed', UserWarning)
 
     def init_weights(self):
         for m in self.net.modules():
@@ -128,8 +127,8 @@ class NeuralEstimator(Estimator):
         x_train = np.array(data_mgr.neural_encode(data[0][::data_mgr.get_update_round()]))
         y_train = np.array(data[1][::data_mgr.get_update_round()])
 
-        x_train = torch.from_numpy(x_train.reshape((-1, np.shape(x_train)[1]))).float().to(device)
-        y_train = torch.from_numpy(y_train.reshape((-1, 1))).float().to(device)
+        x_train = torch.from_numpy(x_train.reshape((-1, np.shape(x_train)[1]))).float().to(self.device)
+        y_train = torch.from_numpy(y_train.reshape((-1, 1))).float().to(self.device)
 
         optimizer = torch.optim.Adam(self.net.parameters(), lr=params.lr)
         # scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=5000, gamma=0.1)
@@ -161,7 +160,7 @@ class NeuralEstimator(Estimator):
         data = data_mgr.loader('opt/' + params.test_file)
         x_test, y_test = np.array(data_mgr.neural_encode(data[0])), np.array(data[1])
 
-        x_test = torch.from_numpy(x_test.reshape((-1, np.shape(x_test)[1]))).float().to(device)
+        x_test = torch.from_numpy(x_test.reshape((-1, np.shape(x_test)[1]))).float().to(self.device)
 
         self.net.eval()
         with torch.no_grad():
@@ -171,7 +170,7 @@ class NeuralEstimator(Estimator):
     def predict(self, cp_points, cp_nozzle, board_width=None, board_height=None):
         assert board_width is not None and board_height is not None
         encoding = np.array(self.data_mgr.encode(cp_points, cp_nozzle, board_width, board_height))
-        encoding = torch.from_numpy(encoding.reshape((-1, np.shape(encoding)[0]))).float().to("cuda")
+        encoding = torch.from_numpy(encoding.reshape((-1, np.shape(encoding)[0]))).float().to(self.device)
         return self.net(encoding)[0, 0].item()
 
 
@@ -184,6 +183,8 @@ class HeuristicEstimator(Estimator):
         if os.path.exists(self.pickle_file):
             with open(self.pickle_file, 'rb') as f:
                 self.lr = pickle.load(f)
+        else:
+            warnings.warn('the parameters of heuristic lr model load failed', UserWarning)
 
     def training(self, params):
         data = data_mgr.loader('opt/' + params.train_file)
@@ -304,6 +305,8 @@ class ReconfigEstimator(Estimator):
         if os.path.exists(self.pickle_file):
             with open(self.pickle_file, 'rb') as f:
                 self.lr = pickle.load(f)
+        else:
+            warnings.warn('the parameters of reconfig model load failed', UserWarning)
 
     def training(self, params):
         data = data_mgr.loader('opt/' + params.train_file)
diff --git a/lineopt_hyperheuristic.py b/lineopt_hyperheuristic.py
index 0b9eb9a..bdcc50f 100644
--- a/lineopt_hyperheuristic.py
+++ b/lineopt_hyperheuristic.py
@@ -411,7 +411,7 @@ def line_optimizer_hyperheuristic(component_data, pcb_data, machine_number):
     best_component_list = component_list.copy()
 
     machine_cp_points = convert_assignment_result(heuristic_map, cp_index, cp_points, cp_nozzle, cp_feeders,
-                                                  best_component_list, best_heuristic_list, machine_number, is_opt=True)
+                                                  best_component_list, best_heuristic_list, machine_number)
 
     assignment_result = [[0 for _ in range(len(component_data))] for _ in range(machine_number)]
     for machine_idx in range(machine_number):
diff --git a/optimizer.py b/optimizer.py
index dee9b40..af38727 100644
--- a/optimizer.py
+++ b/optimizer.py
@@ -44,7 +44,7 @@ def main():
     parser = argparse.ArgumentParser(description='assembly line optimizer implementation')
     parser.add_argument('--mode', default=1, type=int, help='mode: 0 -directly load pcb data without optimization '
                                                             'for data analysis, 1 -optimize pcb data, 2 -batch test')
-    parser.add_argument('--filename', default='PCB.txt', type=str, help='load pcb data')
+    parser.add_argument('--filename', default='data-1.txt', type=str, help='load pcb data')
     parser.add_argument('--comp_register', default=1, type=int, help='register the component according the pcb data')
     parser.add_argument('--machine_number', default=3, type=int, help='the number of machine in the assembly line')