Runtime environment adaptation

2025-11-24 13:50:00 +08:00
parent 4fd5560650
commit 34c411caeb
5 changed files with 19 additions and 15 deletions


@@ -1,7 +1,7 @@
 from functools import wraps
 from collections import defaultdict
 from tqdm import tqdm
-from gurobipy import *
 from sklearn.linear_model import LinearRegression
 from sklearn.svm import SVR
@@ -23,6 +23,7 @@ import matplotlib.pyplot as plt
 import matplotlib
 import traceback
 import openpyxl
+import itertools
 matplotlib.use('TkAgg')
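Of the import changes above, dropping "from gurobipy import *" is the one that touches the runtime environment: the module now imports on machines without a Gurobi installation, while "import itertools" is a pure standard-library addition. Where the solver should remain optional rather than disappear entirely, a guarded import is a common pattern; the sketch below is only an illustration under that assumption, not what this repository does:

# Hypothetical optional dependency: detect Gurobi at import time and degrade gracefully.
try:
    from gurobipy import Model, GRB
    HAS_GUROBI = True
except ImportError:
    HAS_GUROBI = False  # callers can check this flag before building a MIP model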


@@ -168,7 +168,7 @@ def feeder_allocate(component_data, pcb_data, feeder_data, nozzle_pattern, figur
     assert len(nozzle_pattern) == max_head_index
     while True:
         best_assign, best_assign_points = [], []
-        best_assign_slot, best_assign_value = -1, -np.Inf
+        best_assign_slot, best_assign_value = -1, -np.inf
         best_nozzle_component, best_nozzle_component_points = None, None
         for slot in range(1, max_slot_index // 2 - (max_head_index - 1) * interval_ratio + 1):
            feeder_assign, feeder_assign_points = [], []
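The only change in this hunk swaps np.Inf for np.inf. NumPy 2.0 removed the capitalized alias, so the lowercase spelling keeps the file importable on newer NumPy releases; the value itself is unchanged. A minimal sketch of the sentinel's role, assuming the surrounding loop maximizes best_assign_value:

import numpy as np
# Start the incumbent at -inf so the first feasible assignment always wins the comparison.
best_assign_slot, best_assign_value = -1, -np.inf
candidate_value = 0.0
if candidate_value > best_assign_value:
    best_assign_slot, best_assign_value = 1, candidate_value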


@@ -105,15 +105,14 @@ class Estimator:
 class NeuralEstimator(Estimator):
     def __init__(self):
         super().__init__()
-        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
-        self.net = Net(input_size=self.data_mgr.get_feature(), output_size=1).to(device)
+        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+        self.net = Net(input_size=self.data_mgr.get_feature(), output_size=1).to(self.device)
         self.net_file = 'model/net_model.pth'
-        if os.path.exists(self.net_file):
-            try:
-                self.net.load_state_dict(torch.load(self.net_file))
-            except:
-                warnings.warn('the parameters of neural net model load failed', UserWarning)
+        try:
+            self.net.load_state_dict(torch.load(self.net_file, map_location=self.device))
+        except:
+            warnings.warn('the parameters of neural net model load failed', UserWarning)

     def init_weights(self):
         for m in self.net.modules():
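Two environment-related fixes land here: the device becomes an instance attribute so the tensors built in the training, testing, and predict hunks below can follow the model, and torch.load is given map_location so a checkpoint saved on a CUDA machine still loads on a CPU-only host (without it, PyTorch tries to restore the tensors onto the original GPU and raises an error). The separate os.path.exists guard is folded into the try/except. A self-contained sketch of the same pattern, where the Linear layer is a placeholder for the repository's Net and only the checkpoint path is taken from the diff:

import torch
import torch.nn as nn

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
net = nn.Linear(8, 1).to(device)  # placeholder for the real network
try:
    # map_location remaps GPU-saved tensors onto whatever device is available here
    net.load_state_dict(torch.load("model/net_model.pth", map_location=device))
except (FileNotFoundError, RuntimeError):
    pass  # keep the freshly initialized weights if no usable checkpoint exists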
@@ -128,8 +128,8 @@ class NeuralEstimator(Estimator):
         x_train = np.array(data_mgr.neural_encode(data[0][::data_mgr.get_update_round()]))
         y_train = np.array(data[1][::data_mgr.get_update_round()])
-        x_train = torch.from_numpy(x_train.reshape((-1, np.shape(x_train)[1]))).float().to(device)
-        y_train = torch.from_numpy(y_train.reshape((-1, 1))).float().to(device)
+        x_train = torch.from_numpy(x_train.reshape((-1, np.shape(x_train)[1]))).float().to(self.device)
+        y_train = torch.from_numpy(y_train.reshape((-1, 1))).float().to(self.device)
         optimizer = torch.optim.Adam(self.net.parameters(), lr=params.lr)
         # scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=5000, gamma=0.1)
@@ -161,7 +160,7 @@ class NeuralEstimator(Estimator):
         data = data_mgr.loader('opt/' + params.test_file)
         x_test, y_test = np.array(data_mgr.neural_encode(data[0])), np.array(data[1])
-        x_test = torch.from_numpy(x_test.reshape((-1, np.shape(x_test)[1]))).float().to(device)
+        x_test = torch.from_numpy(x_test.reshape((-1, np.shape(x_test)[1]))).float().to(self.device)
         self.net.eval()
         with torch.no_grad():
@@ -171,7 +170,7 @@ class NeuralEstimator(Estimator):
     def predict(self, cp_points, cp_nozzle, board_width=None, board_height=None):
         assert board_width is not None and board_height is not None
         encoding = np.array(self.data_mgr.encode(cp_points, cp_nozzle, board_width, board_height))
-        encoding = torch.from_numpy(encoding.reshape((-1, np.shape(encoding)[0]))).float().to("cuda")
+        encoding = torch.from_numpy(encoding.reshape((-1, np.shape(encoding)[0]))).float().to(self.device)
         return self.net(encoding)[0, 0].item()
@@ -184,6 +183,8 @@ class HeuristicEstimator(Estimator):
         if os.path.exists(self.pickle_file):
             with open(self.pickle_file, 'rb') as f:
                 self.lr = pickle.load(f)
+        else:
+            warnings.warn('the parameters of heuristic lr model load failed', UserWarning)

     def training(self, params):
         data = data_mgr.loader('opt/' + params.train_file)
@@ -304,6 +305,8 @@ class ReconfigEstimator(Estimator):
         if os.path.exists(self.pickle_file):
             with open(self.pickle_file, 'rb') as f:
                 self.lr = pickle.load(f)
+        else:
+            warnings.warn('the parameters of reconfig model load failed', UserWarning)

     def training(self, params):
         data = data_mgr.loader('opt/' + params.train_file)
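Both HeuristicEstimator and ReconfigEstimator previously stayed silent when their pickled regressor file was missing and simply kept an untrained model; the added else branches make that degraded mode visible with a warning. A standalone sketch of the load-or-warn pattern, in which the file name and the LinearRegression fallback are assumptions rather than the repository's exact values:

import os
import pickle
import warnings
from sklearn.linear_model import LinearRegression

pickle_file = 'model/heuristic_lr_model.pkl'  # assumed path
lr = LinearRegression()                       # untrained fallback
if os.path.exists(pickle_file):
    with open(pickle_file, 'rb') as f:
        lr = pickle.load(f)
else:
    warnings.warn('the parameters of heuristic lr model load failed', UserWarning)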


@@ -411,7 +411,7 @@ def line_optimizer_hyperheuristic(component_data, pcb_data, machine_number):
     best_component_list = component_list.copy()
     machine_cp_points = convert_assignment_result(heuristic_map, cp_index, cp_points, cp_nozzle, cp_feeders,
-                                                  best_component_list, best_heuristic_list, machine_number, is_opt=True)
+                                                  best_component_list, best_heuristic_list, machine_number)
     assignment_result = [[0 for _ in range(len(component_data))] for _ in range(machine_number)]
     for machine_idx in range(machine_number):


@@ -44,7 +44,7 @@ def main():
     parser = argparse.ArgumentParser(description='assembly line optimizer implementation')
     parser.add_argument('--mode', default=1, type=int, help='mode: 0 -directly load pcb data without optimization '
                                                             'for data analysis, 1 -optimize pcb data, 2 -batch test')
-    parser.add_argument('--filename', default='PCB.txt', type=str, help='load pcb data')
+    parser.add_argument('--filename', default='data-1.txt', type=str, help='load pcb data')
     parser.add_argument('--comp_register', default=1, type=int, help='register the component according the pcb data')
     parser.add_argument('--machine_number', default=3, type=int, help='the number of machine in the assembly line')
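The only functional change here is the default PCB data file, which moves from PCB.txt to data-1.txt; the previous file can still be selected explicitly on the command line. A usage sketch, where the script name is a placeholder for whichever module defines this main():

python optimizer.py --mode 1 --filename data-1.txt --machine_number 3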