| | |
| | | return -1; |
| | | } |
| | | |
| | | lock(); |
| | | // Ensure stale transfer state does not leak across runs. |
| | | m_nContinuousTransferStep = CTStep_Unknow; |
| | | m_nContinuousWorkingPort = 0; |
| | | m_nContinuousWorkingSlot = 0; |
| | | if (m_pActiveRobotTask != nullptr) { |
| | | LOGW("<Master>start: stale active robot task found in READY, clearing it."); |
| | | delete m_pActiveRobotTask; |
| | | m_pActiveRobotTask = nullptr; |
| | | } |
| | | unlock(); |
| | | LOGI("<Master>start reset transfer state(step=%d, port=%d, slot=%d).", |
| | | m_nContinuousTransferStep, m_nContinuousWorkingPort, m_nContinuousWorkingSlot); |
| | | |
| | | m_bContinuousTransfer = false; |
| | | m_bBatch = false; |
| | | setState(MASTERSTATE::STARTING); |
| | |
| | | return -1; |
| | | } |
| | | |
| | | lock(); |
| | | // Continuous transfer must start from a clean state every time. |
| | | m_nContinuousTransferStep = CTStep_Unknow; |
| | | m_nContinuousWorkingPort = 0; |
| | | m_nContinuousWorkingSlot = 0; |
| | | if (m_pActiveRobotTask != nullptr) { |
| | | LOGW("<Master>startContinuousTransfer: stale active robot task found in READY, clearing it."); |
| | | delete m_pActiveRobotTask; |
| | | m_pActiveRobotTask = nullptr; |
| | | } |
| | | unlock(); |
| | | LOGI("<Master>startContinuousTransfer reset transfer state(step=%d, port=%d, slot=%d).", |
| | | m_nContinuousTransferStep, m_nContinuousWorkingPort, m_nContinuousWorkingSlot); |
| | | |
| | | m_bContinuousTransfer = true; |
| | | m_bBatch = false; |
| | | setState(MASTERSTATE::STARTING); |
| | |
| | | if (m_state != MASTERSTATE::READY) { |
| | | return -1; |
| | | } |
| | | |
| | | lock(); |
| | | // Keep behavior consistent with other start paths. |
| | | m_nContinuousTransferStep = CTStep_Unknow; |
| | | m_nContinuousWorkingPort = 0; |
| | | m_nContinuousWorkingSlot = 0; |
| | | if (m_pActiveRobotTask != nullptr) { |
| | | LOGW("<Master>startBatch: stale active robot task found in READY, clearing it."); |
| | | delete m_pActiveRobotTask; |
| | | m_pActiveRobotTask = nullptr; |
| | | } |
| | | unlock(); |
| | | LOGI("<Master>startBatch reset transfer state(step=%d, port=%d, slot=%d).", |
| | | m_nContinuousTransferStep, m_nContinuousWorkingPort, m_nContinuousWorkingSlot); |
| | | |
| | | m_bContinuousTransfer = false; |
| | | m_bBatch = true; |
| | |
| | | } |
| | | m_pActiveRobotTask = createTransferTask(pLoadPorts[s], pAligner, primaryType, secondaryType, 1, m_bJobMode); |
| | | if (m_pActiveRobotTask != nullptr) { |
| | | LOGI("<Master>LoadPort->Aligner命中(RUNNING): port=%d, primaryType=%d, preferredPort=%d", |
| | | s + 1, (int)primaryType, preferredPortForPrimary >= 0 ? (preferredPortForPrimary + 1) : 0); |
| | | CGlass* pGlass = (CGlass*)m_pActiveRobotTask->getContext(); |
| | | CProcessJob* pPJ = (pGlass != nullptr) ? pGlass->getProcessJob() : nullptr; |
| | | LOGI("<Master>LoadPort->Aligner命中(RUNNING): port=%d, primaryType=%d, preferredPort=%d, glass=%s, scheduled=%d, pj=%s", |
| | | s + 1, (int)primaryType, preferredPortForPrimary >= 0 ? (preferredPortForPrimary + 1) : 0, |
| | | pGlass != nullptr ? pGlass->getID().c_str() : "", |
| | | (pGlass != nullptr && pGlass->isScheduledForProcessing()) ? 1 : 0, |
| | | pPJ != nullptr ? pPJ->id().c_str() : "NULL"); |
| | | if (pGlass == nullptr) { |
| | | LOGE("<Master>LoadPort->Aligner命中(RUNNING)但context为空,任务已丢弃."); |
| | | delete m_pActiveRobotTask; |
| | | m_pActiveRobotTask = nullptr; |
| | | continue; |
| | | } |
| | | if (pGlass->getBuddy() != nullptr) { |
| | | delete m_pActiveRobotTask; |
| | | m_pActiveRobotTask = nullptr; |
| | |
| | | |
| | | m_pActiveRobotTask = createTransferTask(pLoadPorts[s], pAligner, primaryType, secondaryType, 1, m_bJobMode); |
| | | if (m_pActiveRobotTask != nullptr) { |
| | | LOGI("<Master>LoadPort->Aligner命中(RUNNING_BATCH): port=%d, primaryType=%d, preferredPort=%d", |
| | | s + 1, (int)primaryType, preferredPortForPrimary >= 0 ? (preferredPortForPrimary + 1) : 0); |
| | | auto* pGlass = static_cast<CGlass*>(m_pActiveRobotTask->getContext()); |
| | | CProcessJob* pPJ = (pGlass != nullptr) ? pGlass->getProcessJob() : nullptr; |
| | | LOGI("<Master>LoadPort->Aligner命中(RUNNING_BATCH): port=%d, primaryType=%d, preferredPort=%d, glass=%s, scheduled=%d, pj=%s", |
| | | s + 1, (int)primaryType, preferredPortForPrimary >= 0 ? (preferredPortForPrimary + 1) : 0, |
| | | pGlass != nullptr ? pGlass->getID().c_str() : "", |
| | | (pGlass != nullptr && pGlass->isScheduledForProcessing()) ? 1 : 0, |
| | | pPJ != nullptr ? pPJ->id().c_str() : "NULL"); |
| | | if (pGlass == nullptr) { |
| | | LOGE("<Master>LoadPort->Aligner命中(RUNNING_BATCH)但context为空,任务已丢弃."); |
| | | delete m_pActiveRobotTask; |
| | | m_pActiveRobotTask = nullptr; |
| | | continue; |
| | | } |
| | | if (pGlass->getBuddy() != nullptr) { |
| | | delete m_pActiveRobotTask; m_pActiveRobotTask = nullptr; |
| | | continue; |
| | |
| | | } |
| | | } |
| | | |
// First clear the scheduling marks on the glasses of every port, so stale
// ProcessJob state from a previous run is never carried over.
int clearedGlassCount = 0;
for (int i = 0; i < 4; i++) {  // LOADPORT1..LOADPORT4 have consecutive equipment ids
    auto* pPort = (CLoadPort*)getEquipment(EQ_ID_LOADPORT1 + i);
    if (pPort == nullptr) continue;  // port not configured — skip silently
    for (int slot = 1; slot <= SLOT_MAX; slot++) {  // slots are 1-based
        auto* pGlass = pPort->getGlassFromSlot(slot);
        if (pGlass == nullptr) continue;  // empty slot
        // Detach from any previous PJ and mark as not scheduled.
        pGlass->setProcessJob(nullptr);
        pGlass->setScheduledForProcessing(FALSE);
        clearedGlassCount++;
    }
}
LOGI("<Master>setProcessJobs: cleared scheduling marks on %d glass(es).", clearedGlassCount);
| | | |
// Map a load-port equipment id to a 0-based port index; -1 for non-port ids.
auto toPortIndex = [](int eqid) -> int {
    switch (eqid) {
    case EQ_ID_LOADPORT1: return 0;
    case EQ_ID_LOADPORT2: return 1;
    case EQ_ID_LOADPORT3: return 2;
    case EQ_ID_LOADPORT4: return 3;
    default: return -1;
    }
};

// True if the PJWarp uses the multi-port form (any of the 4 ports selected).
auto hasAnySelectedPorts = [](const PJWarp& warp) -> bool {
    for (int p = 0; p < 4; p++) {
        if (warp.selectedPorts[p]) return true;
    }
    return false;
};

// True if the PJWarp carries legacy single-port data (any of the 8 slot flags set).
auto hasAnyLegacyCheckedSlots = [](const PJWarp& warp) -> bool {
    for (int s = 0; s < 8; s++) {
        if (warp.checkSlot[s]) return true;
    }
    return false;
};
| | | |
| | | // 更新context |
| | | for (auto pj : m_processJobs) { |
| | | for (auto& c : pj->carriers()) { |
| | | auto pPort = getPortWithCarrierId(c.carrierId); |
| | | if (pPort == nullptr) continue; |
| | | const int portIndex = toPortIndex(pPort->getID()); |
| | | |
| | | short downloadMap = 0; |
| | | for (auto s : c.slots) { |
| | |
| | | |
// Rebuild the carrier's slot/context lists while binding each glass to this PJ.
std::vector<uint8_t> newSlots;
std::vector<void*> newContexts;
PJWarp& warp = pj->getPjWarp();
// Precedence: multi-port warp data wins over legacy single-port data;
// if neither is present, fall back to defaults below.
const bool useMultiPortWarp = hasAnySelectedPorts(warp);
const bool useLegacyWarp = !useMultiPortWarp && hasAnyLegacyCheckedSlots(warp);
for (auto s : c.slots) {
    auto pGlass = pPort->getGlassFromSlot(s);
    if (pGlass == nullptr) continue;  // slot listed by the carrier but physically empty

    newSlots.push_back(s);
    newContexts.push_back(pGlass);

    // Default: when the Host download carries no PJWarp detail, every
    // carrier slot is treated as to-be-processed.
    BOOL scheduled = TRUE;
    // Material defaults to the glass's current type: 2 for G2, else 1.
    int material = (pGlass->getType() == MaterialsType::G2) ? 2 : 1;

    if (1 <= s && s <= 8) {  // PJWarp only describes slots 1..8
        if (useMultiPortWarp && 0 <= portIndex && portIndex < 4 && warp.selectedPorts[portIndex]) {
            scheduled = warp.checkSlots[portIndex][s - 1];
            material = warp.materialSlots[portIndex][s - 1];
        }
        else if (useLegacyWarp) {
            scheduled = warp.checkSlot[s - 1];
            material = warp.material[s - 1];
        }
    }
    // Normalize: anything other than 2 is treated as material type 1.
    material = (material == 2) ? 2 : 1;

    pGlass->setProcessJob(pj);
    pGlass->setScheduledForProcessing(scheduled);
    pGlass->setType(static_cast<MaterialsType>(material));
    LOGI("<Master>setProcessJobs bind: pj=%s, port=%d, slot=%d, glass=%s, scheduled=%d, material=%d",
        pj->id().c_str(), portIndex + 1, (int)s, pGlass->getID().c_str(), scheduled ? 1 : 0, material);
}

// Commit the rebuilt slot list and glass contexts back onto the PJ.
pj->setCarrierSlotsAndContexts(c.carrierId, newSlots, newContexts);
| | |
| | | } |
m_pControlJob->setPJs(tempPjs);

// Rebuild the runtime PJ caches. Without this, a restart would rely solely
// on Queued-state selection and fail to locate the PJ that was in progress.
m_inProcesJobs.clear();
m_completeProcessJobs.clear();
m_queueGlasses.clear();
m_inProcesGlasses.clear();
m_completeGlasses.clear();
for (auto pj : tempPjs) {
    if (pj == nullptr) continue;  // defensive: restored list may contain null entries
    switch (pj->state()) {
    // Active (or temporarily paused) jobs go back into the in-process cache.
    case PJState::InProcess:
    case PJState::Paused:
        m_inProcesJobs.push_back(pj);
        break;
    // Any terminal state counts as complete for cache purposes.
    case PJState::Completed:
    case PJState::Aborted:
    case PJState::Failed:
        m_completeProcessJobs.push_back(pj);
        break;
    // Queued and other states stay out of both caches; they are picked up
    // later by the normal acquisition path.
    default:
        break;
    }
}
LOGI("<Master>loadState: ProcessJob rebuild done. total=%d, inProcess=%d, complete=%d",
    (int)tempPjs.size(),
    (int)m_inProcesJobs.size(),
    (int)m_completeProcessJobs.size());
| | | |
| | | |
| | | // 更新contexts |
| | | auto pjs = m_pControlJob->getPjs(); |
| | |
| | | |
| | | CProcessJob* CMaster::acquireNextProcessJob() |
| | | { |
| | | if (m_pControlJob == nullptr) return nullptr; |
| | | auto& pjs = m_pControlJob->getPjs(); |
| | | for (const auto pj : pjs) { |
| | | if (pj->state() == PJState::Queued) { |
| | | LOGI("<Master>acquireNextProcessJob: start queued PJ(%s)", pj->id().c_str()); |
| | | pj->start(); |
| | | return pj; |
| | | } |
| | | } |
| | | |
| | | // 若没有 Queued,继续复用已经在制/暂停的 PJ(例如 loadState 恢复后的场景)。 |
| | | for (const auto pj : pjs) { |
| | | if (pj->state() == PJState::InProcess || pj->state() == PJState::Paused) { |
| | | LOGI("<Master>acquireNextProcessJob: reuse PJ(%s), state=%d", |
| | | pj->id().c_str(), (int)pj->state()); |
| | | return pj; |
| | | } |
| | | } |
| | | |
| | | |
| | | return nullptr; |
| | | } |