Classic MapReduce (MapReduce 1) - Job Assignment

 

Tasktrackers run a simple loop that periodically sends heartbeat method calls to the jobtracker.
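
Before diving into the full method, it may help to see the shape of that loop in isolation. The sketch below is a hypothetical simplification in plain Java (sendHeartbeat and applyActions are placeholder names, not Hadoop APIs); the real offerService() that follows adds version checks, disk-health checks and error handling on top of this skeleton.

  // A stripped-down, hypothetical sketch of the tasktracker service loop.
  // sendHeartbeat() and applyActions() are placeholders, not Hadoop methods.
  public class HeartbeatLoopSketch {
    private volatile boolean running = true;
    private long heartbeatIntervalMs = 3000;   // the jobtracker can adjust this

    void offerService() throws InterruptedException {
      while (running) {
        Thread.sleep(heartbeatIntervalMs);     // wait until the next heartbeat is due
        long nextInterval = sendHeartbeat();   // RPC: report status, receive directives
        heartbeatIntervalMs = nextInterval;    // adopt the interval chosen by the jobtracker
        applyActions();                        // launch/commit/kill tasks as instructed
      }
    }

    // Stand-ins for the real RPC call and action-processing code.
    private long sendHeartbeat() { return heartbeatIntervalMs; }
    private void applyActions() { }
  }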

  /**
   * Main service loop.  Will stay in this loop forever.
   */
  State offerService() throws Exception {
    long lastHeartbeat = System.currentTimeMillis();

    while (running && !shuttingDown) {
      try {
        long now = System.currentTimeMillis();
        
        // accelerate to account for multiple finished tasks up-front
        long remaining = 
          (lastHeartbeat + getHeartbeatInterval(finishedCount.get())) - now;
        while (remaining > 0) {
          // sleeps for the wait time or 
          // until there are *enough* empty slots to schedule tasks
          synchronized (finishedCount) {
            finishedCount.wait(remaining);
            
            // Recompute
            now = System.currentTimeMillis();
            remaining = 
              (lastHeartbeat + getHeartbeatInterval(finishedCount.get())) - now;
            
            if (remaining <= 0) {
              // Reset count 
              finishedCount.set(0);
              break;
            }
          }
        }

        // If the TaskTracker is just starting up:
        // 1. Verify the versions matches with the JobTracker
        // 2. Get the system directory & filesystem
        if(justInited) {
          String jtBuildVersion = jobClient.getBuildVersion();
          String jtVersion = jobClient.getVIVersion();
          if (!isPermittedVersion(jtBuildVersion, jtVersion)) {
            String msg = "Shutting down. Incompatible buildVersion." +
              "\nJobTracker's: " + jtBuildVersion + 
              "\nTaskTracker's: "+ VersionInfo.getBuildVersion() +
              " and " + CommonConfigurationKeys.HADOOP_RELAXED_VERSION_CHECK_KEY +
              " is " + (relaxedVersionCheck ? "enabled" : "not enabled");
            LOG.fatal(msg);
            try {
              jobClient.reportTaskTrackerError(taskTrackerName, null, msg);
            } catch(Exception e ) {
              LOG.info("Problem reporting to jobtracker: " + e);
            }
            return State.DENIED;
          }
          
          String dir = jobClient.getSystemDir();
          while (dir == null) {
            LOG.info("Failed to get system directory...");
            
            // Re-try
            try {
              // Sleep interval: 1000 ms - 5000 ms
              int sleepInterval = 1000 + r.nextInt(4000);
              Thread.sleep(sleepInterval);
            } catch (InterruptedException ie) 
            {}
            dir = jobClient.getSystemDir();
          }
          systemDirectory = new Path(dir);
          systemFS = systemDirectory.getFileSystem(fConf);
        }

        now = System.currentTimeMillis();
        if (now > (lastCheckDirsTime + diskHealthCheckInterval)) {
          localStorage.checkDirs();
          lastCheckDirsTime = now;
          int numFailures = localStorage.numFailures();
          // Re-init the task tracker if there were any new failures
          if (numFailures > lastNumFailures) {
            lastNumFailures = numFailures;
            return State.STALE;
          }
        }

        // Send the heartbeat and process the jobtracker's directives
        HeartbeatResponse heartbeatResponse = transmitHeartBeat(now);

        // Note the time when the heartbeat returned, use this to decide when to send the
        // next heartbeat   
        lastHeartbeat = System.currentTimeMillis();
        
        // Check if the map-event list needs purging
        Set<JobID> jobs = heartbeatResponse.getRecoveredJobs();
        if (jobs.size() > 0) {
          synchronized (this) {
            // purge the local map events list
            for (JobID job : jobs) {
              RunningJob rjob;
              synchronized (runningJobs) {
                rjob = runningJobs.get(job);          
                if (rjob != null) {
                  synchronized (rjob) {
                    FetchStatus f = rjob.getFetchStatus();
                    if (f != null) {
                      f.reset();
                    }
                  }
                }
              }
            }

            // Mark the reducers in shuffle for rollback
            synchronized (shouldReset) {
              for (Map.Entry<TaskAttemptID, TaskInProgress> entry 
                   : runningTasks.entrySet()) {
                if (entry.getValue().getStatus().getPhase() == Phase.SHUFFLE) {
                  this.shouldReset.add(entry.getKey());
                }
              }
            }
          }
        }
        
        TaskTrackerAction[] actions = heartbeatResponse.getActions();
        if(LOG.isDebugEnabled()) {
          LOG.debug("Got heartbeatResponse from JobTracker with responseId: " + 
                    heartbeatResponse.getResponseId() + " and " + 
                    ((actions != null) ? actions.length : 0) + " actions");
        }
        if (reinitTaskTracker(actions)) {
          return State.STALE;
        }
            
        // resetting heartbeat interval from the response.
        heartbeatInterval = heartbeatResponse.getHeartbeatInterval();
        justStarted = false;
        justInited = false;
        if (actions != null){ 
          for(TaskTrackerAction action: actions) {
            if (action instanceof LaunchTaskAction) {
              addToTaskQueue((LaunchTaskAction)action);
            } else if (action instanceof CommitTaskAction) {
              CommitTaskAction commitAction = (CommitTaskAction)action;
              if (!commitResponses.contains(commitAction.getTaskID())) {
                LOG.info("Received commit task action for " + 
                          commitAction.getTaskID());
                commitResponses.add(commitAction.getTaskID());
              }
            } else {
              addActionToCleanup(action);
            }
          }
        }
        markUnresponsiveTasks();
        killOverflowingTasks();
            
        //we've cleaned up, resume normal operation
        if (!acceptNewTasks && isIdle()) {
          acceptNewTasks=true;
        }
        //The check below may not be required every iteration but we are 
        //erring on the side of caution here. We have seen many cases where
        //the call to jetty's getLocalPort() returns different values at 
        //different times. Being a real paranoid here.
        checkJettyPort(server.getPort());
      } catch (InterruptedException ie) {
        LOG.info("Interrupted. Closing down.");
        return State.INTERRUPTED;
      } catch (DiskErrorException de) {
        String msg = "Exiting task tracker for disk error:\n" +
          StringUtils.stringifyException(de);
        LOG.error(msg);
        synchronized (this) {
          jobClient.reportTaskTrackerError(taskTrackerName, 
                                           "DiskErrorException", msg);
        }
        // If we caught a DEE here we have no good dirs, therefore shutdown.
        return State.DENIED;
      } catch (RemoteException re) {
        String reClass = re.getClassName();
        if (DisallowedTaskTrackerException.class.getName().equals(reClass)) {
          LOG.info("Tasktracker disallowed by JobTracker.");
          return State.DENIED;
        }
      } catch (Exception except) {
        String msg = "Caught exception: " + 
          StringUtils.stringifyException(except);
        LOG.error(msg);
      }
    }

    return State.NORMAL;
  }

 Heartbeats tell the jobtracker that a tasktracker is alive, but they also double as a channel for messages. As a part of the heartbeat, a tasktracker will indicate whether it is ready to run a new task, and if it is, the jobtracker will allocate it a task, which it communicates to the tasktracker using the heartbeat return value.

  /**
   * The periodic heartbeat mechanism between the {@link TaskTracker} and
   * the {@link JobTracker}.
   * 
   * The {@link JobTracker} processes the status information sent by the 
   * {@link TaskTracker} and responds with instructions to start/stop 
   * tasks or jobs, and also 'reset' instructions during contingencies. 
   */
  public synchronized HeartbeatResponse heartbeat(TaskTrackerStatus status, 
                                                  boolean restarted,
                                                  boolean initialContact,
                                                  boolean acceptNewTasks, 
                                                  short responseId) 
    throws IOException {
    if (LOG.isDebugEnabled()) {
      LOG.debug("Got heartbeat from: " + status.getTrackerName() + 
                " (restarted: " + restarted + 
                " initialContact: " + initialContact + 
                " acceptNewTasks: " + acceptNewTasks + ")" +
                " with responseId: " + responseId);
    }

    // Make sure heartbeat is from a tasktracker allowed by the jobtracker.
    if (!acceptTaskTracker(status)) {
      throw new DisallowedTaskTrackerException(status);
    }

    // First check if the last heartbeat response got through
    String trackerName = status.getTrackerName();
    long now = clock.getTime();
    if (restarted) {
      faultyTrackers.markTrackerHealthy(status.getHost());
    } else {
      faultyTrackers.checkTrackerFaultTimeout(status.getHost(), now);
    }
    
    HeartbeatResponse prevHeartbeatResponse =
      trackerToHeartbeatResponseMap.get(trackerName);
    boolean addRestartInfo = false;

    if (initialContact != true) {
      // If this isn't the 'initial contact' from the tasktracker,
      // there is something seriously wrong if the JobTracker has
      // no record of the 'previous heartbeat'; if so, ask the 
      // tasktracker to re-initialize itself.
      if (prevHeartbeatResponse == null) {
        // This is the first heartbeat from the old tracker to the newly 
        // started JobTracker
        if (hasRestarted()) {
          addRestartInfo = true;
          // inform the recovery manager about this tracker joining back
          recoveryManager.unMarkTracker(trackerName);
        } else {
          // Jobtracker might have restarted but no recovery is needed
          // otherwise this code should not be reached
          LOG.warn("Serious problem, cannot find record of 'previous' " +
                   "heartbeat for '" + trackerName + 
                   "'; reinitializing the tasktracker");
          return new HeartbeatResponse(responseId, 
              new TaskTrackerAction[] {new ReinitTrackerAction()});
        }

      } else {
                
        // It is completely safe to not process a 'duplicate' heartbeat from a 
        // {@link TaskTracker} since it resends the heartbeat when rpcs are 
        // lost see {@link TaskTracker.transmitHeartbeat()};
        // acknowledge it by re-sending the previous response to let the 
        // {@link TaskTracker} go forward. 
        if (prevHeartbeatResponse.getResponseId() != responseId) {
          LOG.info("Ignoring 'duplicate' heartbeat from '" + 
              trackerName + "'; resending the previous 'lost' response");
          return prevHeartbeatResponse;
        }
      }
    }
      
    // Process this heartbeat 
    short newResponseId = (short)(responseId + 1);
    status.setLastSeen(now);
    if (!processHeartbeat(status, initialContact, now)) {
      if (prevHeartbeatResponse != null) {
        trackerToHeartbeatResponseMap.remove(trackerName);
      }
      return new HeartbeatResponse(newResponseId, 
                   new TaskTrackerAction[] {new ReinitTrackerAction()});
    }
      
    // Initialize the response to be sent for the heartbeat
    HeartbeatResponse response = new HeartbeatResponse(newResponseId, null);
    List<TaskTrackerAction> actions = new ArrayList<TaskTrackerAction>();
    boolean isBlacklisted = faultyTrackers.isBlacklisted(status.getHost());
    // Check for new tasks to be executed on the tasktracker
    if (recoveryManager.shouldSchedule() && acceptNewTasks && !isBlacklisted) {
      TaskTrackerStatus taskTrackerStatus = getTaskTrackerStatus(trackerName);
      if (taskTrackerStatus == null) {
        LOG.warn("Unknown task tracker polling; ignoring: " + trackerName);
      } else {
        List<Task> tasks = getSetupAndCleanupTasks(taskTrackerStatus);
        if (tasks == null ) {
          tasks = taskScheduler.assignTasks(taskTrackers.get(trackerName));
        }
        if (tasks != null) {
          for (Task task : tasks) {
            expireLaunchingTasks.addNewTask(task.getTaskID());
            if(LOG.isDebugEnabled()) {
              LOG.debug(trackerName + " -> LaunchTask: " + task.getTaskID());
            }
            actions.add(new LaunchTaskAction(task));
          }
        }
      }
    }
      
    // Check for tasks to be killed
    List<TaskTrackerAction> killTasksList = getTasksToKill(trackerName);
    if (killTasksList != null) {
      actions.addAll(killTasksList);
    }
     
    // Check for jobs to be killed/cleanedup
    List<TaskTrackerAction> killJobsList = getJobsForCleanup(trackerName);
    if (killJobsList != null) {
      actions.addAll(killJobsList);
    }

    // Check for tasks whose outputs can be saved
    List<TaskTrackerAction> commitTasksList = getTasksToSave(status);
    if (commitTasksList != null) {
      actions.addAll(commitTasksList);
    }

    // calculate next heartbeat interval and put in heartbeat response
    int nextInterval = getNextHeartbeatInterval();
    response.setHeartbeatInterval(nextInterval);
    response.setActions(
                        actions.toArray(new TaskTrackerAction[actions.size()]));
    
    // check if the restart info is req
    if (addRestartInfo) {
      response.setRecoveredJobs(recoveryManager.getJobsToRecover());
    }
        
    // Update the trackerToHeartbeatResponseMap
    trackerToHeartbeatResponseMap.put(trackerName, response);

    // Done processing the hearbeat, now remove 'marked' tasks
    removeMarkedTasks(trackerName);
        
    return response;
  }
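
One detail in heartbeat() that is easy to miss is the responseId bookkeeping. A heartbeat response can be lost on the wire, in which case the tasktracker resends the heartbeat with the same responseId; because the jobtracker caches the last response it sent to each tracker, it can simply resend that cached response instead of assigning work twice. The following is a minimal, hypothetical sketch of that acknowledgement scheme (it ignores initial contact and re-initialization, and the Response class is a placeholder, not the Hadoop HeartbeatResponse):

  import java.util.HashMap;
  import java.util.Map;

  // Hypothetical illustration of the responseId acknowledgement scheme.
  public class HeartbeatAckSketch {
    static class Response {
      final short responseId;
      Response(short responseId) { this.responseId = responseId; }
    }

    private final Map<String, Response> lastResponse = new HashMap<String, Response>();

    Response heartbeat(String trackerName, short responseId) {
      Response prev = lastResponse.get(trackerName);
      if (prev != null && prev.responseId != responseId) {
        // The tracker never saw our last response (its responseId is stale),
        // so resend the cached response rather than processing the heartbeat again.
        return prev;
      }
      // Process the heartbeat and acknowledge it by bumping the responseId.
      Response next = new Response((short) (responseId + 1));
      lastResponse.put(trackerName, next);
      return next;
    }
  }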
  

 Below is the default implementation of assignTasks in Hadoop, found in JobQueueTaskScheduler (you can choose among several TaskScheduler implementations, such as the fair scheduler). The default scheduler simply maintains a priority-ordered list of jobs; the tasks to run are communicated to the tasktracker in the heartbeat return value.

  /**
   * Returns the tasks we'd like the TaskTracker to execute right now.
   * 
   * @param taskTracker The TaskTracker for which we're looking for tasks.
   * @return A list of tasks to run on that TaskTracker, possibly empty.
   */
  @Override
  public synchronized List<Task> assignTasks(TaskTracker taskTracker)
      throws IOException {
    // Check for JT safe-mode
    if (taskTrackerManager.isInSafeMode()) {
      LOG.info("JobTracker is in safe-mode, not scheduling any tasks.");
      return null;
    } 

    TaskTrackerStatus taskTrackerStatus = taskTracker.getStatus(); 
    ClusterStatus clusterStatus = taskTrackerManager.getClusterStatus();
    final int numTaskTrackers = clusterStatus.getTaskTrackers();
    final int clusterMapCapacity = clusterStatus.getMaxMapTasks();
    final int clusterReduceCapacity = clusterStatus.getMaxReduceTasks();

    Collection<JobInProgress> jobQueue =
      jobQueueJobInProgressListener.getJobQueue();

    //
    // Get map + reduce counts for the current tracker.
    //
    final int trackerMapCapacity = taskTrackerStatus.getMaxMapSlots();
    final int trackerReduceCapacity = taskTrackerStatus.getMaxReduceSlots();
    final int trackerRunningMaps = taskTrackerStatus.countMapTasks();
    final int trackerRunningReduces = taskTrackerStatus.countReduceTasks();

    // Assigned tasks
    List<Task> assignedTasks = new ArrayList<Task>();

    //
    // Compute (running + pending) map and reduce task numbers across pool
    //
    int remainingReduceLoad = 0;
    int remainingMapLoad = 0;
    synchronized (jobQueue) {
      for (JobInProgress job : jobQueue) {
        if (job.getStatus().getRunState() == JobStatus.RUNNING) {
          remainingMapLoad += (job.desiredMaps() - job.finishedMaps());
          if (job.scheduleReduces()) {
            remainingReduceLoad += 
              (job.desiredReduces() - job.finishedReduces());
          }
        }
      }
    }

    // Compute the 'load factor' for maps and reduces
    double mapLoadFactor = 0.0;
    if (clusterMapCapacity > 0) {
      mapLoadFactor = (double)remainingMapLoad / clusterMapCapacity;
    }
    double reduceLoadFactor = 0.0;
    if (clusterReduceCapacity > 0) {
      reduceLoadFactor = (double)remainingReduceLoad / clusterReduceCapacity;
    }
        
    //
    // In the below steps, we allocate first map tasks (if appropriate),
    // and then reduce tasks if appropriate.  We go through all jobs
    // in order of job arrival; jobs only get serviced if their 
    // predecessors are serviced, too.
    //

    //
    // We assign tasks to the current taskTracker if the given machine 
    // has a workload that's less than the maximum load of that kind of
    // task.
    // However, if the cluster is close to getting loaded i.e. we don't
    // have enough _padding_ for speculative executions etc., we only 
    // schedule the "highest priority" task i.e. the task from the job 
    // with the highest priority.
    //
    
    final int trackerCurrentMapCapacity = 
      Math.min((int)Math.ceil(mapLoadFactor * trackerMapCapacity), 
                              trackerMapCapacity);
    int availableMapSlots = trackerCurrentMapCapacity - trackerRunningMaps;
    boolean exceededMapPadding = false;
    if (availableMapSlots > 0) {
      exceededMapPadding = 
        exceededPadding(true, clusterStatus, trackerMapCapacity);
    }
    
    int numLocalMaps = 0;
    int numNonLocalMaps = 0;
    scheduleMaps:
    for (int i=0; i < availableMapSlots; ++i) {
      synchronized (jobQueue) {
        for (JobInProgress job : jobQueue) {
          if (job.getStatus().getRunState() != JobStatus.RUNNING) {
            continue;
          }

          Task t = null;
          
          // Try to schedule a node-local or rack-local Map task
          t = 
            job.obtainNewNodeOrRackLocalMapTask(taskTrackerStatus, 
                numTaskTrackers, taskTrackerManager.getNumberOfUniqueHosts());
          if (t != null) {
            assignedTasks.add(t);
            ++numLocalMaps;
            
            // Don't assign map tasks to the hilt!
            // Leave some free slots in the cluster for future task-failures,
            // speculative tasks etc. beyond the highest priority job
            if (exceededMapPadding) {
              break scheduleMaps;
            }
           
            // Try all jobs again for the next Map task 
            break;
          }
          
          // Try to schedule a node-local or rack-local Map task
          t = 
            job.obtainNewNonLocalMapTask(taskTrackerStatus, numTaskTrackers,
                                   taskTrackerManager.getNumberOfUniqueHosts());
          
          if (t != null) {
            assignedTasks.add(t);
            ++numNonLocalMaps;
            
            // We assign at most 1 off-switch or speculative task
            // This is to prevent TaskTrackers from stealing local-tasks
            // from other TaskTrackers.
            break scheduleMaps;
          }
        }
      }
    }
    int assignedMaps = assignedTasks.size();

    //
    // Same thing, but for reduce tasks
    // However we _never_ assign more than 1 reduce task per heartbeat
    //
    final int trackerCurrentReduceCapacity = 
      Math.min((int)Math.ceil(reduceLoadFactor * trackerReduceCapacity), 
               trackerReduceCapacity);
    final int availableReduceSlots = 
      Math.min((trackerCurrentReduceCapacity - trackerRunningReduces), 1);
    boolean exceededReducePadding = false;
    if (availableReduceSlots > 0) {
      exceededReducePadding = exceededPadding(false, clusterStatus, 
                                              trackerReduceCapacity);
      synchronized (jobQueue) {
        for (JobInProgress job : jobQueue) {
          if (job.getStatus().getRunState() != JobStatus.RUNNING ||
              job.numReduceTasks == 0) {
            continue;
          }

          Task t = 
            job.obtainNewReduceTask(taskTrackerStatus, numTaskTrackers, 
                                    taskTrackerManager.getNumberOfUniqueHosts()
                                    );
          if (t != null) {
            assignedTasks.add(t);
            break;
          }
          
          // Don't assign reduce tasks to the hilt!
          // Leave some free slots in the cluster for future task-failures,
          // speculative tasks etc. beyond the highest priority job
          if (exceededReducePadding) {
            break;
          }
        }
      }
    }
    
    if (LOG.isDebugEnabled()) {
      LOG.debug("Task assignments for " + taskTrackerStatus.getTrackerName() + " --> " +
                "[" + mapLoadFactor + ", " + trackerMapCapacity + ", " + 
                trackerCurrentMapCapacity + ", " + trackerRunningMaps + "] -> [" + 
                (trackerCurrentMapCapacity - trackerRunningMaps) + ", " +
                assignedMaps + " (" + numLocalMaps + ", " + numNonLocalMaps + 
                ")] [" + reduceLoadFactor + ", " + trackerReduceCapacity + ", " + 
                trackerCurrentReduceCapacity + "," + trackerRunningReduces + 
                "] -> [" + (trackerCurrentReduceCapacity - trackerRunningReduces) + 
                ", " + (assignedTasks.size()-assignedMaps) + "]");
    }

    return assignedTasks;
  }
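
To make the capacity arithmetic above concrete, here is a small worked example with illustrative numbers (they are not taken from the source): a cluster with 100 map slots and 40 map tasks still to run gives mapLoadFactor = 0.4, so a tracker with 4 map slots is throttled to ceil(0.4 * 4) = 2 slots this heartbeat, and with 1 map already running it is offered 1 task.

  // Illustrative numbers only: 100 cluster map slots, 40 remaining map tasks,
  // a tracker with 4 map slots of which 1 is already running a map task.
  public class LoadFactorExample {
    public static void main(String[] args) {
      double mapLoadFactor = 40.0 / 100.0;      // remaining load / cluster capacity = 0.4
      int trackerMapCapacity = 4;
      int trackerCurrentMapCapacity =
          Math.min((int) Math.ceil(mapLoadFactor * trackerMapCapacity),  // ceil(1.6) = 2
                   trackerMapCapacity);
      int trackerRunningMaps = 1;
      int availableMapSlots = trackerCurrentMapCapacity - trackerRunningMaps;
      System.out.println("map slots offered this heartbeat: " + availableMapSlots);  // 1
    }
  }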

 Tasktrackers have a fixed number of slots for map tasks and a fixed number for reduce tasks, and the two are set independently. The default scheduler fills empty map task slots before reduce task slots (see the map- and reduce-assignment sections of assignTasks() above), so if the tasktracker has at least one empty map task slot, the jobtracker will select a map task; otherwise it will select a reduce task.


 Whichever scheduler is in use, the precise number of slots on a tasktracker is a configuration choice; it typically depends on the number of cores and the amount of memory on that machine.
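
The slot counts themselves come from the tasktracker's configuration rather than from the scheduler. Below is a minimal sketch of reading them, assuming the classic MapReduce property names mapred.tasktracker.map.tasks.maximum and mapred.tasktracker.reduce.tasks.maximum (with their usual default of 2):

  import org.apache.hadoop.conf.Configuration;

  // A minimal sketch: slot counts are per-tasktracker configuration values.
  public class SlotConfigSketch {
    public static void main(String[] args) {
      Configuration conf = new Configuration();
      int maxMapSlots = conf.getInt("mapred.tasktracker.map.tasks.maximum", 2);
      int maxReduceSlots = conf.getInt("mapred.tasktracker.reduce.tasks.maximum", 2);
      System.out.println("map slots = " + maxMapSlots +
                         ", reduce slots = " + maxReduceSlots);
    }
  }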

 

By looking at the default scheduler's source code, we can see that a map task may be data-local, rack-local, or non-local, whereas for a reduce task the jobtracker simply takes the next one from its list of yet-to-be-run reduce tasks, since there are no data locality considerations for reduces. You can tell the proportion of each type of map task by looking at a job's counters.
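
For example, after a job finishes you can read its counters through the old mapred client API to see the locality split. This is only a sketch: the job id is a placeholder, and the counter group and names ("org.apache.hadoop.mapred.JobInProgress$Counter", DATA_LOCAL_MAPS, RACK_LOCAL_MAPS, TOTAL_LAUNCHED_MAPS) are assumptions that may differ slightly between Hadoop 1.x releases.

  import org.apache.hadoop.mapred.Counters;
  import org.apache.hadoop.mapred.JobClient;
  import org.apache.hadoop.mapred.JobConf;
  import org.apache.hadoop.mapred.JobID;
  import org.apache.hadoop.mapred.RunningJob;

  // Sketch of reading map-locality counters for a finished job.
  public class LocalityCountersSketch {
    public static void main(String[] args) throws Exception {
      JobClient client = new JobClient(new JobConf());
      RunningJob job = client.getJob(JobID.forName("job_201307010001_0001"));  // placeholder id
      Counters counters = job.getCounters();
      String group = "org.apache.hadoop.mapred.JobInProgress$Counter";         // assumed group name
      long dataLocal = counters.getGroup(group).getCounter("DATA_LOCAL_MAPS");
      long rackLocal = counters.getGroup(group).getCounter("RACK_LOCAL_MAPS");
      long launched  = counters.getGroup(group).getCounter("TOTAL_LAUNCHED_MAPS");
      System.out.println("data-local: " + dataLocal + ", rack-local: " + rackLocal +
                         ", total launched maps: " + launched);
    }
  }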
