Spark Source Code Analysis: schedule()


Driver scheduling source code

private def schedule() {
  // If this master is a standby master it does nothing and returns immediately;
  // a standby master takes no part in scheduling.
  if (state != RecoveryState.ALIVE) { return }
  // First schedule drivers, they take strict precedence over applications
  // Randomization helps balance drivers
  // Keep only the workers whose state is ALIVE and shuffle them randomly
  // (the shuffle implementation is shown below); after Random.shuffle the
  // elements of shuffledAliveWorkers are in random order.
  val shuffledAliveWorkers = Random.shuffle(workers.toSeq.filter(_.state == WorkerState.ALIVE))
  // Number of alive workers
  val numWorkersAlive = shuffledAliveWorkers.size
  var curPos = 0
  // Schedule the drivers first: iterate over every driver in the waitingDrivers queue
  for (driver <- waitingDrivers.toList) { // iterate over a copy of waitingDrivers
    // We assign workers to each waiting driver in a round-robin fashion. For each driver, we
    // start from the last worker that was assigned a driver, and continue onwards until we have
    // explored all alive workers.
    // Marks whether this driver has been scheduled
    var launched = false
    // Number of workers visited so far
    var numWorkersVisited = 0
    // Keep looping while there are still alive workers we have not visited
    // and this driver has not been scheduled yet
    while (numWorkersVisited < numWorkersAlive && !launched) {
      // Take the worker at position curPos (curPos starts at 0)
      val worker = shuffledAliveWorkers(curPos)
      // One more worker visited
      numWorkersVisited += 1
      // If this worker can satisfy the driver's memory and core requirements
      if (worker.memoryFree >= driver.desc.mem && worker.coresFree >= driver.desc.cores) {
        // Launch the driver on this worker
        launchDriver(worker, driver)
        // Remove the driver from the waiting queue
        waitingDrivers -= driver
        // Mark the driver as launched
        launched = true
      }
      // Move the pointer one position forward (round-robin)
      curPos = (curPos + 1) % numWorkersAlive
    }
  }
}
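To see the round-robin driver placement in isolation, here is a minimal, self-contained sketch. The Worker and Driver case classes, their fields, and placeDrivers are simplified stand-ins invented for illustration, not Spark's actual classes.

import scala.util.Random

// Simplified stand-ins for WorkerInfo and DriverDescription (illustration only)
case class Worker(id: String, var memoryFree: Int, var coresFree: Int)
case class Driver(id: String, mem: Int, cores: Int)

// Assign each driver to the first worker, in shuffled round-robin order,
// that has enough free memory and cores, mirroring the loop in schedule()
def placeDrivers(workers: Seq[Worker], drivers: Seq[Driver]): Map[String, String] = {
  val alive = Random.shuffle(workers)
  var curPos = 0
  var placements = Map.empty[String, String]
  for (driver <- drivers) {
    var launched = false
    var visited = 0
    while (visited < alive.size && !launched) {
      val w = alive(curPos)
      visited += 1
      if (w.memoryFree >= driver.mem && w.coresFree >= driver.cores) {
        w.memoryFree -= driver.mem   // the bookkeeping addDriver() does in Spark
        w.coresFree -= driver.cores
        placements += (driver.id -> w.id)
        launched = true
      }
      curPos = (curPos + 1) % alive.size
    }
  }
  placements
}

// Example: two drivers spread across three workers
val ws = Seq(Worker("w1", 4096, 4), Worker("w2", 4096, 4), Worker("w3", 2048, 2))
println(placeDrivers(ws, Seq(Driver("d1", 1024, 2), Driver("d2", 1024, 2))))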

shuffle source code

def shuffle[T, CC[X] <: TraversableOnce[X]](xs: CC[T])(implicit bf: CanBuildFrom[CC[T], T, CC[T]]): CC[T] = {
  val buf = new ArrayBuffer[T] ++= xs
  def swap(i1: Int, i2: Int) {
    val tmp = buf(i1)
    buf(i1) = buf(i2)
    buf(i2) = tmp
  }
  // n starts at the end of the buffer, and k is drawn from [0, n);
  // swapping buf(n - 1) with buf(k) and moving n backwards one step at a time
  // shuffles every element of buf (the Fisher-Yates shuffle).
  for (n <- buf.length to 2 by -1) {
    val k = nextInt(n)
    swap(n - 1, k)
  }
  (bf(xs) ++= buf).result
}
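For reference, a small usage example of Random.shuffle as seen from the caller's side; the sample output in the comment is illustrative only.

import scala.util.Random

// Shuffling preserves the elements and the collection type; only the order changes.
val xs = List(1, 2, 3, 4, 5)
val shuffled: List[Int] = Random.shuffle(xs)   // e.g. List(3, 1, 5, 2, 4)
assert(shuffled.sorted == xs.sorted)

// This is the same call schedule() makes on the ALIVE workers:
// Random.shuffle(workers.toSeq.filter(_.state == WorkerState.ALIVE))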

launchDriver source code

def launchDriver(worker: WorkerInfo, driver: DriverInfo) {
  logInfo("Launching driver " + driver.id + " on worker " + worker.id)
  // Record the driver in the worker's in-memory bookkeeping
  worker.addDriver(driver)
  driver.worker = Some(worker) // the driver now has a worker assigned
  // Send a LaunchDriver message to the worker
  worker.actor ! LaunchDriver(driver.id, driver.desc)
  driver.state = DriverState.RUNNING
}

addDriver() source code

def addDriver(driver: DriverInfo) {
  // Register the driver on this worker and account for the memory and cores it uses
  drivers(driver.id) = driver
  memoryUsed += driver.desc.mem
  coresUsed += driver.desc.cores
}

The Application scheduling mechanism, the most important part

if (spreadOutApps) {
  // Try to spread out each app among all the nodes, until it has all its cores
  // Iterate over the apps in waitingApps that still need cores
  for (app <- waitingApps if app.coresLeft > 0) {
    // Take the ALIVE workers, keep only those this app can use,
    // and sort them by free cores in descending order
    val usableWorkers = workers.toArray.filter(_.state == WorkerState.ALIVE)
      .filter(canUse(app, _)).sortBy(_.coresFree).reverse
    // Number of usable workers
    val numUsable = usableWorkers.length
    // Array recording how many cores to assign on each worker, initially all 0
    val assigned = new Array[Int](numUsable) // Number of cores to give on each node
    // Cores to assign: the smaller of what the app still needs
    // and the total free cores across the usable workers
    var toAssign = math.min(app.coresLeft, usableWorkers.map(_.coresFree).sum)
    var pos = 0
    // Hand out the cores one at a time, round-robin over the usable workers
    while (toAssign > 0) {
      if (usableWorkers(pos).coresFree - assigned(pos) > 0) {
        toAssign -= 1
        assigned(pos) += 1
      }
      pos = (pos + 1) % numUsable
    }
    // Up to this point we have only decided how many cores each worker gets;
    // now the executors are actually launched
    // Now that we've decided how many cores to give on each node, let's actually give them
    for (pos <- 0 until numUsable) {
      if (assigned(pos) > 0) {
        // First register the executor (and its core count) with the app
        val exec = app.addExecutor(usableWorkers(pos), assigned(pos))
        launchExecutor(usableWorkers(pos), exec)
        app.state = ApplicationState.RUNNING
      }
    }
    // In effect this algorithm spreads the cores evenly across the workers
  }
} else {
  // Pack each app into as few nodes as possible until we've assigned all its cores
  // Iterate over the ALIVE workers that still have free cores
  for (worker <- workers if worker.coresFree > 0 && worker.state == WorkerState.ALIVE) {
    // Iterate over the waiting apps that still need cores
    for (app <- waitingApps if app.coresLeft > 0) {
      // If this worker can be used by this app
      if (canUse(app, worker)) {
        // The maximum number of cores that can be used on this worker
        val coresToUse = math.min(worker.coresFree, app.coresLeft)
        // If there are cores available, launch an executor on this worker
        if (coresToUse > 0) {
          val exec = app.addExecutor(worker, coresToUse)
          launchExecutor(worker, exec)
          app.state = ApplicationState.RUNNING
        }
      }
    }
  }
}
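A minimal sketch of the two allocation strategies side by side, using plain Ints for the free cores of each worker. The function name allocate and its parameters are made up for illustration; they are not Spark's API.

// Sketch: how spread-out vs pack distribute the same core demand
def allocate(coresFreePerWorker: Vector[Int], demand: Int, spreadOut: Boolean): Vector[Int] = {
  val assigned = Array.fill(coresFreePerWorker.length)(0)
  var toAssign = math.min(demand, coresFreePerWorker.sum)
  if (spreadOut) {
    // Round-robin: one core at a time across every worker that still has capacity
    var pos = 0
    while (toAssign > 0) {
      if (coresFreePerWorker(pos) - assigned(pos) > 0) {
        assigned(pos) += 1
        toAssign -= 1
      }
      pos = (pos + 1) % coresFreePerWorker.length
    }
  } else {
    // Pack: take as many cores as possible from each worker in turn
    for (pos <- coresFreePerWorker.indices if toAssign > 0) {
      val take = math.min(coresFreePerWorker(pos), toAssign)
      assigned(pos) += take
      toAssign -= take
    }
  }
  assigned.toVector
}

// Three workers with 4 free cores each, an app asking for 6 cores:
println(allocate(Vector(4, 4, 4), 6, spreadOut = true))   // Vector(2, 2, 2)  -- spread evenly
println(allocate(Vector(4, 4, 4), 6, spreadOut = false))  // Vector(4, 2, 0)  -- packed onto few workers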

canUse source code

def canUse(app: ApplicationInfo, worker: WorkerInfo): Boolean = {
  // The worker must have at least as much free memory as the app needs per slave,
  // and must not already be running an executor for this app
  worker.memoryFree >= app.desc.memoryPerSlave && !worker.hasExecutor(app)
}

addExecutor() source code

def addExecutor(worker: WorkerInfo, cores: Int, useID: Option[Int] = None): ExecutorDesc = {
  // Create an executor descriptor for this app on the given worker
  val exec = new ExecutorDesc(newExecutorId(useID), this, worker, cores, desc.memoryPerSlave)
  executors(exec.id) = exec
  // Increase the number of cores already granted to this app
  coresGranted += cores
  exec
}

launchExecutor() source code

def launchExecutor(worker: WorkerInfo, exec: ExecutorDesc) {
  logInfo("Launching executor " + exec.fullId + " on worker " + worker.id)
  // Record the executor on the worker
  worker.addExecutor(exec)
  // Send a LaunchExecutor message to the worker to start the executor
  worker.actor ! LaunchExecutor(masterUrl,
    exec.application.id, exec.id, exec.application.desc, exec.cores, exec.memory)
  // Notify the app's driver that an executor has been added
  exec.application.driver ! ExecutorAdded(
    exec.id, worker.id, worker.hostPort, exec.cores, exec.memory)
}