Spark 2.1's scheduler backend implementation varies greatly between local mode and YARN mode.

Source: Internet · Editor: Programmer Blog Network · Date: 2024/06/09 17:05

In local mode, the backend is an instance of LocalSchedulerBackend.

// Local mode wires the TaskScheduler directly to an in-process LocalSchedulerBackend.
// NOTE(review): the trailing `1` is presumably the number of cores/threads — confirm against the Spark sources.
val backend = new LocalSchedulerBackend(sc.getConf, scheduler, 1)

In YARN client mode, the backend is an instance of YarnClientSchedulerBackend.

/**
 * Selects the YARN scheduler backend matching the application's deploy mode.
 *
 * "cluster" yields a [[YarnClusterSchedulerBackend]] (driver runs inside the
 * ApplicationMaster), "client" yields a [[YarnClientSchedulerBackend]] (driver
 * runs locally); anything else is rejected with a [[SparkException]].
 */
override def createSchedulerBackend(
    sc: SparkContext,
    masterURL: String,
    scheduler: TaskScheduler): SchedulerBackend = {
  sc.deployMode match {
    case "cluster" =>
      // Cast is performed only on the recognised branches, so an unknown
      // deploy mode still reports the configuration error below.
      new YarnClusterSchedulerBackend(scheduler.asInstanceOf[TaskSchedulerImpl], sc)
    case "client" =>
      new YarnClientSchedulerBackend(scheduler.asInstanceOf[TaskSchedulerImpl], sc)
    case _ =>
      throw new SparkException(s"Unknown deploy mode '${sc.deployMode}' for Yarn")
  }
}
// YARN client-mode backend: specialises YarnSchedulerBackend for the case where
// the driver runs outside the cluster. (Class body not shown in this excerpt.)
private[spark] class YarnClientSchedulerBackend(    scheduler: TaskSchedulerImpl,    sc: SparkContext)  extends YarnSchedulerBackend(scheduler, sc)  with Logging {
  • YarnSchedulerBackend
/** * Abstract Yarn scheduler backend that contains common logic * between the client and cluster Yarn scheduler backends. */private[spark] abstract class YarnSchedulerBackend(    scheduler: TaskSchedulerImpl,    sc: SparkContext)  extends CoarseGrainedSchedulerBackend(scheduler, sc.env.rpcEnv) {
  • CoarseGrainedSchedulerBackend
/** * A scheduler backend that waits for coarse-grained executors to connect. * This backend holds onto each executor for the duration of the Spark job rather than relinquishing * executors whenever a task is done and asking the scheduler to launch a new executor for * each new task. Executors may be launched in a variety of ways, such as Mesos tasks for the * coarse-grained Mesos mode or standalone processes for Spark's standalone deploy mode * (spark.deploy.*). */private[spark]class CoarseGrainedSchedulerBackend(scheduler: TaskSchedulerImpl, val rpcEnv: RpcEnv)  extends ExecutorAllocationClient with SchedulerBackend with Logging
0 0
原创粉丝点击