kubelet Pod Startup Source Code Analysis (Part 3)


The previous posts analyzed how kubelet starts a pod. Once a pod is started, who reports its status? Back to syncLoopIteration, which has four input sources. On first creation it receives an ADD on configCh, which is straightforward. Then the container starts: a "ContainerCreating" status is reported to the apiserver, and a RECONCILE arrives on configCh. The key question is: when does the successful, running status get reported?
The answer lies in the relist method in pkg/kubelet/pleg/generic.go (PLEG, the Pod Lifecycle Event Generator). It compares each pod's new record against the old one; when the states differ, it updates the local cache and sends an event to the channel:

g.eventChannel <- events[i]
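
To make the diffing concrete, here is a minimal, self-contained sketch of the idea. This is hypothetical illustration code, not the real generic.go: the actual relist tracks per-container transitions through podRecords and generateEvents, but the core logic is the same state comparison.

package main

import "fmt"

type containerState string

const (
	stateCreated containerState = "created"
	stateRunning containerState = "running"
	stateExited  containerState = "exited"
)

type podLifecycleEvent struct {
	PodID string
	Type  string // e.g. "ContainerStarted", "ContainerDied"
	Data  string // container ID
}

// relistOnce diffs the previously recorded container states against a fresh
// runtime listing and returns the lifecycle events to push into the channel.
func relistOnce(old, current map[string]containerState, podID string) []podLifecycleEvent {
	var events []podLifecycleEvent
	for id, newState := range current {
		if old[id] == newState {
			continue // no transition, nothing to report
		}
		switch newState {
		case stateRunning:
			events = append(events, podLifecycleEvent{podID, "ContainerStarted", id})
		case stateExited:
			events = append(events, podLifecycleEvent{podID, "ContainerDied", id})
		}
	}
	return events
}

func main() {
	old := map[string]containerState{"c1": stateCreated}
	cur := map[string]containerState{"c1": stateRunning}
	for _, e := range relistOnce(old, cur, "pod-uid-1") {
		fmt.Printf("%+v\n", e) // {PodID:pod-uid-1 Type:ContainerStarted Data:c1}
	}
}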

On the consumer side, in kubelet.go:

func (kl *Kubelet) syncLoop(updates <-chan kubetypes.PodUpdate, handler SyncHandler) {
	syncTicker := time.NewTicker(time.Second)
	defer syncTicker.Stop()
	housekeepingTicker := time.NewTicker(housekeepingPeriod)
	defer housekeepingTicker.Stop()
	plegCh := kl.pleg.Watch()
	for {
		if rs := kl.runtimeState.runtimeErrors(); len(rs) != 0 {
			glog.Infof("skipping pod synchronization - %v", rs)
			time.Sleep(5 * time.Second)
			continue
		}
		kl.syncLoopMonitor.Store(kl.clock.Now())
		if !kl.syncLoopIteration(updates, handler, syncTicker.C, housekeepingTicker.C, plegCh) {
			break
		}
		kl.syncLoopMonitor.Store(kl.clock.Now())
	}
}

The kl.pleg.Watch() call above is where the events come from: it returns the channel that syncLoopIteration receives as plegCh. When an event arrives, execution enters this case in syncLoopIteration:

case e := <-plegCh:
	if isSyncPodWorthy(e) {
		// PLEG event for a pod; sync it.
		if pod, ok := kl.podManager.GetPodByUID(e.ID); ok {
			glog.V(2).Infof("SyncLoop (PLEG): %q, event: %#v", format.Pod(pod), e)
			handler.HandlePodSyncs([]*v1.Pod{pod})
		} else {
			// If the pod no longer exists, ignore the event.
			glog.V(4).Infof("SyncLoop (PLEG): ignore irrelevant event: %#v", e)
		}
	}
	if e.Type == pleg.ContainerDied {
		if containerID, ok := e.Data.(string); ok {
			kl.cleanUpContainersInPod(e.ID, containerID)
		}
	}
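
A note on the filter: in the kubelet source of this period, isSyncPodWorthy simply excludes ContainerRemoved events, since a removed container carries no new state worth reconciling (quoted from memory, so treat as approximate):

// isSyncPodWorthy filters out events that are not worthy of pod syncing.
func isSyncPodWorthy(event *pleg.PodLifecycleEvent) bool {
	// ContainerRemoved doesn't affect pod state
	return event.Type != pleg.ContainerRemoved
}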

As in the previous posts, this handler eventually reaches the syncPod method in kubelet.go, which calls:

kl.statusManager.SetPodStatus(pod, apiPodStatus)

At this point the pod's status is:
Status:PodStatus{Phase:Pending,Conditions:[{ {Ready False ContainersNotReady containers with unready status: [wordpress]} {PodScheduled}],Message:,Reason:,HostIP:10.39.0.45,ContainerStatuses:[{wordpress {ContainerStateWaiting{Reason:ContainerCreating,Message:,} nil nil} {nil nil nil} false 0 busybox }],QOSClass:BestEffort,InitContainerStatuses:[],},}
The container is still shown as being created, but the local PLEG has already detected that the container started successfully, so apiPodStatus is:
{Running [ [{wordpress {nil &ContainerStateRunning{StartedAt:2017-12-19 07:36:45.876193223 +0800 CST,} nil} {nil nil nil} true 0 }] BestEffort}
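SetPodStatus itself is only a thin wrapper: roughly speaking, it takes the status manager's lock, deep-copies the status so callers cannot later mutate the cached copy, and delegates to updateStatusInternal. A sketch, assuming the code of this period (the exact body varies across versions):

func (m *manager) SetPodStatus(pod *v1.Pod, status v1.PodStatus) {
	m.podStatusesLock.Lock()
	defer m.podStatusesLock.Unlock()
	// Cache a deep copy so that later mutations by the caller cannot
	// corrupt the cached status.
	status = *status.DeepCopy()
	// Force an update when the pod is being deleted, so the final status
	// reaches the apiserver and the pod can eventually be removed.
	m.updateStatusInternal(pod, status, pod.DeletionTimestamp != nil)
}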
The status is ultimately synced to the apiserver through pkg/kubelet/status/status_manager.go:

func (m *manager) updateStatusInternal(pod *v1.Pod, status v1.PodStatus, forceUpdate bool) bool {
	var oldStatus v1.PodStatus
	cachedStatus, isCached := m.podStatuses[pod.UID]
	if isCached {
		oldStatus = cachedStatus.status
	} else if mirrorPod, ok := m.podManager.GetMirrorPodByPod(pod); ok {
		oldStatus = mirrorPod.Status
	} else {
		oldStatus = pod.Status
	}
	// In our example, oldStatus is still ContainerCreating while the new status is already Running.
	if _, readyCondition := podutil.GetPodCondition(&status, v1.PodReady); readyCondition != nil {
		// Need to set LastTransitionTime.
		lastTransitionTime := metav1.Now()
		_, oldReadyCondition := podutil.GetPodCondition(&oldStatus, v1.PodReady)
		if oldReadyCondition != nil && readyCondition.Status == oldReadyCondition.Status {
			lastTransitionTime = oldReadyCondition.LastTransitionTime
		}
		readyCondition.LastTransitionTime = lastTransitionTime
	}
	// Set InitializedCondition.LastTransitionTime.
	if _, initCondition := podutil.GetPodCondition(&status, v1.PodInitialized); initCondition != nil {
		// Need to set LastTransitionTime.
		lastTransitionTime := metav1.Now()
		_, oldInitCondition := podutil.GetPodCondition(&oldStatus, v1.PodInitialized)
		if oldInitCondition != nil && initCondition.Status == oldInitCondition.Status {
			lastTransitionTime = oldInitCondition.LastTransitionTime
		}
		initCondition.LastTransitionTime = lastTransitionTime
	}
	// ensure that the start time does not change across updates.
	if oldStatus.StartTime != nil && !oldStatus.StartTime.IsZero() {
		status.StartTime = oldStatus.StartTime
	} else if status.StartTime.IsZero() {
		// if the status has no start time, we need to set an initial time
		now := metav1.Now()
		status.StartTime = &now
	}
	normalizeStatus(pod, &status)
	// If the new status equals the locally cached one, there is no need to push it to the apiserver.
	if isCached && isStatusEqual(&cachedStatus.status, &status) && !forceUpdate {
		glog.V(3).Infof("Ignoring same status for pod %q, status: %+v", format.Pod(pod), status)
		return false // No new status.
	}
	newStatus := versionedPodStatus{
		status:       status,
		version:      cachedStatus.version + 1,
		podName:      pod.Name,
		podNamespace: pod.Namespace,
	}
	m.podStatuses[pod.UID] = newStatus
	select {
	case m.podStatusChannel <- podStatusSyncRequest{pod.UID, newStatus}:
		glog.V(5).Infof("Status Manager: adding pod: %q, with status: (%q, %v) to podStatusChannel",
			pod.UID, newStatus.version, newStatus.status)
		return true
	default:
		// Let the periodic syncBatch handle the update if the channel is full.
		// We can't block, since we hold the mutex lock.
		glog.V(4).Infof("Skpping the status update for pod %q for now because the channel is full; status: %+v",
			format.Pod(pod), status)
		return false
	}
}
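
The "same status, skip the update" check above relies on isStatusEqual, which in this codebase is just a semantic deep-equality test; that is why normalizeStatus runs right before it, rounding timestamps and sorting slices so that semantically identical statuses actually compare equal. From the source of this period (approximately):

// isStatusEqual reports whether two pod statuses are semantically equal.
// Timestamps must already be normalized by normalizeStatus, otherwise
// sub-second differences would defeat the comparison.
func isStatusEqual(oldStatus, status *v1.PodStatus) bool {
	return apiequality.Semantic.DeepEqual(status, oldStatus)
}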

Finally, syncPod in pkg/kubelet/status/status_manager.go pushes the status to the apiserver:

newPod, err := m.kubeClient.Core().Pods(pod.Namespace).UpdateStatus(pod)
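
The other end of podStatusChannel is drained by the goroutine started in the status manager's Start method: each queued request is pushed to the apiserver via syncPod, while a ticker periodically runs syncBatch, which also picks up any update that updateStatusInternal dropped when the channel was full. Roughly, from the source of this period:

func (m *manager) Start() {
	// ...
	syncTicker := time.Tick(syncPeriod)
	// syncPod and syncBatch share the pod status cache, so they run on
	// the same goroutine to avoid racing with each other.
	go wait.Forever(func() {
		select {
		case syncRequest := <-m.podStatusChannel:
			// A single pod's status changed: push it immediately.
			m.syncPod(syncRequest.podUID, syncRequest.status)
		case <-syncTicker:
			// Periodic reconciliation; also covers dropped updates.
			m.syncBatch()
		}
	}, 0)
}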

Of course, the PLEG relist path is not the only trigger for this sync: the other two tickers in syncLoopIteration can fire it as well. The periodic SYNC signal, for example, also triggers reconciliation between local state and the apiserver.
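For reference, the SYNC ticker case in syncLoopIteration looks roughly like this: it collects the pods whose workers are due for a resync and hands them to the same HandlePodSyncs path, so local state and the apiserver converge even when no PLEG event fires:

case <-syncCh:
	// Sync pods waiting for sync
	podsToSync := kl.getPodsToSync()
	if len(podsToSync) == 0 {
		break
	}
	glog.V(4).Infof("SyncLoop (SYNC): %d pods; %s", len(podsToSync), format.Pods(podsToSync))
	handler.HandlePodSyncs(podsToSync)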
