// pkg/scheduler/scheduler.go#L444
func (s *Scheduler) Filter(args extenderv1.ExtenderArgs) (*extenderv1.ExtenderFilterResult, error) {
    klog.InfoS("begin schedule filter", "pod", args.Pod.Name, "uuid", args.Pod.UID, "namespaces", args.Pod.Namespace)
    nums := k8sutil.Resourcereqs(args.Pod)
    total := 0
    for _, n := range nums {
        for _, k := range n {
            total += int(k.Nums)
        }
    }
    if total == 0 {
        klog.V(1).Infof("pod %v not find resource", args.Pod.Name)
        s.recordScheduleFilterResultEvent(args.Pod, EventReasonFilteringFailed, []string{}, fmt.Errorf("does not request any resource"))
        return &extenderv1.ExtenderFilterResult{
            NodeNames:   args.NodeNames,
            FailedNodes: nil,
            Error:       "",
        }, nil
    }
    annos := args.Pod.Annotations
    s.delPod(args.Pod)
    nodeUsage, failedNodes, err := s.getNodesUsage(args.NodeNames, args.Pod)
    if err != nil {
        s.recordScheduleFilterResultEvent(args.Pod, EventReasonFilteringFailed, []string{}, err)
        return nil, err
    }
    if len(failedNodes) != 0 {
        klog.V(5).InfoS("getNodesUsage failed nodes", "nodes", failedNodes)
    }
    nodeScores, err := s.calcScore(nodeUsage, nums, annos, args.Pod)
    if err != nil {
        err := fmt.Errorf("calcScore failed %v for pod %v", err, args.Pod.Name)
        s.recordScheduleFilterResultEvent(args.Pod, EventReasonFilteringFailed, []string{}, err)
        return nil, err
    }
    if len((*nodeScores).NodeList) == 0 {
        klog.V(4).Infof("All node scores do not meet for pod %v", args.Pod.Name)
        s.recordScheduleFilterResultEvent(args.Pod, EventReasonFilteringFailed, []string{}, fmt.Errorf("no available node, all node scores do not meet"))
        return &extenderv1.ExtenderFilterResult{
            FailedNodes: failedNodes,
        }, nil
    }
    klog.V(4).Infoln("nodeScores_len=", len((*nodeScores).NodeList))
    sort.Sort(nodeScores)
    m := (*nodeScores).NodeList[len((*nodeScores).NodeList)-1]
    klog.Infof("schedule %v/%v to %v %v", args.Pod.Namespace, args.Pod.Name, m.NodeID, m.Devices)
    annotations := make(map[string]string)
    annotations[util.AssignedNodeAnnotations] = m.NodeID
    annotations[util.AssignedTimeAnnotations] = strconv.FormatInt(time.Now().Unix(), 10)
    for _, val := range device.GetDevices() {
        val.PatchAnnotations(&annotations, m.Devices)
    }
    //InRequestDevices := util.EncodePodDevices(util.InRequestDevices, m.devices)
    //supportDevices := util.EncodePodDevices(util.SupportDevices, m.devices)
    //maps.Copy(annotations, InRequestDevices)
    //maps.Copy(annotations, supportDevices)
    s.addPod(args.Pod, m.NodeID, m.Devices)
    err = util.PatchPodAnnotations(args.Pod, annotations)
    if err != nil {
        s.recordScheduleFilterResultEvent(args.Pod, EventReasonFilteringFailed, []string{}, err)
        s.delPod(args.Pod)
        return nil, err
    }
    s.recordScheduleFilterResultEvent(args.Pod, EventReasonFilteringSucceed, []string{m.NodeID}, nil)
    res := extenderv1.ExtenderFilterResult{NodeNames: &[]string{m.NodeID}}
    return &res, nil
}
// pkg/scheduler/score.go#L185
func (s *Scheduler) calcScore(nodes *map[string]*NodeUsage, nums util.PodDeviceRequests, annos map[string]string, task *corev1.Pod) (*policy.NodeScoreList, error) {
    userNodePolicy := config.NodeSchedulerPolicy
    if annos != nil {
        if value, ok := annos[policy.NodeSchedulerPolicyAnnotationKey]; ok {
            userNodePolicy = value
        }
    }
    res := policy.NodeScoreList{
        Policy:   userNodePolicy,
        NodeList: make([]*policy.NodeScore, 0),
    }

    //func calcScore(nodes *map[string]*NodeUsage, errMap *map[string]string, nums util.PodDeviceRequests, annos map[string]string, task *corev1.Pod) (*NodeScoreList, error) {
    //	res := make(NodeScoreList, 0, len(*nodes))
    for nodeID, node := range *nodes {
        viewStatus(*node)
        score := policy.NodeScore{NodeID: nodeID, Devices: make(util.PodDevices), Score: 0}
        score.ComputeScore(node.Devices)

        //This loop is for different container request
        ctrfit := false
        for ctrid, n := range nums {
            sums := 0
            for _, k := range n {
                sums += int(k.Nums)
            }

            if sums == 0 {
                for idx := range score.Devices {
                    if len(score.Devices[idx]) <= ctrid {
                        score.Devices[idx] = append(score.Devices[idx], util.ContainerDevices{})
                    }
                    score.Devices[idx][ctrid] = append(score.Devices[idx][ctrid], util.ContainerDevice{})
                    continue
                }
            }
            klog.V(5).InfoS("fitInDevices", "pod", klog.KObj(task), "node", nodeID)
            fit, _ := fitInDevices(node, n, annos, task, &score.Devices)
            ctrfit = fit
            if !fit {
                klog.InfoS("calcScore:node not fit pod", "pod", klog.KObj(task), "node", nodeID)
                break
            }
        }

        if ctrfit {
            res.NodeList = append(res.NodeList, &score)
        }
    }
    return &res, nil
}
// pkg/scheduler/policy/node_policy.go#L53
func (ns *NodeScore) ComputeScore(devices DeviceUsageList) {
    // current user having request resource
    used, usedCore, usedMem := int32(0), int32(0), int32(0)
    for _, device := range devices.DeviceLists {
        used += device.Device.Used
        usedCore += device.Device.Usedcores
        usedMem += device.Device.Usedmem
    }
    klog.V(2).Infof("node %s used %d, usedCore %d, usedMem %d,", ns.NodeID, used, usedCore, usedMem)

    total, totalCore, totalMem := int32(0), int32(0), int32(0)
    for _, deviceLists := range devices.DeviceLists {
        total += deviceLists.Device.Count
        totalCore += deviceLists.Device.Totalcore
        totalMem += deviceLists.Device.Totalmem
    }
    useScore := float32(used) / float32(total)
    coreScore := float32(usedCore) / float32(totalCore)
    memScore := float32(usedMem) / float32(totalMem)
    ns.Score = float32(Weight) * (useScore + coreScore + memScore)
    klog.V(2).Infof("node %s computer score is %f", ns.NodeID, ns.Score)
}
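To make the formula concrete, here is a small self-contained sketch with toy numbers (not HAMi code; I use 10 as a stand-in for the Weight constant, whose real value may differ). The score is simply the sum of three usage ratios, scaled by the weight:

package main

import "fmt"

// Toy numbers for a node with 2 GPUs: 1 already allocated,
// 60 of 200 total cores used, 10000 of 40000 MB memory used.
func main() {
    const weight = 10 // stand-in for policy.Weight; the real constant may differ

    used, total := float32(1), float32(2)
    usedCore, totalCore := float32(60), float32(200)
    usedMem, totalMem := float32(10000), float32(40000)

    // Same shape as NodeScore.ComputeScore: sum of three usage ratios, scaled.
    score := weight * (used/total + usedCore/totalCore + usedMem/totalMem)
    fmt.Printf("node score = %.2f\n", score) // 10 * (0.5 + 0.3 + 0.25) = 10.50
}

The busier a node already is, the larger each ratio, and therefore the higher its score.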
ctrfit := false
for ctrid, n := range nums {
    sums := 0
    for _, k := range n {
        sums += int(k.Nums)
    }

    if sums == 0 {
        for idx := range score.Devices {
            if len(score.Devices[idx]) <= ctrid {
                score.Devices[idx] = append(score.Devices[idx], util.ContainerDevices{})
            }
            score.Devices[idx][ctrid] = append(score.Devices[idx][ctrid], util.ContainerDevice{})
            continue
        }
    }
    klog.V(5).InfoS("fitInDevices", "pod", klog.KObj(task), "node", nodeID)
    fit, _ := fitInDevices(node, n, annos, task, &score.Devices)
    ctrfit = fit
    if !fit {
        klog.InfoS("calcScore:node not fit pod", "pod", klog.KObj(task), "node", nodeID)
        break
    }
}

if ctrfit {
    res.NodeList = append(res.NodeList, &score)
}
func fitInCertainDevice(node *NodeUsage, request util.ContainerDeviceRequest, annos map[string]string, pod *corev1.Pod) (bool, map[string]util.ContainerDevices) {
    // ....
    for i := len(node.Devices.DeviceLists) - 1; i >= 0; i-- {
        if node.Devices.DeviceLists[i].Device.Totalmem-node.Devices.DeviceLists[i].Device.Usedmem < memreq {
            continue
        }
        if node.Devices.DeviceLists[i].Device.Totalcore-node.Devices.DeviceLists[i].Device.Usedcores < k.Coresreq {
            continue
        }
        // Coresreq=100 indicates it want this card exclusively
        if node.Devices.DeviceLists[i].Device.Totalcore == 100 && k.Coresreq == 100 && node.Devices.DeviceLists[i].Device.Used > 0 {
            continue
        }
        // You can't allocate core=0 job to an already full GPU
        if node.Devices.DeviceLists[i].Device.Totalcore != 0 && node.Devices.DeviceLists[i].Device.Usedcores == node.Devices.DeviceLists[i].Device.Totalcore && k.Coresreq == 0 {
            continue
        }
        if k.Nums > 0 {
            klog.InfoS("first fitted", "pod", klog.KObj(pod), "device", node.Devices.DeviceLists[i].Device.ID)
            k.Nums--
            tmpDevs[k.Type] = append(tmpDevs[k.Type], util.ContainerDevice{
                Idx:       int(node.Devices.DeviceLists[i].Device.Index),
                UUID:      node.Devices.DeviceLists[i].Device.ID,
                Type:      k.Type,
                Usedmem:   memreq,
                Usedcores: k.Coresreq,
            })
        }
        if k.Nums == 0 {
            klog.InfoS("device allocate success", "pod", klog.KObj(pod), "allocate device", tmpDevs)
            return true, tmpDevs
        }
    }
    return false, tmpDevs
}
With that, the nodes that cannot satisfy the request have been filtered out. Any of the remaining nodes could run the Pod; which one is actually chosen depends on the configured scheduling policy.
// pkg/scheduler/policy/node_policy.go#L32
type NodeScoreList struct {
    NodeList []*NodeScore
    Policy   string
}

func (l NodeScoreList) Len() int {
    return len(l.NodeList)
}

func (l NodeScoreList) Swap(i, j int) {
    l.NodeList[i], l.NodeList[j] = l.NodeList[j], l.NodeList[i]
}

func (l NodeScoreList) Less(i, j int) bool {
    if l.Policy == NodeSchedulerPolicySpread.String() {
        return l.NodeList[i].Score > l.NodeList[j].Score
    }
    // default policy is Binpack
    return l.NodeList[i].Score < l.NodeList[j].Score
}
The core part:
func (l NodeScoreList) Less(i, j int) bool {
    if l.Policy == NodeSchedulerPolicySpread.String() {
        return l.NodeList[i].Score > l.NodeList[j].Score
    }
    // default policy is Binpack
    return l.NodeList[i].Score < l.NodeList[j].Score
}
Depending on the Policy, there are two sort orders, and they are exactly opposite to each other.
// NodeSchedulerPolicyBinpack is node use binpack scheduler policy.
NodeSchedulerPolicyBinpack SchedulerPolicyName = "binpack"
// NodeSchedulerPolicySpread is node use spread scheduler policy.
NodeSchedulerPolicySpread SchedulerPolicyName = "spread"
This comes down to how sort.Sort() works. In short:
If the Less() method compares with greater-than (>), the final order is descending.
If the Less() method compares with less-than (<), the final order is ascending.
Mapped onto the scheduling policies:
The Binpack policy compares with less-than (<), so the list ends up in ascending order.
The Spread policy compares with greater-than (>), so the list ends up in descending order (the small sketch below demonstrates both directions).
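If the link between the comparison operator in Less() and the resulting order is not obvious, this minimal self-contained sketch (toy type, not HAMi code) reproduces it with sort.Sort:

package main

import (
    "fmt"
    "sort"
)

// scores mimics NodeScoreList: the direction of Less flips with the policy.
type scores struct {
    vals   []float32
    spread bool // true: compare with ">", like spread; false: compare with "<", like binpack
}

func (s scores) Len() int      { return len(s.vals) }
func (s scores) Swap(i, j int) { s.vals[i], s.vals[j] = s.vals[j], s.vals[i] }
func (s scores) Less(i, j int) bool {
    if s.spread {
        return s.vals[i] > s.vals[j] // ">" => descending
    }
    return s.vals[i] < s.vals[j] // "<" => ascending
}

func main() {
    binpack := scores{vals: []float32{7.5, 2.1, 5.3}}
    sort.Sort(binpack)
    fmt.Println(binpack.vals) // [2.1 5.3 7.5]: ascending, the last element has the highest score

    spread := scores{vals: []float32{7.5, 2.1, 5.3}, spread: true}
    sort.Sort(spread)
    fmt.Println(spread.vals) // [7.5 5.3 2.1]: descending, the last element has the lowest score
}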
Also recall the scoring rule from earlier: the less free resource a node has, the higher its score. And the Filter always picks the last node after sorting.
With that, the logic becomes clear:
Binpack picks the last node: the list is ascending, so the last node has the highest score, i.e. the least free resources.
Spread picks the last node: the list is descending, so the last node has the lowest score, i.e. the most free resources.
Which is exactly what each policy is meant to do:
Binpack packs Pods onto as few nodes as possible, using up one node's resources before moving on to the next; Spread, conversely, spreads Pods out, always preferring the node with the most free resources. The short walk-through below shows both outcomes.
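Putting it together, here is a hedged walk-through with made-up node scores. For brevity I use sort.Slice instead of the NodeScoreList sort.Sort implementation shown above; picking the last element after sorting yields the busiest node under binpack and the emptiest one under spread:

package main

import (
    "fmt"
    "sort"
)

type nodeScore struct {
    nodeID string
    score  float32 // usage-ratio based: higher score = less free resource
}

func pick(nodes []nodeScore, spread bool) string {
    sort.Slice(nodes, func(i, j int) bool {
        if spread {
            return nodes[i].score > nodes[j].score // descending
        }
        return nodes[i].score < nodes[j].score // ascending (binpack, the default)
    })
    // Filter always takes the last element of the sorted list.
    return nodes[len(nodes)-1].nodeID
}

func main() {
    nodes := []nodeScore{{"node-a", 10.5}, {"node-b", 3.2}, {"node-c", 7.8}}
    fmt.Println(pick(append([]nodeScore(nil), nodes...), false)) // binpack -> node-a (busiest)
    fmt.Println(pick(append([]nodeScore(nil), nodes...), true))  // spread  -> node-b (most free)
}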
func fitInDevices(node *NodeUsage, requests util.ContainerDeviceRequests, annos map[string]string, pod *corev1.Pod, devinput *util.PodDevices) (bool, float32) {
    //devmap := make(map[string]util.ContainerDevices)
    devs := util.ContainerDevices{}
    total, totalCore, totalMem := int32(0), int32(0), int32(0)
    free, freeCore, freeMem := int32(0), int32(0), int32(0)
    sums := 0
    // computer all device score for one node
    for index := range node.Devices.DeviceLists {
        node.Devices.DeviceLists[index].ComputeScore(requests)
    }
    //This loop is for requests for different devices
    for _, k := range requests {
        sums += int(k.Nums)
        if int(k.Nums) > len(node.Devices.DeviceLists) {
            klog.InfoS("request devices nums cannot exceed the total number of devices on the node.", "pod", klog.KObj(pod), "request devices nums", k.Nums, "node device nums", len(node.Devices.DeviceLists))
            return false, 0
        }
        sort.Sort(node.Devices)
        fit, tmpDevs := fitInCertainDevice(node, k, annos, pod)
        if fit {
            for _, val := range tmpDevs[k.Type] {
                total += node.Devices.DeviceLists[val.Idx].Device.Count
                totalCore += node.Devices.DeviceLists[val.Idx].Device.Totalcore
                totalMem += node.Devices.DeviceLists[val.Idx].Device.Totalmem
                free += node.Devices.DeviceLists[val.Idx].Device.Count - node.Devices.DeviceLists[val.Idx].Device.Used
                freeCore += node.Devices.DeviceLists[val.Idx].Device.Totalcore - node.Devices.DeviceLists[val.Idx].Device.Usedcores
                freeMem += node.Devices.DeviceLists[val.Idx].Device.Totalmem - node.Devices.DeviceLists[val.Idx].Device.Usedmem
                node.Devices.DeviceLists[val.Idx].Device.Used++
                node.Devices.DeviceLists[val.Idx].Device.Usedcores += val.Usedcores
                node.Devices.DeviceLists[val.Idx].Device.Usedmem += val.Usedmem
            }
            devs = append(devs, tmpDevs[k.Type]...)
        } else {
            return false, 0
        }
        (*devinput)[k.Type] = append((*devinput)[k.Type], devs)
    }
    return true, 0
}
func fitInCertainDevice(node *NodeUsage, request util.ContainerDeviceRequest, annos map[string]string, pod *corev1.Pod) (bool, map[string]util.ContainerDevices) {
    // ....
    for i := len(node.Devices.DeviceLists) - 1; i >= 0; i-- {
        if node.Devices.DeviceLists[i].Device.Totalmem-node.Devices.DeviceLists[i].Device.Usedmem < memreq {
            continue
        }
        if node.Devices.DeviceLists[i].Device.Totalcore-node.Devices.DeviceLists[i].Device.Usedcores < k.Coresreq {
            continue
        }
        // Coresreq=100 indicates it want this card exclusively
        if node.Devices.DeviceLists[i].Device.Totalcore == 100 && k.Coresreq == 100 && node.Devices.DeviceLists[i].Device.Used > 0 {
            continue
        }
        // You can't allocate core=0 job to an already full GPU
        if node.Devices.DeviceLists[i].Device.Totalcore != 0 && node.Devices.DeviceLists[i].Device.Usedcores == node.Devices.DeviceLists[i].Device.Totalcore && k.Coresreq == 0 {
            continue
        }
        if k.Nums > 0 {
            k.Nums--
            tmpDevs[k.Type] = append(tmpDevs[k.Type], util.ContainerDevice{
                Idx:       int(node.Devices.DeviceLists[i].Device.Index),
                UUID:      node.Devices.DeviceLists[i].Device.ID,
                Type:      k.Type,
                Usedmem:   memreq,
                Usedcores: k.Coresreq,
            })
        }
        if k.Nums == 0 {
            klog.InfoS("device allocate success", "pod", klog.KObj(pod), "allocate device", tmpDevs)
            return true, tmpDevs
        }
    }
    return false, tmpDevs
}
root@test:~/lixd/hami# k get po hami-30 -oyaml
apiVersion: v1
kind: Pod
metadata:
  annotations:
    hami.io/bind-phase: allocating
    hami.io/bind-time: "1732072495"
    hami.io/vgpu-devices-allocated: GPU-1afede84-4e70-2174-49af-f07ebb94d1ae,NVIDIA,20000,30:;
    hami.io/vgpu-devices-to-allocate: GPU-1afede84-4e70-2174-49af-f07ebb94d1ae,NVIDIA,20000,30:;
    hami.io/vgpu-node: test
    hami.io/vgpu-time: "1732072495"
hami.io/vgpu-devices-to-allocate is the target GPU the Scheduler has selected for the Pod.
hami.io/vgpu-devices-allocated is what has already been allocated.
P.S. For a Pod that has already been scheduled, hami.io/vgpu-devices-to-allocate gets cleared.
Once scheduling is done, the DevicePlugin simply reads hami.io/vgpu-devices-to-allocate to learn which GPUs it should allocate to this Pod (the value format is broken down in the sketch below).
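For reference, each entry in those annotation values is a comma-separated tuple, with ':' separating devices within one container and ';' separating containers. The standalone sketch below mirrors what DecodeContainerDevices (quoted further down) does; my reading of the fields is UUID, device type, memory in MB, and core percentage:

package main

import (
    "fmt"
    "strconv"
    "strings"
)

func main() {
    // Value taken from hami.io/vgpu-devices-to-allocate in the Pod above.
    anno := "GPU-1afede84-4e70-2174-49af-f07ebb94d1ae,NVIDIA,20000,30:;"
    for _, ctr := range strings.Split(anno, ";") { // ";" separates containers
        for _, dev := range strings.Split(ctr, ":") { // ":" separates devices in one container
            f := strings.Split(dev, ",")
            if len(f) < 4 {
                continue // skip the empty trailing pieces
            }
            mem, _ := strconv.Atoi(f[2])
            core, _ := strconv.Atoi(f[3])
            fmt.Printf("uuid=%s type=%s mem=%dMB core=%d%%\n", f[0], f[1], mem, core)
        }
    }
}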
Selecting a GPU according to the policy
At this point we have already picked GPUs that satisfy the request and even recorded them in the Pod's annotations, so when does the GPU scheduling policy actually take effect?
func (ds *DeviceListsScore) ComputeScore(requests util.ContainerDeviceRequests) {
    request, core, mem := int32(0), int32(0), int32(0)
    // Here we are required to use the same type device
    for _, container := range requests {
        request += container.Nums
        core += container.Coresreq
        if container.MemPercentagereq != 0 && container.MemPercentagereq != 101 {
            mem += ds.Device.Totalmem * (container.MemPercentagereq / 100.0)
            continue
        }
        mem += container.Memreq
    }
    klog.V(2).Infof("device %s user %d, userCore %d, userMem %d,", ds.Device.ID, ds.Device.Used, ds.Device.Usedcores, ds.Device.Usedmem)

    usedScore := float32(request+ds.Device.Used) / float32(ds.Device.Count)
    coreScore := float32(core+ds.Device.Usedcores) / float32(ds.Device.Totalcore)
    memScore := float32(mem+ds.Device.Usedmem) / float32(ds.Device.Totalmem)
    ds.Score = float32(Weight) * (usedScore + coreScore + memScore)
    klog.V(2).Infof("device %s computer score is %f", ds.Device.ID, ds.Score)
}
func (l DeviceUsageList) Len() int {
    return len(l.DeviceLists)
}

func (l DeviceUsageList) Swap(i, j int) {
    l.DeviceLists[i], l.DeviceLists[j] = l.DeviceLists[j], l.DeviceLists[i]
}

func (l DeviceUsageList) Less(i, j int) bool {
    if l.Policy == GPUSchedulerPolicyBinpack.String() {
        if l.DeviceLists[i].Device.Numa == l.DeviceLists[j].Device.Numa {
            return l.DeviceLists[i].Score < l.DeviceLists[j].Score
        }
        return l.DeviceLists[i].Device.Numa > l.DeviceLists[j].Device.Numa
    }
    // default policy is spread
    if l.DeviceLists[i].Device.Numa == l.DeviceLists[j].Device.Numa {
        return l.DeviceLists[i].Score > l.DeviceLists[j].Score
    }
    return l.DeviceLists[i].Device.Numa < l.DeviceLists[j].Device.Numa
}
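One detail worth calling out: fitInCertainDevice (quoted again below) walks node.Devices.DeviceLists from the end, so the device that sorts last is the one tried first. The hedged sketch below uses made-up scores and simplified types (NUMA tie-breaking ignored) to show which GPU becomes the first candidate under the binpack GPU policy:

package main

import (
    "fmt"
    "sort"
)

// Simplified stand-in for DeviceUsageList: one score per GPU, NUMA ignored.
type gpu struct {
    id    string
    score float32 // higher = more of this card is already in use
}

func main() {
    devs := []gpu{{"GPU-0", 2.0}, {"GPU-1", 9.0}, {"GPU-2", 5.0}}
    binpack := true

    sort.Slice(devs, func(i, j int) bool {
        if binpack {
            return devs[i].score < devs[j].score // ascending: the busiest card ends up last
        }
        return devs[i].score > devs[j].score // spread: the emptiest card ends up last
    })

    // fitInCertainDevice iterates from the end, so the last element is tried first.
    fmt.Println("first candidate:", devs[len(devs)-1].id) // binpack -> GPU-1
}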
func fitInCertainDevice(node *NodeUsage, request util.ContainerDeviceRequest, annos map[string]string, pod *corev1.Pod) (bool, map[string]util.ContainerDevices) {
    // ....
    for i := len(node.Devices.DeviceLists) - 1; i >= 0; i-- {
        if node.Devices.DeviceLists[i].Device.Totalmem-node.Devices.DeviceLists[i].Device.Usedmem < memreq {
            continue
        }
        if node.Devices.DeviceLists[i].Device.Totalcore-node.Devices.DeviceLists[i].Device.Usedcores < k.Coresreq {
            continue
        }
        // Coresreq=100 indicates it want this card exclusively
        if node.Devices.DeviceLists[i].Device.Totalcore == 100 && k.Coresreq == 100 && node.Devices.DeviceLists[i].Device.Used > 0 {
            continue
        }
        // You can't allocate core=0 job to an already full GPU
        if node.Devices.DeviceLists[i].Device.Totalcore != 0 && node.Devices.DeviceLists[i].Device.Usedcores == node.Devices.DeviceLists[i].Device.Totalcore && k.Coresreq == 0 {
            continue
        }
        if k.Nums > 0 {
            k.Nums--
            tmpDevs[k.Type] = append(tmpDevs[k.Type], util.ContainerDevice{
                Idx:       int(node.Devices.DeviceLists[i].Device.Index),
                UUID:      node.Devices.DeviceLists[i].Device.ID,
                Type:      k.Type,
                Usedmem:   memreq,
                Usedcores: k.Coresreq,
            })
        }
        if k.Nums == 0 {
            klog.InfoS("device allocate success", "pod", klog.KObj(pod), "allocate device", tmpDevs)
            return true, tmpDevs
        }
    }
    return false, tmpDevs
}
// pkg/util/util.go#L281
func GetNextDeviceRequest(dtype string, p corev1.Pod) (corev1.Container, ContainerDevices, error) {
    pdevices, err := DecodePodDevices(InRequestDevices, p.Annotations)
    if err != nil {
        return corev1.Container{}, ContainerDevices{}, err
    }
    klog.Infof("pod annotation decode vaule is %+v", pdevices)
    res := ContainerDevices{}

    pd, ok := pdevices[dtype]
    if !ok {
        return corev1.Container{}, res, errors.New("device request not found")
    }
    for ctridx, ctrDevice := range pd {
        if len(ctrDevice) > 0 {
            return p.Spec.Containers[ctridx], ctrDevice, nil
        }
    }
    return corev1.Container{}, res, errors.New("device request not found")
}

// pkg/util/util.go#L254
func DecodePodDevices(checklist map[string]string, annos map[string]string) (PodDevices, error) {
    klog.V(5).Infof("checklist is [%+v], annos is [%+v]", checklist, annos)
    if len(annos) == 0 {
        return PodDevices{}, nil
    }
    pd := make(PodDevices)
    for devID, devs := range checklist {
        str, ok := annos[devs]
        if !ok {
            continue
        }
        pd[devID] = make(PodSingleDevice, 0)
        for _, s := range strings.Split(str, OnePodMultiContainerSplitSymbol) {
            cd, err := DecodeContainerDevices(s)
            if err != nil {
                return PodDevices{}, nil
            }
            if len(cd) == 0 {
                continue
            }
            pd[devID] = append(pd[devID], cd)
        }
    }
    klog.InfoS("Decoded pod annos", "poddevices", pd)
    return pd, nil
}
// pkg/util/util.go#L223
func DecodeContainerDevices(str string) (ContainerDevices, error) {
    if len(str) == 0 {
        return ContainerDevices{}, nil
    }
    cd := strings.Split(str, OneContainerMultiDeviceSplitSymbol)
    contdev := ContainerDevices{}
    tmpdev := ContainerDevice{}
    klog.V(5).Infof("Start to decode container device %s", str)
    if len(str) == 0 {
        return ContainerDevices{}, nil
    }
    for _, val := range cd {
        if strings.Contains(val, ",") {
            //fmt.Println("cd is ", val)
            tmpstr := strings.Split(val, ",")
            if len(tmpstr) < 4 {
                return ContainerDevices{}, fmt.Errorf("pod annotation format error; information missing, please do not use nodeName field in task")
            }
            tmpdev.UUID = tmpstr[0]
            tmpdev.Type = tmpstr[1]
            devmem, _ := strconv.ParseInt(tmpstr[2], 10, 32)
            tmpdev.Usedmem = int32(devmem)
            devcores, _ := strconv.ParseInt(tmpstr[3], 10, 32)
            tmpdev.Usedcores = int32(devcores)
            contdev = append(contdev, tmpdev)
        }
    }
    klog.V(5).Infof("Finished decoding container devices. Total devices: %d", len(contdev))
    return contdev, nil
}