A quick storage performance test in k8s

Many storage types can be used in k8s, but middleware and database workloads put heavy demands on storage performance, so how should a storage class be chosen? Here performance is the yardstick: local disk (local-path) is compared against NFS, and the results turn out a little odd...

Storage class deployment

Spin up a k8s cluster quickly with terraform + ansible (details omitted).

[root@master-0 ~]# kubectl get nodes
NAME       STATUS   ROLES                  AGE     VERSION
master-0   Ready    control-plane,master   5m46s   v1.22.15
worker-0   Ready    <none>                 4m46s   v1.22.15
worker-1   Ready    <none>                 4m46s   v1.22.15
worker-2   Ready    <none>                 4m46s   v1.22.15
[root@master-0 ~]# kubectl get pods -A
NAMESPACE     NAME                               READY   STATUS    RESTARTS   AGE
kube-system   coredns-78fcd69978-5n6cx           1/1     Running   0          5m32s
kube-system   coredns-78fcd69978-cx868           1/1     Running   0          5m32s
kube-system   etcd-master-0                      1/1     Running   0          5m44s
kube-system   kube-apiserver-master-0            1/1     Running   0          5m44s
kube-system   kube-controller-manager-master-0   1/1     Running   0          5m44s
kube-system   kube-flannel-ds-8l72f              1/1     Running   0          4m48s
kube-system   kube-flannel-ds-9nr48              1/1     Running   0          5m25s
kube-system   kube-flannel-ds-lhv27              1/1     Running   0          4m48s
kube-system   kube-flannel-ds-mqb6x              1/1     Running   0          4m48s
kube-system   kube-proxy-5jklw                   1/1     Running   0          5m32s
kube-system   kube-proxy-6pwz2                   1/1     Running   0          4m48s
kube-system   kube-proxy-cm875                   1/1     Running   0          4m48s
kube-system   kube-proxy-mm7nf                   1/1     Running   0          4m48s
kube-system   kube-scheduler-master-0            1/1     Running   0          5m44s

NFS storage class provisioner deployment

Pick any machine to run the NFS server; here it goes on master-0.

[root@master-0 ~]# yum install -y rpcbind nfs-utils
[root@master-0 ~]# mkdir /nfsdata
[root@master-0 ~]# cat >/etc/exports<<-'EOF'
/nfsdata 172.16.0.0/24(rw,sync,insecure,no_root_squash)
EOF
[root@master-0 ~]# systemctl start nfs-server
[root@master-0 ~]# systemctl start rpcbind
[root@master-0 ~]# showmount -e
Export list for master-0:
/nfsdata 172.16.0.0/24
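
Before wiring this export into k8s, a quick sanity check from one of the workers does not hurt. A minimal sketch, assuming nfs-utils is installed on the worker and 172.16.0.87 is master-0's address (the same address used by the provisioner below):

# on any worker node: mount the export, write a file, clean up
[root@worker-0 ~]# yum install -y nfs-utils
[root@worker-0 ~]# mount -t nfs 172.16.0.87:/nfsdata /mnt
[root@worker-0 ~]# touch /mnt/client-test && ls /mnt
[root@worker-0 ~]# rm -f /mnt/client-test && umount /mnt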

Deploy the NFS storage class provisioner

# Create the RBAC manifest for the NFS provisioner
[root@master-0 nfs-subpath]# cat >rbac.yaml<<-'EOF'
apiVersion: v1
kind: ServiceAccount
metadata:
  name: nfs-client-provisioner
  namespace: kube-system
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: nfs-client-provisioner-runner
rules:
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["create", "update", "patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: run-nfs-client-provisioner
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    namespace: kube-system
roleRef:
  kind: ClusterRole
  name: nfs-client-provisioner-runner
  apiGroup: rbac.authorization.k8s.io
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner
  namespace: kube-system
rules:
  - apiGroups: [""]
    resources: ["endpoints"]
    verbs: ["get", "list", "watch", "create", "update", "patch"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner
  namespace: kube-system
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    namespace: kube-system
roleRef:
  kind: Role
  name: leader-locking-nfs-client-provisioner
  apiGroup: rbac.authorization.k8s.io
EOF
[root@master-0 nfs-subpath]# kubectl apply -f rbac.yaml
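
A quick check that the ServiceAccount and the bindings ended up where expected (names exactly as defined in rbac.yaml above):

[root@master-0 nfs-subpath]# kubectl -n kube-system get sa nfs-client-provisioner
[root@master-0 nfs-subpath]# kubectl get clusterrolebinding run-nfs-client-provisioner
[root@master-0 nfs-subpath]# kubectl -n kube-system get rolebinding leader-locking-nfs-client-provisioner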

# Create the NFS provisioner Deployment manifest
[root@master-0 nfs-subpath]# cat >nfs-provisisoner-deploy.yaml<<-'EOF'
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nfs-client-provisioner
  labels:
    app: nfs-client-provisioner
spec:
  replicas: 1
  strategy: 
    ## Upgrade strategy: recreate (delete then create; default is rolling update)
    type: Recreate                   
  selector:
    matchLabels:
      app: nfs-client-provisioner
  template:
    metadata:
      labels:
        app: nfs-client-provisioner
    spec:
      serviceAccountName: nfs-client-provisioner
      containers:
      - name: nfs-client-provisioner
        #image: gcr.io/k8s-staging-sig-storage/nfs-subdir-external-provisioner:v4.0.0
 
        image: swr.cn-east-2.myhuaweicloud.com/kuboard-dependency/nfs-subdir-external-provisioner:v4.0.2
        volumeMounts:
        - name: nfs-client-root
          mountPath: /persistentvolumes
        env:
 
        ## Provisioner name; the storageclass created later must reference this same name
        - name: PROVISIONER_NAME     
          value: nfs-client 
 
        ## NFS server address; must match the one configured under volumes
        - name: NFS_SERVER           
          value: 172.16.0.87
 
        ## NFS server data directory; must match the one configured under volumes
        - name: NFS_PATH             
          value: /nfsdata
        - name: ENABLE_LEADER_ELECTION
          value: "true"
      volumes:
      - name: nfs-client-root
        nfs:
 
          ## NFS server address
          server: 172.16.0.87
          ## NFS server data directory
          path: /nfsdata
EOF
[root@master-0 nfs-subpath]# kubectl apply -f nfs-provisisoner-deploy.yaml -n kube-system
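
Before moving on, make sure the provisioner pod is actually running; it carries the app=nfs-client-provisioner label from the Deployment above:

[root@master-0 nfs-subpath]# kubectl -n kube-system get pods -l app=nfs-client-provisioner
[root@master-0 nfs-subpath]# kubectl -n kube-system logs -l app=nfs-client-provisioner --tail=20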

# Create the NFS StorageClass
[root@master-0 nfs-subpath]# cat >nfs-storageclass.yaml<<-'EOF'
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: nfs-storage
  annotations:
    ## whether to make this the default storageclass
    storageclass.kubernetes.io/is-default-class: "false"  
## Dynamic provisioner name; must match the PROVISIONER_NAME set in the Deployment above
provisioner: nfs-client                                   
parameters:
  ## "false": data is discarded when the PVC is deleted; "true": the data is kept (archived)
  archiveOnDelete: "true"                                 
mountOptions: 
  ## use hard mounts
  - hard                                                  
  ## NFS protocol version; set this according to the NFS server version
  - nfsvers=4                 
EOF
[root@master-0 nfs-subpath]# kubectl apply -f nfs-storageclass.yaml

local-path storage class provisioner deployment

Create the local-path storage manifest

[root@master-0 local-path]# cat >local-path-storage.yaml<<-'EOF'
apiVersion: v1
kind: Namespace
metadata:
  name: local-path-storage

---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: local-path-provisioner-service-account
  namespace: local-path-storage

---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: local-path-provisioner-role
rules:
  - apiGroups: [ "" ]
    resources: [ "nodes", "persistentvolumeclaims", "configmaps" ]
    verbs: [ "get", "list", "watch" ]
  - apiGroups: [ "" ]
    resources: [ "endpoints", "persistentvolumes", "pods" ]
    verbs: [ "*" ]
  - apiGroups: [ "" ]
    resources: [ "events" ]
    verbs: [ "create", "patch" ]
  - apiGroups: [ "storage.k8s.io" ]
    resources: [ "storageclasses" ]
    verbs: [ "get", "list", "watch" ]

---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: local-path-provisioner-bind
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: local-path-provisioner-role
subjects:
  - kind: ServiceAccount
    name: local-path-provisioner-service-account
    namespace: local-path-storage

---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: local-path-provisioner
  namespace: local-path-storage
spec:
  replicas: 1
  selector:
    matchLabels:
      app: local-path-provisioner
  template:
    metadata:
      labels:
        app: local-path-provisioner
    spec:
      serviceAccountName: local-path-provisioner-service-account
      containers:
        - name: local-path-provisioner
          image: rancher/local-path-provisioner:master-head
          imagePullPolicy: IfNotPresent
          command:
            - local-path-provisioner
            - --debug
            - start
            - --config
            - /etc/config/config.json
          volumeMounts:
            - name: config-volume
              mountPath: /etc/config/
          env:
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
      volumes:
        - name: config-volume
          configMap:
            name: local-path-config

---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: local-path
provisioner: rancher.io/local-path
volumeBindingMode: WaitForFirstConsumer
reclaimPolicy: Delete

---
kind: ConfigMap
apiVersion: v1
metadata:
  name: local-path-config
  namespace: local-path-storage
data:
  config.json: |-
    {
            "nodePathMap":[
            {
                    "node":"DEFAULT_PATH_FOR_NON_LISTED_NODES",
                    "paths":["/opt/local-path-provisioner"]
            }
            ]
    }
  setup: |-
    #!/bin/sh
    set -eu
    mkdir -m 0777 -p "$VOL_DIR"
  teardown: |-
    #!/bin/sh
    set -eu
    rm -rf "$VOL_DIR"
  helperPod.yaml: |-
    apiVersion: v1
    kind: Pod
    metadata:
      name: helper-pod
    spec:
      containers:
      - name: helper-pod
        image: busybox
        imagePullPolicy: IfNotPresent
EOF
[root@master-0 local-path]# kubectl apply -f local-path-storage.yaml
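
Likewise, confirm the local-path provisioner is up in its own namespace before creating PVCs against it:

[root@master-0 local-path]# kubectl -n local-path-storage get pods -l app=local-path-provisioner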

fio test Jobs

Check the StorageClasses at this point

[root@master-0 local-path]# kubectl get sc
NAME          PROVISIONER             RECLAIMPOLICY   VOLUMEBINDINGMODE      ALLOWVOLUMEEXPANSION   AGE
local-path    rancher.io/local-path   Delete          WaitForFirstConsumer   false                  19m
nfs-storage   nfs-client              Delete          Immediate              false                  20m

Create the PVCs

# Create the nfs-pvc
[root@master-0 fio-perf]# cat >nfs-pvc.yaml<<-'EOF'
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: nfs-pvc
  annotations: 
    volume.beta.kubernetes.io/storage-class: "nfs-storage"
spec:
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 20G
EOF
kubectl apply -f nfs-pvc.yaml

# Create the local-path-pvc
[root@master-0 fio-perf]# cat >local-path-pvc.yaml<<-'EOF'
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: local-path-pvc
  annotations: 
    volume.beta.kubernetes.io/storage-class: "local-path"
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 20G
EOF
kubectl apply -f local-path-pvc.yaml

# Check the PVs and PVCs at this point
# local-path uses the WaitForFirstConsumer binding mode, so its PVC stays Pending; the PV is only created after a pod is scheduled onto a node
[root@master-0 fio-perf]# kubectl get pvc
NAME             STATUS    VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS   AGE
local-path-pvc   Pending                                                                        local-path     3m36s
nfs-pvc          Bound     pvc-a73cac9c-16bd-40ba-be41-0b956cf3f989   20G        RWX            nfs-storage    4m37s
[root@master-0 fio-perf]# kubectl get pv
NAME                                       CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS   CLAIM             STORAGECLASS   REASON   AGE
pvc-a73cac9c-16bd-40ba-be41-0b956cf3f989   20G        RWX            Delete           Bound    default/nfs-pvc   nfs-storage             4m40s
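
On the NFS server side, the provisioner should by now have created a per-PV subdirectory under /nfsdata (nfs-subdir-external-provisioner typically names it after the namespace, PVC and PV, e.g. something like default-nfs-pvc-pvc-<uid>); a quick look confirms dynamic provisioning is working:

[root@master-0 fio-perf]# ls /nfsdata/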

Create the fio test Jobs

# Create the NFS fio test Job
[root@master-0 fio-perf]# cat >nfs-fio-perf.yaml<<-'EOF'
---
apiVersion: batch/v1
kind: Job
metadata:
  name: nfs-fio-test
  namespace: default
spec:
  completions: 1    # number of completions required
  parallelism: 1    # number of pods to run in parallel
  template:
    spec:
      containers:
      - name: dbench
        image: openebs/perf-test:latest
        imagePullPolicy: IfNotPresent
        env:

          ## storage mount point on which testfiles are created

          - name: DBENCH_MOUNTPOINT
            value: /data

          ##########################################################
          # I/O PROFILE COVERAGE FOR SPECIFIC PERF CHARACTERISTICS #
          ##########################################################

          ## quick: {read, write} iops, {read, write} bw (all random)
          ## detailed: {quick}, {read, write} latency & mixed 75r:25w (all random), {read, write} bw (all sequential)  
          ## custom: a single user-defined job run with params specified in env 'CUSTOM'

          - name: DBENCH_TYPE
            value: custom

          ####################################################
          # STANDARD TUNABLES FOR DBENCH_TYPE=QUICK/DETAILED #
          ####################################################

          ## active data size for the bench test

          - name: FIO_SIZE
            value: 2G

          ## use un-buffered i/o (usually O_DIRECT)

          - name: FIO_DIRECT
            value: '1'

          ## no of independent threads doing the same i/o

          - name: FIO_NUMJOBS
            value: '1'

          ## space b/w starting offsets on a file in case of parallel file i/o

          - name: FIO_OFFSET_INCREMENT
            value: 250M

          ## nature of i/o to file. commonly supported: libaio, sync, 

          - name: FIO_IOENGINE
            value: libaio

          ## additional runtime options which will be appended to the above params
          ## ensure options used are not mutually exclusive w/ above params
          ## ex: '--group_reporting=1, stonewall, --ramptime=<val> etc..,
          
          - name: OPTIONS
            value: ''

          ####################################################
          # CUSTOM JOB SPEC FOR DBENCH_TYPE=CUSTOM           #
          ####################################################

          ## this will execute a single job run with the params specified 
          ## ex: '--bs=16k --iodepth=64 --ioengine=sync --size=500M --name=custom --readwrite=randrw --rwmixread=80 --random_distribution=pareto' 

          - name: CUSTOM
            value: '--bs=4K --iodepth=128 --direct=1 --sync=1 --ioengine=libaio --size=1G --name=custom --readwrite=randread --time_based --runtime=60  --output-format=normal'
          
        volumeMounts:
        - name: dbench-pv
          mountPath: /data
      restartPolicy: Never
      volumes:
      - name: dbench-pv

        persistentVolumeClaim:
          claimName: nfs-pvc
  backoffLimit: 4
EOF
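
The manifest above only writes the file; submitting the Job and following its output looks like this (the Job controller labels its pod with job-name=nfs-fio-test, and kubectl logs also accepts the job/ prefix):

[root@master-0 fio-perf]# kubectl apply -f nfs-fio-perf.yaml
[root@master-0 fio-perf]# kubectl get pods -l job-name=nfs-fio-test
[root@master-0 fio-perf]# kubectl logs -f job/nfs-fio-test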

# Create the local-path fio test Job
[root@master-0 fio-perf]# cat >local-path-fio-perf.yaml<<-'EOF'
---
apiVersion: batch/v1
kind: Job
metadata:
  name: local-path-fio-test
  namespace: default
spec:
  completions: 1    # number of completions required
  parallelism: 1    # number of pods to run in parallel
  template:
    spec:
      containers:
      - name: dbench
        image: openebs/perf-test:latest
        imagePullPolicy: IfNotPresent
        env:

          ## storage mount point on which testfiles are created

          - name: DBENCH_MOUNTPOINT
            value: /data

          ##########################################################
          # I/O PROFILE COVERAGE FOR SPECIFIC PERF CHARACTERISTICS #
          ##########################################################

          ## quick: {read, write} iops, {read, write} bw (all random)
          ## detailed: {quick}, {read, write} latency & mixed 75r:25w (all random), {read, write} bw (all sequential)  
          ## custom: a single user-defined job run with params specified in env 'CUSTOM'

          - name: DBENCH_TYPE
            value: custom

          ####################################################
          # STANDARD TUNABLES FOR DBENCH_TYPE=QUICK/DETAILED #
          ####################################################

          ## active data size for the bench test

          - name: FIO_SIZE
            value: 2G

          ## use un-buffered i/o (usually O_DIRECT)

          - name: FIO_DIRECT
            value: '1'

          ## no of independent threads doing the same i/o

          - name: FIO_NUMJOBS
            value: '1'

          ## space b/w starting offsets on a file in case of parallel file i/o

          - name: FIO_OFFSET_INCREMENT
            value: 250M

          ## nature of i/o to file. commonly supported: libaio, sync, 

          - name: FIO_IOENGINE
            value: libaio

          ## additional runtime options which will be appended to the above params
          ## ensure options used are not mutually exclusive w/ above params
          ## ex: '--group_reporting=1, stonewall, --ramptime=<val> etc..,
          
          - name: OPTIONS
            value: ''

          ####################################################
          # CUSTOM JOB SPEC FOR DBENCH_TYPE=CUSTOM           #
          ####################################################

          ## this will execute a single job run with the params specified 
          ## ex: '--bs=16k --iodepth=64 --ioengine=sync --size=500M --name=custom --readwrite=randrw --rwmixread=80 --random_distribution=pareto' 

          - name: CUSTOM
            value: '--bs=4K --iodepth=128 --direct=1 --sync=1 --ioengine=libaio --size=1G --name=custom --readwrite=randread --time_based --runtime=60  --output-format=normal'
          
        volumeMounts:
        - name: dbench-pv
          mountPath: /data
      restartPolicy: Never
      volumes:
      - name: dbench-pv
        persistentVolumeClaim:
          claimName: local-path-pvc
  backoffLimit: 4
EOF
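
Same procedure for the local-path Job. Note that both manifests above carry the random-read profile in CUSTOM (--readwrite=randread); for the random-write round shown first in the results, the Jobs were evidently re-created with --readwrite=randwrite and everything else unchanged (the write-test logs below show rw=randwrite).

[root@master-0 fio-perf]# kubectl apply -f local-path-fio-perf.yaml
[root@master-0 fio-perf]# kubectl logs -f job/local-path-fio-test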

fio test results

Random write IOPS test

NFS random write IOPS

[root@master-0 fio-perf]# kubectl logs nfs-fio-test-f9sqg 
Testing Custom I/O Profile..
custom: (g=0): rw=randwrite, bs=(R) 4096B-4096B, (W) 4096B-4096B, (T) 4096B-4096B, ioengine=libaio, iodepth=128
fio-3.13
Starting 1 process
custom: Laying out IO file (1 file / 1024MiB)

custom: (groupid=0, jobs=1): err= 0: pid=13: Sun Jul  2 11:08:45 2023
  write: IOPS=1423, BW=5693KiB/s (5830kB/s)(334MiB/60095msec); 0 zone resets
    slat (usec): min=2, max=88417, avg=10.68, stdev=302.38
    clat (msec): min=3, max=177, avg=89.92, stdev= 7.57
     lat (msec): min=3, max=177, avg=89.93, stdev= 7.57
    clat percentiles (msec):
     |  1.00th=[   64],  5.00th=[   81], 10.00th=[   84], 20.00th=[   87],
     | 30.00th=[   88], 40.00th=[   90], 50.00th=[   91], 60.00th=[   92],
     | 70.00th=[   93], 80.00th=[   94], 90.00th=[   96], 95.00th=[  100],
     | 99.00th=[  109], 99.50th=[  112], 99.90th=[  146], 99.95th=[  169],
     | 99.99th=[  176]
   bw (  KiB/s): min= 4680, max= 6664, per=99.99%, avg=5692.50, stdev=193.08, samples=120
   iops        : min= 1170, max= 1666, avg=1423.12, stdev=48.26, samples=120
  lat (msec)   : 4=0.01%, 10=0.02%, 20=0.05%, 50=0.14%, 100=95.71%
  lat (msec)   : 250=4.07%
  cpu          : usr=0.51%, sys=1.86%, ctx=32169, majf=0, minf=28
  IO depths    : 1=0.1%, 2=0.1%, 4=0.1%, 8=0.1%, 16=0.1%, 32=0.1%, >=64=99.9%
     submit    : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
     complete  : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.1%
     issued rwts: total=0,85532,0,0 short=0,0,0,0 dropped=0,0,0,0
     latency   : target=0, window=0, percentile=100.00%, depth=128

Run status group 0 (all jobs):
  WRITE: bw=5693KiB/s (5830kB/s), 5693KiB/s-5693KiB/s (5830kB/s-5830kB/s), io=334MiB (350MB), run=60095-60095msec

local-path random write IOPS

[root@master-0 fio-perf]# kubectl logs local-path-fio-test-vb87d
Testing Custom I/O Profile..
custom: (g=0): rw=randwrite, bs=(R) 4096B-4096B, (W) 4096B-4096B, (T) 4096B-4096B, ioengine=libaio, iodepth=128
fio-3.13
Starting 1 process
custom: Laying out IO file (1 file / 1024MiB)

custom: (groupid=0, jobs=1): err= 0: pid=13: Sun Jul  2 11:05:31 2023
  write: IOPS=2686, BW=10.5MiB/s (11.0MB/s)(630MiB/60047msec); 0 zone resets
    slat (usec): min=7, max=85068, avg=87.83, stdev=1752.91
    clat (msec): min=3, max=121, avg=47.55, stdev=39.54
     lat (msec): min=3, max=123, avg=47.64, stdev=39.54
    clat percentiles (msec):
     |  1.00th=[    7],  5.00th=[    8], 10.00th=[    9], 20.00th=[   10],
     | 30.00th=[   11], 40.00th=[   11], 50.00th=[   14], 60.00th=[   87],
     | 70.00th=[   89], 80.00th=[   90], 90.00th=[   92], 95.00th=[   93],
     | 99.00th=[   96], 99.50th=[   97], 99.90th=[  109], 99.95th=[  113],
     | 99.99th=[  122]
   bw (  KiB/s): min= 9664, max=12416, per=100.00%, avg=10746.88, stdev=319.82, samples=120
   iops        : min= 2416, max= 3104, avg=2686.73, stdev=79.94, samples=120
  lat (msec)   : 4=0.01%, 10=30.51%, 20=21.81%, 50=0.20%, 100=47.26%
  lat (msec)   : 250=0.22%
  cpu          : usr=0.60%, sys=4.07%, ctx=5624, majf=0, minf=30
  IO depths    : 1=0.1%, 2=0.1%, 4=0.1%, 8=0.1%, 16=0.1%, 32=0.1%, >=64=100.0%
     submit    : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
     complete  : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.1%
     issued rwts: total=0,161335,0,0 short=0,0,0,0 dropped=0,0,0,0
     latency   : target=0, window=0, percentile=100.00%, depth=128

Run status group 0 (all jobs):
  WRITE: bw=10.5MiB/s (11.0MB/s), 10.5MiB/s-10.5MiB/s (11.0MB/s-11.0MB/s), io=630MiB (661MB), run=60047-60047msec

Disk stats (read/write):
  vda: ios=0/175519, merge=0/149288, ticks=0/1748546, in_queue=1388281, util=81.03%

Random read IOPS test

NFS random read IOPS

[root@master-0 fio-perf]# kubectl logs nfs-fio-test-5v6ld
Testing Custom I/O Profile..
custom: (g=0): rw=randread, bs=(R) 4096B-4096B, (W) 4096B-4096B, (T) 4096B-4096B, ioengine=libaio, iodepth=128
fio-3.13
Starting 1 process
custom: Laying out IO file (1 file / 1024MiB)

custom: (groupid=0, jobs=1): err= 0: pid=13: Sun Jul  2 11:12:53 2023
  read: IOPS=25.1k, BW=98.2MiB/s (103MB/s)(5895MiB/60006msec)
    slat (nsec): min=1899, max=1481.6k, avg=9081.14, stdev=14854.28
    clat (usec): min=1535, max=245941, avg=5079.41, stdev=3249.77
     lat (usec): min=1542, max=245955, avg=5088.60, stdev=3249.73
    clat percentiles (usec):
     |  1.00th=[ 3752],  5.00th=[ 3916], 10.00th=[ 3982], 20.00th=[ 4113],
     | 30.00th=[ 4228], 40.00th=[ 4293], 50.00th=[ 4424], 60.00th=[ 4490],
     | 70.00th=[ 4686], 80.00th=[ 4883], 90.00th=[ 5342], 95.00th=[13698],
     | 99.00th=[14484], 99.50th=[14746], 99.90th=[15401], 99.95th=[16057],
     | 99.99th=[24249]
   bw (  KiB/s): min=52606, max=119576, per=99.99%, avg=100586.22, stdev=6918.35, samples=120
   iops        : min=13151, max=29894, avg=25146.52, stdev=1729.62, samples=120
  lat (msec)   : 2=0.01%, 4=10.79%, 10=82.95%, 20=6.24%, 50=0.01%
  lat (msec)   : 250=0.01%
  cpu          : usr=5.60%, sys=27.29%, ctx=751163, majf=0, minf=159
  IO depths    : 1=0.1%, 2=0.1%, 4=0.1%, 8=0.1%, 16=0.1%, 32=0.1%, >=64=100.0%
     submit    : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
     complete  : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.1%
     issued rwts: total=1509061,0,0,0 short=0,0,0,0 dropped=0,0,0,0
     latency   : target=0, window=0, percentile=100.00%, depth=128

Run status group 0 (all jobs):
   READ: bw=98.2MiB/s (103MB/s), 98.2MiB/s-98.2MiB/s (103MB/s-103MB/s), io=5895MiB (6181MB), run=60006-60006msec

local-path random read IOPS

[root@master-0 fio-perf]# kubectl logs local-path-fio-test-kjtxv
Testing Custom I/O Profile..
custom: (g=0): rw=randread, bs=(R) 4096B-4096B, (W) 4096B-4096B, (T) 4096B-4096B, ioengine=libaio, iodepth=128
fio-3.13
Starting 1 process
custom: Laying out IO file (1 file / 1024MiB)

custom: (groupid=0, jobs=1): err= 0: pid=14: Sun Jul  2 11:14:14 2023
  read: IOPS=2786, BW=10.9MiB/s (11.4MB/s)(653MiB/60017msec)
    slat (usec): min=2, max=213, avg= 8.91, stdev= 4.29
    clat (usec): min=339, max=121308, avg=45922.03, stdev=42781.75
     lat (usec): min=355, max=121317, avg=45931.05, stdev=42781.44
    clat percentiles (msec):
     |  1.00th=[    6],  5.00th=[    6], 10.00th=[    6], 20.00th=[    7],
     | 30.00th=[    7], 40.00th=[    7], 50.00th=[    8], 60.00th=[   91],
     | 70.00th=[   92], 80.00th=[   93], 90.00th=[   94], 95.00th=[   94],
     | 99.00th=[   97], 99.50th=[  100], 99.90th=[  109], 99.95th=[  113],
     | 99.99th=[  115]
   bw (  KiB/s): min=10496, max=13440, per=99.94%, avg=11139.37, stdev=264.07, samples=120
   iops        : min= 2624, max= 3360, avg=2784.83, stdev=66.02, samples=120
  lat (usec)   : 500=0.01%, 750=0.01%, 1000=0.01%
  lat (msec)   : 2=0.03%, 4=0.08%, 10=53.15%, 20=0.78%, 50=0.11%
  lat (msec)   : 100=45.51%, 250=0.33%
  cpu          : usr=0.84%, sys=4.23%, ctx=100153, majf=0, minf=161
  IO depths    : 1=0.1%, 2=0.1%, 4=0.1%, 8=0.1%, 16=0.1%, 32=0.1%, >=64=100.0%
     submit    : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
     complete  : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.1%
     issued rwts: total=167247,0,0,0 short=0,0,0,0 dropped=0,0,0,0
     latency   : target=0, window=0, percentile=100.00%, depth=128

Run status group 0 (all jobs):
   READ: bw=10.9MiB/s (11.4MB/s), 10.9MiB/s-10.9MiB/s (11.4MB/s-11.4MB/s), io=653MiB (685MB), run=60017-60017msec

Disk stats (read/write):
  vda: ios=166562/113, merge=0/149, ticks=7639868/3662, in_queue=6139953, util=80.18%

From the results above, local-path delivers roughly twice the random-write IOPS of NFS (~2686 vs ~1423), yet, absurdly, on random reads NFS is roughly ten times faster than local-path (~25.1k vs ~2786 IOPS)! Time to look at what is going on with dstat.

dstat on the NFS server during the NFS random-read IOPS test: disk read is somehow 0, yet net send sits around 100M...

----total-cpu-usage---- -dsk/total- -net/total- ---paging-- ---system--
usr sys idl wai hiq siq| read  writ| recv  send|  in   out | int   csw 
  3  30  61   0   0   5|   0    88k|5993k  109M|   0     0 |  75k  117k
  3  25  68   0   0   4|   0    64k|4674k   86M|   0     0 |  60k   95k
  1  29  64   0   0   5|   0     0 |5829k  106M|   0     0 |  73k  116k
  2  31  61   0   0   6|   0    36k|6116k  111M|   0     0 |  76k  121k
  1  30  63   0   0   6|   0     0 |5530k  101M|   0     0 |  67k  108k
  2  26  67   0   0   5|   0    48k|5276k   98M|   0     0 |  65k  106k
  1  26  69   0   0   4|   0     0 |5104k   89M|   0     0 |  65k  101k
  3  26  67   0   0   4|   0    32k|5038k   93M|   0     0 |  64k  101k
  2  32  61   0   0   6|   0    52k|6245k  108M|   0     0 |  78k  120k
  2  34  56   0   0   7|   0   120k|6574k  114M|   0     0 |  83k  127k
  3  29  62   0   0   6|   0  8192B|5555k   97M|   0     0 |  72k  110k
  3  30  61   0   0   6|   0    56k|5553k   98M|   0     0 |  68k  107k
  2  29  64   0   0   5|   0     0 |5435k   99M|   0     0 |  70k  110k
  2  30  63   0   0   5|   0    48k|5184k   89M|   0     0 |  69k  103k

dstat on the node hosting the local-path test pod during its random-read IOPS test: disk read holds at roughly 11M, and net send is negligible.

----total-cpu-usage---- -dsk/total- -net/total- ---paging-- ---system--
usr sys idl wai hiq siq| read  writ| recv  send|  in   out | int   csw 
  1   3  96   0   0   0|  11M    0 |4617B  830B|   0     0 |7747    10k
  1   2  97   0   0   0|  11M    0 |5751B   19k|   0     0 |7653    10k
  1   2  97   0   0   0|  11M    0 | 126B  424B|   0     0 |7266  9791 
  2   2  96   0   0   0|  11M    0 |5945B 7441B|   0     0 |7370    10k
  1   2  94   2   0   0|  11M   16k| 126B  424B|   0     0 |7111    10k
  1   2  97   0   0   0|  11M    0 |5046B 6569B|   0     0 |7075  9605 
  1   2  97   0   0   0|  11M    0 | 126B  424B|   0     0 |7342    10k
  1   2  97   0   0   0|  11M    0 |5088B 6603B|   0     0 |7366  9937 
  1   2  97   0   0   0|  11M    0 | 252B  544B|   0     0 |7714    10k
  1   2  97   0   0   0|  11M    0 |5046B 6561B|   0     0 |6812  9820 
  2   2  96   0   0   0|  11M    0 |  11k  887B|   0     0 |7844    11k
  1   2  97   0   0   0|  11M    0 |5050B 6569B|   0     0 |7199    10k
  1   2  97   0   0   0|  11M    0 | 168B  466B|   0     0 |7241  9826 
  2   2  96   0   0   0|  11M    0 |1596B 2159B|   0     0 |7846    11k
  1   2  97   0   0   0|  11M    0 |4993B 6403B|   0     0 |7601    11k
  1   1  97   0   0   0|  11M    0 | 126B  528B|   0     0 |7441    10k

Question: is the NFS server serving the fio test file out of its page cache, so the reads never touch its disk? The fio job sets direct=1, i.e. unbuffered I/O straight to the file, but that only bypasses the cache on the NFS client; a reasonable guess is that the reads are being satisfied from the NFS server's page cache instead. This hypothesis has not been verified yet...
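
One way to test that guess (not done in this run) would be to watch the NFS server's page cache and drop it between read rounds; if the next round's IOPS collapses and dstat starts showing real disk reads on the server, the server-side-cache theory holds. A rough sketch, run on the NFS server:

# check how much memory is sitting in buff/cache (the 1G test file easily fits)
[root@master-0 ~]# free -h
# flush page cache, dentries and inodes, then re-run the random-read Job
[root@master-0 ~]# sync && echo 3 > /proc/sys/vm/drop_caches
[root@master-0 ~]# free -h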
