Description: rados/rook/smoke/{0-distro/ubuntu_20.04 0-kubeadm 0-nvme-loop 1-rook 2-workload/none 3-final cluster/3-node k8s/1.21 net/host rook/master}

Log: http://qa-proxy.ceph.com/teuthology/teuthology-2022-03-06_07:01:04-rados-master-distro-default-smithi/6723981/teuthology.log

Failure Reason:

hit max job timeout (the job ran past teuthology's maximum allowed runtime and was terminated, which is why its status is recorded as dead rather than failed)

  • log_href: http://qa-proxy.ceph.com/teuthology/teuthology-2022-03-06_07:01:04-rados-master-distro-default-smithi/6723981/teuthology.log
  • archive_path: /home/teuthworker/archive/teuthology-2022-03-06_07:01:04-rados-master-distro-default-smithi/6723981
  • description: rados/rook/smoke/{0-distro/ubuntu_20.04 0-kubeadm 0-nvme-loop 1-rook 2-workload/none 3-final cluster/3-node k8s/1.21 net/host rook/master}
  • duration:
  • email: ceph-qa@ceph.io
  • failure_reason: hit max job timeout
  • flavor:
  • job_id: 6723981
  • kernel:
    • kdb: True
    • sha1: distro
  • last_in_suite: False
  • machine_type: smithi
  • name: teuthology-2022-03-06_07:01:04-rados-master-distro-default-smithi
  • nuke_on_error: True
  • os_type: ubuntu
  • os_version: 20.04
  • overrides:
    • admin_socket:
      • branch: master
    • ceph:
      • conf:
        • mgr:
          • debug mgr: 20
          • debug ms: 1
        • mon:
          • debug mon: 20
          • debug ms: 1
          • debug paxos: 20
        • osd:
          • debug ms: 1
          • debug osd: 20
      • flavor: default
      • log-ignorelist:
        • \(MDS_ALL_DOWN\)
        • \(MDS_UP_LESS_THAN_MAX\)
      • log-whitelist:
        • \(MDS_ALL_DOWN\)
        • \(MDS_UP_LESS_THAN_MAX\)
      • sha1: 6fe8e680a334072b1ba19b2de006862f98aedbae
      • spec:
        • mon:
          • allowMultiplePerNode: False
        • network:
          • provider: host
    • ceph-deploy:
      • conf:
        • client:
          • log file: /var/log/ceph/ceph-$name.$pid.log
        • mon:
          • osd default pool size: 2
    • install:
      • ceph:
        • flavor: default
        • sha1: 6fe8e680a334072b1ba19b2de006862f98aedbae
    • kernel:
      • hwe: True
    • kubeadm:
      • version: 1.21
    • rook:
      • rook_image: rook/ceph:master
    • workunit:
      • branch: master
      • sha1: b8fc4097cbbda97d59dca39eb86991e7f313985b
  • owner: scheduled_teuthology@teuthology
  • pid:
  • roles:
    • ['host.a', 'client.a']
    • ['host.b', 'client.b']
    • ['host.c', 'client.c']
  • sentry_event:
  • status: dead
  • success:
  • branch: master
  • seed:
  • sha1: 6fe8e680a334072b1ba19b2de006862f98aedbae
  • subset:
  • suite:
  • suite_branch: master
  • suite_path:
  • suite_relpath:
  • suite_repo:
  • suite_sha1: b8fc4097cbbda97d59dca39eb86991e7f313985b
  • targets:
    • smithi036.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDqKfLigCDS0mayF7uH5z6zyzRC68A8xyWgFgrnYWkdeV/RlE+yH9flW7x2oLwywDMMWg6XoaQWgi2Rr38KB0NPhbirRDnXWuAsCtA4EawVL7kSkGbDtUspHaOrcoOmo9S4fB+EQIWbc2Ur/KUfzSPyPoXfnglNIdNPOYa8mN92hZclQHoTFI5ReUG2vb0VMeML6eDKACV7hIJmgkMNl8BeEqNEmflQkt9oyNT3xsoxS8gRgpQmESgh++3avcupsROox33lCK+NpFd7ALpAkq3LfT4tRN3DSvCteDIou/Qd0AwAcnf9V6QQ25GnYk5hhlzzQOMLM4sbSRMGo3w6khbUx6N1Vb8Iz81UCcjMIBG0oLvwSqRZv7kYKT+LdX4K+o9SzSLnEYcePXmGtcLPfSFTNGEzFnecYH7Lefvo2LB1dddUmOLL3OzMCLcb3kCwrpvCHZld8gQxa55DCl02tw+zYltXbXkUnlqkIpmcige9oyDQmn+CDFBQ4t7R/nIdGkU=
    • smithi131.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQCsH/juAIu+a9tj4UjR06BCtfF7QZ3ReFNfErvexJAt8M+kx0TaYa1HEujWIJlMDb5RK7pIg5wSfXUUltWiMzClu7x7+SoKogqX6xKLT7rjlTsU/Khk80hUR7oM0A8Tu5M6hE/71ONZVRKyRfn+bz4Xl9gsxqVAX7Fx2LdJfy/2FM/MTgrX2F5em7TvXacx0LcH8FoKgcjbKEGOj38/WbYfEQPcfT6SKJcloulL1AJnfZtmFkQKDT/WD2wK8ttdG+L2mNsAIVWxO9rT44Kw0ZqSY6fAepvdHxPyLiI/vQjofl+/0uSAcybitxCbZhrUL2Di8EaQWOpViYZEpStsCRS1nthqv7IcrOJKHn+wANRyu6JexQ6QXoQyOauvTChlAc52PbteFoyqN50nTmRsSzxRmy3K9MxVhSad5xvMWgwHvUFcK+PR/5Tjec+Ct3q/ImKPEeTdeK4DPPZcRHRrt10mUsdi2xiSFP0AJq71H+1soJYnrDa3n+GvcnOvDUbqRNs=
    • smithi164.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQCvDk+L25aKnyN1RAXc4aqhc6RP/ne0XmgYO261vJy6j2gc0SHaMkA0gj3/SaaqqwliUluXqib0RzjzJod+EkkS682ne2xiHCUntOSzcAzJNqsIdyBRNjqZH2jhfXq9JUYrY7adbKnE7HDqLIedClGlMTijtQnsTEIgdMn7zIuGWYP4Xf0LdzR441aZvAqjTCkEYT65YGXlZ2NE3LKn40D2FisOX+GiiJj+XaAEmHVHtrADFAoOiIqcv9FgtO8a3//UFTf0iDPVi1QcPHe8g9YfQWRx3rW5ZTkzKo7VuEWN4UrNFEfBk2QwBxBwUWhw1WxfSSYGJ89jAIjFT5jrhiYWSWZz0fKXf4dH8rJO5C5jXneniIL0odsTfxi6ZvJXjL6xzCpbd8rN4blvWL0m1kydJNIoow+t473BzLSLQdYmWB+iqRcsHEtnZkAebUj94sxD9EsbPF+Y/BWdEAZyajxVNbwWAjs89hT0uQAI29Cls7ETzkaxwaBGmaC8+G8fWpc=
  • tasks:
    • kubeadm:
    • nvme_loop:
    • rook:
    • rook.shell:
      • ceph -s
      • ceph orch status
      • ceph orch ps
      • ceph orch ls
      • ceph orch device ls
    • exec:
      • host.a:
        • set -ex
          # (A standalone sketch of this toolbox pattern follows the job record below.)
          # Run a command inside the Rook toolbox deployment.
          toolbox() {
            kubectl -n rook-ceph exec -it deploy/rook-ceph-tools -- "$@"
          }
          # Record the current OSD count, then remove OSD 0.
          orig_num_osd=`toolbox ceph osd stat | cut -f3 -d " "`
          toolbox ceph orch osd rm 0 --force
          # Wait for the removed OSD's PersistentVolume to be released.
          removed_pv=""
          while [ "$removed_pv" = "" ]
          do
            removed_pv=`kubectl get pv | grep Released | cut -f1 -d " "`
            sleep 3s
          done
          # Zap the backing device so it can be reused.
          target_path=`kubectl get pv $removed_pv -o jsonpath='{.spec.local.path}'`
          host=`echo $removed_pv | cut -f1 -d "-"`
          toolbox ceph orch device zap $host $target_path --force
          # Poll the zap job. NB: a Job's status has no .succeeded.path
          # field, so this jsonpath never matches and the loop does not
          # actually track zap completion.
          zap_completion="0"
          while [ "$zap_completion" = "0" ]
          do
            zap_completion=`kubectl get job -n rook-ceph rook-ceph-device-zap -o jsonpath='{.status.succeeded.path}'`
            sleep 3s
          done
          # Release the PV and let the orchestrator recreate the OSD.
          kubectl patch pv $removed_pv -p '{"spec":{"claimRef": null}}'
          toolbox ceph orch apply osd --all-available-devices
          kubectl delete job rook-ceph-device-zap -n rook-ceph
          # Wait until the OSD count returns to its original value.
          num_osd="0"
          while [ "$num_osd" != "$orig_num_osd" ]
          do
            echo "waiting for osd to come back up"
            num_osd=`toolbox ceph osd stat | cut -f3 -d " "`
            sleep 30s
          done
    • rook.shell:
      • commands:
        • ceph orch status
        • ceph orch ps
        • ceph orch ls
        • ceph orch host ls
        • ceph orch device ls
        • ceph orch apply rgw foo
        • ceph orch apply mds foo
        • ceph orch apply rbd-mirror
        • ceph orch apply nfs foo --port 12777
  • teuthology_branch: master
  • verbose: True
  • pcp_grafana_url:
  • priority:
  • user:
  • queue:
  • posted: 2022-03-06 07:04:09
  • started: 2022-03-08 17:48:21
  • updated: 2022-03-09 00:28:41
  • runtime: 6:40:20
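
For reference, the toolbox pattern the exec task above relies on can be replayed by hand against a live Rook cluster. The sketch below is illustrative only: the rook-ceph namespace, the rook-ceph-tools deployment, and the ceph orch commands are taken verbatim from the job's task list; the wrapper script itself is not part of the job.

    #!/usr/bin/env bash
    # Minimal sketch: replay the job's smoke checks through the Rook
    # toolbox deployment, assuming a running Rook/Ceph cluster and a
    # kubeconfig pointing at it.
    set -ex

    # Same helper the job's exec step defines.
    toolbox() {
        kubectl -n rook-ceph exec -it deploy/rook-ceph-tools -- "$@"
    }

    # Health and orchestrator checks (first rook.shell task).
    toolbox ceph -s
    toolbox ceph orch status
    toolbox ceph orch ps
    toolbox ceph orch ls
    toolbox ceph orch device ls

    # Service deployments (final rook.shell task).
    toolbox ceph orch apply rgw foo
    toolbox ceph orch apply mds foo
    toolbox ceph orch apply rbd-mirror
    toolbox ceph orch apply nfs foo --port 12777

The toolbox deployment ships the Ceph CLI and the cluster's admin keyring, which is why these checks can run through kubectl exec without installing Ceph packages on the node driving the test.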