Columns: ID | Status | Ceph Branch | Suite Branch | Teuthology Branch | Machine | OS | Nodes | Description | Failure Reason

Ceph Branch: wip-vshankar-testing-20221215.112736 | Suite Branch: wip-vshankar-testing-20221215.112736 | Teuthology Branch: main | Machine: smithi | OS: ubuntu 20.04
fs/traceless/{begin/{0-install 1-ceph 2-logrotate} clusters/fixed-2-ucephfs conf/{client mds mon osd} distro/{ubuntu_latest} mount/fuse objectstore-ec/bluestore-bitmap overrides/{frag ignorelist_health ignorelist_wrongly_marked_down} tasks/cfuse_workunit_suites_fsstress traceless/50pc}

Ceph Branch: wip-vshankar-testing-20221215.112736 | Suite Branch: wip-vshankar-testing-20221215.112736 | Teuthology Branch: main | Machine: smithi | OS: rhel 8.6
fs/functional/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-4c-client conf/{client mds mon osd} distro/{ubuntu_latest} mount/kclient/{mount-syntax/{v2} mount overrides/{distro/stock/{k-stock rhel_8} ms-die-on-skipped}} objectstore/bluestore-ec-root overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} tasks/scrub}
Error reimaging machines: reached maximum tries (180) after waiting for 2700 seconds

Ceph Branch: wip-vshankar-testing-20221215.112736 | Suite Branch: wip-vshankar-testing-20221215.112736 | Teuthology Branch: main | Machine: smithi | OS: centos 8.stream
fs/upgrade/featureful_client/upgraded_client/{bluestore-bitmap centos_latest clusters/1-mds-2-client-micro conf/{client mds mon osd} overrides/{ignorelist_health ignorelist_wrongly_marked_down multimds/yes pg-warn} tasks/{0-octopus 1-client 2-upgrade 3-client-upgrade 4-compat_client 5-client-sanity}}

Ceph Branch: wip-vshankar-testing-20221215.112736 | Suite Branch: wip-vshankar-testing-20221215.112736 | Teuthology Branch: main | Machine: smithi | OS: centos 8.stream
fs/upgrade/mds_upgrade_sequence/{bluestore-bitmap centos_8.stream_container_tools conf/{client mds mon osd} fail_fs/yes overrides/{ignorelist_health ignorelist_wrongly_marked_down pg-warn syntax} roles tasks/{0-from/v16.2.4 1-volume/{0-create 1-ranks/2 2-allow_standby_replay/yes 3-inline/no 4-verify} 2-client 3-upgrade-mgr-staggered 4-config-upgrade/{fail_fs} 5-upgrade-with-workload 6-verify}}
Command failed on smithi026 with status 22: "sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v16.2.4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 6ddf9b80-80e0-11ed-97a3-001a4aab830c -e sha1=47f77a750cb9b297b7b8ab2c8bc3d22102dbeba8 -- bash -c 'ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1 --daemon-types mgr'"

Ceph Branch: wip-vshankar-testing-20221215.112736 | Suite Branch: wip-vshankar-testing-20221215.112736 | Teuthology Branch: main | Machine: smithi | OS: centos 8.stream
fs/upgrade/featureful_client/old_client/{bluestore-bitmap centos_latest clusters/1-mds-2-client-micro conf/{client mds mon osd} overrides/{ignorelist_health ignorelist_wrongly_marked_down multimds/no pg-warn} tasks/{0-octopus 1-client 2-upgrade 3-compat_client/quincy}}

Ceph Branch: wip-vshankar-testing-20221215.112736 | Suite Branch: wip-vshankar-testing-20221215.112736 | Teuthology Branch: main | Machine: smithi | OS: rhel 8.6
fs/thrash/multifs/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-2c-client conf/{client mds mon osd} distro/{rhel_8} mount/kclient/{mount-syntax/{v1} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} msgr-failures/none objectstore/bluestore-bitmap overrides/{frag ignorelist_health ignorelist_wrongly_marked_down multifs session_timeout thrashosds-health} tasks/{1-thrash/mds 2-workunit/iozone}}

Ceph Branch: wip-vshankar-testing-20221215.112736 | Suite Branch: wip-vshankar-testing-20221215.112736 | Teuthology Branch: main | Machine: smithi | OS: ubuntu 20.04
fs/functional/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-4c-client conf/{client mds mon osd} distro/{ubuntu_latest} mount/fuse objectstore/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} tasks/strays}
Test failure: test_hardlink_reintegration (tasks.cephfs.test_strays.TestStrays)

Ceph Branch: wip-vshankar-testing-20221215.112736 | Suite Branch: wip-vshankar-testing-20221215.112736 | Teuthology Branch: main | Machine: smithi | OS: rhel 8.6
fs/functional/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-4c-client conf/{client mds mon osd} distro/{rhel_8} mount/kclient/{mount-syntax/{v2} mount overrides/{distro/stock/{k-stock rhel_8} ms-die-on-skipped}} objectstore/bluestore-ec-root overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} tasks/test_journal_migration}

Ceph Branch: wip-vshankar-testing-20221215.112736 | Suite Branch: wip-vshankar-testing-20221215.112736 | Teuthology Branch: main | Machine: smithi | OS: rhel 8.6
fs/multifs/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-2c-client conf/{client mds mon osd} distro/{rhel_8} mount/fuse objectstore-ec/bluestore-comp-ec-root overrides/{ignorelist_health ignorelist_wrongly_marked_down mon-debug} tasks/multifs-auth}

Ceph Branch: wip-vshankar-testing-20221215.112736 | Suite Branch: wip-vshankar-testing-20221215.112736 | Teuthology Branch: main | Machine: smithi | OS: centos 8.stream
fs/thrash/multifs/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-2c-client conf/{client mds mon osd} distro/{centos_8} mount/kclient/{mount-syntax/{v2} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} msgr-failures/osd-mds-delay objectstore/bluestore-bitmap overrides/{frag ignorelist_health ignorelist_wrongly_marked_down multifs session_timeout thrashosds-health} tasks/{1-thrash/mds 2-workunit/cfuse_workunit_snaptests}}
Command failed (workunit test fs/snaps/snaptest-git-ceph.sh) on smithi080 with status 128: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=47f77a750cb9b297b7b8ab2c8bc3d22102dbeba8 TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 CEPH_MNT=/home/ubuntu/cephtest/mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 6h /home/ubuntu/cephtest/clone.client.0/qa/workunits/fs/snaps/snaptest-git-ceph.sh'

Ceph Branch: wip-vshankar-testing-20221215.112736 | Suite Branch: wip-vshankar-testing-20221215.112736 | Teuthology Branch: main | Machine: smithi | OS: centos 8.stream
fs/upgrade/nofs/{bluestore-bitmap centos_latest conf/{client mds mon osd} no-mds-cluster overrides/{ignorelist_health ignorelist_wrongly_marked_down pg-warn} tasks/{0-pacific 1-upgrade}}

Ceph Branch: wip-vshankar-testing-20221215.112736 | Suite Branch: wip-vshankar-testing-20221215.112736 | Teuthology Branch: main | Machine: smithi | OS: rhel 8.6
fs/workload/{0-rhel_8 begin/{0-install 1-cephadm 2-logrotate} clusters/1a11s-mds-1c-client-3node conf/{client mds mon osd} mount/kclient/{base/{mount-syntax/{v1} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} ms_mode/secure wsync/yes} objectstore-ec/bluestore-comp-ec-root omap_limit/10 overrides/{cephsqlite-timeout frag ignorelist_health ignorelist_wrongly_marked_down osd-asserts session_timeout} ranks/multi/{export-check n/3 replication/always} standby-replay tasks/{0-subvolume/{with-quota} 1-check-counter 2-scrub/no 3-snaps/yes 4-flush/no 5-workunit/suites/pjd}}
Command failed (workunit test suites/pjd.sh) on smithi038 with status 1: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=47f77a750cb9b297b7b8ab2c8bc3d22102dbeba8 TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 CEPH_MNT=/home/ubuntu/cephtest/mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 6h /home/ubuntu/cephtest/clone.client.0/qa/workunits/suites/pjd.sh'

Ceph Branch: wip-vshankar-testing-20221215.112736 | Suite Branch: wip-vshankar-testing-20221215.112736 | Teuthology Branch: main | Machine: smithi | OS: rhel 8.6
fs/functional/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-4c-client conf/{client mds mon osd} distro/{rhel_8} mount/kclient/{mount-syntax/{v2} mount overrides/{distro/stock/{k-stock rhel_8} ms-die-on-skipped}} objectstore/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} tasks/workunit/quota}

Ceph Branch: wip-vshankar-testing-20221215.112736 | Suite Branch: wip-vshankar-testing-20221215.112736 | Teuthology Branch: main | Machine: smithi | OS: centos 8.stream
fs/functional/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-4c-client conf/{client mds mon osd} distro/{centos_8} mount/fuse objectstore/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} tasks/admin}

Ceph Branch: wip-vshankar-testing-20221215.112736 | Suite Branch: wip-vshankar-testing-20221215.112736 | Teuthology Branch: main | Machine: smithi | OS: centos 8.stream
fs/upgrade/mds_upgrade_sequence/{bluestore-bitmap centos_8.stream_container_tools conf/{client mds mon osd} fail_fs/no overrides/{ignorelist_health ignorelist_wrongly_marked_down pg-warn syntax} roles tasks/{0-from/pacific 1-volume/{0-create 1-ranks/1 2-allow_standby_replay/no 3-inline/no 4-verify} 2-client 3-upgrade-mgr-staggered 4-config-upgrade/{fail_fs} 5-upgrade-with-workload 6-verify}}

Ceph Branch: wip-vshankar-testing-20221215.112736 | Suite Branch: wip-vshankar-testing-20221215.112736 | Teuthology Branch: main | Machine: smithi | OS: rhel 8.6
fs/multifs/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-2c-client conf/{client mds mon osd} distro/{rhel_8} mount/kclient/{mount-syntax/{v1} mount overrides/{distro/stock/{k-stock rhel_8} ms-die-on-skipped}} objectstore-ec/bluestore-comp overrides/{ignorelist_health ignorelist_wrongly_marked_down mon-debug} tasks/failover}

Ceph Branch: wip-vshankar-testing-20221215.112736 | Suite Branch: wip-vshankar-testing-20221215.112736 | Teuthology Branch: main | Machine: smithi | OS: ubuntu 20.04
fs/thrash/workloads/{begin/{0-install 1-ceph 2-logrotate} clusters/1a5s-mds-1c-client conf/{client mds mon osd} distro/{ubuntu_latest} mount/kclient/{mount-syntax/{v1} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} msgr-failures/none objectstore-ec/bluestore-comp overrides/{frag ignorelist_health ignorelist_wrongly_marked_down prefetch_dirfrags/no prefetch_entire_dirfrags/yes races session_timeout thrashosds-health} ranks/5 tasks/{1-thrash/mds 2-workunit/suites/ffsb}}

Ceph Branch: wip-vshankar-testing-20221215.112736 | Suite Branch: wip-vshankar-testing-20221215.112736 | Teuthology Branch: main | Machine: smithi | OS: centos 8.stream
fs/upgrade/mds_upgrade_sequence/{bluestore-bitmap centos_8.stream_container_tools conf/{client mds mon osd} fail_fs/yes overrides/{ignorelist_health ignorelist_wrongly_marked_down pg-warn syntax} roles tasks/{0-from/v16.2.4 1-volume/{0-create 1-ranks/2 2-allow_standby_replay/yes 3-inline/yes 4-verify} 2-client 3-upgrade-mgr-staggered 4-config-upgrade/{fail_fs} 5-upgrade-with-workload 6-verify}}
Command failed on smithi079 with status 22: "sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v16.2.4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 73b33714-80e1-11ed-97a3-001a4aab830c -e sha1=47f77a750cb9b297b7b8ab2c8bc3d22102dbeba8 -- bash -c 'ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1 --daemon-types mgr'"

Ceph Branch: wip-vshankar-testing-20221215.112736 | Suite Branch: wip-vshankar-testing-20221215.112736 | Teuthology Branch: main | Machine: smithi | OS: rhel 8.6
fs/workload/{0-rhel_8 begin/{0-install 1-cephadm 2-logrotate} clusters/1a11s-mds-1c-client-3node conf/{client mds mon osd} mount/kclient/{base/{mount-syntax/{v1} mount overrides/{distro/stock/{k-stock rhel_8} ms-die-on-skipped}} ms_mode/legacy wsync/no} objectstore-ec/bluestore-bitmap omap_limit/10000 overrides/{cephsqlite-timeout frag ignorelist_health ignorelist_wrongly_marked_down osd-asserts session_timeout} ranks/multi/{export-check n/3 replication/default} standby-replay tasks/{0-subvolume/{with-quota} 1-check-counter 2-scrub/yes 3-snaps/no 4-flush/yes 5-workunit/kernel_untar_build}}
Command failed on smithi074 with status 5: 'sudo systemctl stop ceph-2cfe6eec-80e1-11ed-97a3-001a4aab830c@mon.b'

Ceph Branch: wip-vshankar-testing-20221215.112736 | Suite Branch: wip-vshankar-testing-20221215.112736 | Teuthology Branch: main | Machine: smithi
fs/mixed-clients/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-2c-client conf/{client mds mon osd} kclient-overrides/{distro/testing/k-testing ms-die-on-skipped} objectstore-ec/bluestore-comp-ec-root overrides/{ignorelist_health ignorelist_wrongly_marked_down osd-asserts} tasks/kernel_cfuse_workunits_untarbuild_blogbench}
Ansible task 'cpan Amazon::S3' failed on smithi066.front.sepia.ceph.com and smithi092.front.sepia.ceph.com with rc 25 (non-zero return code). On both hosts cpan reported that the index fetched from apt-mirror.sepia.ceph.com was generated on Fri, 12 Feb 2016 and is 2504 days old, and that the checksum file /home/ubuntu/.cpan/sources/authors/id/T/TI/TIMA/CHECKSUMS does not contain the key 'cpan_path' for 'Amazon-S3-0.45.tar.gz'; the 'Proceed nonetheless?' prompt defaulted to 'no' and the install aborted.

Ceph Branch: wip-vshankar-testing-20221215.112736 | Suite Branch: wip-vshankar-testing-20221215.112736 | Teuthology Branch: main | Machine: smithi | OS: rhel 8.6
fs/verify/{begin/{0-install 1-ceph 2-logrotate} clusters/1a5s-mds-1c-client conf/{client mds mon osd} distro/{rhel_8} mount/kclient/{k-testing mount ms-die-on-skipped} objectstore-ec/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down mon-debug session_timeout} ranks/5 tasks/fsstress validater/valgrind}
Command failed (workunit test suites/fsstress.sh) on smithi052 with status 124: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=47f77a750cb9b297b7b8ab2c8bc3d22102dbeba8 TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 CEPH_MNT=/home/ubuntu/cephtest/mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 6h /home/ubuntu/cephtest/clone.client.0/qa/workunits/suites/fsstress.sh'

Ceph Branch: wip-vshankar-testing-20221215.112736 | Suite Branch: wip-vshankar-testing-20221215.112736 | Teuthology Branch: main | Machine: smithi | OS: ubuntu 20.04
fs/functional/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-4c-client conf/{client mds mon osd} distro/{ubuntu_latest} mount/fuse objectstore/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} tasks/client-limits}
Test failure: test_client_cache_size (tasks.cephfs.test_client_limits.TestClientLimits)

Ceph Branch: wip-vshankar-testing-20221215.112736 | Suite Branch: wip-vshankar-testing-20221215.112736 | Teuthology Branch: main | Machine: smithi | OS: ubuntu 20.04
fs/functional/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-4c-client conf/{client mds mon osd} distro/{ubuntu_latest} mount/kclient/{mount-syntax/{v2} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} objectstore/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} tasks/client-recovery}

Ceph Branch: wip-vshankar-testing-20221215.112736 | Suite Branch: wip-vshankar-testing-20221215.112736 | Teuthology Branch: main | Machine: smithi | OS: rhel 8.6
fs/workload/{0-rhel_8 begin/{0-install 1-cephadm 2-logrotate} clusters/1a11s-mds-1c-client-3node conf/{client mds mon osd} mount/fuse objectstore-ec/bluestore-comp omap_limit/10000 overrides/{cephsqlite-timeout frag ignorelist_health ignorelist_wrongly_marked_down osd-asserts session_timeout} ranks/multi/{export-check n/3 replication/always} standby-replay tasks/{0-subvolume/{with-namespace-isolated} 1-check-counter 2-scrub/yes 3-snaps/no 4-flush/yes 5-workunit/suites/blogbench}}
error during scrub thrashing: rank damage found: {'backtrace'}

Ceph Branch: wip-vshankar-testing-20221215.112736 | Suite Branch: wip-vshankar-testing-20221215.112736 | Teuthology Branch: main | Machine: smithi | OS: ubuntu 20.04
fs/functional/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-4c-client conf/{client mds mon osd} distro/{ubuntu_latest} mount/fuse objectstore/bluestore-ec-root overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} tasks/damage}
Test failure: test_open_ino_errors (tasks.cephfs.test_damage.TestDamage)

Ceph Branch: wip-vshankar-testing-20221215.112736 | Suite Branch: wip-vshankar-testing-20221215.112736 | Teuthology Branch: main | Machine: smithi | OS: ubuntu 20.04
fs/multiclient/{begin/{0-install 1-ceph 2-logrotate} clusters/1-mds-3-client conf/{client mds mon osd} distros/ubuntu_latest mount/fuse objectstore-ec/bluestore-ec-root overrides/{ignorelist_health ignorelist_wrongly_marked_down} tasks/mdtest}

Ceph Branch: wip-vshankar-testing-20221215.112736 | Suite Branch: wip-vshankar-testing-20221215.112736 | Teuthology Branch: main | Machine: smithi | OS: centos 8.stream
fs/upgrade/mds_upgrade_sequence/{bluestore-bitmap centos_8.stream_container_tools conf/{client mds mon osd} fail_fs/yes overrides/{ignorelist_health ignorelist_wrongly_marked_down pg-warn syntax} roles tasks/{0-from/v16.2.4 1-volume/{0-create 1-ranks/2 2-allow_standby_replay/no 3-inline/yes 4-verify} 2-client 3-upgrade-mgr-staggered 4-config-upgrade/{fail_fs} 5-upgrade-with-workload 6-verify}}
Command failed on smithi077 with status 22: "sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v16.2.4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 7ef2ec7c-80e2-11ed-97a3-001a4aab830c -e sha1=47f77a750cb9b297b7b8ab2c8bc3d22102dbeba8 -- bash -c 'ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1 --daemon-types mgr'"

Ceph Branch: wip-vshankar-testing-20221215.112736 | Suite Branch: wip-vshankar-testing-20221215.112736 | Teuthology Branch: main | Machine: smithi | OS: rhel 8.6
fs/workload/{0-rhel_8 begin/{0-install 1-cephadm 2-logrotate} clusters/1a11s-mds-1c-client-3node conf/{client mds mon osd} mount/kclient/{base/{mount-syntax/{v1} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} ms_mode/crc wsync/yes} objectstore-ec/bluestore-ec-root omap_limit/10 overrides/{cephsqlite-timeout frag ignorelist_health ignorelist_wrongly_marked_down osd-asserts session_timeout} ranks/multi/{export-check n/5 replication/default} standby-replay tasks/{0-subvolume/{no-subvolume} 1-check-counter 2-scrub/no 3-snaps/yes 4-flush/no 5-workunit/suites/dbench}}

Ceph Branch: wip-vshankar-testing-20221215.112736 | Suite Branch: wip-vshankar-testing-20221215.112736 | Teuthology Branch: main | Machine: smithi | OS: rhel 8.6
fs/functional/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-4c-client conf/{client mds mon osd} distro/{rhel_8} mount/kclient/{mount-syntax/{v2} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} objectstore/bluestore-ec-root overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} tasks/exports}

Ceph Branch: wip-vshankar-testing-20221215.112736 | Suite Branch: wip-vshankar-testing-20221215.112736 | Teuthology Branch: main | Machine: smithi | OS: centos 8.stream
fs/upgrade/featureful_client/upgraded_client/{bluestore-bitmap centos_latest clusters/1-mds-2-client-micro conf/{client mds mon osd} overrides/{ignorelist_health ignorelist_wrongly_marked_down multimds/yes pg-warn} tasks/{0-octopus 1-client 2-upgrade 3-client-upgrade 4-compat_client 5-client-sanity}}

Ceph Branch: wip-vshankar-testing-20221215.112736 | Suite Branch: wip-vshankar-testing-20221215.112736 | Teuthology Branch: main | Machine: smithi | OS: rhel 8.6
fs/mixed-clients/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-2c-client conf/{client mds mon osd} kclient-overrides/{distro/stock/{k-stock rhel_8} ms-die-on-skipped} objectstore-ec/bluestore-comp overrides/{ignorelist_health ignorelist_wrongly_marked_down osd-asserts} tasks/kernel_cfuse_workunits_dbench_iozone}

Ceph Branch: wip-vshankar-testing-20221215.112736 | Suite Branch: wip-vshankar-testing-20221215.112736 | Teuthology Branch: main | Machine: smithi | OS: rhel 8.6
fs/workload/{0-rhel_8 begin/{0-install 1-cephadm 2-logrotate} clusters/1a11s-mds-1c-client-3node conf/{client mds mon osd} mount/kclient/{base/{mount-syntax/{v1} mount overrides/{distro/stock/{k-stock rhel_8} ms-die-on-skipped}} ms_mode/legacy wsync/no} objectstore-ec/bluestore-bitmap omap_limit/10000 overrides/{cephsqlite-timeout frag ignorelist_health ignorelist_wrongly_marked_down osd-asserts session_timeout} ranks/multi/{export-check n/5 replication/always} standby-replay tasks/{0-subvolume/{with-namespace-isolated-and-quota} 1-check-counter 2-scrub/yes 3-snaps/no 4-flush/yes 5-workunit/suites/ffsb}}
error during scrub thrashing: rank damage found: {'backtrace'}

Ceph Branch: wip-vshankar-testing-20221215.112736 | Suite Branch: wip-vshankar-testing-20221215.112736 | Teuthology Branch: main | Machine: smithi | OS: rhel 8.6
fs/functional/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-4c-client conf/{client mds mon osd} distro/{ubuntu_latest} mount/kclient/{mount-syntax/{v2} mount overrides/{distro/stock/{k-stock rhel_8} ms-die-on-skipped}} objectstore/bluestore-ec-root overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} tasks/fragment}

Ceph Branch: wip-vshankar-testing-20221215.112736 | Suite Branch: wip-vshankar-testing-20221215.112736 | Teuthology Branch: main | Machine: smithi | OS: centos 8.stream
fs/upgrade/featureful_client/old_client/{bluestore-bitmap centos_latest clusters/1-mds-2-client-micro conf/{client mds mon osd} overrides/{ignorelist_health ignorelist_wrongly_marked_down multimds/no pg-warn} tasks/{0-octopus 1-client 2-upgrade 3-compat_client/quincy}}

Ceph Branch: wip-vshankar-testing-20221215.112736 | Suite Branch: wip-vshankar-testing-20221215.112736 | Teuthology Branch: main | Machine: smithi | OS: centos 8.stream
fs/upgrade/mds_upgrade_sequence/{bluestore-bitmap centos_8.stream_container_tools conf/{client mds mon osd} fail_fs/yes overrides/{ignorelist_health ignorelist_wrongly_marked_down pg-warn syntax} roles tasks/{0-from/v16.2.4 1-volume/{0-create 1-ranks/1 2-allow_standby_replay/yes 3-inline/yes 4-verify} 2-client 3-upgrade-mgr-staggered 4-config-upgrade/{fail_fs} 5-upgrade-with-workload 6-verify}}
Command failed on smithi084 with status 22: "sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v16.2.4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 52551668-80e2-11ed-97a3-001a4aab830c -e sha1=47f77a750cb9b297b7b8ab2c8bc3d22102dbeba8 -- bash -c 'ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1 --daemon-types mgr'"

Ceph Branch: wip-vshankar-testing-20221215.112736 | Suite Branch: wip-vshankar-testing-20221215.112736 | Teuthology Branch: main | Machine: smithi | OS: centos 8.stream
fs/mirror-ha/{begin/{0-install 1-ceph 2-logrotate} cephfs-mirror/three-per-cluster clients/{mirror} cluster/{1-node} objectstore/bluestore-bitmap overrides/{whitelist_health} supported-random-distro$/{centos_8} workloads/cephfs-mirror-ha-workunit}
reached maximum tries (50) after waiting for 300 seconds

Ceph Branch: wip-vshankar-testing-20221215.112736 | Suite Branch: wip-vshankar-testing-20221215.112736 | Teuthology Branch: main | Machine: smithi | OS: centos 8.stream
fs/valgrind/{begin/{0-install 1-ceph 2-logrotate} centos_latest debug mirror/{cephfs-mirror/one-per-cluster clients/mirror cluster/1-node mount/fuse overrides/whitelist_health tasks/mirror}}

Ceph Branch: wip-vshankar-testing-20221215.112736 | Suite Branch: wip-vshankar-testing-20221215.112736 | Teuthology Branch: main | Machine: smithi | OS: centos 8.stream
fs/upgrade/nofs/{bluestore-bitmap centos_latest conf/{client mds mon osd} no-mds-cluster overrides/{ignorelist_health ignorelist_wrongly_marked_down pg-warn} tasks/{0-pacific 1-upgrade}}

Ceph Branch: wip-vshankar-testing-20221215.112736 | Suite Branch: wip-vshankar-testing-20221215.112736 | Teuthology Branch: main | Machine: smithi
fs/mixed-clients/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-2c-client conf/{client mds mon osd} kclient-overrides/{distro/testing/k-testing ms-die-on-skipped} objectstore-ec/bluestore-ec-root overrides/{ignorelist_health ignorelist_wrongly_marked_down osd-asserts} tasks/kernel_cfuse_workunits_untarbuild_blogbench}
Ansible task 'cpan Amazon::S3' failed on smithi017.front.sepia.ceph.com and smithi039.front.sepia.ceph.com with rc 25 (non-zero return code). On both hosts cpan reported that the index fetched from apt-mirror.sepia.ceph.com was generated on Fri, 12 Feb 2016 and is 2504 days old, and that the checksum file /home/ubuntu/.cpan/sources/authors/id/T/TI/TIMA/CHECKSUMS does not contain the key 'cpan_path' for 'Amazon-S3-0.45.tar.gz'; the 'Proceed nonetheless?' prompt defaulted to 'no' and the install aborted.

Ceph Branch: wip-vshankar-testing-20221215.112736 | Suite Branch: wip-vshankar-testing-20221215.112736 | Teuthology Branch: main | Machine: smithi | OS: rhel 8.6
fs/functional/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-4c-client conf/{client mds mon osd} distro/{rhel_8} mount/fuse objectstore/bluestore-ec-root overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} tasks/pool-perm}
Test failure: test_pool_perm (tasks.cephfs.test_pool_perm.TestPoolPerm)

Ceph Branch: wip-vshankar-testing-20221215.112736 | Suite Branch: wip-vshankar-testing-20221215.112736 | Teuthology Branch: main | Machine: smithi | OS: centos 8.stream
fs/verify/{begin/{0-install 1-ceph 2-logrotate} clusters/1a5s-mds-1c-client conf/{client mds mon osd} distro/{centos_8} mount/kclient/{k-testing mount ms-die-on-skipped} objectstore-ec/bluestore-comp overrides/{ignorelist_health ignorelist_wrongly_marked_down mon-debug session_timeout} ranks/3 tasks/fsstress validater/valgrind}
Command failed (workunit test suites/fsstress.sh) on smithi110 with status 124: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=47f77a750cb9b297b7b8ab2c8bc3d22102dbeba8 TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 CEPH_MNT=/home/ubuntu/cephtest/mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 6h /home/ubuntu/cephtest/clone.client.0/qa/workunits/suites/fsstress.sh'

Ceph Branch: wip-vshankar-testing-20221215.112736 | Suite Branch: wip-vshankar-testing-20221215.112736 | Teuthology Branch: main | Machine: smithi | OS: ubuntu 20.04
fs/functional/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-4c-client conf/{client mds mon osd} distro/{ubuntu_latest} mount/fuse objectstore/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} tasks/scrub}
Test failure: test_scrub_backtrace (tasks.cephfs.test_scrub.TestScrub)

Ceph Branch: wip-vshankar-testing-20221215.112736 | Suite Branch: wip-vshankar-testing-20221215.112736 | Teuthology Branch: main | Machine: smithi | OS: centos 8.stream
fs/functional/{begin/{0-install 1-ceph 2-logrotate} clusters/1a3s-mds-4c-client conf/{client mds mon osd} distro/{centos_8} mount/kclient/{mount-syntax/{v1} mount overrides/{distro/testing/k-testing ms-die-on-skipped}} objectstore/bluestore-bitmap overrides/{ignorelist_health ignorelist_wrongly_marked_down no_client_pidfile} tasks/snap-schedule}

Ceph Branch: wip-vshankar-testing-20221215.112736 | Suite Branch: wip-vshankar-testing-20221215.112736 | Teuthology Branch: main | Machine: smithi | OS: rhel 8.6
fs/workload/{0-rhel_8 begin/{0-install 1-cephadm 2-logrotate} clusters/1a11s-mds-1c-client-3node conf/{client mds mon osd} mount/fuse objectstore-ec/bluestore-comp omap_limit/10000 overrides/{cephsqlite-timeout frag ignorelist_health ignorelist_wrongly_marked_down osd-asserts session_timeout} ranks/multi/{export-check n/3 replication/default} standby-replay tasks/{0-subvolume/{with-namespace-isolated-and-quota} 1-check-counter 2-scrub/yes 3-snaps/no 4-flush/yes 5-workunit/suites/iogen}}
error during scrub thrashing: rank damage found: {'backtrace'}

Ceph Branch: wip-vshankar-testing-20221215.112736 | Suite Branch: wip-vshankar-testing-20221215.112736 | Teuthology Branch: main | Machine: smithi | OS: rhel 8.6
fs/thrash/workloads/{begin/{0-install 1-ceph 2-logrotate} clusters/1a5s-mds-1c-client conf/{client mds mon osd} distro/{rhel_8} mount/kclient/{mount-syntax/{v2} mount overrides/{distro/stock/{k-stock rhel_8} ms-die-on-skipped}} msgr-failures/osd-mds-delay objectstore-ec/bluestore-comp-ec-root overrides/{frag ignorelist_health ignorelist_wrongly_marked_down prefetch_dirfrags/yes prefetch_entire_dirfrags/no races session_timeout thrashosds-health} ranks/3 tasks/{1-thrash/mds 2-workunit/fs/snaps}}
Command failed (workunit test fs/snaps/snaptest-git-ceph.sh) on smithi111 with status 128: 'mkdir -p -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && cd -- /home/ubuntu/cephtest/mnt.0/client.0/tmp && CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=47f77a750cb9b297b7b8ab2c8bc3d22102dbeba8 TESTDIR="/home/ubuntu/cephtest" CEPH_ARGS="--cluster ceph" CEPH_ID="0" PATH=$PATH:/usr/sbin CEPH_BASE=/home/ubuntu/cephtest/clone.client.0 CEPH_ROOT=/home/ubuntu/cephtest/clone.client.0 CEPH_MNT=/home/ubuntu/cephtest/mnt.0 adjust-ulimits ceph-coverage /home/ubuntu/cephtest/archive/coverage timeout 6h /home/ubuntu/cephtest/clone.client.0/qa/workunits/fs/snaps/snaptest-git-ceph.sh'

Ceph Branch: wip-vshankar-testing-20221215.112736 | Suite Branch: wip-vshankar-testing-20221215.112736 | Teuthology Branch: main | Machine: smithi | OS: centos 8.stream
fs/upgrade/mds_upgrade_sequence/{bluestore-bitmap centos_8.stream_container_tools conf/{client mds mon osd} fail_fs/yes overrides/{ignorelist_health ignorelist_wrongly_marked_down pg-warn syntax} roles tasks/{0-from/v16.2.4 1-volume/{0-create 1-ranks/1 2-allow_standby_replay/no 3-inline/yes 4-verify} 2-client 3-upgrade-mgr-staggered 4-config-upgrade/{fail_fs} 5-upgrade-with-workload 6-verify}}
Command failed on smithi132 with status 22: "sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v16.2.4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 62424866-80e2-11ed-97a3-001a4aab830c -e sha1=47f77a750cb9b297b7b8ab2c8bc3d22102dbeba8 -- bash -c 'ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1 --daemon-types mgr'"