From 96678554ea843b9bda3f20cd8fb2400b7cf2e114 Mon Sep 17 00:00:00 2001
From: Udo Waechter
Date: Fri, 13 Mar 2020 23:20:38 +0100
Subject: [PATCH] memory consumption limits

---
 ceph-deploy-ceph.log | 2073 ++++++++++++++++++++++++++++++++++++++++++
 ceph.conf            |   35 +-
 2 files changed, 2095 insertions(+), 13 deletions(-)

diff --git a/ceph-deploy-ceph.log b/ceph-deploy-ceph.log
index 27ee2fb..56b4784 100644
--- a/ceph-deploy-ceph.log
+++ b/ceph-deploy-ceph.log
@@ -3725,3 +3725,2076 @@
 [2020-03-07 17:14:05,315][ceph_deploy.gatherkeys][INFO ] keyring 'ceph.bootstrap-osd.keyring' already exists
 [2020-03-07 17:14:05,316][ceph_deploy.gatherkeys][INFO ] keyring 'ceph.bootstrap-rgw.keyring' already exists
 [2020-03-07 17:14:05,316][ceph_deploy.gatherkeys][INFO ] Destroy temp directory /tmp/tmpgu3Qkm
+[2020-03-07 17:15:24,900][ceph_deploy.conf][DEBUG ] found configuration file at: /home/do/.cephdeploy.conf
+[2020-03-07 17:15:24,900][ceph_deploy.cli][INFO ] Invoked (2.0.1): /usr/bin/ceph-deploy --username root rgw create riot01
+[2020-03-07 17:15:24,900][ceph_deploy.cli][INFO ] ceph-deploy options:
+[2020-03-07 17:15:24,900][ceph_deploy.cli][INFO ] username : root
+[2020-03-07 17:15:24,900][ceph_deploy.cli][INFO ] verbose : False
+[2020-03-07 17:15:24,900][ceph_deploy.cli][INFO ] rgw : [('riot01', 'rgw.riot01')]
+[2020-03-07 17:15:24,900][ceph_deploy.cli][INFO ] overwrite_conf : False
+[2020-03-07 17:15:24,900][ceph_deploy.cli][INFO ] subcommand : create
+[2020-03-07 17:15:24,900][ceph_deploy.cli][INFO ] quiet : False
+[2020-03-07 17:15:24,900][ceph_deploy.cli][INFO ] cd_conf :
+[2020-03-07 17:15:24,900][ceph_deploy.cli][INFO ] cluster : ceph
+[2020-03-07 17:15:24,901][ceph_deploy.cli][INFO ] func :
+[2020-03-07 17:15:24,901][ceph_deploy.cli][INFO ] ceph_conf : None
+[2020-03-07 17:15:24,901][ceph_deploy.cli][INFO ] default_release : False
+[2020-03-07 17:15:24,901][ceph_deploy.rgw][DEBUG ] Deploying rgw, cluster ceph hosts riot01:rgw.riot01
+[2020-03-07 17:15:26,394][riot01][DEBUG ] connected to host: root@riot01
+[2020-03-07 17:15:26,396][riot01][DEBUG ] detect platform information from remote host
+[2020-03-07 17:15:26,568][riot01][DEBUG ] detect machine type
+[2020-03-07 17:15:26,611][ceph_deploy.rgw][INFO ] Distro info: debian 10.3 buster
+[2020-03-07 17:15:26,612][ceph_deploy.rgw][DEBUG ] remote host will use systemd
+[2020-03-07 17:15:26,614][ceph_deploy.rgw][DEBUG ] deploying rgw bootstrap to riot01
+[2020-03-07 17:15:26,614][riot01][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf
+[2020-03-07 17:15:26,643][riot01][WARNING] rgw keyring does not exist yet, creating one
+[2020-03-07 17:15:26,644][riot01][DEBUG ] create a keyring file
+[2020-03-07 17:15:26,661][riot01][DEBUG ] create path recursively if it doesn't exist
+[2020-03-07 17:15:26,679][riot01][INFO ] Running command: ceph --cluster ceph --name client.bootstrap-rgw --keyring /var/lib/ceph/bootstrap-rgw/ceph.keyring auth get-or-create client.rgw.riot01 osd allow rwx mon allow rw -o /var/lib/ceph/radosgw/ceph-rgw.riot01/keyring
+[2020-03-07 17:15:29,069][riot01][ERROR ] Traceback (most recent call last):
+[2020-03-07 17:15:29,069][riot01][ERROR ]   File "/usr/bin/ceph", line 1266, in <module>
+[2020-03-07 17:15:29,070][riot01][ERROR ]     retval = main()
+[2020-03-07 17:15:29,070][riot01][ERROR ]   File "/usr/bin/ceph", line 979, in main
+[2020-03-07 17:15:29,071][riot01][ERROR ]     conffile=conffile)
+[2020-03-07 17:15:29,071][riot01][ERROR ]   File "/usr/lib/python3/dist-packages/ceph_argparse.py", line 1319, in run_in_thread
+[2020-03-07 17:15:29,071][riot01][ERROR ]     raise Exception("timed out")
+[2020-03-07 17:15:29,072][riot01][ERROR ] Exception: timed out
+[2020-03-07 17:15:29,072][riot01][ERROR ] exit code from command was: 1
+[2020-03-07 17:15:29,072][ceph_deploy.rgw][ERROR ] could not create rgw
+[2020-03-07 17:15:29,072][ceph_deploy][ERROR ] GenericError: Failed to create 1 RGWs
+
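The `Exception: timed out` above is raised by the ceph CLI on riot01 when it cannot reach any monitor within the client timeout, so the `auth get-or-create` for the rgw keyring never ran. A quick way to confirm that theory before retrying (a sketch; host names taken from this log):

    ssh root@riot01 'ceph --cluster ceph --connect-timeout 10 -s'
    ssh root@riot01 'grep -E "^\s*mon" /etc/ceph/ceph.conf'

If `ceph -s` also times out, the problem is the monitor addresses in ceph.conf (or the monitors themselves), not the rgw deployment.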
+[2020-03-07 17:18:23,653][ceph_deploy.conf][DEBUG ] found configuration file at: /home/do/.cephdeploy.conf
+[2020-03-07 17:18:23,653][ceph_deploy.cli][INFO ] Invoked (2.0.1): /usr/bin/ceph-deploy --username root purge riot01
+[2020-03-07 17:18:23,653][ceph_deploy.cli][INFO ] ceph-deploy options:
+[2020-03-07 17:18:23,653][ceph_deploy.cli][INFO ] username : root
+[2020-03-07 17:18:23,653][ceph_deploy.cli][INFO ] verbose : False
+[2020-03-07 17:18:23,653][ceph_deploy.cli][INFO ] overwrite_conf : False
+[2020-03-07 17:18:23,653][ceph_deploy.cli][INFO ] quiet : False
+[2020-03-07 17:18:23,653][ceph_deploy.cli][INFO ] cd_conf :
+[2020-03-07 17:18:23,653][ceph_deploy.cli][INFO ] cluster : ceph
+[2020-03-07 17:18:23,653][ceph_deploy.cli][INFO ] host : ['riot01']
+[2020-03-07 17:18:23,653][ceph_deploy.cli][INFO ] func :
+[2020-03-07 17:18:23,653][ceph_deploy.cli][INFO ] ceph_conf : None
+[2020-03-07 17:18:23,653][ceph_deploy.cli][INFO ] default_release : False
+[2020-03-07 17:18:23,653][ceph_deploy.install][INFO ] note that some dependencies *will not* be removed because they can cause issues with qemu-kvm
+[2020-03-07 17:18:23,653][ceph_deploy.install][INFO ] like: librbd1 and librados2
+[2020-03-07 17:18:23,653][ceph_deploy.install][DEBUG ] Purging on cluster ceph hosts riot01
+[2020-03-07 17:18:23,653][ceph_deploy.install][DEBUG ] Detecting platform for host riot01 ...
+[2020-03-07 17:18:25,165][riot01][DEBUG ] connected to host: root@riot01
+[2020-03-07 17:18:25,166][riot01][DEBUG ] detect platform information from remote host
+[2020-03-07 17:18:25,333][riot01][DEBUG ] detect machine type
+[2020-03-07 17:18:25,372][ceph_deploy.install][INFO ] Distro info: debian 10.3 buster
+[2020-03-07 17:18:25,373][riot01][INFO ] Purging Ceph on riot01
+[2020-03-07 17:18:25,383][riot01][INFO ] Running command: env DEBIAN_FRONTEND=noninteractive DEBIAN_PRIORITY=critical apt-get --assume-yes -q -f --force-yes remove --purge ceph ceph-mds ceph-common ceph-fs-common radosgw
+[2020-03-07 17:18:25,917][riot01][DEBUG ] Reading package lists...
+[2020-03-07 17:18:29,750][riot01][DEBUG ] Building dependency tree...
+[2020-03-07 17:18:29,750][riot01][DEBUG ] Reading state information...
+[2020-03-07 17:18:31,524][riot01][DEBUG ] Package 'ceph-fs-common' is not installed, so not removed
+[2020-03-07 17:18:31,524][riot01][DEBUG ] The following packages were automatically installed and are no longer required:
+[2020-03-07 17:18:31,525][riot01][DEBUG ] cryptsetup-bin dmeventd gdisk hdparm libaio1 libbabeltrace1
+[2020-03-07 17:18:31,525][riot01][DEBUG ] libboost-date-time1.67.0 libboost-program-options1.67.0
+[2020-03-07 17:18:31,525][riot01][DEBUG ] libboost-python1.67.0 libboost-random1.67.0 libboost-regex1.67.0 libcephfs2
+[2020-03-07 17:18:31,526][riot01][DEBUG ] libdevmapper-event1.02.1 libdw1 libfuse2 libgoogle-perftools4 libleveldb1d
+[2020-03-07 17:18:31,526][riot01][DEBUG ] liblvm2cmd2.03 liboath0 libpython3.7 libradosstriper1 librbd1 libreadline5
+[2020-03-07 17:18:31,530][riot01][DEBUG ] libtcmalloc-minimal4 lvm2 python-pastedeploy-tpl python3-asn1crypto
+[2020-03-07 17:18:31,534][riot01][DEBUG ] python3-bcrypt python3-bs4 python3-ceph-argparse python3-cephfs
+[2020-03-07 17:18:31,538][riot01][DEBUG ] python3-certifi python3-cffi-backend python3-chardet python3-cherrypy3
+[2020-03-07 17:18:31,540][riot01][DEBUG ] python3-cryptography python3-idna python3-jwt python3-logutils python3-mako
+[2020-03-07 17:18:31,548][riot01][DEBUG ] python3-markupsafe python3-openssl python3-paste python3-pastedeploy
+[2020-03-07 17:18:31,548][riot01][DEBUG ] python3-pecan python3-pkg-resources python3-prettytable python3-rados
+[2020-03-07 17:18:31,550][riot01][DEBUG ] python3-rbd python3-requests python3-simplegeneric python3-singledispatch
+[2020-03-07 17:18:31,552][riot01][DEBUG ] python3-soupsieve python3-tempita python3-urllib3 python3-waitress
+[2020-03-07 17:18:31,568][riot01][DEBUG ] python3-webob python3-webtest python3-werkzeug xfsprogs
+[2020-03-07 17:18:31,569][riot01][DEBUG ] Use 'apt autoremove' to remove them.
+[2020-03-07 17:18:32,135][riot01][DEBUG ] The following packages will be REMOVED:
+[2020-03-07 17:18:32,140][riot01][DEBUG ] ceph* ceph-base* ceph-common* ceph-mds* ceph-mgr* ceph-mon* ceph-osd*
+[2020-03-07 17:18:32,144][riot01][DEBUG ] radosgw*
+[2020-03-07 17:18:36,230][riot01][DEBUG ] [master 7f28d9f] saving uncommitted changes in /etc prior to apt run
+[2020-03-07 17:18:36,230][riot01][DEBUG ] 3 files changed, 29 insertions(+)
+[2020-03-07 17:18:36,230][riot01][DEBUG ] create mode 100644 ceph/ceph.conf
+[2020-03-07 17:18:36,230][riot01][DEBUG ] create mode 100644 ceph/tmpMN9TB6
+[2020-03-07 17:18:38,305][riot01][DEBUG ] 0 upgraded, 0 newly installed, 8 to remove and 3 not upgraded.
+[2020-03-07 17:18:38,305][riot01][DEBUG ] After this operation, 168 MB disk space will be freed.
+[2020-03-07 17:18:38,671][riot01][DEBUG ] (Reading database ... (Reading database ... 5% (Reading database ... 10% (Reading database ... 15% (Reading database ... 20% (Reading database ... 25% (Reading database ... 30% (Reading database ... 35% (Reading database ... 40% (Reading database ... 45% (Reading database ... 50% (Reading database ... 55% (Reading database ... 60% (Reading database ... 65% (Reading database ... 70% (Reading database ... 75% (Reading database ... 80% (Reading database ... 85% (Reading database ... 90% (Reading database ... 95% (Reading database ... 100% (Reading database ... 58434 files and directories currently installed.)
+[2020-03-07 17:18:38,688][riot01][DEBUG ] Removing ceph-mds (14.2.7-1~bpo10+1) ...
+[2020-03-07 17:18:43,829][riot01][DEBUG ] Removing ceph (14.2.7-1~bpo10+1) ...
+[2020-03-07 17:18:44,194][riot01][DEBUG ] Removing ceph-mgr (14.2.7-1~bpo10+1) ...
+[2020-03-07 17:18:49,386][riot01][DEBUG ] Removing ceph-osd (14.2.7-1~bpo10+1) ...
+[2020-03-07 17:18:55,921][riot01][DEBUG ] Removing radosgw (14.2.7-1~bpo10+1) ...
+[2020-03-07 17:19:03,967][riot01][DEBUG ] Removing ceph-mon (14.2.7-1~bpo10+1) ...
+[2020-03-07 17:19:09,251][riot01][DEBUG ] Removing ceph-base (14.2.7-1~bpo10+1) ...
+[2020-03-07 17:19:14,535][riot01][DEBUG ] Removing ceph-common (14.2.7-1~bpo10+1) ...
+[2020-03-07 17:19:20,521][riot01][DEBUG ] Processing triggers for libc-bin (2.28-10) ...
+[2020-03-07 17:19:23,245][riot01][DEBUG ] (Reading database ... (Reading database ... 5% (Reading database ... 10% (Reading database ... 15% (Reading database ... 20% (Reading database ... 25% (Reading database ... 30% (Reading database ... 35% (Reading database ... 40% (Reading database ... 45% (Reading database ... 50% (Reading database ... 55% (Reading database ... 60% (Reading database ... 65% (Reading database ... 70% (Reading database ... 75% (Reading database ... 80% (Reading database ... 85% (Reading database ... 90% (Reading database ... 95% (Reading database ... 100% (Reading database ... 58012 files and directories currently installed.)
+[2020-03-07 17:19:23,261][riot01][DEBUG ] Purging configuration files for radosgw (14.2.7-1~bpo10+1) ...
+[2020-03-07 17:19:34,718][riot01][DEBUG ] dpkg: warning: while removing radosgw, directory '/var/lib/ceph/radosgw' not empty so not removed
+[2020-03-07 17:19:34,833][riot01][DEBUG ] Purging configuration files for ceph-mon (14.2.7-1~bpo10+1) ...
+[2020-03-07 17:19:43,277][riot01][DEBUG ] Purging configuration files for ceph-base (14.2.7-1~bpo10+1) ...
+[2020-03-07 17:19:50,612][riot01][DEBUG ] dpkg: warning: while removing ceph-base, directory '/var/lib/ceph/bootstrap-rgw' not empty so not removed
+[2020-03-07 17:19:50,726][riot01][DEBUG ] Purging configuration files for ceph-mds (14.2.7-1~bpo10+1) ...
+[2020-03-07 17:19:58,067][riot01][DEBUG ] Purging configuration files for ceph (14.2.7-1~bpo10+1) ...
+[2020-03-07 17:19:58,382][riot01][DEBUG ] Purging configuration files for ceph-mgr (14.2.7-1~bpo10+1) ...
+[2020-03-07 17:20:05,673][riot01][DEBUG ] Purging configuration files for ceph-common (14.2.7-1~bpo10+1) ...
+[2020-03-07 17:20:15,424][riot01][DEBUG ] Purging configuration files for ceph-osd (14.2.7-1~bpo10+1) ...
+[2020-03-07 17:20:23,521][riot01][DEBUG ] Processing triggers for systemd (241-7~deb10u3) ...
+[2020-03-07 17:20:33,323][riot01][DEBUG ] [master 0e8f3d6] committing changes in /etc made by "apt-get --assume-yes -q -f --force-yes remove --purge ceph ceph-mds ceph-common ceph-fs-common radosgw"
+[2020-03-07 17:20:33,323][riot01][DEBUG ] 40 files changed, 2 insertions(+), 300 deletions(-)
+[2020-03-07 17:20:33,324][riot01][DEBUG ] delete mode 100644 ceph/ceph.conf
+[2020-03-07 17:20:33,327][riot01][DEBUG ] delete mode 100644 ceph/rbdmap
+[2020-03-07 17:20:33,336][riot01][DEBUG ] delete mode 100644 ceph/tmpMN9TB6
+[2020-03-07 17:20:33,337][riot01][DEBUG ] delete mode 100644 ceph/tmpg_aayV
+[2020-03-07 17:20:33,339][riot01][DEBUG ] delete mode 100644 default/ceph
+[2020-03-07 17:20:33,341][riot01][DEBUG ] delete mode 100755 init.d/radosgw
+[2020-03-07 17:20:33,345][riot01][DEBUG ] delete mode 100755 init.d/rbdmap
+[2020-03-07 17:20:33,345][riot01][DEBUG ] delete mode 100644 logrotate.d/ceph-common
+[2020-03-07 17:20:33,347][riot01][DEBUG ] delete mode 120000 rc0.d/K01radosgw
+[2020-03-07 17:20:33,351][riot01][DEBUG ] delete mode 120000 rc0.d/K01rbdmap
+[2020-03-07 17:20:33,353][riot01][DEBUG ] delete mode 120000 rc1.d/K01radosgw
+[2020-03-07 17:20:33,369][riot01][DEBUG ] delete mode 120000 rc1.d/K01rbdmap
+[2020-03-07 17:20:33,369][riot01][DEBUG ] delete mode 120000 rc2.d/S01radosgw
+[2020-03-07 17:20:33,370][riot01][DEBUG ] delete mode 120000 rc2.d/S01rbdmap
+[2020-03-07 17:20:33,378][riot01][DEBUG ] delete mode 120000 rc3.d/S01radosgw
+[2020-03-07 17:20:33,378][riot01][DEBUG ] delete mode 120000 rc3.d/S01rbdmap
+[2020-03-07 17:20:33,379][riot01][DEBUG ] delete mode 120000 rc4.d/S01radosgw
+[2020-03-07 17:20:33,383][riot01][DEBUG ] delete mode 120000 rc4.d/S01rbdmap
+[2020-03-07 17:20:33,384][riot01][DEBUG ] delete mode 120000 rc5.d/S01radosgw
+[2020-03-07 17:20:33,385][riot01][DEBUG ] delete mode 120000 rc5.d/S01rbdmap
+[2020-03-07 17:20:33,387][riot01][DEBUG ] delete mode 120000 rc6.d/K01radosgw
+[2020-03-07 17:20:33,403][riot01][DEBUG ] delete mode 120000 rc6.d/K01rbdmap
+[2020-03-07 17:20:33,404][riot01][DEBUG ] delete mode 100644 sudoers.d/ceph-osd-smartctl
+[2020-03-07 17:20:33,404][riot01][DEBUG ] delete mode 100644 sysctl.d/30-ceph-osd.conf
+[2020-03-07 17:20:33,412][riot01][DEBUG ] delete mode 120000 systemd/system/ceph.target.wants/ceph-crash.service
+[2020-03-07 17:20:33,413][riot01][DEBUG ] delete mode 120000 systemd/system/ceph.target.wants/ceph-mds.target
+[2020-03-07 17:20:33,416][riot01][DEBUG ] delete mode 120000 systemd/system/ceph.target.wants/ceph-mgr.target
+[2020-03-07 17:20:33,417][riot01][DEBUG ] delete mode 120000 systemd/system/ceph.target.wants/ceph-mon.target
+[2020-03-07 17:20:33,433][riot01][DEBUG ] delete mode 120000 systemd/system/ceph.target.wants/ceph-osd.target
+[2020-03-07 17:20:33,434][riot01][DEBUG ] delete mode 120000 systemd/system/ceph.target.wants/ceph-radosgw.target
+[2020-03-07 17:20:33,435][riot01][DEBUG ] delete mode 120000 systemd/system/multi-user.target.wants/ceph-mds.target
+[2020-03-07 17:20:33,435][riot01][DEBUG ] delete mode 120000 systemd/system/multi-user.target.wants/ceph-mgr.target
+[2020-03-07 17:20:33,435][riot01][DEBUG ] delete mode 120000 systemd/system/multi-user.target.wants/ceph-mon.target
+[2020-03-07 17:20:33,439][riot01][DEBUG ] delete mode 120000 systemd/system/multi-user.target.wants/ceph-osd.target
+[2020-03-07 17:20:33,441][riot01][DEBUG ] delete mode 120000 systemd/system/multi-user.target.wants/ceph-radosgw.target
+[2020-03-07 17:20:33,457][riot01][DEBUG ] delete mode 120000 systemd/system/multi-user.target.wants/ceph.target
+[2020-03-07 17:20:33,458][riot01][DEBUG ] delete mode 120000 systemd/system/multi-user.target.wants/rbdmap.service
+[2020-03-07 17:20:33,672][riot01][WARNING] W: --force-yes is deprecated, use one of the options starting with --allow instead.
+[2020-03-07 17:20:48,971][ceph_deploy.conf][DEBUG ] found configuration file at: /home/do/.cephdeploy.conf
+[2020-03-07 17:20:48,971][ceph_deploy.cli][INFO ] Invoked (2.0.1): /usr/bin/ceph-deploy --username root purgedata riot01
+[2020-03-07 17:20:48,971][ceph_deploy.cli][INFO ] ceph-deploy options:
+[2020-03-07 17:20:48,971][ceph_deploy.cli][INFO ] username : root
+[2020-03-07 17:20:48,971][ceph_deploy.cli][INFO ] verbose : False
+[2020-03-07 17:20:48,971][ceph_deploy.cli][INFO ] overwrite_conf : False
+[2020-03-07 17:20:48,971][ceph_deploy.cli][INFO ] quiet : False
+[2020-03-07 17:20:48,971][ceph_deploy.cli][INFO ] cd_conf :
+[2020-03-07 17:20:48,971][ceph_deploy.cli][INFO ] cluster : ceph
+[2020-03-07 17:20:48,971][ceph_deploy.cli][INFO ] host : ['riot01']
+[2020-03-07 17:20:48,971][ceph_deploy.cli][INFO ] func :
+[2020-03-07 17:20:48,971][ceph_deploy.cli][INFO ] ceph_conf : None
+[2020-03-07 17:20:48,971][ceph_deploy.cli][INFO ] default_release : False
+[2020-03-07 17:20:48,971][ceph_deploy.install][DEBUG ] Purging data from cluster ceph hosts riot01
+[2020-03-07 17:20:50,467][riot01][DEBUG ] connected to host: root@riot01
+[2020-03-07 17:20:50,470][riot01][DEBUG ] detect platform information from remote host
+[2020-03-07 17:20:50,638][riot01][DEBUG ] detect machine type
+[2020-03-07 17:20:50,677][riot01][DEBUG ] find the location of an executable
+[2020-03-07 17:20:52,270][riot01][DEBUG ] connected to host: root@riot01
+[2020-03-07 17:20:52,272][riot01][DEBUG ] detect platform information from remote host
+[2020-03-07 17:20:52,441][riot01][DEBUG ] detect machine type
+[2020-03-07 17:20:52,478][ceph_deploy.install][INFO ] Distro info: debian 10.3 buster
+[2020-03-07 17:20:52,478][riot01][INFO ] purging data on riot01
+[2020-03-07 17:20:52,488][riot01][INFO ] Running command: rm -rf --one-file-system -- /var/lib/ceph
+[2020-03-07 17:20:52,582][riot01][INFO ] Running command: rm -rf --one-file-system -- /etc/ceph/
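After `purge` plus `purgedata`, riot01 has no /etc/ceph and no /var/lib/ceph at all: no cluster config, no bootstrap keyrings, no admin key. Any `ceph` command on the node will time out until those are re-seeded, which is presumably why the `mon add` attempts below fail the same way as the earlier `rgw create`. A sketch of the usual re-seeding order, run from the admin node (the gatherkeys host is an assumption):

    ceph-deploy --username root install riot01        # reinstall the purged packages
    ceph-deploy --username root config push riot01
    ceph-deploy --username root gatherkeys pine01     # pull keys from an existing mon host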
+[2020-03-07 17:37:15,905][ceph_deploy.conf][DEBUG ] found configuration file at: /home/do/.cephdeploy.conf
+[2020-03-07 17:37:15,906][ceph_deploy.cli][INFO ] Invoked (2.0.1): /usr/bin/ceph-deploy mon add riot01
+[2020-03-07 17:37:15,906][ceph_deploy.cli][INFO ] ceph-deploy options:
+[2020-03-07 17:37:15,906][ceph_deploy.cli][INFO ] username : None
+[2020-03-07 17:37:15,906][ceph_deploy.cli][INFO ] verbose : False
+[2020-03-07 17:37:15,906][ceph_deploy.cli][INFO ] overwrite_conf : False
+[2020-03-07 17:37:15,906][ceph_deploy.cli][INFO ] subcommand : add
+[2020-03-07 17:37:15,906][ceph_deploy.cli][INFO ] quiet : False
+[2020-03-07 17:37:15,906][ceph_deploy.cli][INFO ] cd_conf :
+[2020-03-07 17:37:15,906][ceph_deploy.cli][INFO ] cluster : ceph
+[2020-03-07 17:37:15,906][ceph_deploy.cli][INFO ] mon : ['riot01']
+[2020-03-07 17:37:15,906][ceph_deploy.cli][INFO ] func :
+[2020-03-07 17:37:15,906][ceph_deploy.cli][INFO ] address : None
+[2020-03-07 17:37:15,906][ceph_deploy.cli][INFO ] ceph_conf : None
+[2020-03-07 17:37:15,906][ceph_deploy.cli][INFO ] default_release : False
+[2020-03-07 17:37:15,907][ceph_deploy.mon][INFO ] ensuring configuration of new mon host: riot01
+[2020-03-07 17:37:15,907][ceph_deploy.admin][DEBUG ] Pushing admin keys and conf to riot01
+[2020-03-07 17:37:17,836][ceph_deploy][ERROR ] KeyboardInterrupt
+
+[2020-03-07 17:37:39,493][ceph_deploy.conf][DEBUG ] found configuration file at: /home/do/.cephdeploy.conf
+[2020-03-07 17:37:39,493][ceph_deploy.cli][INFO ] Invoked (2.0.1): /usr/bin/ceph-deploy --username root mon add riot01
+[2020-03-07 17:37:39,493][ceph_deploy.cli][INFO ] ceph-deploy options:
+[2020-03-07 17:37:39,493][ceph_deploy.cli][INFO ] username : root
+[2020-03-07 17:37:39,493][ceph_deploy.cli][INFO ] verbose : False
+[2020-03-07 17:37:39,493][ceph_deploy.cli][INFO ] overwrite_conf : False
+[2020-03-07 17:37:39,493][ceph_deploy.cli][INFO ] subcommand : add
+[2020-03-07 17:37:39,493][ceph_deploy.cli][INFO ] quiet : False
+[2020-03-07 17:37:39,493][ceph_deploy.cli][INFO ] cd_conf :
+[2020-03-07 17:37:39,494][ceph_deploy.cli][INFO ] cluster : ceph
+[2020-03-07 17:37:39,494][ceph_deploy.cli][INFO ] mon : ['riot01']
+[2020-03-07 17:37:39,494][ceph_deploy.cli][INFO ] func :
+[2020-03-07 17:37:39,494][ceph_deploy.cli][INFO ] address : None
+[2020-03-07 17:37:39,494][ceph_deploy.cli][INFO ] ceph_conf : None
+[2020-03-07 17:37:39,494][ceph_deploy.cli][INFO ] default_release : False
+[2020-03-07 17:37:39,494][ceph_deploy.mon][INFO ] ensuring configuration of new mon host: riot01
+[2020-03-07 17:37:39,494][ceph_deploy.admin][DEBUG ] Pushing admin keys and conf to riot01
+[2020-03-07 17:37:41,021][riot01][DEBUG ] connected to host: root@riot01
+[2020-03-07 17:37:41,024][riot01][DEBUG ] detect platform information from remote host
+[2020-03-07 17:37:41,197][riot01][DEBUG ] detect machine type
+[2020-03-07 17:37:41,238][riot01][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf
+[2020-03-07 17:37:41,264][ceph_deploy.mon][DEBUG ] Adding mon to cluster ceph, host riot01
+[2020-03-07 17:37:41,269][ceph_deploy.mon][DEBUG ] using mon address by resolving host: 192.168.10.164
+[2020-03-07 17:37:41,270][ceph_deploy.mon][DEBUG ] detecting platform for host riot01 ...
+[2020-03-07 17:37:42,858][riot01][DEBUG ] connected to host: root@riot01
+[2020-03-07 17:37:42,860][riot01][DEBUG ] detect platform information from remote host
+[2020-03-07 17:37:43,029][riot01][DEBUG ] detect machine type
+[2020-03-07 17:37:43,071][riot01][DEBUG ] find the location of an executable
+[2020-03-07 17:37:43,078][ceph_deploy.mon][INFO ] distro info: debian 10.3 buster
+[2020-03-07 17:37:43,078][riot01][DEBUG ] determining if provided host has same hostname in remote
+[2020-03-07 17:37:43,079][riot01][DEBUG ] get remote short hostname
+[2020-03-07 17:37:43,085][riot01][DEBUG ] adding mon to riot01
+[2020-03-07 17:37:43,085][riot01][DEBUG ] get remote short hostname
+[2020-03-07 17:37:43,103][riot01][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf
+[2020-03-07 17:37:43,124][riot01][DEBUG ] create the mon path if it does not exist
+[2020-03-07 17:37:43,131][riot01][DEBUG ] checking for done path: /var/lib/ceph/mon/ceph-riot01/done
+[2020-03-07 17:37:43,137][riot01][DEBUG ] done path does not exist: /var/lib/ceph/mon/ceph-riot01/done
+[2020-03-07 17:37:43,143][riot01][INFO ] creating keyring file: /var/lib/ceph/tmp/ceph-riot01.mon.keyring
+[2020-03-07 17:37:43,143][riot01][DEBUG ] create the monitor keyring file
+[2020-03-07 17:37:43,161][riot01][INFO ] Running command: ceph --cluster ceph mon getmap -o /var/lib/ceph/tmp/ceph.riot01.monmap
+[2020-03-07 17:37:43,949][riot01][WARNING] Traceback (most recent call last):
+[2020-03-07 17:37:43,950][riot01][WARNING]   File "/usr/bin/ceph", line 1266, in <module>
+[2020-03-07 17:37:43,950][riot01][WARNING]     retval = main()
+[2020-03-07 17:37:43,951][riot01][WARNING]   File "/usr/bin/ceph", line 979, in main
+[2020-03-07 17:37:43,951][riot01][WARNING]     conffile=conffile)
+[2020-03-07 17:37:43,951][riot01][WARNING]   File "/usr/lib/python3/dist-packages/ceph_argparse.py", line 1319, in run_in_thread
+[2020-03-07 17:37:43,952][riot01][WARNING]     raise Exception("timed out")
+[2020-03-07 17:37:43,952][riot01][WARNING] Exception: timed out
+[2020-03-07 17:37:44,077][riot01][ERROR ] RuntimeError: command returned non-zero exit status: 1
+[2020-03-07 17:37:44,077][ceph_deploy.mon][ERROR ] Failed to execute command: ceph --cluster ceph mon getmap -o /var/lib/ceph/tmp/ceph.riot01.monmap
+[2020-03-07 17:37:44,078][ceph_deploy][ERROR ] GenericError: Failed to add monitor to host: riot01
+
+[2020-03-07 17:43:31,174][ceph_deploy.conf][DEBUG ] found configuration file at: /home/do/.cephdeploy.conf
+[2020-03-07 17:43:31,174][ceph_deploy.cli][INFO ] Invoked (2.0.1): /usr/bin/ceph-deploy --username root admin riot01
+[2020-03-07 17:43:31,174][ceph_deploy.cli][INFO ] ceph-deploy options:
+[2020-03-07 17:43:31,175][ceph_deploy.cli][INFO ] username : root
+[2020-03-07 17:43:31,175][ceph_deploy.cli][INFO ] verbose : False
+[2020-03-07 17:43:31,175][ceph_deploy.cli][INFO ] overwrite_conf : False
+[2020-03-07 17:43:31,175][ceph_deploy.cli][INFO ] quiet : False
+[2020-03-07 17:43:31,175][ceph_deploy.cli][INFO ] cd_conf :
+[2020-03-07 17:43:31,175][ceph_deploy.cli][INFO ] cluster : ceph
+[2020-03-07 17:43:31,175][ceph_deploy.cli][INFO ] client : ['riot01']
+[2020-03-07 17:43:31,175][ceph_deploy.cli][INFO ] func :
+[2020-03-07 17:43:31,175][ceph_deploy.cli][INFO ] ceph_conf : None
+[2020-03-07 17:43:31,175][ceph_deploy.cli][INFO ] default_release : False
+[2020-03-07 17:43:31,175][ceph_deploy.admin][DEBUG ] Pushing admin keys and conf to riot01
+[2020-03-07 17:43:32,688][riot01][DEBUG ] connected to host: root@riot01
+[2020-03-07 17:43:32,690][riot01][DEBUG ] detect platform information from remote host
+[2020-03-07 17:43:32,859][riot01][DEBUG ] detect machine type
+[2020-03-07 17:43:32,897][riot01][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf
+[2020-03-07 17:43:42,661][ceph_deploy.conf][DEBUG ] found configuration file at: /home/do/.cephdeploy.conf
+[2020-03-07 17:43:42,662][ceph_deploy.cli][INFO ] Invoked (2.0.1): /usr/bin/ceph-deploy --username root mon add riot01
+[2020-03-07 17:43:42,662][ceph_deploy.cli][INFO ] ceph-deploy options:
+[2020-03-07 17:43:42,662][ceph_deploy.cli][INFO ] username : root
+[2020-03-07 17:43:42,662][ceph_deploy.cli][INFO ] verbose : False
+[2020-03-07 17:43:42,662][ceph_deploy.cli][INFO ] overwrite_conf : False
+[2020-03-07 17:43:42,662][ceph_deploy.cli][INFO ] subcommand : add
+[2020-03-07 17:43:42,662][ceph_deploy.cli][INFO ] quiet : False
+[2020-03-07 17:43:42,662][ceph_deploy.cli][INFO ] cd_conf :
+[2020-03-07 17:43:42,662][ceph_deploy.cli][INFO ] cluster : ceph
+[2020-03-07 17:43:42,662][ceph_deploy.cli][INFO ] mon : ['riot01']
+[2020-03-07 17:43:42,662][ceph_deploy.cli][INFO ] func :
+[2020-03-07 17:43:42,662][ceph_deploy.cli][INFO ] address : None
+[2020-03-07 17:43:42,662][ceph_deploy.cli][INFO ] ceph_conf : None
+[2020-03-07 17:43:42,662][ceph_deploy.cli][INFO ] default_release : False
+[2020-03-07 17:43:42,663][ceph_deploy.mon][INFO ] ensuring configuration of new mon host: riot01
+[2020-03-07 17:43:42,663][ceph_deploy.admin][DEBUG ] Pushing admin keys and conf to riot01
+[2020-03-07 17:43:44,136][riot01][DEBUG ] connected to host: root@riot01
+[2020-03-07 17:43:44,138][riot01][DEBUG ] detect platform information from remote host
+[2020-03-07 17:43:44,313][riot01][DEBUG ] detect machine type
+[2020-03-07 17:43:44,355][riot01][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf
+[2020-03-07 17:43:44,387][ceph_deploy.mon][DEBUG ] Adding mon to cluster ceph, host riot01
+[2020-03-07 17:43:44,392][ceph_deploy.mon][DEBUG ] using mon address by resolving host: 192.168.10.164
+[2020-03-07 17:43:44,392][ceph_deploy.mon][DEBUG ] detecting platform for host riot01 ...
+[2020-03-07 17:43:45,976][riot01][DEBUG ] connected to host: root@riot01
+[2020-03-07 17:43:45,978][riot01][DEBUG ] detect platform information from remote host
+[2020-03-07 17:43:46,146][riot01][DEBUG ] detect machine type
+[2020-03-07 17:43:46,189][riot01][DEBUG ] find the location of an executable
+[2020-03-07 17:43:46,195][ceph_deploy.mon][INFO ] distro info: debian 10.3 buster
+[2020-03-07 17:43:46,196][riot01][DEBUG ] determining if provided host has same hostname in remote
+[2020-03-07 17:43:46,196][riot01][DEBUG ] get remote short hostname
+[2020-03-07 17:43:46,201][riot01][DEBUG ] adding mon to riot01
+[2020-03-07 17:43:46,202][riot01][DEBUG ] get remote short hostname
+[2020-03-07 17:43:46,220][riot01][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf
+[2020-03-07 17:43:46,242][riot01][DEBUG ] create the mon path if it does not exist
+[2020-03-07 17:43:46,248][riot01][DEBUG ] checking for done path: /var/lib/ceph/mon/ceph-riot01/done
+[2020-03-07 17:43:46,255][riot01][DEBUG ] done path does not exist: /var/lib/ceph/mon/ceph-riot01/done
+[2020-03-07 17:43:46,261][riot01][INFO ] creating keyring file: /var/lib/ceph/tmp/ceph-riot01.mon.keyring
+[2020-03-07 17:43:46,261][riot01][DEBUG ] create the monitor keyring file
+[2020-03-07 17:43:46,279][riot01][INFO ] Running command: ceph --cluster ceph mon getmap -o /var/lib/ceph/tmp/ceph.riot01.monmap
+[2020-03-07 17:43:47,068][riot01][WARNING] Traceback (most recent call last):
+[2020-03-07 17:43:47,069][riot01][WARNING]   File "/usr/bin/ceph", line 1266, in <module>
+[2020-03-07 17:43:47,069][riot01][WARNING]     retval = main()
+[2020-03-07 17:43:47,070][riot01][WARNING]   File "/usr/bin/ceph", line 979, in main
+[2020-03-07 17:43:47,070][riot01][WARNING]     conffile=conffile)
+[2020-03-07 17:43:47,071][riot01][WARNING]   File "/usr/lib/python3/dist-packages/ceph_argparse.py", line 1319, in run_in_thread
+[2020-03-07 17:43:47,071][riot01][WARNING]     raise Exception("timed out")
+[2020-03-07 17:43:47,071][riot01][WARNING] Exception: timed out
+[2020-03-07 17:43:47,187][riot01][ERROR ] RuntimeError: command returned non-zero exit status: 1
+[2020-03-07 17:43:47,187][ceph_deploy.mon][ERROR ] Failed to execute command: ceph --cluster ceph mon getmap -o /var/lib/ceph/tmp/ceph.riot01.monmap
+[2020-03-07 17:43:47,188][ceph_deploy][ERROR ] GenericError: Failed to add monitor to host: riot01
+
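`ceph mon getmap` is executed on riot01 itself and has to fetch the monmap from the existing quorum, so it fails with the same client timeout as before. Before re-running `mon add`, it is worth checking from a host that still has a working admin keyring whether there is a quorum to join at all (a sketch; pine01 is assumed to be such a host):

    ssh root@pine01 'ceph quorum_status --format json-pretty'
    ssh root@pine01 'ceph mon dump'    # shows the addresses the mons are registered with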
+[2020-03-07 17:56:25,332][ceph_deploy.conf][DEBUG ] found configuration file at: /home/do/.cephdeploy.conf
+[2020-03-07 17:56:25,332][ceph_deploy.cli][INFO ] Invoked (2.0.1): /usr/bin/ceph-deploy --username root --overwrite-conf config push riot01 pine01 pine02 ebin01 ebin02 lenny
+[2020-03-07 17:56:25,332][ceph_deploy.cli][INFO ] ceph-deploy options:
+[2020-03-07 17:56:25,332][ceph_deploy.cli][INFO ] username : root
+[2020-03-07 17:56:25,332][ceph_deploy.cli][INFO ] verbose : False
+[2020-03-07 17:56:25,332][ceph_deploy.cli][INFO ] overwrite_conf : True
+[2020-03-07 17:56:25,332][ceph_deploy.cli][INFO ] subcommand : push
+[2020-03-07 17:56:25,333][ceph_deploy.cli][INFO ] quiet : False
+[2020-03-07 17:56:25,333][ceph_deploy.cli][INFO ] cd_conf :
+[2020-03-07 17:56:25,333][ceph_deploy.cli][INFO ] cluster : ceph
+[2020-03-07 17:56:25,333][ceph_deploy.cli][INFO ] client : ['riot01', 'pine01', 'pine02', 'ebin01', 'ebin02', 'lenny']
+[2020-03-07 17:56:25,333][ceph_deploy.cli][INFO ] func :
+[2020-03-07 17:56:25,333][ceph_deploy.cli][INFO ] ceph_conf : None
+[2020-03-07 17:56:25,333][ceph_deploy.cli][INFO ] default_release : False
+[2020-03-07 17:56:25,333][ceph_deploy.config][DEBUG ] Pushing config to riot01
+[2020-03-07 17:56:26,830][riot01][DEBUG ] connected to host: root@riot01
+[2020-03-07 17:56:26,833][riot01][DEBUG ] detect platform information from remote host
+[2020-03-07 17:56:27,002][riot01][DEBUG ] detect machine type
+[2020-03-07 17:56:27,094][riot01][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf
+[2020-03-07 17:56:27,125][ceph_deploy.config][DEBUG ] Pushing config to pine01
+[2020-03-07 17:56:34,115][pine01][DEBUG ] connected to host: root@pine01
+[2020-03-07 17:56:34,118][pine01][DEBUG ] detect platform information from remote host
+[2020-03-07 17:56:34,319][pine01][DEBUG ] detect machine type
+[2020-03-07 17:56:34,450][pine01][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf
+[2020-03-07 17:56:34,505][ceph_deploy.config][DEBUG ] Pushing config to pine02
+[2020-03-07 17:56:36,353][pine02][DEBUG ] connected to host: root@pine02
+[2020-03-07 17:56:36,355][pine02][DEBUG ] detect platform information from remote host
+[2020-03-07 17:56:36,475][pine02][DEBUG ] detect machine type
+[2020-03-07 17:56:36,502][pine02][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf
+[2020-03-07 17:56:36,515][ceph_deploy.config][DEBUG ] Pushing config to ebin01
+[2020-03-07 17:56:39,032][ebin01][DEBUG ] connected to host: root@ebin01
+[2020-03-07 17:56:39,032][ebin01][DEBUG ] detect platform information from remote host
+[2020-03-07 17:56:39,244][ebin01][DEBUG ] detect machine type
+[2020-03-07 17:56:39,274][ebin01][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf
+[2020-03-07 17:56:39,300][ceph_deploy.config][DEBUG ] Pushing config to ebin02
+[2020-03-07 17:56:42,242][ebin02][DEBUG ] connected to host: root@ebin02
+[2020-03-07 17:56:42,243][ebin02][DEBUG ] detect platform information from remote host
+[2020-03-07 17:56:42,452][ebin02][DEBUG ] detect machine type
+[2020-03-07 17:56:42,486][ebin02][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf
+[2020-03-07 17:56:42,509][ceph_deploy.config][DEBUG ] Pushing config to lenny
+[2020-03-07 17:56:43,078][lenny][DEBUG ] connected to host: root@lenny
+[2020-03-07 17:56:43,079][lenny][DEBUG ] detect platform information from remote host
+[2020-03-07 17:56:43,103][lenny][DEBUG ] detect machine type
+[2020-03-07 17:56:43,107][lenny][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf
+[2020-03-07 18:00:25,262][ceph_deploy.conf][DEBUG ] found configuration file at: /home/do/.cephdeploy.conf
+[2020-03-07 18:00:25,262][ceph_deploy.cli][INFO ] Invoked (2.0.1): /usr/bin/ceph-deploy --username root --overwrite-conf mon create riot01
+[2020-03-07 18:00:25,272][ceph_deploy.cli][INFO ] ceph-deploy options:
+[2020-03-07 18:00:25,272][ceph_deploy.cli][INFO ] username : root
+[2020-03-07 18:00:25,273][ceph_deploy.cli][INFO ] verbose : False
+[2020-03-07 18:00:25,273][ceph_deploy.cli][INFO ] overwrite_conf : True
+[2020-03-07 18:00:25,273][ceph_deploy.cli][INFO ] subcommand : create
+[2020-03-07 18:00:25,273][ceph_deploy.cli][INFO ] quiet : False
+[2020-03-07 18:00:25,273][ceph_deploy.cli][INFO ] cd_conf :
+[2020-03-07 18:00:25,273][ceph_deploy.cli][INFO ] cluster : ceph
+[2020-03-07 18:00:25,273][ceph_deploy.cli][INFO ] mon : ['riot01']
+[2020-03-07 18:00:25,274][ceph_deploy.cli][INFO ] func :
+[2020-03-07 18:00:25,274][ceph_deploy.cli][INFO ] ceph_conf : None
+[2020-03-07 18:00:25,274][ceph_deploy.cli][INFO ] keyrings : None
+[2020-03-07 18:00:25,274][ceph_deploy.cli][INFO ] default_release : False
+[2020-03-07 18:00:25,275][ceph_deploy.mon][DEBUG ] Deploying mon, cluster ceph hosts riot01
+[2020-03-07 18:00:25,275][ceph_deploy.mon][DEBUG ] detecting platform for host riot01 ...
+[2020-03-07 18:00:26,798][riot01][DEBUG ] connected to host: root@riot01
+[2020-03-07 18:00:26,800][riot01][DEBUG ] detect platform information from remote host
+[2020-03-07 18:00:26,968][riot01][DEBUG ] detect machine type
+[2020-03-07 18:00:27,055][riot01][DEBUG ] find the location of an executable
+[2020-03-07 18:00:27,061][ceph_deploy.mon][INFO ] distro info: debian 10.3 buster
+[2020-03-07 18:00:27,062][riot01][DEBUG ] determining if provided host has same hostname in remote
+[2020-03-07 18:00:27,062][riot01][DEBUG ] get remote short hostname
+[2020-03-07 18:00:27,068][riot01][DEBUG ] deploying mon to riot01
+[2020-03-07 18:00:27,069][riot01][DEBUG ] get remote short hostname
+[2020-03-07 18:00:27,074][riot01][DEBUG ] remote hostname: riot01
+[2020-03-07 18:00:27,085][riot01][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf
+[2020-03-07 18:00:27,124][riot01][DEBUG ] create the mon path if it does not exist
+[2020-03-07 18:00:27,133][riot01][DEBUG ] checking for done path: /var/lib/ceph/mon/ceph-riot01/done
+[2020-03-07 18:00:27,146][riot01][DEBUG ] done path does not exist: /var/lib/ceph/mon/ceph-riot01/done
+[2020-03-07 18:00:27,153][riot01][INFO ] creating keyring file: /var/lib/ceph/tmp/ceph-riot01.mon.keyring
+[2020-03-07 18:00:27,153][riot01][DEBUG ] create the monitor keyring file
+[2020-03-07 18:00:27,194][riot01][INFO ] Running command: ceph-mon --cluster ceph --mkfs -i riot01 --keyring /var/lib/ceph/tmp/ceph-riot01.mon.keyring --setuser 64045 --setgroup 64045
+[2020-03-07 18:00:28,382][riot01][WARNING] 2020-03-07 18:00:28.321 afb202c0 -1 ceph-mon: error opening mon data directory at '/var/lib/ceph/mon/ceph-riot01': (13) Permission denied
+[2020-03-07 18:00:28,448][riot01][ERROR ] RuntimeError: command returned non-zero exit status: 1
+[2020-03-07 18:00:28,448][ceph_deploy.mon][ERROR ] Failed to execute command: ceph-mon --cluster ceph --mkfs -i riot01 --keyring /var/lib/ceph/tmp/ceph-riot01.mon.keyring --setuser 64045 --setgroup 64045
+[2020-03-07 18:00:28,449][ceph_deploy][ERROR ] GenericError: Failed to create 1 monitors
+
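`ceph-mon --mkfs` runs with `--setuser 64045 --setgroup 64045` (the `ceph` user on Debian), but `/var/lib/ceph/mon/ceph-riot01` is not writable by that user; after the manual `rm -rf` of /var/lib/ceph during purgedata, the recreated tree apparently ended up owned by root. A likely fix, assuming that diagnosis:

    ssh root@riot01 'chown -R ceph:ceph /var/lib/ceph'

The retry below succeeds, which is consistent with the ownership having been corrected in between.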
+[2020-03-07 18:01:17,355][ceph_deploy.conf][DEBUG ] found configuration file at: /home/do/.cephdeploy.conf
+[2020-03-07 18:01:17,355][ceph_deploy.cli][INFO ] Invoked (2.0.1): /usr/bin/ceph-deploy --username root --overwrite-conf mon create riot01
+[2020-03-07 18:01:17,355][ceph_deploy.cli][INFO ] ceph-deploy options:
+[2020-03-07 18:01:17,355][ceph_deploy.cli][INFO ] username : root
+[2020-03-07 18:01:17,355][ceph_deploy.cli][INFO ] verbose : False
+[2020-03-07 18:01:17,355][ceph_deploy.cli][INFO ] overwrite_conf : True
+[2020-03-07 18:01:17,356][ceph_deploy.cli][INFO ] subcommand : create
+[2020-03-07 18:01:17,356][ceph_deploy.cli][INFO ] quiet : False
+[2020-03-07 18:01:17,356][ceph_deploy.cli][INFO ] cd_conf :
+[2020-03-07 18:01:17,356][ceph_deploy.cli][INFO ] cluster : ceph
+[2020-03-07 18:01:17,356][ceph_deploy.cli][INFO ] mon : ['riot01']
+[2020-03-07 18:01:17,356][ceph_deploy.cli][INFO ] func :
+[2020-03-07 18:01:17,356][ceph_deploy.cli][INFO ] ceph_conf : None
+[2020-03-07 18:01:17,356][ceph_deploy.cli][INFO ] keyrings : None
+[2020-03-07 18:01:17,356][ceph_deploy.cli][INFO ] default_release : False
+[2020-03-07 18:01:17,356][ceph_deploy.mon][DEBUG ] Deploying mon, cluster ceph hosts riot01
+[2020-03-07 18:01:17,356][ceph_deploy.mon][DEBUG ] detecting platform for host riot01 ...
+[2020-03-07 18:01:18,870][riot01][DEBUG ] connected to host: root@riot01
+[2020-03-07 18:01:18,872][riot01][DEBUG ] detect platform information from remote host
+[2020-03-07 18:01:19,041][riot01][DEBUG ] detect machine type
+[2020-03-07 18:01:19,083][riot01][DEBUG ] find the location of an executable
+[2020-03-07 18:01:19,089][ceph_deploy.mon][INFO ] distro info: debian 10.3 buster
+[2020-03-07 18:01:19,090][riot01][DEBUG ] determining if provided host has same hostname in remote
+[2020-03-07 18:01:19,090][riot01][DEBUG ] get remote short hostname
+[2020-03-07 18:01:19,096][riot01][DEBUG ] deploying mon to riot01
+[2020-03-07 18:01:19,096][riot01][DEBUG ] get remote short hostname
+[2020-03-07 18:01:19,103][riot01][DEBUG ] remote hostname: riot01
+[2020-03-07 18:01:19,115][riot01][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf
+[2020-03-07 18:01:19,137][riot01][DEBUG ] create the mon path if it does not exist
+[2020-03-07 18:01:19,143][riot01][DEBUG ] checking for done path: /var/lib/ceph/mon/ceph-riot01/done
+[2020-03-07 18:01:19,150][riot01][DEBUG ] done path does not exist: /var/lib/ceph/mon/ceph-riot01/done
+[2020-03-07 18:01:19,157][riot01][INFO ] creating keyring file: /var/lib/ceph/tmp/ceph-riot01.mon.keyring
+[2020-03-07 18:01:19,157][riot01][DEBUG ] create the monitor keyring file
+[2020-03-07 18:01:19,175][riot01][INFO ] Running command: ceph-mon --cluster ceph --mkfs -i riot01 --keyring /var/lib/ceph/tmp/ceph-riot01.mon.keyring --setuser 64045 --setgroup 64045
+[2020-03-07 18:01:19,913][riot01][INFO ] unlinking keyring file /var/lib/ceph/tmp/ceph-riot01.mon.keyring
+[2020-03-07 18:01:19,920][riot01][DEBUG ] create a done file to avoid re-doing the mon deployment
+[2020-03-07 18:01:19,927][riot01][DEBUG ] create the init path if it does not exist
+[2020-03-07 18:01:19,944][riot01][INFO ] Running command: systemctl enable ceph.target
+[2020-03-07 18:01:22,192][riot01][INFO ] Running command: systemctl enable ceph-mon@riot01
+[2020-03-07 18:01:24,435][riot01][INFO ] Running command: systemctl start ceph-mon@riot01
+[2020-03-07 18:01:26,625][riot01][INFO ] Running command: ceph --cluster=ceph --admin-daemon /var/run/ceph/ceph-mon.riot01.asok mon_status
+[2020-03-07 18:01:29,002][riot01][DEBUG ] ********************************************************************************
+[2020-03-07 18:01:29,003][riot01][DEBUG ] status for monitor: mon.riot01
+[2020-03-07 18:01:29,004][riot01][DEBUG ] {
+[2020-03-07 18:01:29,004][riot01][DEBUG ]   "election_epoch": 0,
+[2020-03-07 18:01:29,004][riot01][DEBUG ]   "extra_probe_peers": [
+[2020-03-07 18:01:29,004][riot01][DEBUG ]     {
+[2020-03-07 18:01:29,005][riot01][DEBUG ]       "addrvec": [
+[2020-03-07 18:01:29,005][riot01][DEBUG ]         {
+[2020-03-07 18:01:29,005][riot01][DEBUG ]           "addr": "192.168.10.160:3300",
+[2020-03-07 18:01:29,005][riot01][DEBUG ]           "nonce": 0,
+[2020-03-07 18:01:29,005][riot01][DEBUG ]           "type": "v2"
+[2020-03-07 18:01:29,005][riot01][DEBUG ]         },
+[2020-03-07 18:01:29,006][riot01][DEBUG ]         {
+[2020-03-07 18:01:29,006][riot01][DEBUG ]           "addr": "192.168.10.160:6789",
+[2020-03-07 18:01:29,006][riot01][DEBUG ]           "nonce": 0,
+[2020-03-07 18:01:29,006][riot01][DEBUG ]           "type": "v1"
+[2020-03-07 18:01:29,007][riot01][DEBUG ]         }
+[2020-03-07 18:01:29,007][riot01][DEBUG ]       ]
+[2020-03-07 18:01:29,007][riot01][DEBUG ]     },
+[2020-03-07 18:01:29,007][riot01][DEBUG ]     {
+[2020-03-07 18:01:29,007][riot01][DEBUG ]       "addrvec": [
+[2020-03-07 18:01:29,007][riot01][DEBUG ]         {
+[2020-03-07 18:01:29,008][riot01][DEBUG ]           "addr": "192.168.10.161:3300",
+[2020-03-07 18:01:29,008][riot01][DEBUG ]           "nonce": 0,
+[2020-03-07 18:01:29,008][riot01][DEBUG ]           "type": "v2"
+[2020-03-07 18:01:29,008][riot01][DEBUG ]         },
+[2020-03-07 18:01:29,008][riot01][DEBUG ]         {
+[2020-03-07 18:01:29,008][riot01][DEBUG ]           "addr": "192.168.10.161:6789",
+[2020-03-07 18:01:29,009][riot01][DEBUG ]           "nonce": 0,
+[2020-03-07 18:01:29,009][riot01][DEBUG ]           "type": "v1"
+[2020-03-07 18:01:29,009][riot01][DEBUG ]         }
+[2020-03-07 18:01:29,009][riot01][DEBUG ]       ]
+[2020-03-07 18:01:29,009][riot01][DEBUG ]     }
+[2020-03-07 18:01:29,010][riot01][DEBUG ]   ],
+[2020-03-07 18:01:29,010][riot01][DEBUG ]   "feature_map": {
+[2020-03-07 18:01:29,010][riot01][DEBUG ]     "mon": [
+[2020-03-07 18:01:29,010][riot01][DEBUG ]       {
+[2020-03-07 18:01:29,010][riot01][DEBUG ]         "features": "0x3ffddff8ffacffff",
+[2020-03-07 18:01:29,010][riot01][DEBUG ]         "num": 1,
+[2020-03-07 18:01:29,011][riot01][DEBUG ]         "release": "luminous"
+[2020-03-07 18:01:29,011][riot01][DEBUG ]       }
+[2020-03-07 18:01:29,011][riot01][DEBUG ]     ]
+[2020-03-07 18:01:29,011][riot01][DEBUG ]   },
+[2020-03-07 18:01:29,011][riot01][DEBUG ]   "features": {
+[2020-03-07 18:01:29,012][riot01][DEBUG ]     "quorum_con": "0",
+[2020-03-07 18:01:29,012][riot01][DEBUG ]     "quorum_mon": [],
+[2020-03-07 18:01:29,012][riot01][DEBUG ]     "required_con": "0",
+[2020-03-07 18:01:29,013][riot01][DEBUG ]     "required_mon": []
+[2020-03-07 18:01:29,013][riot01][DEBUG ]   },
+[2020-03-07 18:01:29,013][riot01][DEBUG ]   "monmap": {
+[2020-03-07 18:01:29,013][riot01][DEBUG ]     "created": "2020-03-07 18:01:19.539212",
+[2020-03-07 18:01:29,013][riot01][DEBUG ]     "epoch": 0,
+[2020-03-07 18:01:29,013][riot01][DEBUG ]     "features": {
+[2020-03-07 18:01:29,014][riot01][DEBUG ]       "optional": [],
+[2020-03-07 18:01:29,014][riot01][DEBUG ]       "persistent": []
+[2020-03-07 18:01:29,014][riot01][DEBUG ]     },
+[2020-03-07 18:01:29,014][riot01][DEBUG ]     "fsid": "29ef4020-303a-4b2e-aa24-a1e20e5ba21c",
+[2020-03-07 18:01:29,015][riot01][DEBUG ]     "min_mon_release": 0,
+[2020-03-07 18:01:29,015][riot01][DEBUG ]     "min_mon_release_name": "unknown",
+[2020-03-07 18:01:29,015][riot01][DEBUG ]     "modified": "2020-03-07 18:01:19.539212",
+[2020-03-07 18:01:29,015][riot01][DEBUG ]     "mons": [
+[2020-03-07 18:01:29,016][riot01][DEBUG ]       {
+[2020-03-07 18:01:29,016][riot01][DEBUG ]         "addr": "0.0.0.0:0/1",
+[2020-03-07 18:01:29,016][riot01][DEBUG ]         "name": "pine01",
+[2020-03-07 18:01:29,016][riot01][DEBUG ]         "public_addr": "0.0.0.0:0/1",
+[2020-03-07 18:01:29,017][riot01][DEBUG ]         "public_addrs": {
+[2020-03-07 18:01:29,017][riot01][DEBUG ]           "addrvec": [
+[2020-03-07 18:01:29,017][riot01][DEBUG ]             {
+[2020-03-07 18:01:29,017][riot01][DEBUG ]               "addr": "0.0.0.0:0",
+[2020-03-07 18:01:29,017][riot01][DEBUG ]               "nonce": 1,
+[2020-03-07 18:01:29,017][riot01][DEBUG ]               "type": "v1"
+[2020-03-07 18:01:29,018][riot01][DEBUG ]             }
+[2020-03-07 18:01:29,018][riot01][DEBUG ]           ]
+[2020-03-07 18:01:29,018][riot01][DEBUG ]         },
+[2020-03-07 18:01:29,018][riot01][DEBUG ]         "rank": 0
+[2020-03-07 18:01:29,018][riot01][DEBUG ]       },
+[2020-03-07 18:01:29,019][riot01][DEBUG ]       {
+[2020-03-07 18:01:29,019][riot01][DEBUG ]         "addr": "0.0.0.0:0/2",
+[2020-03-07 18:01:29,019][riot01][DEBUG ]         "name": "pine02",
+[2020-03-07 18:01:29,019][riot01][DEBUG ]         "public_addr": "0.0.0.0:0/2",
+[2020-03-07 18:01:29,019][riot01][DEBUG ]         "public_addrs": {
+[2020-03-07 18:01:29,019][riot01][DEBUG ]           "addrvec": [
+[2020-03-07 18:01:29,020][riot01][DEBUG ]             {
+[2020-03-07 18:01:29,020][riot01][DEBUG ]               "addr": "0.0.0.0:0",
+[2020-03-07 18:01:29,020][riot01][DEBUG ]               "nonce": 2,
+[2020-03-07 18:01:29,020][riot01][DEBUG ]               "type": "v1"
+[2020-03-07 18:01:29,020][riot01][DEBUG ]             }
+[2020-03-07 18:01:29,020][riot01][DEBUG ]           ]
+[2020-03-07 18:01:29,021][riot01][DEBUG ]         },
+[2020-03-07 18:01:29,021][riot01][DEBUG ]         "rank": 1
+[2020-03-07 18:01:29,021][riot01][DEBUG ]       }
+[2020-03-07 18:01:29,021][riot01][DEBUG ]     ]
+[2020-03-07 18:01:29,021][riot01][DEBUG ]   },
+[2020-03-07 18:01:29,021][riot01][DEBUG ]   "name": "riot01",
+[2020-03-07 18:01:29,022][riot01][DEBUG ]   "outside_quorum": [],
+[2020-03-07 18:01:29,022][riot01][DEBUG ]   "quorum": [],
+[2020-03-07 18:01:29,022][riot01][DEBUG ]   "rank": -1,
+[2020-03-07 18:01:29,022][riot01][DEBUG ]   "state": "probing",
+[2020-03-07 18:01:29,022][riot01][DEBUG ]   "sync_provider": []
+[2020-03-07 18:01:29,023][riot01][DEBUG ] }
+[2020-03-07 18:01:29,023][riot01][DEBUG ] ********************************************************************************
+[2020-03-07 18:01:29,023][riot01][INFO ] monitor: mon.riot01 is currently at the state of probing
+[2020-03-07 18:01:29,042][riot01][INFO ] Running command: ceph --cluster=ceph --admin-daemon /var/run/ceph/ceph-mon.riot01.asok mon_status
+[2020-03-07 18:01:30,062][riot01][WARNING] riot01 is not defined in `mon initial members`
+[2020-03-07 18:01:30,062][riot01][WARNING] monitor riot01 does not exist in monmap
+[2020-03-07 18:08:11,337][ceph_deploy.conf][DEBUG ] found configuration file at: /home/do/.cephdeploy.conf
+[2020-03-07 18:08:11,337][ceph_deploy.cli][INFO ] Invoked (2.0.1): /usr/bin/ceph-deploy --username root --overwrite-conf config push riot01 pine01 pine02 ebin01 ebin02 lenny
+[2020-03-07 18:08:11,337][ceph_deploy.cli][INFO ] ceph-deploy options:
+[2020-03-07 18:08:11,337][ceph_deploy.cli][INFO ] username : root
+[2020-03-07 18:08:11,337][ceph_deploy.cli][INFO ] verbose : False
+[2020-03-07 18:08:11,337][ceph_deploy.cli][INFO ] overwrite_conf : True
+[2020-03-07 18:08:11,337][ceph_deploy.cli][INFO ] subcommand : push
+[2020-03-07 18:08:11,337][ceph_deploy.cli][INFO ] quiet : False
+[2020-03-07 18:08:11,337][ceph_deploy.cli][INFO ] cd_conf :
+[2020-03-07 18:08:11,337][ceph_deploy.cli][INFO ] cluster : ceph
+[2020-03-07 18:08:11,338][ceph_deploy.cli][INFO ] client : ['riot01', 'pine01', 'pine02', 'ebin01', 'ebin02', 'lenny']
+[2020-03-07 18:08:11,338][ceph_deploy.cli][INFO ] func :
+[2020-03-07 18:08:11,338][ceph_deploy.cli][INFO ] ceph_conf : None
+[2020-03-07 18:08:11,338][ceph_deploy.cli][INFO ] default_release : False
+[2020-03-07 18:08:11,338][ceph_deploy.config][DEBUG ] Pushing config to riot01
+[2020-03-07 18:08:12,803][riot01][DEBUG ] connected to host: root@riot01
+[2020-03-07 18:08:12,804][riot01][DEBUG ] detect platform information from remote host
+[2020-03-07 18:08:12,975][riot01][DEBUG ] detect machine type
+[2020-03-07 18:08:13,009][riot01][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf
+[2020-03-07 18:08:13,029][ceph_deploy.config][DEBUG ] Pushing config to pine01
+[2020-03-07 18:08:14,460][pine01][DEBUG ] connected to host: root@pine01
+[2020-03-07 18:08:14,460][pine01][DEBUG ] detect platform information from remote host
+[2020-03-07 18:08:14,579][pine01][DEBUG ] detect machine type
+[2020-03-07 18:08:14,694][pine01][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf
+[2020-03-07 18:08:14,920][ceph_deploy.config][DEBUG ] Pushing config to pine02
+[2020-03-07 18:08:16,450][pine02][DEBUG ] connected to host: root@pine02
+[2020-03-07 18:08:16,450][pine02][DEBUG ] detect platform information from remote host
+[2020-03-07 18:08:16,573][pine02][DEBUG ] detect machine type
+[2020-03-07 18:08:16,600][pine02][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf
+[2020-03-07 18:08:16,612][ceph_deploy.config][DEBUG ] Pushing config to ebin01
+[2020-03-07 18:08:18,833][ebin01][DEBUG ] connected to host: root@ebin01
+[2020-03-07 18:08:18,835][ebin01][DEBUG ] detect platform information from remote host
+[2020-03-07 18:08:19,044][ebin01][DEBUG ] detect machine type
+[2020-03-07 18:08:19,075][ebin01][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf
+[2020-03-07 18:08:19,097][ceph_deploy.config][DEBUG ] Pushing config to ebin02
+[2020-03-07 18:08:22,348][ebin02][DEBUG ] connected to host: root@ebin02
+[2020-03-07 18:08:22,350][ebin02][DEBUG ] detect platform information from remote host
+[2020-03-07 18:08:22,645][ebin02][DEBUG ] detect machine type
+[2020-03-07 18:08:22,689][ebin02][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf
+[2020-03-07 18:08:22,715][ceph_deploy.config][DEBUG ] Pushing config to lenny
+[2020-03-07 18:08:23,268][lenny][DEBUG ] connected to host: root@lenny
+[2020-03-07 18:08:23,268][lenny][DEBUG ] detect platform information from remote host
+[2020-03-07 18:08:23,292][lenny][DEBUG ] detect machine type
+[2020-03-07 18:08:23,295][lenny][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf
+[2020-03-07 18:10:14,304][ceph_deploy.conf][DEBUG ] found configuration file at: /home/do/.cephdeploy.conf
+[2020-03-07 18:10:14,304][ceph_deploy.cli][INFO ] Invoked (2.0.1): /usr/bin/ceph-deploy --username root --overwrite-conf mon create riot01
+[2020-03-07 18:10:14,304][ceph_deploy.cli][INFO ] ceph-deploy options:
+[2020-03-07 18:10:14,304][ceph_deploy.cli][INFO ] username : root
+[2020-03-07 18:10:14,304][ceph_deploy.cli][INFO ] verbose : False
+[2020-03-07 18:10:14,304][ceph_deploy.cli][INFO ] overwrite_conf : True
+[2020-03-07 18:10:14,304][ceph_deploy.cli][INFO ] subcommand : create
+[2020-03-07 18:10:14,304][ceph_deploy.cli][INFO ] quiet : False
+[2020-03-07 18:10:14,304][ceph_deploy.cli][INFO ] cd_conf :
+[2020-03-07 18:10:14,304][ceph_deploy.cli][INFO ] cluster : ceph
+[2020-03-07 18:10:14,304][ceph_deploy.cli][INFO ] mon : ['riot01']
+[2020-03-07 18:10:14,304][ceph_deploy.cli][INFO ] func :
+[2020-03-07 18:10:14,304][ceph_deploy.cli][INFO ] ceph_conf : None
+[2020-03-07 18:10:14,304][ceph_deploy.cli][INFO ] keyrings : None
+[2020-03-07 18:10:14,304][ceph_deploy.cli][INFO ] default_release : False
+[2020-03-07 18:10:14,305][ceph_deploy.mon][DEBUG ] Deploying mon, cluster ceph hosts riot01
+[2020-03-07 18:10:14,305][ceph_deploy.mon][DEBUG ] detecting platform for host riot01 ...
+[2020-03-07 18:10:15,798][riot01][DEBUG ] connected to host: root@riot01
+[2020-03-07 18:10:15,800][riot01][DEBUG ] detect platform information from remote host
+[2020-03-07 18:10:15,969][riot01][DEBUG ] detect machine type
+[2020-03-07 18:10:16,007][riot01][DEBUG ] find the location of an executable
+[2020-03-07 18:10:16,014][ceph_deploy.mon][INFO ] distro info: debian 10.3 buster
+[2020-03-07 18:10:16,015][riot01][DEBUG ] determining if provided host has same hostname in remote
+[2020-03-07 18:10:16,015][riot01][DEBUG ] get remote short hostname
+[2020-03-07 18:10:16,021][riot01][DEBUG ] deploying mon to riot01
+[2020-03-07 18:10:16,021][riot01][DEBUG ] get remote short hostname
+[2020-03-07 18:10:16,027][riot01][DEBUG ] remote hostname: riot01
+[2020-03-07 18:10:16,040][riot01][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf
+[2020-03-07 18:10:16,062][riot01][DEBUG ] create the mon path if it does not exist
+[2020-03-07 18:10:16,069][riot01][DEBUG ] checking for done path: /var/lib/ceph/mon/ceph-riot01/done
+[2020-03-07 18:10:16,075][riot01][DEBUG ] create a done file to avoid re-doing the mon deployment
+[2020-03-07 18:10:16,082][riot01][DEBUG ] create the init path if it does not exist
+[2020-03-07 18:10:16,097][riot01][INFO ] Running command: systemctl enable ceph.target
+[2020-03-07 18:10:18,337][riot01][INFO ] Running command: systemctl enable ceph-mon@riot01
+[2020-03-07 18:10:20,581][riot01][INFO ] Running command: systemctl start ceph-mon@riot01
+[2020-03-07 18:10:22,721][riot01][INFO ] Running command: ceph --cluster=ceph --admin-daemon /var/run/ceph/ceph-mon.riot01.asok mon_status
+[2020-03-07 18:10:23,591][riot01][DEBUG ] ********************************************************************************
+[2020-03-07 18:10:23,591][riot01][DEBUG ] status for monitor: mon.riot01
+[2020-03-07 18:10:23,593][riot01][DEBUG ] {
+[2020-03-07 18:10:23,593][riot01][DEBUG ]   "election_epoch": 0,
+[2020-03-07 18:10:23,593][riot01][DEBUG ]   "extra_probe_peers": [
+[2020-03-07 18:10:23,593][riot01][DEBUG ]     {
+[2020-03-07 18:10:23,594][riot01][DEBUG ]       "addrvec": [
+[2020-03-07 18:10:23,594][riot01][DEBUG ]         {
+[2020-03-07 18:10:23,594][riot01][DEBUG ]           "addr": "192.168.10.160:3300",
+[2020-03-07 18:10:23,594][riot01][DEBUG ]           "nonce": 0,
+[2020-03-07 18:10:23,594][riot01][DEBUG ]           "type": "v2"
+[2020-03-07 18:10:23,595][riot01][DEBUG ]         },
+[2020-03-07 18:10:23,595][riot01][DEBUG ]         {
+[2020-03-07 18:10:23,595][riot01][DEBUG ]           "addr": "192.168.10.160:6789",
+[2020-03-07 18:10:23,595][riot01][DEBUG ]           "nonce": 0,
+[2020-03-07 18:10:23,595][riot01][DEBUG ]           "type": "v1"
+[2020-03-07 18:10:23,595][riot01][DEBUG ]         }
+[2020-03-07 18:10:23,596][riot01][DEBUG ]       ]
+[2020-03-07 18:10:23,596][riot01][DEBUG ]     },
+[2020-03-07 18:10:23,596][riot01][DEBUG ]     {
+[2020-03-07 18:10:23,596][riot01][DEBUG ]       "addrvec": [
+[2020-03-07 18:10:23,596][riot01][DEBUG ]         {
+[2020-03-07 18:10:23,596][riot01][DEBUG ]           "addr": "192.168.10.161:3300",
+[2020-03-07 18:10:23,597][riot01][DEBUG ]           "nonce": 0,
+[2020-03-07 18:10:23,597][riot01][DEBUG ]           "type": "v2"
+[2020-03-07 18:10:23,597][riot01][DEBUG ]         },
+[2020-03-07 18:10:23,597][riot01][DEBUG ]         {
+[2020-03-07 18:10:23,597][riot01][DEBUG ]           "addr": "192.168.10.161:6789",
+[2020-03-07 18:10:23,597][riot01][DEBUG ]           "nonce": 0,
+[2020-03-07 18:10:23,598][riot01][DEBUG ]           "type": "v1"
+[2020-03-07 18:10:23,598][riot01][DEBUG ]         }
+[2020-03-07 18:10:23,598][riot01][DEBUG ]       ]
+[2020-03-07 18:10:23,598][riot01][DEBUG ]     }
+[2020-03-07 18:10:23,598][riot01][DEBUG ]   ],
+[2020-03-07 18:10:23,598][riot01][DEBUG ]   "feature_map": {
+[2020-03-07 18:10:23,599][riot01][DEBUG ]     "mon": [
+[2020-03-07 18:10:23,599][riot01][DEBUG ]       {
+[2020-03-07 18:10:23,599][riot01][DEBUG ]         "features": "0x3ffddff8ffacffff",
+[2020-03-07 18:10:23,599][riot01][DEBUG ]         "num": 1,
+[2020-03-07 18:10:23,600][riot01][DEBUG ]         "release": "luminous"
+[2020-03-07 18:10:23,600][riot01][DEBUG ]       }
+[2020-03-07 18:10:23,600][riot01][DEBUG ]     ]
+[2020-03-07 18:10:23,600][riot01][DEBUG ]   },
+[2020-03-07 18:10:23,600][riot01][DEBUG ]   "features": {
+[2020-03-07 18:10:23,601][riot01][DEBUG ]     "quorum_con": "0",
+[2020-03-07 18:10:23,601][riot01][DEBUG ]     "quorum_mon": [],
+[2020-03-07 18:10:23,601][riot01][DEBUG ]     "required_con": "0",
+[2020-03-07 18:10:23,601][riot01][DEBUG ]     "required_mon": []
+[2020-03-07 18:10:23,601][riot01][DEBUG ]   },
+[2020-03-07 18:10:23,601][riot01][DEBUG ]   "monmap": {
+[2020-03-07 18:10:23,602][riot01][DEBUG ]     "created": "2020-03-07 18:01:19.539212",
+[2020-03-07 18:10:23,602][riot01][DEBUG ]     "epoch": 0,
+[2020-03-07 18:10:23,602][riot01][DEBUG ]     "features": {
+[2020-03-07 18:10:23,602][riot01][DEBUG ]       "optional": [],
+[2020-03-07 18:10:23,602][riot01][DEBUG ]       "persistent": []
+[2020-03-07 18:10:23,602][riot01][DEBUG ]     },
+[2020-03-07 18:10:23,603][riot01][DEBUG ]     "fsid": "29ef4020-303a-4b2e-aa24-a1e20e5ba21c",
+[2020-03-07 18:10:23,603][riot01][DEBUG ]     "min_mon_release": 0,
+[2020-03-07 18:10:23,603][riot01][DEBUG ]     "min_mon_release_name": "unknown",
+[2020-03-07 18:10:23,603][riot01][DEBUG ]     "modified": "2020-03-07 18:01:19.539212",
+[2020-03-07 18:10:23,603][riot01][DEBUG ]     "mons": [
+[2020-03-07 18:10:23,603][riot01][DEBUG ]       {
+[2020-03-07 18:10:23,604][riot01][DEBUG ]         "addr": "0.0.0.0:0/1",
+[2020-03-07 18:10:23,604][riot01][DEBUG ]         "name": "pine01",
+[2020-03-07 18:10:23,604][riot01][DEBUG ]         "public_addr": "0.0.0.0:0/1",
+[2020-03-07 18:10:23,604][riot01][DEBUG ]         "public_addrs": {
+[2020-03-07 18:10:23,604][riot01][DEBUG ]           "addrvec": [
+[2020-03-07 18:10:23,605][riot01][DEBUG ]             {
+[2020-03-07 18:10:23,605][riot01][DEBUG ]               "addr": "0.0.0.0:0",
+[2020-03-07 18:10:23,605][riot01][DEBUG ]               "nonce": 1,
+[2020-03-07 18:10:23,605][riot01][DEBUG ]               "type": "v1"
+[2020-03-07 18:10:23,605][riot01][DEBUG ]             }
+[2020-03-07 18:10:23,605][riot01][DEBUG ]           ]
+[2020-03-07 18:10:23,606][riot01][DEBUG ]         },
+[2020-03-07 18:10:23,606][riot01][DEBUG ]         "rank": 0
+[2020-03-07 18:10:23,606][riot01][DEBUG ]       },
+[2020-03-07 18:10:23,607][riot01][DEBUG ]       {
+[2020-03-07 18:10:23,607][riot01][DEBUG ]         "addr": "0.0.0.0:0/2",
+[2020-03-07 18:10:23,607][riot01][DEBUG ]         "name": "pine02",
+[2020-03-07 18:10:23,607][riot01][DEBUG ]         "public_addr": "0.0.0.0:0/2",
+[2020-03-07 18:10:23,607][riot01][DEBUG ]         "public_addrs": {
+[2020-03-07 18:10:23,608][riot01][DEBUG ]           "addrvec": [
+[2020-03-07 18:10:23,608][riot01][DEBUG ]             {
+[2020-03-07 18:10:23,608][riot01][DEBUG ]               "addr": "0.0.0.0:0",
+[2020-03-07 18:10:23,609][riot01][DEBUG ]               "nonce": 2,
+[2020-03-07 18:10:23,609][riot01][DEBUG ]               "type": "v1"
+[2020-03-07 18:10:23,609][riot01][DEBUG ]             }
+[2020-03-07 18:10:23,609][riot01][DEBUG ]           ]
+[2020-03-07 18:10:23,609][riot01][DEBUG ]         },
+[2020-03-07 18:10:23,610][riot01][DEBUG ]         "rank": 1
+[2020-03-07 18:10:23,610][riot01][DEBUG ]       }
+[2020-03-07 18:10:23,610][riot01][DEBUG ]     ]
+[2020-03-07 18:10:23,610][riot01][DEBUG ]   },
+[2020-03-07 18:10:23,610][riot01][DEBUG ]   "name": "riot01",
+[2020-03-07 18:10:23,611][riot01][DEBUG ]   "outside_quorum": [],
+[2020-03-07 18:10:23,611][riot01][DEBUG ]   "quorum": [],
+[2020-03-07 18:10:23,611][riot01][DEBUG ]   "rank": -1,
+[2020-03-07 18:10:23,611][riot01][DEBUG ]   "state": "probing",
+[2020-03-07 18:10:23,611][riot01][DEBUG ]   "sync_provider": []
+[2020-03-07 18:10:23,611][riot01][DEBUG ] }
+[2020-03-07 18:10:23,612][riot01][DEBUG ] ********************************************************************************
+[2020-03-07 18:10:23,612][riot01][INFO ] monitor: mon.riot01 is currently at the state of probing
+[2020-03-07 18:10:23,627][riot01][INFO ] Running command: ceph --cluster=ceph --admin-daemon /var/run/ceph/ceph-mon.riot01.asok mon_status
+[2020-03-07 18:10:24,498][riot01][WARNING] monitor riot01 does not exist in monmap
configuration to /etc/ceph/{cluster}.conf +[2020-03-07 19:06:06,647][ceph_deploy.config][DEBUG ] Pushing config to pine01 +[2020-03-07 19:06:13,850][pine01][DEBUG ] connected to host: root@pine01 +[2020-03-07 19:06:13,852][pine01][DEBUG ] detect platform information from remote host +[2020-03-07 19:06:14,022][pine01][DEBUG ] detect machine type +[2020-03-07 19:06:14,222][pine01][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf +[2020-03-07 19:06:14,232][ceph_deploy.config][DEBUG ] Pushing config to pine02 +[2020-03-07 19:06:15,840][pine02][DEBUG ] connected to host: root@pine02 +[2020-03-07 19:06:15,842][pine02][DEBUG ] detect platform information from remote host +[2020-03-07 19:06:15,962][pine02][DEBUG ] detect machine type +[2020-03-07 19:06:15,987][pine02][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf +[2020-03-07 19:06:15,997][ceph_deploy.config][DEBUG ] Pushing config to ebin01 +[2020-03-07 19:06:18,553][ebin01][DEBUG ] connected to host: root@ebin01 +[2020-03-07 19:06:18,555][ebin01][DEBUG ] detect platform information from remote host +[2020-03-07 19:06:18,768][ebin01][DEBUG ] detect machine type +[2020-03-07 19:06:18,802][ebin01][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf +[2020-03-07 19:06:18,824][ceph_deploy.config][DEBUG ] Pushing config to ebin02 +[2020-03-07 19:06:22,141][ebin02][DEBUG ] connected to host: root@ebin02 +[2020-03-07 19:06:22,143][ebin02][DEBUG ] detect platform information from remote host +[2020-03-07 19:06:22,408][ebin02][DEBUG ] detect machine type +[2020-03-07 19:06:22,450][ebin02][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf +[2020-03-07 19:06:22,474][ceph_deploy.config][DEBUG ] Pushing config to lenny +[2020-03-07 19:06:22,892][lenny][DEBUG ] connected to host: root@lenny +[2020-03-07 19:06:22,893][lenny][DEBUG ] detect platform information from remote host +[2020-03-07 19:06:22,909][lenny][DEBUG ] detect machine type +[2020-03-07 19:06:22,912][lenny][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf +[2020-03-07 19:06:59,571][ceph_deploy.conf][DEBUG ] found configuration file at: /home/do/.cephdeploy.conf +[2020-03-07 19:06:59,571][ceph_deploy.cli][INFO ] Invoked (2.0.1): /usr/bin/ceph-deploy --username root --overwrite-conf config push riot01 pine01 pine02 ebin01 ebin02 lenny +[2020-03-07 19:06:59,571][ceph_deploy.cli][INFO ] ceph-deploy options: +[2020-03-07 19:06:59,571][ceph_deploy.cli][INFO ] username : root +[2020-03-07 19:06:59,571][ceph_deploy.cli][INFO ] verbose : False +[2020-03-07 19:06:59,571][ceph_deploy.cli][INFO ] overwrite_conf : True +[2020-03-07 19:06:59,571][ceph_deploy.cli][INFO ] subcommand : push +[2020-03-07 19:06:59,571][ceph_deploy.cli][INFO ] quiet : False +[2020-03-07 19:06:59,572][ceph_deploy.cli][INFO ] cd_conf : +[2020-03-07 19:06:59,572][ceph_deploy.cli][INFO ] cluster : ceph +[2020-03-07 19:06:59,572][ceph_deploy.cli][INFO ] client : ['riot01', 'pine01', 'pine02', 'ebin01', 'ebin02', 'lenny'] +[2020-03-07 19:06:59,572][ceph_deploy.cli][INFO ] func : +[2020-03-07 19:06:59,572][ceph_deploy.cli][INFO ] ceph_conf : None +[2020-03-07 19:06:59,572][ceph_deploy.cli][INFO ] default_release : False +[2020-03-07 19:06:59,572][ceph_deploy.config][DEBUG ] Pushing config to riot01 +[2020-03-07 19:07:01,089][riot01][DEBUG ] connected to host: root@riot01 +[2020-03-07 19:07:01,091][riot01][DEBUG ] detect platform information from remote host +[2020-03-07 19:07:01,260][riot01][DEBUG ] detect machine type +[2020-03-07 19:07:01,304][riot01][DEBUG ] write 
cluster configuration to /etc/ceph/{cluster}.conf +[2020-03-07 19:07:01,326][ceph_deploy.config][DEBUG ] Pushing config to pine01 +[2020-03-07 19:07:08,801][pine01][DEBUG ] connected to host: root@pine01 +[2020-03-07 19:07:08,803][pine01][DEBUG ] detect platform information from remote host +[2020-03-07 19:07:09,080][pine01][DEBUG ] detect machine type +[2020-03-07 19:07:09,381][pine01][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf +[2020-03-07 19:07:09,524][ceph_deploy.config][DEBUG ] Pushing config to pine02 +[2020-03-07 19:07:11,083][pine02][DEBUG ] connected to host: root@pine02 +[2020-03-07 19:07:11,085][pine02][DEBUG ] detect platform information from remote host +[2020-03-07 19:07:11,207][pine02][DEBUG ] detect machine type +[2020-03-07 19:07:11,235][pine02][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf +[2020-03-07 19:07:11,247][ceph_deploy.config][DEBUG ] Pushing config to ebin01 +[2020-03-07 19:07:13,489][ebin01][DEBUG ] connected to host: root@ebin01 +[2020-03-07 19:07:13,491][ebin01][DEBUG ] detect platform information from remote host +[2020-03-07 19:07:13,654][ebin01][DEBUG ] detect machine type +[2020-03-07 19:07:13,685][ebin01][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf +[2020-03-07 19:07:13,699][ceph_deploy.config][DEBUG ] Pushing config to ebin02 +[2020-03-07 19:07:16,385][ebin02][DEBUG ] connected to host: root@ebin02 +[2020-03-07 19:07:16,387][ebin02][DEBUG ] detect platform information from remote host +[2020-03-07 19:07:16,548][ebin02][DEBUG ] detect machine type +[2020-03-07 19:07:16,580][ebin02][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf +[2020-03-07 19:07:16,594][ceph_deploy.config][DEBUG ] Pushing config to lenny +[2020-03-07 19:07:16,964][lenny][DEBUG ] connected to host: root@lenny +[2020-03-07 19:07:16,964][lenny][DEBUG ] detect platform information from remote host +[2020-03-07 19:07:16,980][lenny][DEBUG ] detect machine type +[2020-03-07 19:07:16,984][lenny][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf +[2020-03-07 19:58:48,850][ceph_deploy.conf][DEBUG ] found configuration file at: /home/do/.cephdeploy.conf +[2020-03-07 19:58:48,850][ceph_deploy.cli][INFO ] Invoked (2.0.1): /usr/bin/ceph-deploy --username root --overwrite-conf config push riot01 pine01 pine02 ebin01 ebin02 lenny +[2020-03-07 19:58:48,850][ceph_deploy.cli][INFO ] ceph-deploy options: +[2020-03-07 19:58:48,850][ceph_deploy.cli][INFO ] username : root +[2020-03-07 19:58:48,851][ceph_deploy.cli][INFO ] verbose : False +[2020-03-07 19:58:48,851][ceph_deploy.cli][INFO ] overwrite_conf : True +[2020-03-07 19:58:48,851][ceph_deploy.cli][INFO ] subcommand : push +[2020-03-07 19:58:48,851][ceph_deploy.cli][INFO ] quiet : False +[2020-03-07 19:58:48,851][ceph_deploy.cli][INFO ] cd_conf : +[2020-03-07 19:58:48,851][ceph_deploy.cli][INFO ] cluster : ceph +[2020-03-07 19:58:48,851][ceph_deploy.cli][INFO ] client : ['riot01', 'pine01', 'pine02', 'ebin01', 'ebin02', 'lenny'] +[2020-03-07 19:58:48,851][ceph_deploy.cli][INFO ] func : +[2020-03-07 19:58:48,851][ceph_deploy.cli][INFO ] ceph_conf : None +[2020-03-07 19:58:48,851][ceph_deploy.cli][INFO ] default_release : False +[2020-03-07 19:58:48,851][ceph_deploy.config][DEBUG ] Pushing config to riot01 +[2020-03-07 19:58:50,337][riot01][DEBUG ] connected to host: root@riot01 +[2020-03-07 19:58:50,340][riot01][DEBUG ] detect platform information from remote host +[2020-03-07 19:58:50,509][riot01][DEBUG ] detect machine type +[2020-03-07 19:58:50,556][riot01][DEBUG 
] write cluster configuration to /etc/ceph/{cluster}.conf +[2020-03-07 19:58:50,580][ceph_deploy.config][DEBUG ] Pushing config to pine01 +[2020-03-07 19:58:56,714][pine01][DEBUG ] connected to host: root@pine01 +[2020-03-07 19:58:56,716][pine01][DEBUG ] detect platform information from remote host +[2020-03-07 19:58:56,879][pine01][DEBUG ] detect machine type +[2020-03-07 19:58:57,068][pine01][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf +[2020-03-07 19:58:57,126][ceph_deploy.config][DEBUG ] Pushing config to pine02 +[2020-03-07 19:58:58,729][pine02][DEBUG ] connected to host: root@pine02 +[2020-03-07 19:58:58,731][pine02][DEBUG ] detect platform information from remote host +[2020-03-07 19:58:58,852][pine02][DEBUG ] detect machine type +[2020-03-07 19:58:58,880][pine02][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf +[2020-03-07 19:58:58,893][ceph_deploy.config][DEBUG ] Pushing config to ebin01 +[2020-03-07 19:59:01,207][ebin01][DEBUG ] connected to host: root@ebin01 +[2020-03-07 19:59:01,209][ebin01][DEBUG ] detect platform information from remote host +[2020-03-07 19:59:01,370][ebin01][DEBUG ] detect machine type +[2020-03-07 19:59:01,403][ebin01][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf +[2020-03-07 19:59:01,419][ceph_deploy.config][DEBUG ] Pushing config to ebin02 +[2020-03-07 19:59:04,148][ebin02][DEBUG ] connected to host: root@ebin02 +[2020-03-07 19:59:04,150][ebin02][DEBUG ] detect platform information from remote host +[2020-03-07 19:59:04,313][ebin02][DEBUG ] detect machine type +[2020-03-07 19:59:04,345][ebin02][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf +[2020-03-07 19:59:04,360][ceph_deploy.config][DEBUG ] Pushing config to lenny +[2020-03-07 19:59:04,799][lenny][DEBUG ] connected to host: root@lenny +[2020-03-07 19:59:04,800][lenny][DEBUG ] detect platform information from remote host +[2020-03-07 19:59:04,815][lenny][DEBUG ] detect machine type +[2020-03-07 19:59:04,819][lenny][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf +[2020-03-07 20:00:01,161][ceph_deploy.conf][DEBUG ] found configuration file at: /home/do/.cephdeploy.conf +[2020-03-07 20:00:01,161][ceph_deploy.cli][INFO ] Invoked (2.0.1): /usr/bin/ceph-deploy --username root rgw create riot01 +[2020-03-07 20:00:01,161][ceph_deploy.cli][INFO ] ceph-deploy options: +[2020-03-07 20:00:01,161][ceph_deploy.cli][INFO ] username : root +[2020-03-07 20:00:01,161][ceph_deploy.cli][INFO ] verbose : False +[2020-03-07 20:00:01,161][ceph_deploy.cli][INFO ] rgw : [('riot01', 'rgw.riot01')] +[2020-03-07 20:00:01,161][ceph_deploy.cli][INFO ] overwrite_conf : False +[2020-03-07 20:00:01,161][ceph_deploy.cli][INFO ] subcommand : create +[2020-03-07 20:00:01,161][ceph_deploy.cli][INFO ] quiet : False +[2020-03-07 20:00:01,161][ceph_deploy.cli][INFO ] cd_conf : +[2020-03-07 20:00:01,161][ceph_deploy.cli][INFO ] cluster : ceph +[2020-03-07 20:00:01,161][ceph_deploy.cli][INFO ] func : +[2020-03-07 20:00:01,161][ceph_deploy.cli][INFO ] ceph_conf : None +[2020-03-07 20:00:01,161][ceph_deploy.cli][INFO ] default_release : False +[2020-03-07 20:00:01,162][ceph_deploy.rgw][DEBUG ] Deploying rgw, cluster ceph hosts riot01:rgw.riot01 +[2020-03-07 20:00:02,623][riot01][DEBUG ] connected to host: root@riot01 +[2020-03-07 20:00:02,625][riot01][DEBUG ] detect platform information from remote host +[2020-03-07 20:00:02,793][riot01][DEBUG ] detect machine type +[2020-03-07 20:00:02,829][ceph_deploy.rgw][INFO ] Distro info: debian 10.3 buster 
+[2020-03-07 20:00:02,829][ceph_deploy.rgw][DEBUG ] remote host will use systemd
+[2020-03-07 20:00:02,830][ceph_deploy.rgw][DEBUG ] deploying rgw bootstrap to riot01
+[2020-03-07 20:00:02,830][riot01][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf
+[2020-03-07 20:00:02,856][riot01][WARNING] rgw keyring does not exist yet, creating one
+[2020-03-07 20:00:02,857][riot01][DEBUG ] create a keyring file
+[2020-03-07 20:00:02,871][riot01][DEBUG ] create path recursively if it doesn't exist
+[2020-03-07 20:00:02,889][riot01][INFO ] Running command: ceph --cluster ceph --name client.bootstrap-rgw --keyring /var/lib/ceph/bootstrap-rgw/ceph.keyring auth get-or-create client.rgw.riot01 osd allow rwx mon allow rw -o /var/lib/ceph/radosgw/ceph-rgw.riot01/keyring
+[2020-03-07 20:00:03,770][riot01][ERROR ] Traceback (most recent call last):
+[2020-03-07 20:00:03,771][riot01][ERROR ] File "/usr/bin/ceph", line 1266, in <module>
+[2020-03-07 20:00:03,771][riot01][ERROR ] retval = main()
+[2020-03-07 20:00:03,772][riot01][ERROR ] File "/usr/bin/ceph", line 979, in main
+[2020-03-07 20:00:03,772][riot01][ERROR ] conffile=conffile)
+[2020-03-07 20:00:03,772][riot01][ERROR ] File "/usr/lib/python3/dist-packages/ceph_argparse.py", line 1319, in run_in_thread
+[2020-03-07 20:00:03,772][riot01][ERROR ] raise Exception("timed out")
+[2020-03-07 20:00:03,773][riot01][ERROR ] Exception: timed out
+[2020-03-07 20:00:03,773][riot01][ERROR ] exit code from command was: 1
+[2020-03-07 20:00:03,773][ceph_deploy.rgw][ERROR ] could not create rgw
+[2020-03-07 20:00:03,773][ceph_deploy][ERROR ] GenericError: Failed to create 1 RGWs
+
+[2020-03-07 20:00:12,282][ceph_deploy.conf][DEBUG ] found configuration file at: /home/do/.cephdeploy.conf
+[2020-03-07 20:00:12,283][ceph_deploy.cli][INFO ] Invoked (2.0.1): /usr/bin/ceph-deploy --username root admin riot01
+[2020-03-07 20:00:12,283][ceph_deploy.cli][INFO ] ceph-deploy options:
+[2020-03-07 20:00:12,283][ceph_deploy.cli][INFO ] username : root
+[2020-03-07 20:00:12,283][ceph_deploy.cli][INFO ] verbose : False
+[2020-03-07 20:00:12,283][ceph_deploy.cli][INFO ] overwrite_conf : False
+[2020-03-07 20:00:12,283][ceph_deploy.cli][INFO ] quiet : False
+[2020-03-07 20:00:12,283][ceph_deploy.cli][INFO ] cd_conf :
+[2020-03-07 20:00:12,283][ceph_deploy.cli][INFO ] cluster : ceph
+[2020-03-07 20:00:12,283][ceph_deploy.cli][INFO ] client : ['riot01']
+[2020-03-07 20:00:12,283][ceph_deploy.cli][INFO ] func :
+[2020-03-07 20:00:12,283][ceph_deploy.cli][INFO ] ceph_conf : None
+[2020-03-07 20:00:12,283][ceph_deploy.cli][INFO ] default_release : False
+[2020-03-07 20:00:12,283][ceph_deploy.admin][DEBUG ] Pushing admin keys and conf to riot01
+[2020-03-07 20:00:13,814][riot01][DEBUG ] connected to host: root@riot01
+[2020-03-07 20:00:13,816][riot01][DEBUG ] detect platform information from remote host
+[2020-03-07 20:00:13,985][riot01][DEBUG ] detect machine type
+[2020-03-07 20:00:14,022][riot01][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf
+[2020-03-07 20:00:16,579][ceph_deploy.conf][DEBUG ] found configuration file at: /home/do/.cephdeploy.conf
+[2020-03-07 20:00:16,579][ceph_deploy.cli][INFO ] Invoked (2.0.1): /usr/bin/ceph-deploy --username root rgw create riot01
+[2020-03-07 20:00:16,579][ceph_deploy.cli][INFO ] ceph-deploy options:
+[2020-03-07 20:00:16,579][ceph_deploy.cli][INFO ] username : root
+[2020-03-07 20:00:16,579][ceph_deploy.cli][INFO ] verbose : False
+[2020-03-07 20:00:16,579][ceph_deploy.cli][INFO ] rgw : [('riot01', 'rgw.riot01')]
+[2020-03-07 20:00:16,580][ceph_deploy.cli][INFO ] overwrite_conf : False
+[2020-03-07 20:00:16,580][ceph_deploy.cli][INFO ] subcommand : create
+[2020-03-07 20:00:16,580][ceph_deploy.cli][INFO ] quiet : False
+[2020-03-07 20:00:16,580][ceph_deploy.cli][INFO ] cd_conf :
+[2020-03-07 20:00:16,580][ceph_deploy.cli][INFO ] cluster : ceph
+[2020-03-07 20:00:16,580][ceph_deploy.cli][INFO ] func :
+[2020-03-07 20:00:16,580][ceph_deploy.cli][INFO ] ceph_conf : None
+[2020-03-07 20:00:16,580][ceph_deploy.cli][INFO ] default_release : False
+[2020-03-07 20:00:16,580][ceph_deploy.rgw][DEBUG ] Deploying rgw, cluster ceph hosts riot01:rgw.riot01
+[2020-03-07 20:00:18,117][riot01][DEBUG ] connected to host: root@riot01
+[2020-03-07 20:00:18,119][riot01][DEBUG ] detect platform information from remote host
+[2020-03-07 20:00:18,288][riot01][DEBUG ] detect machine type
+[2020-03-07 20:00:18,328][ceph_deploy.rgw][INFO ] Distro info: debian 10.3 buster
+[2020-03-07 20:00:18,328][ceph_deploy.rgw][DEBUG ] remote host will use systemd
+[2020-03-07 20:00:18,330][ceph_deploy.rgw][DEBUG ] deploying rgw bootstrap to riot01
+[2020-03-07 20:00:18,330][riot01][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf
+[2020-03-07 20:00:18,359][riot01][DEBUG ] create path recursively if it doesn't exist
+[2020-03-07 20:00:18,376][riot01][INFO ] Running command: ceph --cluster ceph --name client.bootstrap-rgw --keyring /var/lib/ceph/bootstrap-rgw/ceph.keyring auth get-or-create client.rgw.riot01 osd allow rwx mon allow rw -o /var/lib/ceph/radosgw/ceph-rgw.riot01/keyring
+[2020-03-07 20:00:19,257][riot01][ERROR ] Traceback (most recent call last):
+[2020-03-07 20:00:19,261][riot01][ERROR ] File "/usr/bin/ceph", line 1266, in <module>
+[2020-03-07 20:00:19,262][riot01][ERROR ] retval = main()
+[2020-03-07 20:00:19,262][riot01][ERROR ] File "/usr/bin/ceph", line 979, in main
+[2020-03-07 20:00:19,262][riot01][ERROR ] conffile=conffile)
+[2020-03-07 20:00:19,263][riot01][ERROR ] File "/usr/lib/python3/dist-packages/ceph_argparse.py", line 1319, in run_in_thread
+[2020-03-07 20:00:19,263][riot01][ERROR ] raise Exception("timed out")
+[2020-03-07 20:00:19,264][riot01][ERROR ] Exception: timed out
+[2020-03-07 20:00:19,264][riot01][ERROR ] exit code from command was: 1
+[2020-03-07 20:00:19,264][ceph_deploy.rgw][ERROR ] could not create rgw
+[2020-03-07 20:00:19,264][ceph_deploy][ERROR ] GenericError: Failed to create 1 RGWs
+
+[2020-03-07 20:00:23,025][ceph_deploy.conf][DEBUG ] found configuration file at: /home/do/.cephdeploy.conf
+[2020-03-07 20:00:23,026][ceph_deploy.cli][INFO ] Invoked (2.0.1): /usr/bin/ceph-deploy --username root rgw create pine01
+[2020-03-07 20:00:23,026][ceph_deploy.cli][INFO ] ceph-deploy options:
+[2020-03-07 20:00:23,026][ceph_deploy.cli][INFO ] username : root
+[2020-03-07 20:00:23,026][ceph_deploy.cli][INFO ] verbose : False
+[2020-03-07 20:00:23,026][ceph_deploy.cli][INFO ] rgw : [('pine01', 'rgw.pine01')]
+[2020-03-07 20:00:23,026][ceph_deploy.cli][INFO ] overwrite_conf : False
+[2020-03-07 20:00:23,026][ceph_deploy.cli][INFO ] subcommand : create
+[2020-03-07 20:00:23,026][ceph_deploy.cli][INFO ] quiet : False
+[2020-03-07 20:00:23,026][ceph_deploy.cli][INFO ] cd_conf :
+[2020-03-07 20:00:23,026][ceph_deploy.cli][INFO ] cluster : ceph
+[2020-03-07 20:00:23,026][ceph_deploy.cli][INFO ] func :
+[2020-03-07 20:00:23,026][ceph_deploy.cli][INFO ] ceph_conf : None
+[2020-03-07 20:00:23,026][ceph_deploy.cli][INFO ] default_release : False
+[2020-03-07 20:00:23,026][ceph_deploy.rgw][DEBUG ] Deploying rgw, cluster ceph hosts pine01:rgw.pine01
+[2020-03-07 20:00:29,546][pine01][DEBUG ] connected to host: root@pine01
+[2020-03-07 20:00:29,548][pine01][DEBUG ] detect platform information from remote host
+[2020-03-07 20:00:29,750][pine01][DEBUG ] detect machine type
+[2020-03-07 20:00:29,889][ceph_deploy.rgw][INFO ] Distro info: debian 10.3 buster
+[2020-03-07 20:00:29,889][ceph_deploy.rgw][DEBUG ] remote host will use systemd
+[2020-03-07 20:00:29,891][ceph_deploy.rgw][DEBUG ] deploying rgw bootstrap to pine01
+[2020-03-07 20:00:29,891][pine01][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf
+[2020-03-07 20:00:29,961][pine01][WARNING] rgw keyring does not exist yet, creating one
+[2020-03-07 20:00:29,961][pine01][DEBUG ] create a keyring file
+[2020-03-07 20:00:30,393][pine01][DEBUG ] create path recursively if it doesn't exist
+[2020-03-07 20:00:30,522][pine01][INFO ] Running command: ceph --cluster ceph --name client.bootstrap-rgw --keyring /var/lib/ceph/bootstrap-rgw/ceph.keyring auth get-or-create client.rgw.pine01 osd allow rwx mon allow rw -o /var/lib/ceph/radosgw/ceph-rgw.pine01/keyring
+[2020-03-07 20:00:33,889][pine01][INFO ] Running command: systemctl enable ceph-radosgw@rgw.pine01
+[2020-03-07 20:00:35,063][pine01][INFO ] Running command: systemctl start ceph-radosgw@rgw.pine01
+[2020-03-07 20:00:35,286][pine01][INFO ] Running command: systemctl enable ceph.target
+[2020-03-07 20:00:36,214][ceph_deploy.rgw][INFO ] The Ceph Object Gateway (RGW) is now running on host pine01 and default port 7480
+[2020-03-07 20:00:42,293][ceph_deploy.conf][DEBUG ] found configuration file at: /home/do/.cephdeploy.conf
+[2020-03-07 20:00:42,293][ceph_deploy.cli][INFO ] Invoked (2.0.1): /usr/bin/ceph-deploy --username root rgw create pine02
+[2020-03-07 20:00:42,293][ceph_deploy.cli][INFO ] ceph-deploy options:
+[2020-03-07 20:00:42,293][ceph_deploy.cli][INFO ] username : root
+[2020-03-07 20:00:42,293][ceph_deploy.cli][INFO ] verbose : False
+[2020-03-07 20:00:42,293][ceph_deploy.cli][INFO ] rgw : [('pine02', 'rgw.pine02')]
+[2020-03-07 20:00:42,293][ceph_deploy.cli][INFO ] overwrite_conf : False
+[2020-03-07 20:00:42,293][ceph_deploy.cli][INFO ] subcommand : create
+[2020-03-07 20:00:42,293][ceph_deploy.cli][INFO ] quiet : False
+[2020-03-07 20:00:42,293][ceph_deploy.cli][INFO ] cd_conf :
+[2020-03-07 20:00:42,293][ceph_deploy.cli][INFO ] cluster : ceph
+[2020-03-07 20:00:42,293][ceph_deploy.cli][INFO ] func :
+[2020-03-07 20:00:42,293][ceph_deploy.cli][INFO ] ceph_conf : None
+[2020-03-07 20:00:42,293][ceph_deploy.cli][INFO ] default_release : False
+[2020-03-07 20:00:42,293][ceph_deploy.rgw][DEBUG ] Deploying rgw, cluster ceph hosts pine02:rgw.pine02
+[2020-03-07 20:00:43,873][pine02][DEBUG ] connected to host: root@pine02
+[2020-03-07 20:00:43,876][pine02][DEBUG ] detect platform information from remote host
+[2020-03-07 20:00:43,995][pine02][DEBUG ] detect machine type
+[2020-03-07 20:00:44,023][ceph_deploy.rgw][INFO ] Distro info: debian 10.3 buster
+[2020-03-07 20:00:44,024][ceph_deploy.rgw][DEBUG ] remote host will use systemd
+[2020-03-07 20:00:44,026][ceph_deploy.rgw][DEBUG ] deploying rgw bootstrap to pine02
+[2020-03-07 20:00:44,026][pine02][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf
+[2020-03-07 20:00:44,046][pine02][WARNING] rgw keyring does not exist yet, creating one
+[2020-03-07 20:00:44,047][pine02][DEBUG ] create a keyring file
+[2020-03-07 20:00:44,061][pine02][DEBUG ] create path recursively if it doesn't exist
+[2020-03-07 20:00:44,082][pine02][INFO ] Running command: ceph --cluster ceph --name client.bootstrap-rgw --keyring /var/lib/ceph/bootstrap-rgw/ceph.keyring auth get-or-create client.rgw.pine02 osd allow rwx mon allow rw -o /var/lib/ceph/radosgw/ceph-rgw.pine02/keyring
+[2020-03-07 20:00:51,113][pine02][INFO ] Running command: systemctl enable ceph-radosgw@rgw.pine02
+[2020-03-07 20:00:51,800][pine02][INFO ] Running command: systemctl start ceph-radosgw@rgw.pine02
+[2020-03-07 20:00:51,883][pine02][INFO ] Running command: systemctl enable ceph.target
+[2020-03-07 20:00:52,709][ceph_deploy.rgw][INFO ] The Ceph Object Gateway (RGW) is now running on host pine02 and default port 7480
+[2020-03-07 22:26:38,398][ceph_deploy.conf][DEBUG ] found configuration file at: /home/do/.cephdeploy.conf
+[2020-03-07 22:26:38,399][ceph_deploy.cli][INFO ] Invoked (2.0.1): /usr/bin/ceph-deploy --username root admin tumor.chaos
+[2020-03-07 22:26:38,399][ceph_deploy.cli][INFO ] ceph-deploy options:
+[2020-03-07 22:26:38,399][ceph_deploy.cli][INFO ] username : root
+[2020-03-07 22:26:38,399][ceph_deploy.cli][INFO ] verbose : False
+[2020-03-07 22:26:38,399][ceph_deploy.cli][INFO ] overwrite_conf : False
+[2020-03-07 22:26:38,399][ceph_deploy.cli][INFO ] quiet : False
+[2020-03-07 22:26:38,399][ceph_deploy.cli][INFO ] cd_conf :
+[2020-03-07 22:26:38,399][ceph_deploy.cli][INFO ] cluster : ceph
+[2020-03-07 22:26:38,399][ceph_deploy.cli][INFO ] client : ['tumor.chaos']
+[2020-03-07 22:26:38,400][ceph_deploy.cli][INFO ] func :
+[2020-03-07 22:26:38,400][ceph_deploy.cli][INFO ] ceph_conf : None
+[2020-03-07 22:26:38,400][ceph_deploy.cli][INFO ] default_release : False
+[2020-03-07 22:26:38,400][ceph_deploy.admin][DEBUG ] Pushing admin keys and conf to tumor.chaos
+[2020-03-07 22:26:39,552][tumor.chaos][DEBUG ] connected to host: root@tumor.chaos
+[2020-03-07 22:26:39,554][tumor.chaos][DEBUG ] detect platform information from remote host
+[2020-03-07 22:26:39,841][tumor.chaos][DEBUG ] detect machine type
+[2020-03-07 22:26:39,862][tumor.chaos][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf
+[2020-03-07 22:27:25,305][ceph_deploy.conf][DEBUG ] found configuration file at: /home/do/.cephdeploy.conf
+[2020-03-07 22:27:25,306][ceph_deploy.cli][INFO ] Invoked (2.0.1): /usr/bin/ceph-deploy --username root purge tumor.chaos
+[2020-03-07 22:27:25,306][ceph_deploy.cli][INFO ] ceph-deploy options:
+[2020-03-07 22:27:25,306][ceph_deploy.cli][INFO ] username : root
+[2020-03-07 22:27:25,306][ceph_deploy.cli][INFO ] verbose : False
+[2020-03-07 22:27:25,306][ceph_deploy.cli][INFO ] overwrite_conf : False
+[2020-03-07 22:27:25,306][ceph_deploy.cli][INFO ] quiet : False
+[2020-03-07 22:27:25,306][ceph_deploy.cli][INFO ] cd_conf :
+[2020-03-07 22:27:25,306][ceph_deploy.cli][INFO ] cluster : ceph
+[2020-03-07 22:27:25,306][ceph_deploy.cli][INFO ] host : ['tumor.chaos']
+[2020-03-07 22:27:25,306][ceph_deploy.cli][INFO ] func :
+[2020-03-07 22:27:25,306][ceph_deploy.cli][INFO ] ceph_conf : None
+[2020-03-07 22:27:25,306][ceph_deploy.cli][INFO ] default_release : False
+[2020-03-07 22:27:25,306][ceph_deploy.install][INFO ] note that some dependencies *will not* be removed because they can cause issues with qemu-kvm
+[2020-03-07 22:27:25,306][ceph_deploy.install][INFO ] like: librbd1 and librados2
+[2020-03-07 22:27:25,306][ceph_deploy.install][DEBUG ] Purging on cluster ceph hosts tumor.chaos
+[2020-03-07 22:27:25,307][ceph_deploy.install][DEBUG ] Detecting platform for host tumor.chaos ...
+[2020-03-07 22:27:26,297][tumor.chaos][DEBUG ] connected to host: root@tumor.chaos
+[2020-03-07 22:27:26,299][tumor.chaos][DEBUG ] detect platform information from remote host
+[2020-03-07 22:27:26,536][tumor.chaos][DEBUG ] detect machine type
+[2020-03-07 22:27:26,556][ceph_deploy.install][INFO ] Distro info: debian 10.3 buster
+[2020-03-07 22:27:26,557][tumor.chaos][INFO ] Purging Ceph on tumor.chaos
+[2020-03-07 22:27:26,564][tumor.chaos][INFO ] Running command: env DEBIAN_FRONTEND=noninteractive DEBIAN_PRIORITY=critical apt-get --assume-yes -q -f --force-yes remove --purge ceph ceph-mds ceph-common ceph-fs-common radosgw
+[2020-03-07 22:27:27,152][tumor.chaos][DEBUG ] Reading package lists...
+[2020-03-07 22:27:28,521][tumor.chaos][DEBUG ] Building dependency tree...
+[2020-03-07 22:27:28,537][tumor.chaos][DEBUG ] Reading state information...
+[2020-03-07 22:27:30,257][tumor.chaos][DEBUG ] Package 'ceph-fs-common' is not installed, so not removed
+[2020-03-07 22:27:30,258][tumor.chaos][DEBUG ] Package 'ceph-mds' is not installed, so not removed
+[2020-03-07 22:27:30,258][tumor.chaos][DEBUG ] Package 'radosgw' is not installed, so not removed
+[2020-03-07 22:27:30,258][tumor.chaos][DEBUG ] The following packages were automatically installed and are no longer required:
+[2020-03-07 22:27:30,259][tumor.chaos][DEBUG ] gdisk libboost-context1.67.0 libboost-coroutine1.67.0 libboost-python1.67.0
+[2020-03-07 22:27:30,259][tumor.chaos][DEBUG ] libboost-random1.67.0 libboost-regex1.67.0 libgoogle-perftools4 liboath0
+[2020-03-07 22:27:30,259][tumor.chaos][DEBUG ] librabbitmq4 libradosstriper1 libtcmalloc-minimal4 python3-bcrypt
+[2020-03-07 22:27:30,259][tumor.chaos][DEBUG ] python3-bs4 python3-ceph-argparse python3-cephfs python3-cherrypy3
+[2020-03-07 22:27:30,260][tumor.chaos][DEBUG ] python3-jwt python3-logutils python3-paste python3-pastedeploy python3-pecan
+[2020-03-07 22:27:30,260][tumor.chaos][DEBUG ] python3-prettytable python3-rados python3-rbd python3-simplegeneric
+[2020-03-07 22:27:30,260][tumor.chaos][DEBUG ] python3-singledispatch python3-soupsieve python3-tempita python3-waitress
+[2020-03-07 22:27:30,260][tumor.chaos][DEBUG ] python3-webob python3-webtest python3-werkzeug
+[2020-03-07 22:27:30,260][tumor.chaos][DEBUG ] Use 'apt autoremove' to remove them.
+[2020-03-07 22:27:30,676][tumor.chaos][DEBUG ] The following packages will be REMOVED:
+[2020-03-07 22:27:30,677][tumor.chaos][DEBUG ] ceph* ceph-base* ceph-common* ceph-mgr* ceph-mon* ceph-osd*
+[2020-03-07 22:27:33,150][tumor.chaos][DEBUG ] [master 495f25a] saving uncommitted changes in /etc prior to apt run
+[2020-03-07 22:27:33,151][tumor.chaos][DEBUG ] 4 files changed, 36 insertions(+)
+[2020-03-07 22:27:33,151][tumor.chaos][DEBUG ] create mode 100644 ceph/ceph.client.admin.keyring
+[2020-03-07 22:27:33,151][tumor.chaos][DEBUG ] create mode 100644 ceph/ceph.conf
+[2020-03-07 22:27:33,151][tumor.chaos][DEBUG ] create mode 100644 ceph/tmpo8ylPR
+[2020-03-07 22:27:34,972][tumor.chaos][DEBUG ] 0 upgraded, 0 newly installed, 6 to remove and 3 not upgraded.
+[2020-03-07 22:27:34,973][tumor.chaos][DEBUG ] After this operation, 144 MB disk space will be freed.
+[2020-03-07 22:27:35,238][tumor.chaos][DEBUG ] (Reading database ... (Reading database ... 5% (Reading database ... 10% (Reading database ... 15% (Reading database ... 20% (Reading database ... 25% (Reading database ... 30% (Reading database ... 35% (Reading database ... 40% (Reading database ... 45% (Reading database ... 50% (Reading database ... 55% (Reading database ... 60% (Reading database ... 65% (Reading database ... 70% (Reading database ... 75% (Reading database ... 80% (Reading database ... 85% (Reading database ... 90% (Reading database ... 95% (Reading database ... 100% (Reading database ... 146703 files and directories currently installed.)
+[2020-03-07 22:27:35,254][tumor.chaos][DEBUG ] Removing ceph (14.2.7-1~bpo10+1) ...
+[2020-03-07 22:27:35,419][tumor.chaos][DEBUG ] Removing ceph-mgr (14.2.7-1~bpo10+1) ...
+[2020-03-07 22:27:39,050][tumor.chaos][DEBUG ] Removing ceph-osd (14.2.7-1~bpo10+1) ...
+[2020-03-07 22:27:43,376][tumor.chaos][DEBUG ] Removing ceph-mon (14.2.7-1~bpo10+1) ...
+[2020-03-07 22:27:47,051][tumor.chaos][DEBUG ] Removing ceph-base (14.2.7-1~bpo10+1) ...
+[2020-03-07 22:27:50,626][tumor.chaos][DEBUG ] Removing ceph-common (14.2.7-1~bpo10+1) ...
+[2020-03-07 22:27:54,505][tumor.chaos][DEBUG ] Processing triggers for libc-bin (2.28-10) ...
+[2020-03-07 22:27:55,774][tumor.chaos][DEBUG ] (Reading database ... (Reading database ... 5% (Reading database ... 10% (Reading database ... 15% (Reading database ... 20% (Reading database ... 25% (Reading database ... 30% (Reading database ... 35% (Reading database ... 40% (Reading database ... 45% (Reading database ... 50% (Reading database ... 55% (Reading database ... 60% (Reading database ... 65% (Reading database ... 70% (Reading database ... 75% (Reading database ... 80% (Reading database ... 85% (Reading database ... 90% (Reading database ... 95% (Reading database ... 100% (Reading database ... 146298 files and directories currently installed.)
+[2020-03-07 22:27:55,774][tumor.chaos][DEBUG ] Purging configuration files for ceph-mon (14.2.7-1~bpo10+1) ...
+[2020-03-07 22:28:01,460][tumor.chaos][DEBUG ] Purging configuration files for ceph-base (14.2.7-1~bpo10+1) ...
+[2020-03-07 22:28:06,844][tumor.chaos][DEBUG ] Purging configuration files for ceph (14.2.7-1~bpo10+1) ...
+[2020-03-07 22:28:06,959][tumor.chaos][DEBUG ] Purging configuration files for ceph-mgr (14.2.7-1~bpo10+1) ...
+[2020-03-07 22:28:12,292][tumor.chaos][DEBUG ] Purging configuration files for ceph-common (14.2.7-1~bpo10+1) ...
+[2020-03-07 22:28:19,832][tumor.chaos][DEBUG ] Purging configuration files for ceph-osd (14.2.7-1~bpo10+1) ...
+[2020-03-07 22:28:25,314][tumor.chaos][DEBUG ] Processing triggers for systemd (241-7~deb10u3) ...
+[2020-03-07 22:28:29,998][tumor.chaos][DEBUG ] [master 10801ef] committing changes in /etc made by "apt-get --assume-yes -q -f --force-yes remove --purge ceph ceph-mds ceph-common ceph-fs-common radosgw"
+[2020-03-07 22:28:29,999][tumor.chaos][DEBUG ] 27 files changed, 1 insertion(+), 144 deletions(-)
+[2020-03-07 22:28:29,999][tumor.chaos][DEBUG ] delete mode 100644 ceph/ceph.client.admin.keyring
+[2020-03-07 22:28:29,999][tumor.chaos][DEBUG ] delete mode 100644 ceph/ceph.conf
+[2020-03-07 22:28:29,999][tumor.chaos][DEBUG ] delete mode 100644 ceph/rbdmap
+[2020-03-07 22:28:30,000][tumor.chaos][DEBUG ] delete mode 100644 ceph/tmpo8ylPR
+[2020-03-07 22:28:30,000][tumor.chaos][DEBUG ] delete mode 100644 default/ceph
+[2020-03-07 22:28:30,000][tumor.chaos][DEBUG ] delete mode 100755 init.d/rbdmap
+[2020-03-07 22:28:30,000][tumor.chaos][DEBUG ] delete mode 100644 logrotate.d/ceph-common
+[2020-03-07 22:28:30,001][tumor.chaos][DEBUG ] delete mode 120000 rc0.d/K01rbdmap
+[2020-03-07 22:28:30,001][tumor.chaos][DEBUG ] delete mode 120000 rc1.d/K01rbdmap
+[2020-03-07 22:28:30,001][tumor.chaos][DEBUG ] delete mode 120000 rc2.d/S01rbdmap
+[2020-03-07 22:28:30,002][tumor.chaos][DEBUG ] delete mode 120000 rc3.d/S01rbdmap
+[2020-03-07 22:28:30,002][tumor.chaos][DEBUG ] delete mode 120000 rc4.d/S01rbdmap
+[2020-03-07 22:28:30,002][tumor.chaos][DEBUG ] delete mode 120000 rc5.d/S01rbdmap
+[2020-03-07 22:28:30,003][tumor.chaos][DEBUG ] delete mode 120000 rc6.d/K01rbdmap
+[2020-03-07 22:28:30,003][tumor.chaos][DEBUG ] delete mode 100644 sudoers.d/ceph-osd-smartctl
+[2020-03-07 22:28:30,003][tumor.chaos][DEBUG ] delete mode 100644 sysctl.d/30-ceph-osd.conf
+[2020-03-07 22:28:30,003][tumor.chaos][DEBUG ] delete mode 120000 systemd/system/ceph.target.wants/ceph-crash.service
+[2020-03-07 22:28:30,004][tumor.chaos][DEBUG ] delete mode 120000 systemd/system/ceph.target.wants/ceph-mgr.target
+[2020-03-07 22:28:30,004][tumor.chaos][DEBUG ] delete mode 120000 systemd/system/ceph.target.wants/ceph-mon.target
+[2020-03-07 22:28:30,005][tumor.chaos][DEBUG ] delete mode 120000 systemd/system/ceph.target.wants/ceph-osd.target
+[2020-03-07 22:28:30,005][tumor.chaos][DEBUG ] delete mode 120000 systemd/system/multi-user.target.wants/ceph-mgr.target
+[2020-03-07 22:28:30,006][tumor.chaos][DEBUG ] delete mode 120000 systemd/system/multi-user.target.wants/ceph-mon.target
+[2020-03-07 22:28:30,007][tumor.chaos][DEBUG ] delete mode 120000 systemd/system/multi-user.target.wants/ceph-osd.target
+[2020-03-07 22:28:30,007][tumor.chaos][DEBUG ] delete mode 120000 systemd/system/multi-user.target.wants/ceph.target
+[2020-03-07 22:28:30,008][tumor.chaos][DEBUG ] delete mode 120000 systemd/system/multi-user.target.wants/rbdmap.service
+[2020-03-07 22:28:30,175][tumor.chaos][WARNING] W: --force-yes is deprecated, use one of the options starting with --allow instead.
+[2020-03-07 22:39:00,315][ceph_deploy.conf][DEBUG ] found configuration file at: /home/do/.cephdeploy.conf
+[2020-03-07 22:39:00,316][ceph_deploy.cli][INFO ] Invoked (2.0.1): /usr/bin/ceph-deploy --username root purgedata tumor
+[2020-03-07 22:39:00,316][ceph_deploy.cli][INFO ] ceph-deploy options:
+[2020-03-07 22:39:00,316][ceph_deploy.cli][INFO ] username : root
+[2020-03-07 22:39:00,316][ceph_deploy.cli][INFO ] verbose : False
+[2020-03-07 22:39:00,316][ceph_deploy.cli][INFO ] overwrite_conf : False
+[2020-03-07 22:39:00,316][ceph_deploy.cli][INFO ] quiet : False
+[2020-03-07 22:39:00,316][ceph_deploy.cli][INFO ] cd_conf :
+[2020-03-07 22:39:00,316][ceph_deploy.cli][INFO ] cluster : ceph
+[2020-03-07 22:39:00,316][ceph_deploy.cli][INFO ] host : ['tumor']
+[2020-03-07 22:39:00,316][ceph_deploy.cli][INFO ] func :
+[2020-03-07 22:39:00,317][ceph_deploy.cli][INFO ] ceph_conf : None
+[2020-03-07 22:39:00,317][ceph_deploy.cli][INFO ] default_release : False
+[2020-03-07 22:39:00,317][ceph_deploy.install][DEBUG ] Purging data from cluster ceph hosts tumor
+[2020-03-07 22:39:01,349][tumor][DEBUG ] connected to host: root@tumor
+[2020-03-07 22:39:01,351][tumor][DEBUG ] detect platform information from remote host
+[2020-03-07 22:39:01,532][tumor][DEBUG ] detect machine type
+[2020-03-07 22:39:01,562][tumor][DEBUG ] find the location of an executable
+[2020-03-07 22:39:02,542][tumor][DEBUG ] connected to host: root@tumor
+[2020-03-07 22:39:02,543][tumor][DEBUG ] detect platform information from remote host
+[2020-03-07 22:39:02,863][tumor][DEBUG ] detect machine type
+[2020-03-07 22:39:02,886][ceph_deploy.install][INFO ] Distro info: debian 10.3 buster
+[2020-03-07 22:39:02,886][tumor][INFO ] purging data on tumor
+[2020-03-07 22:39:02,893][tumor][INFO ] Running command: rm -rf --one-file-system -- /var/lib/ceph
+[2020-03-07 22:39:02,953][tumor][INFO ] Running command: rm -rf --one-file-system -- /etc/ceph/
+[2020-03-07 22:40:20,773][ceph_deploy.conf][DEBUG ] found configuration file at: /home/do/.cephdeploy.conf
+[2020-03-07 22:40:20,774][ceph_deploy.cli][INFO ] Invoked (2.0.1): /usr/bin/ceph-deploy --username root admin tumor.chaos
+[2020-03-07 22:40:20,774][ceph_deploy.cli][INFO ] ceph-deploy options:
+[2020-03-07 22:40:20,775][ceph_deploy.cli][INFO ] username : root
+[2020-03-07 22:40:20,775][ceph_deploy.cli][INFO ] verbose : False
+[2020-03-07 22:40:20,775][ceph_deploy.cli][INFO ] overwrite_conf : False
+[2020-03-07 22:40:20,775][ceph_deploy.cli][INFO ] quiet : False
+[2020-03-07 22:40:20,775][ceph_deploy.cli][INFO ] cd_conf :
+[2020-03-07 22:40:20,775][ceph_deploy.cli][INFO ] cluster : ceph
+[2020-03-07 22:40:20,775][ceph_deploy.cli][INFO ] client : ['tumor.chaos']
+[2020-03-07 22:40:20,775][ceph_deploy.cli][INFO ] func :
+[2020-03-07 22:40:20,775][ceph_deploy.cli][INFO ] ceph_conf : None
+[2020-03-07 22:40:20,775][ceph_deploy.cli][INFO ] default_release : False
+[2020-03-07 22:40:20,775][ceph_deploy.admin][DEBUG ] Pushing admin keys and conf to tumor.chaos
+[2020-03-07 22:40:21,757][tumor.chaos][DEBUG ] connected to host: root@tumor.chaos
+[2020-03-07 22:40:21,759][tumor.chaos][DEBUG ] detect platform information from remote host
+[2020-03-07 22:40:21,917][tumor.chaos][DEBUG ] detect machine type
+[2020-03-07 22:40:21,938][tumor.chaos][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf
+[2020-03-08 19:38:41,142][ceph_deploy.conf][DEBUG ] found configuration file at: /home/do/.cephdeploy.conf
+[2020-03-08 19:38:41,152][ceph_deploy.cli][INFO ] Invoked (2.0.1): /usr/bin/ceph-deploy --username root mds create pine01 pine02
+[2020-03-08 19:38:41,153][ceph_deploy.cli][INFO ] ceph-deploy options:
+[2020-03-08 19:38:41,153][ceph_deploy.cli][INFO ] username : root
+[2020-03-08 19:38:41,153][ceph_deploy.cli][INFO ] verbose : False
+[2020-03-08 19:38:41,153][ceph_deploy.cli][INFO ] overwrite_conf : False
+[2020-03-08 19:38:41,153][ceph_deploy.cli][INFO ] subcommand : create
+[2020-03-08 19:38:41,153][ceph_deploy.cli][INFO ] quiet : False
+[2020-03-08 19:38:41,153][ceph_deploy.cli][INFO ] cd_conf :
+[2020-03-08 19:38:41,153][ceph_deploy.cli][INFO ] cluster : ceph
+[2020-03-08 19:38:41,153][ceph_deploy.cli][INFO ] func :
+[2020-03-08 19:38:41,153][ceph_deploy.cli][INFO ] ceph_conf : None
+[2020-03-08 19:38:41,153][ceph_deploy.cli][INFO ] mds : [('pine01', 'pine01'), ('pine02', 'pine02')]
+[2020-03-08 19:38:41,153][ceph_deploy.cli][INFO ] default_release : False
+[2020-03-08 19:38:41,154][ceph_deploy.mds][DEBUG ] Deploying mds, cluster ceph hosts pine01:pine01 pine02:pine02
+[2020-03-08 19:38:49,717][pine01][DEBUG ] connected to host: root@pine01
+[2020-03-08 19:38:49,767][pine01][DEBUG ] detect platform information from remote host
+[2020-03-08 19:38:50,995][pine01][DEBUG ] detect machine type
+[2020-03-08 19:38:51,018][ceph_deploy.mds][INFO ] Distro info: debian 10.3 buster
+[2020-03-08 19:38:51,018][ceph_deploy.mds][DEBUG ] remote host will use systemd
+[2020-03-08 19:38:51,020][ceph_deploy.mds][DEBUG ] deploying mds bootstrap to pine01
+[2020-03-08 19:38:51,020][pine01][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf
+[2020-03-08 19:38:51,403][pine01][WARNING] mds keyring does not exist yet, creating one
+[2020-03-08 19:38:51,403][pine01][DEBUG ] create a keyring file
+[2020-03-08 19:38:51,417][pine01][DEBUG ] create path if it doesn't exist
+[2020-03-08 19:38:51,435][pine01][INFO ] Running command: ceph --cluster ceph --name client.bootstrap-mds --keyring /var/lib/ceph/bootstrap-mds/ceph.keyring auth get-or-create mds.pine01 osd allow rwx mds allow mon allow profile mds -o /var/lib/ceph/mds/ceph-pine01/keyring
+[2020-03-08 19:38:54,878][pine01][INFO ] Running command: systemctl enable ceph-mds@pine01
+[2020-03-08 19:38:55,051][pine01][WARNING] Created symlink /etc/systemd/system/ceph-mds.target.wants/ceph-mds@pine01.service → /lib/systemd/system/ceph-mds@.service.
+[2020-03-08 19:38:55,892][pine01][INFO ] Running command: systemctl start ceph-mds@pine01
+[2020-03-08 19:38:56,173][pine01][INFO ] Running command: systemctl enable ceph.target
+[2020-03-08 19:38:58,733][pine02][DEBUG ] connected to host: root@pine02
+[2020-03-08 19:38:58,735][pine02][DEBUG ] detect platform information from remote host
+[2020-03-08 19:38:58,892][pine02][DEBUG ] detect machine type
+[2020-03-08 19:38:58,920][ceph_deploy.mds][INFO ] Distro info: debian 10.3 buster
+[2020-03-08 19:38:58,921][ceph_deploy.mds][DEBUG ] remote host will use systemd
+[2020-03-08 19:38:58,921][ceph_deploy.mds][DEBUG ] deploying mds bootstrap to pine02
+[2020-03-08 19:38:58,921][pine02][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf
+[2020-03-08 19:38:58,941][pine02][WARNING] mds keyring does not exist yet, creating one
+[2020-03-08 19:38:58,941][pine02][DEBUG ] create a keyring file
+[2020-03-08 19:38:58,955][pine02][DEBUG ] create path if it doesn't exist
+[2020-03-08 19:38:58,974][pine02][INFO ] Running command: ceph --cluster ceph --name client.bootstrap-mds --keyring /var/lib/ceph/bootstrap-mds/ceph.keyring auth get-or-create mds.pine02 osd allow rwx mds allow mon allow profile mds -o /var/lib/ceph/mds/ceph-pine02/keyring
+[2020-03-08 19:39:02,728][pine02][INFO ] Running command: systemctl enable ceph-mds@pine02
+[2020-03-08 19:39:02,801][pine02][WARNING] Created symlink /etc/systemd/system/ceph-mds.target.wants/ceph-mds@pine02.service → /lib/systemd/system/ceph-mds@.service.
+[2020-03-08 19:39:03,477][pine02][INFO ] Running command: systemctl start ceph-mds@pine02
+[2020-03-08 19:39:03,560][pine02][INFO ] Running command: systemctl enable ceph.target
+[2020-03-08 20:10:31,430][ceph_deploy.conf][DEBUG ] found configuration file at: /home/do/.cephdeploy.conf
+[2020-03-08 20:10:31,430][ceph_deploy.cli][INFO ] Invoked (2.0.1): /usr/bin/ceph-deploy --username root mds create pine01
+[2020-03-08 20:10:31,430][ceph_deploy.cli][INFO ] ceph-deploy options:
+[2020-03-08 20:10:31,430][ceph_deploy.cli][INFO ] username : root
+[2020-03-08 20:10:31,430][ceph_deploy.cli][INFO ] verbose : False
+[2020-03-08 20:10:31,430][ceph_deploy.cli][INFO ] overwrite_conf : False
+[2020-03-08 20:10:31,430][ceph_deploy.cli][INFO ] subcommand : create
+[2020-03-08 20:10:31,430][ceph_deploy.cli][INFO ] quiet : False
+[2020-03-08 20:10:31,430][ceph_deploy.cli][INFO ] cd_conf :
+[2020-03-08 20:10:31,431][ceph_deploy.cli][INFO ] cluster : ceph
+[2020-03-08 20:10:31,431][ceph_deploy.cli][INFO ] func :
+[2020-03-08 20:10:31,431][ceph_deploy.cli][INFO ] ceph_conf : None
+[2020-03-08 20:10:31,431][ceph_deploy.cli][INFO ] mds : [('pine01', 'pine01')]
+[2020-03-08 20:10:31,431][ceph_deploy.cli][INFO ] default_release : False
+[2020-03-08 20:10:31,431][ceph_deploy.mds][DEBUG ] Deploying mds, cluster ceph hosts pine01:pine01
+[2020-03-08 20:10:38,053][pine01][DEBUG ] connected to host: root@pine01
+[2020-03-08 20:10:38,053][pine01][DEBUG ] detect platform information from remote host
+[2020-03-08 20:10:38,225][pine01][DEBUG ] detect machine type
+[2020-03-08 20:10:38,339][ceph_deploy.mds][INFO ] Distro info: debian 10.3 buster
+[2020-03-08 20:10:38,339][ceph_deploy.mds][DEBUG ] remote host will use systemd
+[2020-03-08 20:10:38,339][ceph_deploy.mds][DEBUG ] deploying mds bootstrap to pine01
+[2020-03-08 20:10:38,339][pine01][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf
+[2020-03-08 20:10:38,417][pine01][DEBUG ] create path if it doesn't exist
+[2020-03-08 20:10:38,479][pine01][INFO ] Running command: ceph --cluster ceph --name client.bootstrap-mds --keyring /var/lib/ceph/bootstrap-mds/ceph.keyring auth get-or-create mds.pine01 osd allow rwx mds allow mon allow profile mds -o /var/lib/ceph/mds/ceph-pine01/keyring
+[2020-03-08 20:10:41,818][pine01][INFO ] Running command: systemctl enable ceph-mds@pine01
+[2020-03-08 20:10:42,742][pine01][INFO ] Running command: systemctl start ceph-mds@pine01
+[2020-03-08 20:10:43,075][pine01][INFO ] Running command: systemctl enable ceph.target
+[2020-03-08 20:14:14,144][ceph_deploy.conf][DEBUG ] found configuration file at: /home/do/.cephdeploy.conf
+[2020-03-08 20:14:14,144][ceph_deploy.cli][INFO ] Invoked (2.0.1): /usr/bin/ceph-deploy --username root mds create pine02
+[2020-03-08 20:14:14,144][ceph_deploy.cli][INFO ] ceph-deploy options:
+[2020-03-08 20:14:14,145][ceph_deploy.cli][INFO ] username : root
+[2020-03-08 20:14:14,145][ceph_deploy.cli][INFO ] verbose : False
+[2020-03-08 20:14:14,145][ceph_deploy.cli][INFO ] overwrite_conf : False
+[2020-03-08 20:14:14,145][ceph_deploy.cli][INFO ] subcommand : create
+[2020-03-08 20:14:14,145][ceph_deploy.cli][INFO ] quiet : False
+[2020-03-08 20:14:14,145][ceph_deploy.cli][INFO ] cd_conf :
+[2020-03-08 20:14:14,145][ceph_deploy.cli][INFO ] cluster : ceph
+[2020-03-08 20:14:14,145][ceph_deploy.cli][INFO ] func :
+[2020-03-08 20:14:14,145][ceph_deploy.cli][INFO ] ceph_conf : None
+[2020-03-08 20:14:14,145][ceph_deploy.cli][INFO ] mds : [('pine02', 'pine02')]
+[2020-03-08 20:14:14,145][ceph_deploy.cli][INFO ] default_release : False
+[2020-03-08 20:14:14,145][ceph_deploy.mds][DEBUG ] Deploying mds, cluster ceph hosts pine02:pine02
+[2020-03-08 20:14:15,675][pine02][DEBUG ] connected to host: root@pine02
+[2020-03-08 20:14:15,676][pine02][DEBUG ] detect platform information from remote host
+[2020-03-08 20:14:15,804][pine02][DEBUG ] detect machine type
+[2020-03-08 20:14:15,828][ceph_deploy.mds][INFO ] Distro info: debian 10.3 buster
+[2020-03-08 20:14:15,829][ceph_deploy.mds][DEBUG ] remote host will use systemd
+[2020-03-08 20:14:15,829][ceph_deploy.mds][DEBUG ] deploying mds bootstrap to pine02
+[2020-03-08 20:14:15,829][pine02][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf
+[2020-03-08 20:14:15,848][pine02][DEBUG ] create path if it doesn't exist
+[2020-03-08 20:14:15,861][pine02][INFO ] Running command: ceph --cluster ceph --name client.bootstrap-mds --keyring /var/lib/ceph/bootstrap-mds/ceph.keyring auth get-or-create mds.pine02 osd allow rwx mds allow mon allow profile mds -o /var/lib/ceph/mds/ceph-pine02/keyring
+[2020-03-08 20:14:18,907][pine02][INFO ] Running command: systemctl enable ceph-mds@pine02
+[2020-03-08 20:14:19,634][pine02][INFO ] Running command: systemctl start ceph-mds@pine02
+[2020-03-08 20:14:19,708][pine02][INFO ] Running command: systemctl enable ceph.target
+[2020-03-10 21:08:17,204][ceph_deploy.conf][DEBUG ] found configuration file at: /home/do/.cephdeploy.conf
+[2020-03-10 21:08:17,215][ceph_deploy.cli][INFO ] Invoked (2.0.1): /usr/bin/ceph-deploy --username root rgw create pine01
+[2020-03-10 21:08:17,215][ceph_deploy.cli][INFO ] ceph-deploy options:
+[2020-03-10 21:08:17,215][ceph_deploy.cli][INFO ] username : root
+[2020-03-10 21:08:17,215][ceph_deploy.cli][INFO ] verbose : False
+[2020-03-10 21:08:17,215][ceph_deploy.cli][INFO ] rgw : [('pine01', 'rgw.pine01')]
+[2020-03-10 21:08:17,215][ceph_deploy.cli][INFO ] overwrite_conf : False
+[2020-03-10 21:08:17,215][ceph_deploy.cli][INFO ] subcommand : create
+[2020-03-10 21:08:17,215][ceph_deploy.cli][INFO ] quiet : False
+[2020-03-10 21:08:17,215][ceph_deploy.cli][INFO ] cd_conf :
+[2020-03-10 21:08:17,215][ceph_deploy.cli][INFO ] cluster : ceph
+[2020-03-10 21:08:17,216][ceph_deploy.cli][INFO ] func :
+[2020-03-10 21:08:17,216][ceph_deploy.cli][INFO ] ceph_conf : None
+[2020-03-10 21:08:17,216][ceph_deploy.cli][INFO ] default_release : False
+[2020-03-10 21:08:17,216][ceph_deploy.rgw][DEBUG ] Deploying rgw, cluster ceph hosts pine01:rgw.pine01
+[2020-03-10 21:08:25,594][pine01][DEBUG ] connected to host: root@pine01
+[2020-03-10 21:08:25,597][pine01][DEBUG ] detect platform information from remote host
+[2020-03-10 21:08:26,516][pine01][DEBUG ] detect machine type
+[2020-03-10 21:08:26,628][ceph_deploy.rgw][INFO ] Distro info: debian 10.3 buster
+[2020-03-10 21:08:26,628][ceph_deploy.rgw][DEBUG ] remote host will use systemd
+[2020-03-10 21:08:26,630][ceph_deploy.rgw][DEBUG ] deploying rgw bootstrap to pine01
+[2020-03-10 21:08:26,630][pine01][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf
+[2020-03-10 21:08:27,130][pine01][DEBUG ] create path recursively if it doesn't exist
+[2020-03-10 21:08:27,209][pine01][INFO ] Running command: ceph --cluster ceph --name client.bootstrap-rgw --keyring /var/lib/ceph/bootstrap-rgw/ceph.keyring auth get-or-create client.rgw.pine01 osd allow rwx mon allow rw -o /var/lib/ceph/radosgw/ceph-rgw.pine01/keyring
+[2020-03-10 21:08:30,770][pine01][INFO ] Running command: systemctl enable ceph-radosgw@rgw.pine01
+[2020-03-10 21:08:31,878][pine01][INFO ] Running command: systemctl start ceph-radosgw@rgw.pine01
+[2020-03-10 21:08:32,271][pine01][INFO ] Running command: systemctl enable ceph.target
+[2020-03-10 21:08:33,099][ceph_deploy.rgw][INFO ] The Ceph Object Gateway (RGW) is now running on host pine01 and default port 7480
+[2020-03-10 22:05:08,473][ceph_deploy.conf][DEBUG ] found configuration file at: /home/do/.cephdeploy.conf
+[2020-03-10 22:05:08,473][ceph_deploy.cli][INFO ] Invoked (2.0.1): /usr/bin/ceph-deploy --username root rgw create pine02
+[2020-03-10 22:05:08,473][ceph_deploy.cli][INFO ] ceph-deploy options:
+[2020-03-10 22:05:08,473][ceph_deploy.cli][INFO ] username : root
+[2020-03-10 22:05:08,473][ceph_deploy.cli][INFO ] verbose : False
+[2020-03-10 22:05:08,473][ceph_deploy.cli][INFO ] rgw : [('pine02', 'rgw.pine02')]
+[2020-03-10 22:05:08,473][ceph_deploy.cli][INFO ] overwrite_conf : False
+[2020-03-10 22:05:08,473][ceph_deploy.cli][INFO ] subcommand : create
+[2020-03-10 22:05:08,473][ceph_deploy.cli][INFO ] quiet : False
+[2020-03-10 22:05:08,473][ceph_deploy.cli][INFO ] cd_conf :
+[2020-03-10 22:05:08,473][ceph_deploy.cli][INFO ] cluster : ceph
+[2020-03-10 22:05:08,473][ceph_deploy.cli][INFO ] func :
+[2020-03-10 22:05:08,473][ceph_deploy.cli][INFO ] ceph_conf : None
+[2020-03-10 22:05:08,473][ceph_deploy.cli][INFO ] default_release : False
+[2020-03-10 22:05:08,473][ceph_deploy.rgw][DEBUG ] Deploying rgw, cluster ceph hosts pine02:rgw.pine02
+[2020-03-10 22:05:10,192][pine02][DEBUG ] connected to host: root@pine02
+[2020-03-10 22:05:10,194][pine02][DEBUG ] detect platform information from remote host
+[2020-03-10 22:05:10,363][pine02][DEBUG ] detect machine type
+[2020-03-10 22:05:10,384][ceph_deploy.rgw][INFO ] Distro info: debian 10.3 buster
+[2020-03-10 22:05:10,384][ceph_deploy.rgw][DEBUG ] remote host will use systemd
+[2020-03-10 22:05:10,386][ceph_deploy.rgw][DEBUG ] deploying rgw bootstrap to pine02
+[2020-03-10 22:05:10,386][pine02][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf
+[2020-03-10 22:05:10,407][pine02][DEBUG ] create path recursively if it doesn't exist
+[2020-03-10 22:05:10,419][pine02][INFO ] Running command: ceph --cluster ceph --name client.bootstrap-rgw --keyring /var/lib/ceph/bootstrap-rgw/ceph.keyring auth get-or-create client.rgw.pine02 osd allow rwx mon allow rw -o /var/lib/ceph/radosgw/ceph-rgw.pine02/keyring
+[2020-03-10 22:05:13,675][pine02][INFO ] Running command: systemctl enable ceph-radosgw@rgw.pine02
+[2020-03-10 22:05:14,408][pine02][INFO ] Running command: systemctl start ceph-radosgw@rgw.pine02
+[2020-03-10 22:05:14,486][pine02][INFO ] Running command: systemctl enable ceph.target
+[2020-03-10 22:05:15,212][ceph_deploy.rgw][INFO ] The Ceph Object Gateway (RGW) is now running on host pine02 and default port 7480
+[2020-03-10 22:13:21,593][ceph_deploy.conf][DEBUG ] found configuration file at: /home/do/.cephdeploy.conf
+[2020-03-10 22:13:21,593][ceph_deploy.cli][INFO ] Invoked (2.0.1): /usr/bin/ceph-deploy --username root rgw create pine01
+[2020-03-10 22:13:21,594][ceph_deploy.cli][INFO ] ceph-deploy options:
+[2020-03-10 22:13:21,594][ceph_deploy.cli][INFO ] username : root
+[2020-03-10 22:13:21,594][ceph_deploy.cli][INFO ] verbose : False
+[2020-03-10 22:13:21,594][ceph_deploy.cli][INFO ] rgw : [('pine01', 'rgw.pine01')]
+[2020-03-10 22:13:21,594][ceph_deploy.cli][INFO ] overwrite_conf : False
+[2020-03-10 22:13:21,594][ceph_deploy.cli][INFO ] subcommand : create
+[2020-03-10 22:13:21,594][ceph_deploy.cli][INFO ] quiet : False
+[2020-03-10 22:13:21,594][ceph_deploy.cli][INFO ] cd_conf :
+[2020-03-10 22:13:21,594][ceph_deploy.cli][INFO ] cluster : ceph
+[2020-03-10 22:13:21,594][ceph_deploy.cli][INFO ] func :
+[2020-03-10 22:13:21,594][ceph_deploy.cli][INFO ] ceph_conf : None
+[2020-03-10 22:13:21,594][ceph_deploy.cli][INFO ] default_release : False
+[2020-03-10 22:13:21,594][ceph_deploy.rgw][DEBUG ] Deploying rgw, cluster ceph hosts pine01:rgw.pine01
+[2020-03-10 22:13:30,457][pine01][DEBUG ] connected to host: root@pine01
+[2020-03-10 22:13:30,459][pine01][DEBUG ] detect platform information from remote host
+[2020-03-10 22:13:31,219][pine01][DEBUG ] detect machine type
+[2020-03-10 22:13:31,342][ceph_deploy.rgw][INFO ] Distro info: debian 10.3 buster
+[2020-03-10 22:13:31,343][ceph_deploy.rgw][DEBUG ] remote host will use systemd
+[2020-03-10 22:13:31,392][ceph_deploy.rgw][DEBUG ] deploying rgw bootstrap to pine01
+[2020-03-10 22:13:31,392][pine01][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf
+[2020-03-10 22:13:31,889][pine01][DEBUG ] create path recursively if it doesn't exist
+[2020-03-10 22:13:31,958][pine01][INFO ] Running command: ceph --cluster ceph --name client.bootstrap-rgw --keyring /var/lib/ceph/bootstrap-rgw/ceph.keyring auth get-or-create client.rgw.pine01 osd allow rwx mon allow rw -o /var/lib/ceph/radosgw/ceph-rgw.pine01/keyring
+[2020-03-10 22:13:35,484][pine01][INFO ] Running command: systemctl enable ceph-radosgw@rgw.pine01
+[2020-03-10 22:13:36,648][pine01][INFO ] Running command: systemctl start ceph-radosgw@rgw.pine01
+[2020-03-10 22:13:37,113][pine01][INFO ] Running command: systemctl enable ceph.target
+[2020-03-10 22:13:37,838][ceph_deploy.rgw][INFO ] The Ceph Object Gateway (RGW) is now running on host pine01 and default port 7480
+[2020-03-10 22:24:20,561][ceph_deploy.conf][DEBUG ] found configuration file at: /home/do/.cephdeploy.conf
+[2020-03-10 22:24:20,561][ceph_deploy.cli][INFO ] Invoked (2.0.1): /usr/bin/ceph-deploy --username root admin pine03
+[2020-03-10 22:24:20,561][ceph_deploy.cli][INFO ] ceph-deploy options: +[2020-03-10 22:24:20,562][ceph_deploy.cli][INFO ] username : root +[2020-03-10 22:24:20,562][ceph_deploy.cli][INFO ] verbose : False +[2020-03-10 22:24:20,562][ceph_deploy.cli][INFO ] overwrite_conf : False +[2020-03-10 22:24:20,562][ceph_deploy.cli][INFO ] quiet : False +[2020-03-10 22:24:20,562][ceph_deploy.cli][INFO ] cd_conf : +[2020-03-10 22:24:20,562][ceph_deploy.cli][INFO ] cluster : ceph +[2020-03-10 22:24:20,562][ceph_deploy.cli][INFO ] client : ['pine03'] +[2020-03-10 22:24:20,562][ceph_deploy.cli][INFO ] func : +[2020-03-10 22:24:20,562][ceph_deploy.cli][INFO ] ceph_conf : None +[2020-03-10 22:24:20,562][ceph_deploy.cli][INFO ] default_release : False +[2020-03-10 22:24:20,609][ceph_deploy.admin][DEBUG ] Pushing admin keys and conf to pine03 +[2020-03-10 22:24:31,355][pine03][DEBUG ] connected to host: root@pine03 +[2020-03-10 22:24:31,357][pine03][DEBUG ] detect platform information from remote host +[2020-03-10 22:24:32,583][pine03][DEBUG ] detect machine type +[2020-03-10 22:24:32,739][pine03][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf +[2020-03-10 22:27:11,130][ceph_deploy.conf][DEBUG ] found configuration file at: /home/do/.cephdeploy.conf +[2020-03-10 22:27:11,130][ceph_deploy.cli][INFO ] Invoked (2.0.1): /usr/bin/ceph-deploy --username root admin tumor +[2020-03-10 22:27:11,140][ceph_deploy.cli][INFO ] ceph-deploy options: +[2020-03-10 22:27:11,140][ceph_deploy.cli][INFO ] username : root +[2020-03-10 22:27:11,140][ceph_deploy.cli][INFO ] verbose : False +[2020-03-10 22:27:11,140][ceph_deploy.cli][INFO ] overwrite_conf : False +[2020-03-10 22:27:11,140][ceph_deploy.cli][INFO ] quiet : False +[2020-03-10 22:27:11,140][ceph_deploy.cli][INFO ] cd_conf : +[2020-03-10 22:27:11,140][ceph_deploy.cli][INFO ] cluster : ceph +[2020-03-10 22:27:11,140][ceph_deploy.cli][INFO ] client : ['tumor'] +[2020-03-10 22:27:11,140][ceph_deploy.cli][INFO ] func : +[2020-03-10 22:27:11,140][ceph_deploy.cli][INFO ] ceph_conf : None +[2020-03-10 22:27:11,140][ceph_deploy.cli][INFO ] default_release : False +[2020-03-10 22:27:11,141][ceph_deploy.admin][DEBUG ] Pushing admin keys and conf to tumor +[2020-03-10 22:27:12,447][tumor][DEBUG ] connected to host: root@tumor +[2020-03-10 22:27:12,449][tumor][DEBUG ] detect platform information from remote host +[2020-03-10 22:27:12,738][tumor][DEBUG ] detect machine type +[2020-03-10 22:27:12,758][tumor][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf +[2020-03-11 20:51:02,058][ceph_deploy.conf][DEBUG ] found configuration file at: /home/do/.cephdeploy.conf +[2020-03-11 20:51:02,060][ceph_deploy.cli][INFO ] Invoked (2.0.1): /usr/bin/ceph-deploy --username root mds create pine02 +[2020-03-11 20:51:02,060][ceph_deploy.cli][INFO ] ceph-deploy options: +[2020-03-11 20:51:02,060][ceph_deploy.cli][INFO ] username : root +[2020-03-11 20:51:02,060][ceph_deploy.cli][INFO ] verbose : False +[2020-03-11 20:51:02,060][ceph_deploy.cli][INFO ] overwrite_conf : False +[2020-03-11 20:51:02,061][ceph_deploy.cli][INFO ] subcommand : create +[2020-03-11 20:51:02,061][ceph_deploy.cli][INFO ] quiet : False +[2020-03-11 20:51:02,061][ceph_deploy.cli][INFO ] cd_conf : +[2020-03-11 20:51:02,061][ceph_deploy.cli][INFO ] cluster : ceph +[2020-03-11 20:51:02,061][ceph_deploy.cli][INFO ] func : +[2020-03-11 20:51:02,061][ceph_deploy.cli][INFO ] ceph_conf : None +[2020-03-11 20:51:02,062][ceph_deploy.cli][INFO ] mds : [('pine02', 'pine02')] +[2020-03-11 
20:51:02,062][ceph_deploy.cli][INFO ] default_release : False +[2020-03-11 20:51:02,062][ceph_deploy.mds][DEBUG ] Deploying mds, cluster ceph hosts pine02:pine02 +[2020-03-11 20:51:03,998][pine02][DEBUG ] connected to host: root@pine02 +[2020-03-11 20:51:04,010][pine02][DEBUG ] detect platform information from remote host +[2020-03-11 20:51:04,238][pine02][DEBUG ] detect machine type +[2020-03-11 20:51:04,256][ceph_deploy.mds][INFO ] Distro info: debian 10.3 buster +[2020-03-11 20:51:04,257][ceph_deploy.mds][DEBUG ] remote host will use systemd +[2020-03-11 20:51:04,258][ceph_deploy.mds][DEBUG ] deploying mds bootstrap to pine02 +[2020-03-11 20:51:04,259][pine02][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf +[2020-03-11 20:51:04,276][pine02][DEBUG ] create path if it doesn't exist +[2020-03-11 20:51:04,289][pine02][INFO ] Running command: ceph --cluster ceph --name client.bootstrap-mds --keyring /var/lib/ceph/bootstrap-mds/ceph.keyring auth get-or-create mds.pine02 osd allow rwx mds allow mon allow profile mds -o /var/lib/ceph/mds/ceph-pine02/keyring +[2020-03-11 20:51:07,296][pine02][INFO ] Running command: systemctl enable ceph-mds@pine02 +[2020-03-11 20:51:08,286][pine02][INFO ] Running command: systemctl start ceph-mds@pine02 +[2020-03-11 20:51:08,479][pine02][INFO ] Running command: systemctl enable ceph.target +[2020-03-11 20:51:13,768][ceph_deploy.conf][DEBUG ] found configuration file at: /home/do/.cephdeploy.conf +[2020-03-11 20:51:13,768][ceph_deploy.cli][INFO ] Invoked (2.0.1): /usr/bin/ceph-deploy --username root mds create pine01 +[2020-03-11 20:51:13,768][ceph_deploy.cli][INFO ] ceph-deploy options: +[2020-03-11 20:51:13,768][ceph_deploy.cli][INFO ] username : root +[2020-03-11 20:51:13,768][ceph_deploy.cli][INFO ] verbose : False +[2020-03-11 20:51:13,769][ceph_deploy.cli][INFO ] overwrite_conf : False +[2020-03-11 20:51:13,769][ceph_deploy.cli][INFO ] subcommand : create +[2020-03-11 20:51:13,769][ceph_deploy.cli][INFO ] quiet : False +[2020-03-11 20:51:13,769][ceph_deploy.cli][INFO ] cd_conf : +[2020-03-11 20:51:13,769][ceph_deploy.cli][INFO ] cluster : ceph +[2020-03-11 20:51:13,769][ceph_deploy.cli][INFO ] func : +[2020-03-11 20:51:13,769][ceph_deploy.cli][INFO ] ceph_conf : None +[2020-03-11 20:51:13,769][ceph_deploy.cli][INFO ] mds : [('pine01', 'pine01')] +[2020-03-11 20:51:13,769][ceph_deploy.cli][INFO ] default_release : False +[2020-03-11 20:51:13,769][ceph_deploy.mds][DEBUG ] Deploying mds, cluster ceph hosts pine01:pine01 +[2020-03-11 20:51:23,085][pine01][DEBUG ] connected to host: root@pine01 +[2020-03-11 20:51:23,087][pine01][DEBUG ] detect platform information from remote host +[2020-03-11 20:51:23,495][pine01][DEBUG ] detect machine type +[2020-03-11 20:51:23,571][ceph_deploy.mds][INFO ] Distro info: debian 10.3 buster +[2020-03-11 20:51:23,571][ceph_deploy.mds][DEBUG ] remote host will use systemd +[2020-03-11 20:51:23,573][ceph_deploy.mds][DEBUG ] deploying mds bootstrap to pine01 +[2020-03-11 20:51:23,573][pine01][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf +[2020-03-11 20:51:24,150][pine01][DEBUG ] create path if it doesn't exist +[2020-03-11 20:51:24,276][pine01][INFO ] Running command: ceph --cluster ceph --name client.bootstrap-mds --keyring /var/lib/ceph/bootstrap-mds/ceph.keyring auth get-or-create mds.pine01 osd allow rwx mds allow mon allow profile mds -o /var/lib/ceph/mds/ceph-pine01/keyring +[2020-03-11 20:51:28,089][pine01][INFO ] Running command: systemctl enable ceph-mds@pine01 +[2020-03-11 
20:51:29,088][pine01][INFO ] Running command: systemctl start ceph-mds@pine01 +[2020-03-11 20:51:29,545][pine01][INFO ] Running command: systemctl enable ceph.target +[2020-03-13 19:10:34,252][ceph_deploy.conf][DEBUG ] found configuration file at: /home/do/.cephdeploy.conf +[2020-03-13 19:10:34,253][ceph_deploy.cli][INFO ] Invoked (2.0.1): /usr/bin/ceph-deploy --username root osd create --data /dev/sda ebin01 +[2020-03-13 19:10:34,253][ceph_deploy.cli][INFO ] ceph-deploy options: +[2020-03-13 19:10:34,253][ceph_deploy.cli][INFO ] verbose : False +[2020-03-13 19:10:34,253][ceph_deploy.cli][INFO ] bluestore : None +[2020-03-13 19:10:34,253][ceph_deploy.cli][INFO ] cd_conf : +[2020-03-13 19:10:34,253][ceph_deploy.cli][INFO ] cluster : ceph +[2020-03-13 19:10:34,253][ceph_deploy.cli][INFO ] fs_type : xfs +[2020-03-13 19:10:34,253][ceph_deploy.cli][INFO ] block_wal : None +[2020-03-13 19:10:34,253][ceph_deploy.cli][INFO ] default_release : False +[2020-03-13 19:10:34,253][ceph_deploy.cli][INFO ] username : root +[2020-03-13 19:10:34,253][ceph_deploy.cli][INFO ] journal : None +[2020-03-13 19:10:34,253][ceph_deploy.cli][INFO ] subcommand : create +[2020-03-13 19:10:34,253][ceph_deploy.cli][INFO ] host : ebin01 +[2020-03-13 19:10:34,253][ceph_deploy.cli][INFO ] filestore : None +[2020-03-13 19:10:34,253][ceph_deploy.cli][INFO ] func : +[2020-03-13 19:10:34,254][ceph_deploy.cli][INFO ] ceph_conf : None +[2020-03-13 19:10:34,254][ceph_deploy.cli][INFO ] zap_disk : False +[2020-03-13 19:10:34,254][ceph_deploy.cli][INFO ] data : /dev/sda +[2020-03-13 19:10:34,254][ceph_deploy.cli][INFO ] block_db : None +[2020-03-13 19:10:34,254][ceph_deploy.cli][INFO ] dmcrypt : False +[2020-03-13 19:10:34,254][ceph_deploy.cli][INFO ] overwrite_conf : False +[2020-03-13 19:10:34,254][ceph_deploy.cli][INFO ] dmcrypt_key_dir : /etc/ceph/dmcrypt-keys +[2020-03-13 19:10:34,254][ceph_deploy.cli][INFO ] quiet : False +[2020-03-13 19:10:34,254][ceph_deploy.cli][INFO ] debug : False +[2020-03-13 19:10:34,255][ceph_deploy.osd][DEBUG ] Creating OSD on cluster ceph with data device /dev/sda +[2020-03-13 19:10:37,605][ebin01][DEBUG ] connected to host: root@ebin01 +[2020-03-13 19:10:37,618][ebin01][DEBUG ] detect platform information from remote host +[2020-03-13 19:10:37,820][ebin01][DEBUG ] detect machine type +[2020-03-13 19:10:37,843][ebin01][DEBUG ] find the location of an executable +[2020-03-13 19:10:37,848][ceph_deploy.osd][INFO ] Distro info: debian 10.3 buster +[2020-03-13 19:10:37,848][ceph_deploy.osd][DEBUG ] Deploying osd to ebin01 +[2020-03-13 19:10:37,849][ebin01][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf +[2020-03-13 19:10:37,934][ebin01][DEBUG ] find the location of an executable +[2020-03-13 19:10:37,946][ebin01][INFO ] Running command: /usr/sbin/ceph-volume --cluster ceph lvm create --bluestore --data /dev/sda +[2020-03-13 19:11:08,840][ebin01][WARNING] Running command: /usr/bin/ceph-authtool --gen-print-key +[2020-03-13 19:11:08,841][ebin01][WARNING] Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring -i - osd new fe454719-4b04-414d-a821-07b6ca64602d +[2020-03-13 19:11:08,841][ebin01][WARNING] Running command: /sbin/vgcreate -s 1G --force --yes ceph-5dc05fd9-8537-43ef-883c-766d09dfb67f /dev/sda +[2020-03-13 19:11:08,842][ebin01][WARNING] stdout: Wiping dos signature on /dev/sda. +[2020-03-13 19:11:08,845][ebin01][WARNING] stdout: Physical volume "/dev/sda" successfully created. 
+[2020-03-13 19:11:08,849][ebin01][WARNING] stdout: Volume group "ceph-5dc05fd9-8537-43ef-883c-766d09dfb67f" successfully created +[2020-03-13 19:11:08,850][ebin01][WARNING] Running command: /sbin/lvcreate --yes -l 100%FREE -n osd-block-fe454719-4b04-414d-a821-07b6ca64602d ceph-5dc05fd9-8537-43ef-883c-766d09dfb67f +[2020-03-13 19:11:08,866][ebin01][WARNING] stdout: Logical volume "osd-block-fe454719-4b04-414d-a821-07b6ca64602d" created. +[2020-03-13 19:11:08,867][ebin01][WARNING] Running command: /usr/bin/ceph-authtool --gen-print-key +[2020-03-13 19:11:08,868][ebin01][WARNING] Running command: /bin/mount -t tmpfs tmpfs /var/lib/ceph/osd/ceph-4 +[2020-03-13 19:11:08,868][ebin01][WARNING] --> Absolute path not found for executable: selinuxenabled +[2020-03-13 19:11:08,869][ebin01][WARNING] --> Ensure $PATH environment variable contains common executable locations +[2020-03-13 19:11:08,870][ebin01][WARNING] Running command: /bin/chown -h ceph:ceph /dev/ceph-5dc05fd9-8537-43ef-883c-766d09dfb67f/osd-block-fe454719-4b04-414d-a821-07b6ca64602d +[2020-03-13 19:11:08,870][ebin01][WARNING] Running command: /bin/chown -R ceph:ceph /dev/dm-2 +[2020-03-13 19:11:08,871][ebin01][WARNING] Running command: /bin/ln -s /dev/ceph-5dc05fd9-8537-43ef-883c-766d09dfb67f/osd-block-fe454719-4b04-414d-a821-07b6ca64602d /var/lib/ceph/osd/ceph-4/block +[2020-03-13 19:11:08,873][ebin01][WARNING] Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring mon getmap -o /var/lib/ceph/osd/ceph-4/activate.monmap +[2020-03-13 19:11:08,875][ebin01][WARNING] stderr: 2020-03-13 19:10:54.350 7f7b7e61e0 -1 auth: unable to find a keyring on /etc/ceph/ceph.client.bootstrap-osd.keyring,/etc/ceph/ceph.keyring,/etc/ceph/keyring,/etc/ceph/keyring.bin,: (2) No such file or directory +[2020-03-13 19:11:08,876][ebin01][WARNING] 2020-03-13 19:10:54.350 7f7b7e61e0 -1 AuthRegistry(0x7f740814b8) no keyring found at /etc/ceph/ceph.client.bootstrap-osd.keyring,/etc/ceph/ceph.keyring,/etc/ceph/keyring,/etc/ceph/keyring.bin,, disabling cephx +[2020-03-13 19:11:08,880][ebin01][WARNING] stderr: got monmap epoch 4 +[2020-03-13 19:11:08,880][ebin01][WARNING] Running command: /usr/bin/ceph-authtool /var/lib/ceph/osd/ceph-4/keyring --create-keyring --name osd.4 --add-key AQCgzGtesfBxNBAATydWwZgun2+jAo832p9OwA== +[2020-03-13 19:11:08,881][ebin01][WARNING] stdout: creating /var/lib/ceph/osd/ceph-4/keyring +[2020-03-13 19:11:08,883][ebin01][WARNING] added entity osd.4 auth(key=AQCgzGtesfBxNBAATydWwZgun2+jAo832p9OwA==) +[2020-03-13 19:11:08,891][ebin01][WARNING] Running command: /bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-4/keyring +[2020-03-13 19:11:08,891][ebin01][WARNING] Running command: /bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-4/ +[2020-03-13 19:11:08,892][ebin01][WARNING] Running command: /usr/bin/ceph-osd --cluster ceph --osd-objectstore bluestore --mkfs -i 4 --monmap /var/lib/ceph/osd/ceph-4/activate.monmap --keyfile - --osd-data /var/lib/ceph/osd/ceph-4/ --osd-uuid fe454719-4b04-414d-a821-07b6ca64602d --setuser ceph --setgroup ceph +[2020-03-13 19:11:08,894][ebin01][WARNING] --> ceph-volume lvm prepare successful for: /dev/sda +[2020-03-13 19:11:08,895][ebin01][WARNING] Running command: /bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-4 +[2020-03-13 19:11:08,897][ebin01][WARNING] Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-5dc05fd9-8537-43ef-883c-766d09dfb67f/osd-block-fe454719-4b04-414d-a821-07b6ca64602d --path 
/var/lib/ceph/osd/ceph-4 --no-mon-config
+[2020-03-13 19:11:08,898][ebin01][WARNING] Running command: /bin/ln -snf /dev/ceph-5dc05fd9-8537-43ef-883c-766d09dfb67f/osd-block-fe454719-4b04-414d-a821-07b6ca64602d /var/lib/ceph/osd/ceph-4/block
+[2020-03-13 19:11:08,900][ebin01][WARNING] Running command: /bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-4/block
+[2020-03-13 19:11:08,901][ebin01][WARNING] Running command: /bin/chown -R ceph:ceph /dev/dm-2
+[2020-03-13 19:11:08,903][ebin01][WARNING] Running command: /bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-4
+[2020-03-13 19:11:08,904][ebin01][WARNING] Running command: /bin/systemctl enable ceph-volume@lvm-4-fe454719-4b04-414d-a821-07b6ca64602d
+[2020-03-13 19:11:08,909][ebin01][WARNING] stderr: Created symlink /etc/systemd/system/multi-user.target.wants/ceph-volume@lvm-4-fe454719-4b04-414d-a821-07b6ca64602d.service → /lib/systemd/system/ceph-volume@.service.
+[2020-03-13 19:11:08,909][ebin01][WARNING] Running command: /bin/systemctl enable --runtime ceph-osd@4
+[2020-03-13 19:11:08,913][ebin01][WARNING] stderr: Created symlink /run/systemd/system/ceph-osd.target.wants/ceph-osd@4.service → /lib/systemd/system/ceph-osd@.service.
+[2020-03-13 19:11:08,914][ebin01][WARNING] Running command: /bin/systemctl start ceph-osd@4
+[2020-03-13 19:11:08,914][ebin01][WARNING] --> ceph-volume lvm activate successful for osd ID: 4
+[2020-03-13 19:11:08,915][ebin01][WARNING] --> ceph-volume lvm create successful for: /dev/sda
+[2020-03-13 19:11:14,081][ebin01][INFO ] checking OSD status...
+[2020-03-13 19:11:14,082][ebin01][DEBUG ] find the location of an executable
+[2020-03-13 19:11:14,095][ebin01][INFO ] Running command: /usr/bin/ceph --cluster=ceph osd stat --format=json
+[2020-03-13 19:11:15,416][ceph_deploy.osd][DEBUG ] Host ebin01 is now ready for osd use.
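
The four OSD creations recorded in this log are the same `ceph-deploy ... osd create` invocation repeated per host and per data device. A minimal Python sketch of that loop, using only the CLI flags visible in the log itself; the OSD_LAYOUT mapping and the use of subprocess are illustrative assumptions, not part of this patch:

    import subprocess

    # Hosts and data devices as they appear in this log (osd.4 through osd.7).
    OSD_LAYOUT = {
        "ebin01": ["/dev/sda", "/dev/sdb"],
        "ebin02": ["/dev/sda", "/dev/sdb"],
    }

    for host, devices in OSD_LAYOUT.items():
        for dev in devices:
            # Mirrors the "Invoked (2.0.1): /usr/bin/ceph-deploy --username root
            # osd create --data <dev> <host>" lines recorded above.
            subprocess.run(
                ["ceph-deploy", "--username", "root",
                 "osd", "create", "--data", dev, host],
                check=True,
            )
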
+[2020-03-13 19:11:26,241][ceph_deploy.conf][DEBUG ] found configuration file at: /home/do/.cephdeploy.conf +[2020-03-13 19:11:26,242][ceph_deploy.cli][INFO ] Invoked (2.0.1): /usr/bin/ceph-deploy --username root osd create --data /dev/sdb ebin01 +[2020-03-13 19:11:26,289][ceph_deploy.cli][INFO ] ceph-deploy options: +[2020-03-13 19:11:26,289][ceph_deploy.cli][INFO ] verbose : False +[2020-03-13 19:11:26,289][ceph_deploy.cli][INFO ] bluestore : None +[2020-03-13 19:11:26,290][ceph_deploy.cli][INFO ] cd_conf : +[2020-03-13 19:11:26,290][ceph_deploy.cli][INFO ] cluster : ceph +[2020-03-13 19:11:26,290][ceph_deploy.cli][INFO ] fs_type : xfs +[2020-03-13 19:11:26,290][ceph_deploy.cli][INFO ] block_wal : None +[2020-03-13 19:11:26,290][ceph_deploy.cli][INFO ] default_release : False +[2020-03-13 19:11:26,291][ceph_deploy.cli][INFO ] username : root +[2020-03-13 19:11:26,291][ceph_deploy.cli][INFO ] journal : None +[2020-03-13 19:11:26,291][ceph_deploy.cli][INFO ] subcommand : create +[2020-03-13 19:11:26,291][ceph_deploy.cli][INFO ] host : ebin01 +[2020-03-13 19:11:26,291][ceph_deploy.cli][INFO ] filestore : None +[2020-03-13 19:11:26,291][ceph_deploy.cli][INFO ] func : +[2020-03-13 19:11:26,292][ceph_deploy.cli][INFO ] ceph_conf : None +[2020-03-13 19:11:26,292][ceph_deploy.cli][INFO ] zap_disk : False +[2020-03-13 19:11:26,292][ceph_deploy.cli][INFO ] data : /dev/sdb +[2020-03-13 19:11:26,292][ceph_deploy.cli][INFO ] block_db : None +[2020-03-13 19:11:26,292][ceph_deploy.cli][INFO ] dmcrypt : False +[2020-03-13 19:11:26,292][ceph_deploy.cli][INFO ] overwrite_conf : False +[2020-03-13 19:11:26,293][ceph_deploy.cli][INFO ] dmcrypt_key_dir : /etc/ceph/dmcrypt-keys +[2020-03-13 19:11:26,293][ceph_deploy.cli][INFO ] quiet : False +[2020-03-13 19:11:26,293][ceph_deploy.cli][INFO ] debug : False +[2020-03-13 19:11:26,295][ceph_deploy.osd][DEBUG ] Creating OSD on cluster ceph with data device /dev/sdb +[2020-03-13 19:11:32,575][ebin01][DEBUG ] connected to host: root@ebin01 +[2020-03-13 19:11:32,577][ebin01][DEBUG ] detect platform information from remote host +[2020-03-13 19:11:32,994][ebin01][DEBUG ] detect machine type +[2020-03-13 19:11:33,032][ebin01][DEBUG ] find the location of an executable +[2020-03-13 19:11:33,054][ceph_deploy.osd][INFO ] Distro info: debian 10.3 buster +[2020-03-13 19:11:33,054][ceph_deploy.osd][DEBUG ] Deploying osd to ebin01 +[2020-03-13 19:11:33,055][ebin01][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf +[2020-03-13 19:11:33,508][ebin01][DEBUG ] find the location of an executable +[2020-03-13 19:11:33,562][ebin01][INFO ] Running command: /usr/sbin/ceph-volume --cluster ceph lvm create --bluestore --data /dev/sdb +[2020-03-13 19:12:32,294][ebin01][WARNING] Running command: /usr/bin/ceph-authtool --gen-print-key +[2020-03-13 19:12:32,298][ebin01][WARNING] Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring -i - osd new 5f8d00a1-bc65-4202-bfc5-c0eab2aba52b +[2020-03-13 19:12:32,314][ebin01][WARNING] Running command: /sbin/vgcreate -s 1G --force --yes ceph-f1986b95-1069-454c-ab67-e588f43407a6 /dev/sdb +[2020-03-13 19:12:32,314][ebin01][WARNING] stdout: Physical volume "/dev/sdb" successfully created. 
+[2020-03-13 19:12:32,319][ebin01][WARNING] stdout: Volume group "ceph-f1986b95-1069-454c-ab67-e588f43407a6" successfully created +[2020-03-13 19:12:32,335][ebin01][WARNING] Running command: /sbin/lvcreate --yes -l 100%FREE -n osd-block-5f8d00a1-bc65-4202-bfc5-c0eab2aba52b ceph-f1986b95-1069-454c-ab67-e588f43407a6 +[2020-03-13 19:12:32,343][ebin01][WARNING] stdout: Logical volume "osd-block-5f8d00a1-bc65-4202-bfc5-c0eab2aba52b" created. +[2020-03-13 19:12:32,351][ebin01][WARNING] Running command: /usr/bin/ceph-authtool --gen-print-key +[2020-03-13 19:12:32,355][ebin01][WARNING] Running command: /bin/mount -t tmpfs tmpfs /var/lib/ceph/osd/ceph-5 +[2020-03-13 19:12:32,359][ebin01][WARNING] --> Absolute path not found for executable: selinuxenabled +[2020-03-13 19:12:32,375][ebin01][WARNING] --> Ensure $PATH environment variable contains common executable locations +[2020-03-13 19:12:32,377][ebin01][WARNING] Running command: /bin/chown -h ceph:ceph /dev/ceph-f1986b95-1069-454c-ab67-e588f43407a6/osd-block-5f8d00a1-bc65-4202-bfc5-c0eab2aba52b +[2020-03-13 19:12:32,384][ebin01][WARNING] Running command: /bin/chown -R ceph:ceph /dev/dm-3 +[2020-03-13 19:12:32,402][ebin01][WARNING] Running command: /bin/ln -s /dev/ceph-f1986b95-1069-454c-ab67-e588f43407a6/osd-block-5f8d00a1-bc65-4202-bfc5-c0eab2aba52b /var/lib/ceph/osd/ceph-5/block +[2020-03-13 19:12:32,418][ebin01][WARNING] Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring mon getmap -o /var/lib/ceph/osd/ceph-5/activate.monmap +[2020-03-13 19:12:32,426][ebin01][WARNING] stderr: 2020-03-13 19:12:10.073 7f916761e0 -1 auth: unable to find a keyring on /etc/ceph/ceph.client.bootstrap-osd.keyring,/etc/ceph/ceph.keyring,/etc/ceph/keyring,/etc/ceph/keyring.bin,: (2) No such file or directory +[2020-03-13 19:12:32,428][ebin01][WARNING] stderr: +[2020-03-13 19:12:32,443][ebin01][WARNING] stderr: 2020-03-13 19:12:10.073 7f916761e0 -1 AuthRegistry(0x7f8c0814b8) no keyring found at /etc/ceph/ceph.client.bootstrap-osd.keyring,/etc/ceph/ceph.keyring,/etc/ceph/keyring,/etc/ceph/keyring.bin,, disabling cephx +[2020-03-13 19:12:32,444][ebin01][WARNING] stderr: got monmap epoch 4 +[2020-03-13 19:12:32,444][ebin01][WARNING] Running command: /usr/bin/ceph-authtool /var/lib/ceph/osd/ceph-5/keyring --create-keyring --name osd.5 --add-key AQDfzGte6I5MAxAAlFVdK/KhPHmFopRKQxm89g== +[2020-03-13 19:12:32,444][ebin01][WARNING] stdout: creating /var/lib/ceph/osd/ceph-5/keyring +[2020-03-13 19:12:32,445][ebin01][WARNING] added entity osd.5 auth(key=AQDfzGte6I5MAxAAlFVdK/KhPHmFopRKQxm89g==) +[2020-03-13 19:12:32,453][ebin01][WARNING] Running command: /bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-5/keyring +[2020-03-13 19:12:32,453][ebin01][WARNING] Running command: /bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-5/ +[2020-03-13 19:12:32,469][ebin01][WARNING] Running command: /usr/bin/ceph-osd --cluster ceph --osd-objectstore bluestore --mkfs -i 5 --monmap /var/lib/ceph/osd/ceph-5/activate.monmap --keyfile - --osd-data /var/lib/ceph/osd/ceph-5/ --osd-uuid 5f8d00a1-bc65-4202-bfc5-c0eab2aba52b --setuser ceph --setgroup ceph +[2020-03-13 19:12:32,469][ebin01][WARNING] --> ceph-volume lvm prepare successful for: /dev/sdb +[2020-03-13 19:12:32,470][ebin01][WARNING] Running command: /bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-5 +[2020-03-13 19:12:32,473][ebin01][WARNING] Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev 
/dev/ceph-f1986b95-1069-454c-ab67-e588f43407a6/osd-block-5f8d00a1-bc65-4202-bfc5-c0eab2aba52b --path /var/lib/ceph/osd/ceph-5 --no-mon-config
+[2020-03-13 19:12:32,489][ebin01][WARNING] Running command: /bin/ln -snf /dev/ceph-f1986b95-1069-454c-ab67-e588f43407a6/osd-block-5f8d00a1-bc65-4202-bfc5-c0eab2aba52b /var/lib/ceph/osd/ceph-5/block
+[2020-03-13 19:12:32,497][ebin01][WARNING] Running command: /bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-5/block
+[2020-03-13 19:12:32,498][ebin01][WARNING] Running command: /bin/chown -R ceph:ceph /dev/dm-3
+[2020-03-13 19:12:32,506][ebin01][WARNING] Running command: /bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-5
+[2020-03-13 19:12:32,510][ebin01][WARNING] Running command: /bin/systemctl enable ceph-volume@lvm-5-5f8d00a1-bc65-4202-bfc5-c0eab2aba52b
+[2020-03-13 19:12:32,525][ebin01][WARNING] stderr: Created symlink /etc/systemd/system/multi-user.target.wants/ceph-volume@lvm-5-5f8d00a1-bc65-4202-bfc5-c0eab2aba52b.service → /lib/systemd/system/ceph-volume@.service.
+[2020-03-13 19:12:32,526][ebin01][WARNING] Running command: /bin/systemctl enable --runtime ceph-osd@5
+[2020-03-13 19:12:32,534][ebin01][WARNING] stderr: Created symlink /run/systemd/system/ceph-osd.target.wants/ceph-osd@5.service → /lib/systemd/system/ceph-osd@.service.
+[2020-03-13 19:12:32,534][ebin01][WARNING] Running command: /bin/systemctl start ceph-osd@5
+[2020-03-13 19:12:32,538][ebin01][WARNING] --> ceph-volume lvm activate successful for osd ID: 5
+[2020-03-13 19:12:32,538][ebin01][WARNING] --> ceph-volume lvm create successful for: /dev/sdb
+[2020-03-13 19:12:37,657][ebin01][INFO ] checking OSD status...
+[2020-03-13 19:12:37,658][ebin01][DEBUG ] find the location of an executable
+[2020-03-13 19:12:37,695][ebin01][INFO ] Running command: /usr/bin/ceph --cluster=ceph osd stat --format=json
+[2020-03-13 19:12:41,977][ceph_deploy.osd][DEBUG ] Host ebin01 is now ready for osd use.
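
Every entry in this file follows the same `[timestamp][logger][LEVEL] message` shape, which makes the log easy to filter mechanically. A small, self-contained parser for that shape; the regex and function names are illustrative, not part of ceph-deploy:

    import re
    from datetime import datetime

    # Matches e.g. "[2020-03-13 19:12:41,977][ceph_deploy.osd][DEBUG ] Host ..."
    LINE_RE = re.compile(
        r"\[(?P<ts>\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2},\d{3})\]"
        r"\[(?P<logger>[^\]]+)\]\[(?P<level>[A-Z]+)\s*\] ?(?P<msg>.*)"
    )

    def parse(line):
        """Return (datetime, logger, level, message), or None for lines
        that are wrapped continuations of a previous entry."""
        m = LINE_RE.search(line)
        if m is None:
            return None
        ts = datetime.strptime(m.group("ts"), "%Y-%m-%d %H:%M:%S,%f")
        return ts, m.group("logger"), m.group("level"), m.group("msg")

    # Example: print only the WARNING/ERROR entries of this very file.
    with open("ceph-deploy-ceph.log") as fh:
        for rec in filter(None, map(parse, fh)):
            if rec[2] in ("WARNING", "ERROR"):
                print(rec[0], rec[3])
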
+[2020-03-13 19:12:54,371][ceph_deploy.conf][DEBUG ] found configuration file at: /home/do/.cephdeploy.conf +[2020-03-13 19:12:54,372][ceph_deploy.cli][INFO ] Invoked (2.0.1): /usr/bin/ceph-deploy --username root osd create --data /dev/sda ebin02 +[2020-03-13 19:12:54,372][ceph_deploy.cli][INFO ] ceph-deploy options: +[2020-03-13 19:12:54,372][ceph_deploy.cli][INFO ] verbose : False +[2020-03-13 19:12:54,372][ceph_deploy.cli][INFO ] bluestore : None +[2020-03-13 19:12:54,372][ceph_deploy.cli][INFO ] cd_conf : +[2020-03-13 19:12:54,372][ceph_deploy.cli][INFO ] cluster : ceph +[2020-03-13 19:12:54,372][ceph_deploy.cli][INFO ] fs_type : xfs +[2020-03-13 19:12:54,372][ceph_deploy.cli][INFO ] block_wal : None +[2020-03-13 19:12:54,372][ceph_deploy.cli][INFO ] default_release : False +[2020-03-13 19:12:54,372][ceph_deploy.cli][INFO ] username : root +[2020-03-13 19:12:54,372][ceph_deploy.cli][INFO ] journal : None +[2020-03-13 19:12:54,372][ceph_deploy.cli][INFO ] subcommand : create +[2020-03-13 19:12:54,372][ceph_deploy.cli][INFO ] host : ebin02 +[2020-03-13 19:12:54,372][ceph_deploy.cli][INFO ] filestore : None +[2020-03-13 19:12:54,372][ceph_deploy.cli][INFO ] func : +[2020-03-13 19:12:54,372][ceph_deploy.cli][INFO ] ceph_conf : None +[2020-03-13 19:12:54,372][ceph_deploy.cli][INFO ] zap_disk : False +[2020-03-13 19:12:54,372][ceph_deploy.cli][INFO ] data : /dev/sda +[2020-03-13 19:12:54,372][ceph_deploy.cli][INFO ] block_db : None +[2020-03-13 19:12:54,373][ceph_deploy.cli][INFO ] dmcrypt : False +[2020-03-13 19:12:54,373][ceph_deploy.cli][INFO ] overwrite_conf : False +[2020-03-13 19:12:54,373][ceph_deploy.cli][INFO ] dmcrypt_key_dir : /etc/ceph/dmcrypt-keys +[2020-03-13 19:12:54,373][ceph_deploy.cli][INFO ] quiet : False +[2020-03-13 19:12:54,373][ceph_deploy.cli][INFO ] debug : False +[2020-03-13 19:12:54,373][ceph_deploy.osd][DEBUG ] Creating OSD on cluster ceph with data device /dev/sda +[2020-03-13 19:12:57,729][ebin02][DEBUG ] connected to host: root@ebin02 +[2020-03-13 19:12:57,731][ebin02][DEBUG ] detect platform information from remote host +[2020-03-13 19:12:57,962][ebin02][DEBUG ] detect machine type +[2020-03-13 19:12:57,989][ebin02][DEBUG ] find the location of an executable +[2020-03-13 19:12:57,996][ceph_deploy.osd][INFO ] Distro info: debian 10.3 buster +[2020-03-13 19:12:57,997][ceph_deploy.osd][DEBUG ] Deploying osd to ebin02 +[2020-03-13 19:12:57,999][ebin02][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf +[2020-03-13 19:12:58,042][ebin02][DEBUG ] find the location of an executable +[2020-03-13 19:12:58,058][ebin02][INFO ] Running command: /usr/sbin/ceph-volume --cluster ceph lvm create --bluestore --data /dev/sda +[2020-03-13 19:13:29,844][ebin02][WARNING] Running command: /usr/bin/ceph-authtool --gen-print-key +[2020-03-13 19:13:29,845][ebin02][WARNING] Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring -i - osd new 1e329b82-74f3-42b9-bc1e-8b115324d402 +[2020-03-13 19:13:29,845][ebin02][WARNING] Running command: /sbin/vgcreate -s 1G --force --yes ceph-ed4c22ce-df90-4f83-861f-3e071de52ccf /dev/sda +[2020-03-13 19:13:29,845][ebin02][WARNING] stdout: Wiping dos signature on /dev/sda. +[2020-03-13 19:13:29,845][ebin02][WARNING] stdout: Physical volume "/dev/sda" successfully created. 
+[2020-03-13 19:13:29,861][ebin02][WARNING] stdout: Volume group "ceph-ed4c22ce-df90-4f83-861f-3e071de52ccf" successfully created +[2020-03-13 19:13:29,862][ebin02][WARNING] Running command: /sbin/lvcreate --yes -l 100%FREE -n osd-block-1e329b82-74f3-42b9-bc1e-8b115324d402 ceph-ed4c22ce-df90-4f83-861f-3e071de52ccf +[2020-03-13 19:13:29,863][ebin02][WARNING] stdout: Logical volume "osd-block-1e329b82-74f3-42b9-bc1e-8b115324d402" created. +[2020-03-13 19:13:29,863][ebin02][WARNING] Running command: /usr/bin/ceph-authtool --gen-print-key +[2020-03-13 19:13:29,863][ebin02][WARNING] Running command: /bin/mount -t tmpfs tmpfs /var/lib/ceph/osd/ceph-6 +[2020-03-13 19:13:29,863][ebin02][WARNING] --> Absolute path not found for executable: selinuxenabled +[2020-03-13 19:13:29,864][ebin02][WARNING] --> Ensure $PATH environment variable contains common executable locations +[2020-03-13 19:13:29,864][ebin02][WARNING] Running command: /bin/chown -h ceph:ceph /dev/ceph-ed4c22ce-df90-4f83-861f-3e071de52ccf/osd-block-1e329b82-74f3-42b9-bc1e-8b115324d402 +[2020-03-13 19:13:29,864][ebin02][WARNING] Running command: /bin/chown -R ceph:ceph /dev/dm-2 +[2020-03-13 19:13:29,864][ebin02][WARNING] Running command: /bin/ln -s /dev/ceph-ed4c22ce-df90-4f83-861f-3e071de52ccf/osd-block-1e329b82-74f3-42b9-bc1e-8b115324d402 /var/lib/ceph/osd/ceph-6/block +[2020-03-13 19:13:29,865][ebin02][WARNING] Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring mon getmap -o /var/lib/ceph/osd/ceph-6/activate.monmap +[2020-03-13 19:13:29,881][ebin02][WARNING] stderr: 2020-03-13 19:13:13.918 7f78b631e0 -1 auth: unable to find a keyring on /etc/ceph/ceph.client.bootstrap-osd.keyring,/etc/ceph/ceph.keyring,/etc/ceph/keyring,/etc/ceph/keyring.bin,: (2) No such file or directory +[2020-03-13 19:13:29,881][ebin02][WARNING] stderr: 2020-03-13 19:13:13.918 7f78b631e0 -1 AuthRegistry(0x7f740814b8) no keyring found at /etc/ceph/ceph.client.bootstrap-osd.keyring,/etc/ceph/ceph.keyring,/etc/ceph/keyring,/etc/ceph/keyring.bin,, disabling cephx +[2020-03-13 19:13:29,883][ebin02][WARNING] stderr: got monmap epoch 4 +[2020-03-13 19:13:29,883][ebin02][WARNING] Running command: /usr/bin/ceph-authtool /var/lib/ceph/osd/ceph-6/keyring --create-keyring --name osd.6 --add-key AQAuzWte6sTKLRAARTnzg5C6ogvte9QnmYmqsg== +[2020-03-13 19:13:29,884][ebin02][WARNING] stdout: creating /var/lib/ceph/osd/ceph-6/keyring +[2020-03-13 19:13:29,884][ebin02][WARNING] added entity osd.6 auth(key=AQAuzWte6sTKLRAARTnzg5C6ogvte9QnmYmqsg==) +[2020-03-13 19:13:29,884][ebin02][WARNING] Running command: /bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-6/keyring +[2020-03-13 19:13:29,885][ebin02][WARNING] Running command: /bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-6/ +[2020-03-13 19:13:29,885][ebin02][WARNING] Running command: /usr/bin/ceph-osd --cluster ceph --osd-objectstore bluestore --mkfs -i 6 --monmap /var/lib/ceph/osd/ceph-6/activate.monmap --keyfile - --osd-data /var/lib/ceph/osd/ceph-6/ --osd-uuid 1e329b82-74f3-42b9-bc1e-8b115324d402 --setuser ceph --setgroup ceph +[2020-03-13 19:13:29,885][ebin02][WARNING] --> ceph-volume lvm prepare successful for: /dev/sda +[2020-03-13 19:13:29,901][ebin02][WARNING] Running command: /bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-6 +[2020-03-13 19:13:29,901][ebin02][WARNING] Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-ed4c22ce-df90-4f83-861f-3e071de52ccf/osd-block-1e329b82-74f3-42b9-bc1e-8b115324d402 --path 
/var/lib/ceph/osd/ceph-6 --no-mon-config
+[2020-03-13 19:13:29,902][ebin02][WARNING] Running command: /bin/ln -snf /dev/ceph-ed4c22ce-df90-4f83-861f-3e071de52ccf/osd-block-1e329b82-74f3-42b9-bc1e-8b115324d402 /var/lib/ceph/osd/ceph-6/block
+[2020-03-13 19:13:29,902][ebin02][WARNING] Running command: /bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-6/block
+[2020-03-13 19:13:29,902][ebin02][WARNING] Running command: /bin/chown -R ceph:ceph /dev/dm-2
+[2020-03-13 19:13:29,903][ebin02][WARNING] Running command: /bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-6
+[2020-03-13 19:13:29,910][ebin02][WARNING] Running command: /bin/systemctl enable ceph-volume@lvm-6-1e329b82-74f3-42b9-bc1e-8b115324d402
+[2020-03-13 19:13:29,911][ebin02][WARNING] stderr: Created symlink /etc/systemd/system/multi-user.target.wants/ceph-volume@lvm-6-1e329b82-74f3-42b9-bc1e-8b115324d402.service → /lib/systemd/system/ceph-volume@.service.
+[2020-03-13 19:13:29,911][ebin02][WARNING] Running command: /bin/systemctl enable --runtime ceph-osd@6
+[2020-03-13 19:13:29,911][ebin02][WARNING] stderr: Created symlink /run/systemd/system/ceph-osd.target.wants/ceph-osd@6.service → /lib/systemd/system/ceph-osd@.service.
+[2020-03-13 19:13:29,912][ebin02][WARNING] Running command: /bin/systemctl start ceph-osd@6
+[2020-03-13 19:13:29,928][ebin02][WARNING] --> ceph-volume lvm activate successful for osd ID: 6
+[2020-03-13 19:13:29,928][ebin02][WARNING] --> ceph-volume lvm create successful for: /dev/sda
+[2020-03-13 19:13:34,997][ebin02][INFO ] checking OSD status...
+[2020-03-13 19:13:34,998][ebin02][DEBUG ] find the location of an executable
+[2020-03-13 19:13:35,032][ebin02][INFO ] Running command: /usr/bin/ceph --cluster=ceph osd stat --format=json
+[2020-03-13 19:13:36,554][ceph_deploy.osd][DEBUG ] Host ebin02 is now ready for osd use.
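
Each deployment above finishes with `ceph --cluster=ceph osd stat --format=json` before declaring the host ready. Repeating that health check from Python is straightforward; note that the JSON layout of `osd stat` differs between Ceph releases (the counters may or may not sit under an "osdmap" key), so this sketch probes both layouts rather than assuming one:

    import json
    import subprocess

    out = subprocess.run(
        ["ceph", "--cluster=ceph", "osd", "stat", "--format=json"],
        check=True, capture_output=True, text=True,
    ).stdout
    stat = json.loads(out)
    osdmap = stat.get("osdmap", stat)  # handle both nested and flat layouts
    print(f"{osdmap.get('num_up_osds')}/{osdmap.get('num_osds')} OSDs up")
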
+[2020-03-13 19:13:54,082][ceph_deploy.conf][DEBUG ] found configuration file at: /home/do/.cephdeploy.conf +[2020-03-13 19:13:54,083][ceph_deploy.cli][INFO ] Invoked (2.0.1): /usr/bin/ceph-deploy --username root osd create --data /dev/sdb ebin02 +[2020-03-13 19:13:54,083][ceph_deploy.cli][INFO ] ceph-deploy options: +[2020-03-13 19:13:54,083][ceph_deploy.cli][INFO ] verbose : False +[2020-03-13 19:13:54,083][ceph_deploy.cli][INFO ] bluestore : None +[2020-03-13 19:13:54,083][ceph_deploy.cli][INFO ] cd_conf : +[2020-03-13 19:13:54,083][ceph_deploy.cli][INFO ] cluster : ceph +[2020-03-13 19:13:54,083][ceph_deploy.cli][INFO ] fs_type : xfs +[2020-03-13 19:13:54,083][ceph_deploy.cli][INFO ] block_wal : None +[2020-03-13 19:13:54,083][ceph_deploy.cli][INFO ] default_release : False +[2020-03-13 19:13:54,083][ceph_deploy.cli][INFO ] username : root +[2020-03-13 19:13:54,083][ceph_deploy.cli][INFO ] journal : None +[2020-03-13 19:13:54,083][ceph_deploy.cli][INFO ] subcommand : create +[2020-03-13 19:13:54,083][ceph_deploy.cli][INFO ] host : ebin02 +[2020-03-13 19:13:54,083][ceph_deploy.cli][INFO ] filestore : None +[2020-03-13 19:13:54,083][ceph_deploy.cli][INFO ] func : +[2020-03-13 19:13:54,083][ceph_deploy.cli][INFO ] ceph_conf : None +[2020-03-13 19:13:54,083][ceph_deploy.cli][INFO ] zap_disk : False +[2020-03-13 19:13:54,084][ceph_deploy.cli][INFO ] data : /dev/sdb +[2020-03-13 19:13:54,084][ceph_deploy.cli][INFO ] block_db : None +[2020-03-13 19:13:54,084][ceph_deploy.cli][INFO ] dmcrypt : False +[2020-03-13 19:13:54,084][ceph_deploy.cli][INFO ] overwrite_conf : False +[2020-03-13 19:13:54,084][ceph_deploy.cli][INFO ] dmcrypt_key_dir : /etc/ceph/dmcrypt-keys +[2020-03-13 19:13:54,084][ceph_deploy.cli][INFO ] quiet : False +[2020-03-13 19:13:54,084][ceph_deploy.cli][INFO ] debug : False +[2020-03-13 19:13:54,084][ceph_deploy.osd][DEBUG ] Creating OSD on cluster ceph with data device /dev/sdb +[2020-03-13 19:13:57,070][ebin02][DEBUG ] connected to host: root@ebin02 +[2020-03-13 19:13:57,072][ebin02][DEBUG ] detect platform information from remote host +[2020-03-13 19:13:57,299][ebin02][DEBUG ] detect machine type +[2020-03-13 19:13:57,327][ebin02][DEBUG ] find the location of an executable +[2020-03-13 19:13:57,332][ceph_deploy.osd][INFO ] Distro info: debian 10.3 buster +[2020-03-13 19:13:57,332][ceph_deploy.osd][DEBUG ] Deploying osd to ebin02 +[2020-03-13 19:13:57,334][ebin02][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf +[2020-03-13 19:13:57,734][ebin02][DEBUG ] find the location of an executable +[2020-03-13 19:13:57,747][ebin02][INFO ] Running command: /usr/sbin/ceph-volume --cluster ceph lvm create --bluestore --data /dev/sdb +[2020-03-13 19:14:28,543][ebin02][WARNING] Running command: /usr/bin/ceph-authtool --gen-print-key +[2020-03-13 19:14:28,543][ebin02][WARNING] Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring -i - osd new 24c2823e-d63b-4b1f-9c83-76cae58bbb38 +[2020-03-13 19:14:28,544][ebin02][WARNING] Running command: /sbin/vgcreate -s 1G --force --yes ceph-4e945599-23d6-4222-bd1f-017a4a869912 /dev/sdb +[2020-03-13 19:14:28,544][ebin02][WARNING] stdout: Physical volume "/dev/sdb" successfully created. 
+[2020-03-13 19:14:28,544][ebin02][WARNING] stdout: Volume group "ceph-4e945599-23d6-4222-bd1f-017a4a869912" successfully created +[2020-03-13 19:14:28,544][ebin02][WARNING] Running command: /sbin/lvcreate --yes -l 100%FREE -n osd-block-24c2823e-d63b-4b1f-9c83-76cae58bbb38 ceph-4e945599-23d6-4222-bd1f-017a4a869912 +[2020-03-13 19:14:28,545][ebin02][WARNING] stdout: Logical volume "osd-block-24c2823e-d63b-4b1f-9c83-76cae58bbb38" created. +[2020-03-13 19:14:28,545][ebin02][WARNING] Running command: /usr/bin/ceph-authtool --gen-print-key +[2020-03-13 19:14:28,545][ebin02][WARNING] Running command: /bin/mount -t tmpfs tmpfs /var/lib/ceph/osd/ceph-7 +[2020-03-13 19:14:28,546][ebin02][WARNING] --> Absolute path not found for executable: selinuxenabled +[2020-03-13 19:14:28,546][ebin02][WARNING] --> Ensure $PATH environment variable contains common executable locations +[2020-03-13 19:14:28,546][ebin02][WARNING] Running command: /bin/chown -h ceph:ceph /dev/ceph-4e945599-23d6-4222-bd1f-017a4a869912/osd-block-24c2823e-d63b-4b1f-9c83-76cae58bbb38 +[2020-03-13 19:14:28,546][ebin02][WARNING] Running command: /bin/chown -R ceph:ceph /dev/dm-3 +[2020-03-13 19:14:28,546][ebin02][WARNING] Running command: /bin/ln -s /dev/ceph-4e945599-23d6-4222-bd1f-017a4a869912/osd-block-24c2823e-d63b-4b1f-9c83-76cae58bbb38 /var/lib/ceph/osd/ceph-7/block +[2020-03-13 19:14:28,547][ebin02][WARNING] Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring mon getmap -o /var/lib/ceph/osd/ceph-7/activate.monmap +[2020-03-13 19:14:28,547][ebin02][WARNING] stderr: 2020-03-13 19:14:13.452 7f7e1ac1e0 -1 auth: unable to find a keyring on /etc/ceph/ceph.client.bootstrap-osd.keyring,/etc/ceph/ceph.keyring,/etc/ceph/keyring,/etc/ceph/keyring.bin,: (2) No such file or directory +[2020-03-13 19:14:28,549][ebin02][WARNING] stderr: 2020-03-13 19:14:13.452 7f7e1ac1e0 -1 AuthRegistry(0x7f780814b8) no keyring found at /etc/ceph/ceph.client.bootstrap-osd.keyring,/etc/ceph/ceph.keyring,/etc/ceph/keyring,/etc/ceph/keyring.bin,, disabling cephx +[2020-03-13 19:14:28,550][ebin02][WARNING] stderr: got monmap epoch 4 +[2020-03-13 19:14:28,552][ebin02][WARNING] Running command: /usr/bin/ceph-authtool /var/lib/ceph/osd/ceph-7/keyring --create-keyring --name osd.7 --add-key AQBpzWteiaeEMBAAakF8uj1EEE8fC21M3BvzMA== +[2020-03-13 19:14:28,553][ebin02][WARNING] stdout: creating /var/lib/ceph/osd/ceph-7/keyring +[2020-03-13 19:14:28,553][ebin02][WARNING] added entity osd.7 auth(key=AQBpzWteiaeEMBAAakF8uj1EEE8fC21M3BvzMA==) +[2020-03-13 19:14:28,554][ebin02][WARNING] Running command: /bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-7/keyring +[2020-03-13 19:14:28,555][ebin02][WARNING] Running command: /bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-7/ +[2020-03-13 19:14:28,556][ebin02][WARNING] Running command: /usr/bin/ceph-osd --cluster ceph --osd-objectstore bluestore --mkfs -i 7 --monmap /var/lib/ceph/osd/ceph-7/activate.monmap --keyfile - --osd-data /var/lib/ceph/osd/ceph-7/ --osd-uuid 24c2823e-d63b-4b1f-9c83-76cae58bbb38 --setuser ceph --setgroup ceph +[2020-03-13 19:14:28,557][ebin02][WARNING] --> ceph-volume lvm prepare successful for: /dev/sdb +[2020-03-13 19:14:28,557][ebin02][WARNING] Running command: /bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-7 +[2020-03-13 19:14:28,564][ebin02][WARNING] Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-4e945599-23d6-4222-bd1f-017a4a869912/osd-block-24c2823e-d63b-4b1f-9c83-76cae58bbb38 --path 
/var/lib/ceph/osd/ceph-7 --no-mon-config
+[2020-03-13 19:14:28,565][ebin02][WARNING] Running command: /bin/ln -snf /dev/ceph-4e945599-23d6-4222-bd1f-017a4a869912/osd-block-24c2823e-d63b-4b1f-9c83-76cae58bbb38 /var/lib/ceph/osd/ceph-7/block
+[2020-03-13 19:14:28,565][ebin02][WARNING] Running command: /bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-7/block
+[2020-03-13 19:14:28,566][ebin02][WARNING] Running command: /bin/chown -R ceph:ceph /dev/dm-3
+[2020-03-13 19:14:28,566][ebin02][WARNING] Running command: /bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-7
+[2020-03-13 19:14:28,567][ebin02][WARNING] Running command: /bin/systemctl enable ceph-volume@lvm-7-24c2823e-d63b-4b1f-9c83-76cae58bbb38
+[2020-03-13 19:14:28,583][ebin02][WARNING] stderr: Created symlink /etc/systemd/system/multi-user.target.wants/ceph-volume@lvm-7-24c2823e-d63b-4b1f-9c83-76cae58bbb38.service → /lib/systemd/system/ceph-volume@.service.
+[2020-03-13 19:14:28,583][ebin02][WARNING] Running command: /bin/systemctl enable --runtime ceph-osd@7
+[2020-03-13 19:14:28,584][ebin02][WARNING] stderr: Created symlink /run/systemd/system/ceph-osd.target.wants/ceph-osd@7.service → /lib/systemd/system/ceph-osd@.service.
+[2020-03-13 19:14:28,584][ebin02][WARNING] Running command: /bin/systemctl start ceph-osd@7
+[2020-03-13 19:14:28,584][ebin02][WARNING] --> ceph-volume lvm activate successful for osd ID: 7
+[2020-03-13 19:14:28,588][ebin02][WARNING] --> ceph-volume lvm create successful for: /dev/sdb
+[2020-03-13 19:14:33,757][ebin02][INFO ] checking OSD status...
+[2020-03-13 19:14:33,778][ebin02][DEBUG ] find the location of an executable
+[2020-03-13 19:14:33,802][ebin02][INFO ] Running command: /usr/bin/ceph --cluster=ceph osd stat --format=json
+[2020-03-13 19:14:35,323][ceph_deploy.osd][DEBUG ] Host ebin02 is now ready for osd use.
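
After `ceph-volume` activates each OSD it enables and starts a per-OSD systemd unit (ceph-osd@4 through ceph-osd@7 in this log). A quick way to re-verify those units later, shelling out to the same systemctl tooling the deployment used; the id range is taken from this log and would differ on another cluster:

    import subprocess

    for osd_id in (4, 5, 6, 7):
        unit = f"ceph-osd@{osd_id}"
        # "systemctl is-active" prints e.g. "active" or "inactive".
        r = subprocess.run(["systemctl", "is-active", unit],
                           capture_output=True, text=True)
        print(unit, (r.stdout or r.stderr).strip())
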
+[2020-03-13 21:51:47,721][ceph_deploy.conf][DEBUG ] found configuration file at: /home/do/.cephdeploy.conf +[2020-03-13 21:51:47,728][ceph_deploy.cli][INFO ] Invoked (2.0.1): /usr/bin/ceph-deploy --username root --overwrite-conf config push riot01 pine01 pine02 ebin01 ebin02 lenny tumor pine03 +[2020-03-13 21:51:47,728][ceph_deploy.cli][INFO ] ceph-deploy options: +[2020-03-13 21:51:47,728][ceph_deploy.cli][INFO ] username : root +[2020-03-13 21:51:47,728][ceph_deploy.cli][INFO ] verbose : False +[2020-03-13 21:51:47,728][ceph_deploy.cli][INFO ] overwrite_conf : True +[2020-03-13 21:51:47,728][ceph_deploy.cli][INFO ] subcommand : push +[2020-03-13 21:51:47,729][ceph_deploy.cli][INFO ] quiet : False +[2020-03-13 21:51:47,729][ceph_deploy.cli][INFO ] cd_conf : +[2020-03-13 21:51:47,729][ceph_deploy.cli][INFO ] cluster : ceph +[2020-03-13 21:51:47,729][ceph_deploy.cli][INFO ] client : ['riot01', 'pine01', 'pine02', 'ebin01', 'ebin02', 'lenny', 'tumor', 'pine03'] +[2020-03-13 21:51:47,729][ceph_deploy.cli][INFO ] func : +[2020-03-13 21:51:47,729][ceph_deploy.cli][INFO ] ceph_conf : None +[2020-03-13 21:51:47,729][ceph_deploy.cli][INFO ] default_release : False +[2020-03-13 21:51:47,729][ceph_deploy.config][DEBUG ] Pushing config to riot01 +[2020-03-13 21:51:49,243][riot01][DEBUG ] connected to host: root@riot01 +[2020-03-13 21:51:49,246][riot01][DEBUG ] detect platform information from remote host +[2020-03-13 21:51:49,420][riot01][DEBUG ] detect machine type +[2020-03-13 21:51:49,456][riot01][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf +[2020-03-13 21:51:49,475][ceph_deploy.config][DEBUG ] Pushing config to pine01 +[2020-03-13 21:51:58,175][pine01][DEBUG ] connected to host: root@pine01 +[2020-03-13 21:51:58,178][pine01][DEBUG ] detect platform information from remote host +[2020-03-13 21:51:59,230][pine01][DEBUG ] detect machine type +[2020-03-13 21:51:59,430][pine01][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf +[2020-03-13 21:51:59,439][ceph_deploy.config][DEBUG ] Pushing config to pine02 +[2020-03-13 21:52:01,198][pine02][DEBUG ] connected to host: root@pine02 +[2020-03-13 21:52:01,200][pine02][DEBUG ] detect platform information from remote host +[2020-03-13 21:52:01,357][pine02][DEBUG ] detect machine type +[2020-03-13 21:52:01,376][pine02][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf +[2020-03-13 21:52:01,393][ceph_deploy.config][DEBUG ] Pushing config to ebin01 +[2020-03-13 21:52:10,272][ebin01][DEBUG ] connected to host: root@ebin01 +[2020-03-13 21:52:10,274][ebin01][DEBUG ] detect platform information from remote host +[2020-03-13 21:52:10,950][ebin01][DEBUG ] detect machine type +[2020-03-13 21:52:11,008][ebin01][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf +[2020-03-13 21:52:11,049][ceph_deploy.config][DEBUG ] Pushing config to ebin02 +[2020-03-13 21:52:17,591][ebin02][DEBUG ] connected to host: root@ebin02 +[2020-03-13 21:52:17,593][ebin02][DEBUG ] detect platform information from remote host +[2020-03-13 21:52:18,045][ebin02][DEBUG ] detect machine type +[2020-03-13 21:52:18,068][ebin02][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf +[2020-03-13 21:52:18,096][ceph_deploy.config][DEBUG ] Pushing config to lenny +[2020-03-13 21:52:18,636][lenny][DEBUG ] connected to host: root@lenny +[2020-03-13 21:52:18,636][lenny][DEBUG ] detect platform information from remote host +[2020-03-13 21:52:18,653][lenny][DEBUG ] detect machine type +[2020-03-13 21:52:18,657][lenny][DEBUG ] write cluster 
configuration to /etc/ceph/{cluster}.conf +[2020-03-13 21:52:18,659][ceph_deploy.config][DEBUG ] Pushing config to tumor +[2020-03-13 21:52:19,641][tumor][DEBUG ] connected to host: root@tumor +[2020-03-13 21:52:19,642][tumor][DEBUG ] detect platform information from remote host +[2020-03-13 21:52:19,813][tumor][DEBUG ] detect machine type +[2020-03-13 21:52:19,833][tumor][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf +[2020-03-13 21:52:19,849][ceph_deploy.config][DEBUG ] Pushing config to pine03 +[2020-03-13 21:52:30,572][pine03][DEBUG ] connected to host: root@pine03 +[2020-03-13 21:52:30,574][pine03][DEBUG ] detect platform information from remote host +[2020-03-13 21:52:31,298][pine03][DEBUG ] detect machine type +[2020-03-13 21:52:31,372][pine03][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf +[2020-03-13 22:15:59,880][ceph_deploy.conf][DEBUG ] found configuration file at: /home/do/.cephdeploy.conf +[2020-03-13 22:15:59,880][ceph_deploy.cli][INFO ] Invoked (2.0.1): /usr/bin/ceph-deploy --username root mon create pine03 +[2020-03-13 22:15:59,880][ceph_deploy.cli][INFO ] ceph-deploy options: +[2020-03-13 22:15:59,880][ceph_deploy.cli][INFO ] username : root +[2020-03-13 22:15:59,880][ceph_deploy.cli][INFO ] verbose : False +[2020-03-13 22:15:59,880][ceph_deploy.cli][INFO ] overwrite_conf : False +[2020-03-13 22:15:59,880][ceph_deploy.cli][INFO ] subcommand : create +[2020-03-13 22:15:59,880][ceph_deploy.cli][INFO ] quiet : False +[2020-03-13 22:15:59,880][ceph_deploy.cli][INFO ] cd_conf : +[2020-03-13 22:15:59,880][ceph_deploy.cli][INFO ] cluster : ceph +[2020-03-13 22:15:59,880][ceph_deploy.cli][INFO ] mon : ['pine03'] +[2020-03-13 22:15:59,880][ceph_deploy.cli][INFO ] func : +[2020-03-13 22:15:59,880][ceph_deploy.cli][INFO ] ceph_conf : None +[2020-03-13 22:15:59,881][ceph_deploy.cli][INFO ] keyrings : None +[2020-03-13 22:15:59,881][ceph_deploy.cli][INFO ] default_release : False +[2020-03-13 22:15:59,891][ceph_deploy.mon][DEBUG ] Deploying mon, cluster ceph hosts pine03 +[2020-03-13 22:15:59,891][ceph_deploy.mon][DEBUG ] detecting platform for host pine03 ... 
+[2020-03-13 22:16:07,142][pine03][DEBUG ] connected to host: root@pine03
+[2020-03-13 22:16:07,143][pine03][DEBUG ] detect platform information from remote host
+[2020-03-13 22:16:07,805][pine03][DEBUG ] detect machine type
+[2020-03-13 22:16:07,940][pine03][DEBUG ] find the location of an executable
+[2020-03-13 22:16:08,003][ceph_deploy.mon][INFO ] distro info: debian 10.3 buster
+[2020-03-13 22:16:08,004][pine03][DEBUG ] determining if provided host has same hostname in remote
+[2020-03-13 22:16:08,004][pine03][DEBUG ] get remote short hostname
+[2020-03-13 22:16:08,069][pine03][DEBUG ] deploying mon to pine03
+[2020-03-13 22:16:08,070][pine03][DEBUG ] get remote short hostname
+[2020-03-13 22:16:08,074][pine03][DEBUG ] remote hostname: pine03
+[2020-03-13 22:16:08,146][pine03][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf
+[2020-03-13 22:16:08,212][pine03][DEBUG ] create the mon path if it does not exist
+[2020-03-13 22:16:08,290][pine03][DEBUG ] checking for done path: /var/lib/ceph/mon/ceph-pine03/done
+[2020-03-13 22:16:08,294][pine03][DEBUG ] done path does not exist: /var/lib/ceph/mon/ceph-pine03/done
+[2020-03-13 22:16:08,352][pine03][INFO ] creating keyring file: /var/lib/ceph/tmp/ceph-pine03.mon.keyring
+[2020-03-13 22:16:08,353][pine03][DEBUG ] create the monitor keyring file
+[2020-03-13 22:16:08,459][pine03][INFO ] Running command: ceph-mon --cluster ceph --mkfs -i pine03 --keyring /var/lib/ceph/tmp/ceph-pine03.mon.keyring --setuser 64045 --setgroup 64045
+[2020-03-13 22:16:20,798][pine03][INFO ] unlinking keyring file /var/lib/ceph/tmp/ceph-pine03.mon.keyring
+[2020-03-13 22:16:20,819][pine03][DEBUG ] create a done file to avoid re-doing the mon deployment
+[2020-03-13 22:16:20,823][pine03][DEBUG ] create the init path if it does not exist
+[2020-03-13 22:16:20,890][pine03][INFO ] Running command: systemctl enable ceph.target
+[2020-03-13 22:16:22,207][pine03][INFO ] Running command: systemctl enable ceph-mon@pine03
+[2020-03-13 22:16:22,480][pine03][WARNING] Created symlink /etc/systemd/system/ceph-mon.target.wants/ceph-mon@pine03.service → /lib/systemd/system/ceph-mon@.service.
+[2020-03-13 22:16:23,615][pine03][INFO ] Running command: systemctl start ceph-mon@pine03
+[2020-03-13 22:16:26,078][pine03][INFO ] Running command: ceph --cluster=ceph --admin-daemon /var/run/ceph/ceph-mon.pine03.asok mon_status
+[2020-03-13 22:16:30,208][pine03][DEBUG ] ********************************************************************************
+[2020-03-13 22:16:30,208][pine03][DEBUG ] status for monitor: mon.pine03
+{
+  "election_epoch": 0,
+  "extra_probe_peers": [
+    {
+      "addrvec": [
+        { "addr": "192.168.10.160:3300", "nonce": 0, "type": "v2" },
+        { "addr": "192.168.10.160:6789", "nonce": 0, "type": "v1" }
+      ]
+    },
+    {
+      "addrvec": [
+        { "addr": "192.168.10.161:3300", "nonce": 0, "type": "v2" },
+        { "addr": "192.168.10.161:6789", "nonce": 0, "type": "v1" }
+      ]
+    }
+  ],
+  "feature_map": {
+    "mon": [
+      { "features": "0x3ffddff8ffacffff", "num": 1, "release": "luminous" }
+    ]
+  },
+  "features": {
+    "quorum_con": "0",
+    "quorum_mon": [],
+    "required_con": "0",
+    "required_mon": []
+  },
+  "monmap": {
+    "created": "2020-03-13 22:16:14.498641",
+    "epoch": 0,
+    "features": { "optional": [], "persistent": [] },
+    "fsid": "29ef4020-303a-4b2e-aa24-a1e20e5ba21c",
+    "min_mon_release": 0,
+    "min_mon_release_name": "unknown",
+    "modified": "2020-03-13 22:16:14.498641",
+    "mons": [
+      {
+        "addr": "0.0.0.0:0/1",
+        "name": "pine01",
+        "public_addr": "0.0.0.0:0/1",
+        "public_addrs": { "addrvec": [ { "addr": "0.0.0.0:0", "nonce": 1, "type": "v1" } ] },
+        "rank": 0
+      },
+      {
+        "addr": "0.0.0.0:0/2",
+        "name": "pine02",
+        "public_addr": "0.0.0.0:0/2",
+        "public_addrs": { "addrvec": [ { "addr": "0.0.0.0:0", "nonce": 2, "type": "v1" } ] },
+        "rank": 1
+      }
+    ]
+  },
+  "name": "pine03",
+  "outside_quorum": [],
+  "quorum": [],
+  "rank": -1,
+  "state": "probing",
+  "sync_provider": []
+}
+[2020-03-13 22:16:30,228][pine03][DEBUG ] ********************************************************************************
+[2020-03-13 22:16:30,228][pine03][INFO ] monitor: mon.pine03 is currently at the state of probing
+[2020-03-13 22:16:30,315][pine03][INFO ] Running command: ceph --cluster=ceph --admin-daemon /var/run/ceph/ceph-mon.pine03.asok mon_status
+[2020-03-13 22:16:31,286][pine03][WARNING] pine03 is not defined in `mon initial members`
+[2020-03-13 22:16:31,286][pine03][WARNING] monitor pine03 does not exist in monmap
+[2020-03-13 22:18:11,967][ceph_deploy.conf][DEBUG ] found configuration file at: /home/do/.cephdeploy.conf
+[2020-03-13 22:18:11,968][ceph_deploy.cli][INFO ] Invoked (2.0.1): /usr/bin/ceph-deploy --username root --overwrite-conf config push riot01 pine01 pine02 ebin01 ebin02 lenny tumor pine03
+[2020-03-13 22:18:11,977][ceph_deploy.cli][INFO ] ceph-deploy options:
+[2020-03-13 22:18:11,977][ceph_deploy.cli][INFO ] username : root
+[2020-03-13 22:18:11,977][ceph_deploy.cli][INFO ] verbose : False
+[2020-03-13 22:18:11,977][ceph_deploy.cli][INFO ] overwrite_conf : True
+[2020-03-13 22:18:11,978][ceph_deploy.cli][INFO ] subcommand : push
+[2020-03-13 
+[2020-03-13 22:18:11,967][ceph_deploy.conf][DEBUG ] found configuration file at: /home/do/.cephdeploy.conf
+[2020-03-13 22:18:11,968][ceph_deploy.cli][INFO ] Invoked (2.0.1): /usr/bin/ceph-deploy --username root --overwrite-conf config push riot01 pine01 pine02 ebin01 ebin02 lenny tumor pine03
+[2020-03-13 22:18:11,977][ceph_deploy.cli][INFO ] ceph-deploy options:
+[2020-03-13 22:18:11,977][ceph_deploy.cli][INFO ] username : root
+[2020-03-13 22:18:11,977][ceph_deploy.cli][INFO ] verbose : False
+[2020-03-13 22:18:11,977][ceph_deploy.cli][INFO ] overwrite_conf : True
+[2020-03-13 22:18:11,978][ceph_deploy.cli][INFO ] subcommand : push
+[2020-03-13 22:18:11,978][ceph_deploy.cli][INFO ] quiet : False
+[2020-03-13 22:18:11,978][ceph_deploy.cli][INFO ] cd_conf :
+[2020-03-13 22:18:11,978][ceph_deploy.cli][INFO ] cluster : ceph
+[2020-03-13 22:18:11,978][ceph_deploy.cli][INFO ] client : ['riot01', 'pine01', 'pine02', 'ebin01', 'ebin02', 'lenny', 'tumor', 'pine03']
+[2020-03-13 22:18:11,978][ceph_deploy.cli][INFO ] func :
+[2020-03-13 22:18:11,978][ceph_deploy.cli][INFO ] ceph_conf : None
+[2020-03-13 22:18:11,978][ceph_deploy.cli][INFO ] default_release : False
+[2020-03-13 22:18:11,978][ceph_deploy.config][DEBUG ] Pushing config to riot01
+[2020-03-13 22:18:13,472][riot01][DEBUG ] connected to host: root@riot01
+[2020-03-13 22:18:13,474][riot01][DEBUG ] detect platform information from remote host
+[2020-03-13 22:18:13,642][riot01][DEBUG ] detect machine type
+[2020-03-13 22:18:13,676][riot01][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf
+[2020-03-13 22:18:13,698][ceph_deploy.config][DEBUG ] Pushing config to pine01
+[2020-03-13 22:20:24,023][ceph_deploy.config][ERROR ] connecting to host: root@pine01 resulted in errors: HostNotFound root@pine01
+[2020-03-13 22:20:24,024][ceph_deploy.config][DEBUG ] Pushing config to pine02
+[2020-03-13 22:20:25,740][pine02][DEBUG ] connected to host: root@pine02
+[2020-03-13 22:20:25,742][pine02][DEBUG ] detect platform information from remote host
+[2020-03-13 22:20:25,898][pine02][DEBUG ] detect machine type
+[2020-03-13 22:20:25,917][pine02][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf
+[2020-03-13 22:20:25,932][ceph_deploy.config][DEBUG ] Pushing config to ebin01
+[2020-03-13 22:20:28,312][ebin01][DEBUG ] connected to host: root@ebin01
+[2020-03-13 22:20:28,314][ebin01][DEBUG ] detect platform information from remote host
+[2020-03-13 22:20:28,526][ebin01][DEBUG ] detect machine type
+[2020-03-13 22:20:28,549][ebin01][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf
+[2020-03-13 22:20:28,561][ceph_deploy.config][DEBUG ] Pushing config to ebin02
+[2020-03-13 22:20:30,716][ebin02][DEBUG ] connected to host: root@ebin02
+[2020-03-13 22:20:30,718][ebin02][DEBUG ] detect platform information from remote host
+[2020-03-13 22:20:30,884][ebin02][DEBUG ] detect machine type
+[2020-03-13 22:20:30,925][ebin02][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf
+[2020-03-13 22:20:30,937][ceph_deploy.config][DEBUG ] Pushing config to lenny
+[2020-03-13 22:20:31,677][lenny][DEBUG ] connected to host: root@lenny
+[2020-03-13 22:20:31,678][lenny][DEBUG ] detect platform information from remote host
+[2020-03-13 22:20:31,706][lenny][DEBUG ] detect machine type
+[2020-03-13 22:20:31,710][lenny][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf
+[2020-03-13 22:20:31,712][ceph_deploy.config][DEBUG ] Pushing config to tumor
+[2020-03-13 22:20:32,702][tumor][DEBUG ] connected to host: root@tumor
+[2020-03-13 22:20:32,705][tumor][DEBUG ] detect platform information from remote host
+[2020-03-13 22:20:32,848][tumor][DEBUG ] detect machine type
+[2020-03-13 22:20:32,868][tumor][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf
+[2020-03-13 22:20:32,880][ceph_deploy.config][DEBUG ] Pushing config to pine03
+[2020-03-13 22:20:39,513][pine03][DEBUG ] connected to host: root@pine03
+[2020-03-13 22:20:39,514][pine03][DEBUG ] detect platform information from remote host
+[2020-03-13 22:20:39,680][pine03][DEBUG ] detect machine type
+[2020-03-13 22:20:39,812][pine03][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf
+[2020-03-13 22:20:42,354][ceph_deploy][ERROR ] GenericError: Failed to config 1 hosts
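Only pine01 failed in that run: HostNotFound appears to be ceph-deploy's wrapper around an SSH connection that never came up, and the roughly two-minute gap between the 22:18:13 and 22:20:24 timestamps looks like a connect timeout, which points at a transient DNS or host-down condition rather than a config problem. The retry below (without tumor) succeeds on every host. A small pre-flight reachability check, under the assumption that a TCP connect to sshd on port 22 is an adequate proxy for what ceph-deploy needs:

    import socket

    HOSTS = ["riot01", "pine01", "pine02", "ebin01", "ebin02", "lenny", "pine03"]

    def reachable(host, port=22, timeout=5.0):
        """True if `host` resolves and sshd accepts a TCP connection."""
        try:
            with socket.create_connection((host, port), timeout=timeout):
                return True
        except OSError:  # covers DNS failure, refusal and timeout alike
            return False

    for host in HOSTS:
        print("%-8s %s" % (host, "ok" if reachable(host) else "UNREACHABLE"))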
+
+[2020-03-13 22:46:13,300][ceph_deploy.conf][DEBUG ] found configuration file at: /home/do/.cephdeploy.conf
+[2020-03-13 22:46:13,300][ceph_deploy.cli][INFO ] Invoked (2.0.1): /usr/bin/ceph-deploy --username root --overwrite-conf config push riot01 pine01 pine02 ebin01 ebin02 lenny pine03
+[2020-03-13 22:46:13,300][ceph_deploy.cli][INFO ] ceph-deploy options:
+[2020-03-13 22:46:13,300][ceph_deploy.cli][INFO ] username : root
+[2020-03-13 22:46:13,300][ceph_deploy.cli][INFO ] verbose : False
+[2020-03-13 22:46:13,300][ceph_deploy.cli][INFO ] overwrite_conf : True
+[2020-03-13 22:46:13,300][ceph_deploy.cli][INFO ] subcommand : push
+[2020-03-13 22:46:13,300][ceph_deploy.cli][INFO ] quiet : False
+[2020-03-13 22:46:13,300][ceph_deploy.cli][INFO ] cd_conf :
+[2020-03-13 22:46:13,300][ceph_deploy.cli][INFO ] cluster : ceph
+[2020-03-13 22:46:13,301][ceph_deploy.cli][INFO ] client : ['riot01', 'pine01', 'pine02', 'ebin01', 'ebin02', 'lenny', 'pine03']
+[2020-03-13 22:46:13,301][ceph_deploy.cli][INFO ] func :
+[2020-03-13 22:46:13,301][ceph_deploy.cli][INFO ] ceph_conf : None
+[2020-03-13 22:46:13,301][ceph_deploy.cli][INFO ] default_release : False
+[2020-03-13 22:46:13,301][ceph_deploy.config][DEBUG ] Pushing config to riot01
+[2020-03-13 22:46:14,743][riot01][DEBUG ] connected to host: root@riot01
+[2020-03-13 22:46:14,744][riot01][DEBUG ] detect platform information from remote host
+[2020-03-13 22:46:14,916][riot01][DEBUG ] detect machine type
+[2020-03-13 22:46:14,951][riot01][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf
+[2020-03-13 22:46:14,970][ceph_deploy.config][DEBUG ] Pushing config to pine01
+[2020-03-13 22:46:24,141][pine01][DEBUG ] connected to host: root@pine01
+[2020-03-13 22:46:24,143][pine01][DEBUG ] detect platform information from remote host
+[2020-03-13 22:46:24,817][pine01][DEBUG ] detect machine type
+[2020-03-13 22:46:24,892][pine01][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf
+[2020-03-13 22:46:25,007][ceph_deploy.config][DEBUG ] Pushing config to pine02
+[2020-03-13 22:46:26,547][pine02][DEBUG ] connected to host: root@pine02
+[2020-03-13 22:46:26,549][pine02][DEBUG ] detect platform information from remote host
+[2020-03-13 22:46:26,673][pine02][DEBUG ] detect machine type
+[2020-03-13 22:46:26,693][pine02][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf
+[2020-03-13 22:46:26,715][ceph_deploy.config][DEBUG ] Pushing config to ebin01
+[2020-03-13 22:46:29,415][ebin01][DEBUG ] connected to host: root@ebin01
+[2020-03-13 22:46:29,417][ebin01][DEBUG ] detect platform information from remote host
+[2020-03-13 22:46:29,656][ebin01][DEBUG ] detect machine type
+[2020-03-13 22:46:29,682][ebin01][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf
+[2020-03-13 22:46:29,701][ceph_deploy.config][DEBUG ] Pushing config to ebin02
+[2020-03-13 22:46:32,829][ebin02][DEBUG ] connected to host: root@ebin02
+[2020-03-13 22:46:32,831][ebin02][DEBUG ] detect platform information from remote host
+[2020-03-13 22:46:33,104][ebin02][DEBUG ] detect machine type
+[2020-03-13 22:46:33,136][ebin02][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf
+[2020-03-13 22:46:33,162][ceph_deploy.config][DEBUG ] Pushing config to lenny
+[2020-03-13 22:46:33,531][lenny][DEBUG ] connected to host: root@lenny
+[2020-03-13 22:46:33,532][lenny][DEBUG ] detect platform information from remote host
+[2020-03-13 22:46:33,548][lenny][DEBUG ] detect machine type
+[2020-03-13 22:46:33,551][lenny][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf
+[2020-03-13 22:46:33,553][ceph_deploy.config][DEBUG ] Pushing config to pine03
+[2020-03-13 22:46:44,406][pine03][DEBUG ] connected to host: root@pine03
+[2020-03-13 22:46:44,408][pine03][DEBUG ] detect platform information from remote host
+[2020-03-13 22:46:44,882][pine03][DEBUG ] detect machine type
+[2020-03-13 22:46:44,957][pine03][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf
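This second push distributes the revised ceph.conf shown in the diff below. Two of its changes matter for the probing monitor: pine03 joins `mon_initial_members`, and `mon_host` switches from bare v1 IPs to explicit address vectors that list each monitor's messenger-v2 endpoint (port 3300) next to its legacy v1 endpoint (port 6789), the standard Ceph ports for the two protocols. A sketch of how such an addrvec string is assembled; the helper is illustrative and not part of any ceph tooling:

    # Render mon_host in Ceph's addrvec notation: one [v2:...,v1:...]
    # entry per monitor, msgr2 on 3300 and legacy msgr1 on 6789.
    MON_IPS = ["192.168.10.160", "192.168.10.161", "192.168.10.19"]

    def addrvec(ip, v2_port=3300, v1_port=6789):
        """Render one monitor as a [v2:...,v1:...] address vector."""
        return "[v2:%s:%d,v1:%s:%d]" % (ip, v2_port, ip, v1_port)

    print("mon_host =", ", ".join(addrvec(ip) for ip in MON_IPS))
    # mon_host = [v2:192.168.10.160:3300,v1:192.168.10.160:6789], ...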
diff --git a/ceph.conf b/ceph.conf
index 1cd50d6..9a4f3b6 100644
--- a/ceph.conf
+++ b/ceph.conf
@@ -1,8 +1,9 @@
 [global]
 fsid = 29ef4020-303a-4b2e-aa24-a1e20e5ba21c
 #ms_bind_ipv6 = true
-mon_initial_members = pine01, pine02
-mon_host = 192.168.10.160, 192.168.10.161
+mon_initial_members = pine01, pine02, pine03
+mon_host = [v2:192.168.10.160:3300,v1:192.168.10.160:6789], [v2:192.168.10.161:3300,v1:192.168.10.161:6789], [v2:192.168.10.19:3300,v1:192.168.10.19:6789]
+
 auth_cluster_required = cephx
 auth_service_required = cephx
 auth_client_required = cephx
@@ -12,16 +13,24 @@
 osd pool default pg num = 8
 osd pool default pgp num = 8
 osd pool default size = 2 # Write an object 3 times.
 osd pool default min size = 1 # Allow writing two copies in a degraded state.
-osd max backfills = 2
+osd max backfills = 1
+#50mb / 5mb / 64mb
+osd_memory_target = 536870912 #512Mb
+bluestore_cache_autotune = false
+bluestore_cache_size = 52428800
+bluestore_cache_size_hdd = 5242880
+bluestore_cache_size_ssd = 5242880
+bluestore_cache_kv_max = 67108864
 
-[mon.pine01]
- host = pine01
- address = 192.168.10.160:6789
-[mon.pine02]
- host = pine02
- address = 192.168.10.161:6789
-
-[mon.riot01]
- host = riot01
- address = 192.168.10.164:6789
+#[mon.pine01]
+# host = pine01
+# address = 192.168.10.160:6789
+#
+#[mon.pine02]
+# host = pine02
+# address = 192.168.10.161:6789
+#
+#[mon.riot01]
+# host = riot01
+# address = 192.168.10.164:6789
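On the memory limits themselves, the byte values check out against their shorthand comments: 536870912 B is 512 MiB, 52428800 B is 50 MiB, 5242880 B is 5 MiB, and 67108864 B is 64 MiB, so the `#50mb / 5mb / 64mb` comment maps to the cache size, the HDD/SSD cache sizes, and the kv max. Two caveats, offered tentatively: upstream documentation describes `osd_memory_target` as the figure the cache autotuner steers toward, so with `bluestore_cache_autotune = false` the fixed `bluestore_cache_size*` values are likely what actually governs and the target may have little effect; and the inherited context line `osd pool default size = 2 # Write an object 3 times.` carries a stale comment, since size 2 means two copies. A quick check of the arithmetic:

    MiB = 1024 * 1024

    # Values from the ceph.conf hunk above, checked against their comments.
    settings = {
        "osd_memory_target":        536870912,  # expected 512 MiB
        "bluestore_cache_size":      52428800,  # expected  50 MiB
        "bluestore_cache_size_hdd":   5242880,  # expected   5 MiB
        "bluestore_cache_size_ssd":   5242880,  # expected   5 MiB
        "bluestore_cache_kv_max":    67108864,  # expected  64 MiB
    }

    for name, value in settings.items():
        print("%-26s = %10d B = %g MiB" % (name, value, value / MiB))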