From 98fb03c125d27498ef42d468e842886c8627c4c7 Mon Sep 17 00:00:00 2001
From: Udo Waechter
Date: Tue, 3 Mar 2020 22:16:40 +0100
Subject: [PATCH] cluster min_size 1, size 2

---
 ceph-deploy-ceph.log       | 1737 ++++++++++++++++++++++++++++++++++++
 ceph.bootstrap-mds.keyring |    3 +
 ceph.bootstrap-mgr.keyring |    3 +
 ceph.bootstrap-osd.keyring |    3 +
 ceph.bootstrap-rgw.keyring |    3 +
 ceph.client.admin.keyring  |    6 +
 ceph.conf                  |   11 +
 ceph.mon.keyring           |    3 +
 8 files changed, 1769 insertions(+)
 create mode 100644 ceph-deploy-ceph.log
 create mode 100644 ceph.bootstrap-mds.keyring
 create mode 100644 ceph.bootstrap-mgr.keyring
 create mode 100644 ceph.bootstrap-osd.keyring
 create mode 100644 ceph.bootstrap-rgw.keyring
 create mode 100644 ceph.client.admin.keyring
 create mode 100644 ceph.conf
 create mode 100644 ceph.mon.keyring

diff --git a/ceph-deploy-ceph.log b/ceph-deploy-ceph.log
new file mode 100644
index 0000000..9581f65
--- /dev/null
+++ b/ceph-deploy-ceph.log
@@ -0,0 +1,1737 @@
+[2020-03-03 20:33:08,371][ceph_deploy.conf][DEBUG ] found configuration file at: /home/do/.cephdeploy.conf
+[2020-03-03 20:33:08,371][ceph_deploy.cli][INFO ] Invoked (2.0.1): /usr/bin/ceph-deploy --username root new pine01 pine02
+[2020-03-03 20:33:08,371][ceph_deploy.cli][INFO ] ceph-deploy options:
+[2020-03-03 20:33:08,371][ceph_deploy.cli][INFO ] username : root
+[2020-03-03 20:33:08,371][ceph_deploy.cli][INFO ] verbose : False
+[2020-03-03 20:33:08,372][ceph_deploy.cli][INFO ] overwrite_conf : False
+[2020-03-03 20:33:08,372][ceph_deploy.cli][INFO ] quiet : False
+[2020-03-03 20:33:08,372][ceph_deploy.cli][INFO ] cd_conf :
+[2020-03-03 20:33:08,372][ceph_deploy.cli][INFO ] cluster : ceph
+[2020-03-03 20:33:08,372][ceph_deploy.cli][INFO ] ssh_copykey : True
+[2020-03-03 20:33:08,372][ceph_deploy.cli][INFO ] mon : ['pine01', 'pine02']
+[2020-03-03 20:33:08,372][ceph_deploy.cli][INFO ] func :
+[2020-03-03 20:33:08,372][ceph_deploy.cli][INFO ] public_network : None
+[2020-03-03 20:33:08,372][ceph_deploy.cli][INFO ] ceph_conf : None
+[2020-03-03 20:33:08,372][ceph_deploy.cli][INFO ] cluster_network : None
+[2020-03-03 20:33:08,372][ceph_deploy.cli][INFO ] default_release : False
+[2020-03-03 20:33:08,372][ceph_deploy.cli][INFO ] fsid : None
+[2020-03-03 20:33:08,372][ceph_deploy.new][DEBUG ] Creating new cluster named ceph
+[2020-03-03 20:33:08,372][ceph_deploy.new][INFO ] making sure passwordless SSH succeeds
+[2020-03-03 20:33:08,386][pine01][DEBUG ] connected to host: lenny
+[2020-03-03 20:33:08,390][pine01][INFO ] Running command: ssh -CT -o BatchMode=yes pine01
+[2020-03-03 20:33:09,008][ceph_deploy.new][WARNING] could not connect via SSH
+[2020-03-03 20:33:09,009][ceph_deploy.new][INFO ] will connect again with password prompt
+[2020-03-03 20:33:10,193][pine01][DEBUG ] connected to host: root@pine01
+[2020-03-03 20:33:10,194][pine01][DEBUG ] detect platform information from remote host
+[2020-03-03 20:33:10,339][pine01][DEBUG ] detect machine type
+[2020-03-03 20:33:10,366][ceph_deploy.new][INFO ] adding public keys to authorized_keys
+[2020-03-03 20:33:10,366][pine01][DEBUG ] append contents to file
+[2020-03-03 20:33:12,768][pine01][DEBUG ] connected to host: root@pine01
+[2020-03-03 20:33:12,769][pine01][DEBUG ] detect platform information from remote host
+[2020-03-03 20:33:12,912][pine01][DEBUG ] detect machine type
+[2020-03-03 20:33:12,933][pine01][DEBUG ] find the location of an executable
+[2020-03-03 20:33:12,943][pine01][INFO ] Running command: /bin/ip link show
+[2020-03-03 20:33:12,982][pine01][INFO ] Running 
command: /bin/ip addr show +[2020-03-03 20:33:13,015][pine01][DEBUG ] IP addresses found: [u'192.168.10.160', u'fd87:3937:d2b7:0:ba:c9ff:fe22:3d'] +[2020-03-03 20:33:13,016][ceph_deploy.new][DEBUG ] Resolving host pine01 +[2020-03-03 20:33:13,018][ceph_deploy.new][DEBUG ] Monitor pine01 at 192.168.10.160 +[2020-03-03 20:33:13,018][ceph_deploy.new][INFO ] making sure passwordless SSH succeeds +[2020-03-03 20:33:13,029][pine02][DEBUG ] connected to host: lenny +[2020-03-03 20:33:13,032][pine02][INFO ] Running command: ssh -CT -o BatchMode=yes pine02 +[2020-03-03 20:33:13,247][ceph_deploy.new][WARNING] could not connect via SSH +[2020-03-03 20:33:13,247][ceph_deploy.new][INFO ] will connect again with password prompt +[2020-03-03 20:33:13,919][pine02][DEBUG ] connected to host: root@pine02 +[2020-03-03 20:33:13,919][pine02][DEBUG ] detect platform information from remote host +[2020-03-03 20:33:14,040][pine02][DEBUG ] detect machine type +[2020-03-03 20:33:14,070][ceph_deploy.new][INFO ] adding public keys to authorized_keys +[2020-03-03 20:33:14,070][pine02][DEBUG ] append contents to file +[2020-03-03 20:33:15,594][pine02][DEBUG ] connected to host: root@pine02 +[2020-03-03 20:33:15,595][pine02][DEBUG ] detect platform information from remote host +[2020-03-03 20:33:15,717][pine02][DEBUG ] detect machine type +[2020-03-03 20:33:15,741][pine02][DEBUG ] find the location of an executable +[2020-03-03 20:33:15,754][pine02][INFO ] Running command: /bin/ip link show +[2020-03-03 20:33:15,794][pine02][INFO ] Running command: /bin/ip addr show +[2020-03-03 20:33:15,827][pine02][DEBUG ] IP addresses found: [u'192.168.10.161', u'fd87:3937:d2b7:0:9716:6bcb:617c:e7c6', u'fd87:3937:d2b7::b5f'] +[2020-03-03 20:33:15,827][ceph_deploy.new][DEBUG ] Resolving host pine02 +[2020-03-03 20:33:15,830][ceph_deploy.new][DEBUG ] Monitor pine02 at fd87:3937:d2b7::b5f +[2020-03-03 20:33:15,830][ceph_deploy.new][INFO ] Monitors are IPv6, binding Messenger traffic on IPv6 +[2020-03-03 20:33:15,830][ceph_deploy.new][DEBUG ] Monitor initial members are ['pine01', 'pine02'] +[2020-03-03 20:33:15,830][ceph_deploy.new][DEBUG ] Monitor addrs are ['192.168.10.160', '[fd87:3937:d2b7::b5f]'] +[2020-03-03 20:33:15,830][ceph_deploy.new][DEBUG ] Creating a random mon key... +[2020-03-03 20:33:15,830][ceph_deploy.new][DEBUG ] Writing monitor keyring to ceph.mon.keyring... +[2020-03-03 20:33:15,830][ceph_deploy.new][DEBUG ] Writing initial config to ceph.conf... 
+[2020-03-03 20:33:22,908][ceph_deploy.conf][DEBUG ] found configuration file at: /home/do/.cephdeploy.conf +[2020-03-03 20:33:22,909][ceph_deploy.cli][INFO ] Invoked (2.0.1): /usr/bin/ceph-deploy --username root mon create-initial +[2020-03-03 20:33:22,909][ceph_deploy.cli][INFO ] ceph-deploy options: +[2020-03-03 20:33:22,909][ceph_deploy.cli][INFO ] username : root +[2020-03-03 20:33:22,909][ceph_deploy.cli][INFO ] verbose : False +[2020-03-03 20:33:22,909][ceph_deploy.cli][INFO ] overwrite_conf : False +[2020-03-03 20:33:22,909][ceph_deploy.cli][INFO ] subcommand : create-initial +[2020-03-03 20:33:22,909][ceph_deploy.cli][INFO ] quiet : False +[2020-03-03 20:33:22,909][ceph_deploy.cli][INFO ] cd_conf : +[2020-03-03 20:33:22,909][ceph_deploy.cli][INFO ] cluster : ceph +[2020-03-03 20:33:22,909][ceph_deploy.cli][INFO ] func : +[2020-03-03 20:33:22,909][ceph_deploy.cli][INFO ] ceph_conf : None +[2020-03-03 20:33:22,909][ceph_deploy.cli][INFO ] keyrings : None +[2020-03-03 20:33:22,909][ceph_deploy.cli][INFO ] default_release : False +[2020-03-03 20:33:22,910][ceph_deploy.mon][DEBUG ] Deploying mon, cluster ceph hosts pine01 pine02 +[2020-03-03 20:33:22,910][ceph_deploy.mon][DEBUG ] detecting platform for host pine01 ... +[2020-03-03 20:33:26,188][pine01][DEBUG ] connected to host: root@pine01 +[2020-03-03 20:33:26,188][pine01][DEBUG ] detect platform information from remote host +[2020-03-03 20:33:26,350][pine01][DEBUG ] detect machine type +[2020-03-03 20:33:26,486][pine01][DEBUG ] find the location of an executable +[2020-03-03 20:33:26,516][ceph_deploy.mon][INFO ] distro info: debian 10.3 buster +[2020-03-03 20:33:26,516][pine01][DEBUG ] determining if provided host has same hostname in remote +[2020-03-03 20:33:26,516][pine01][DEBUG ] get remote short hostname +[2020-03-03 20:33:26,573][pine01][DEBUG ] deploying mon to pine01 +[2020-03-03 20:33:26,573][pine01][DEBUG ] get remote short hostname +[2020-03-03 20:33:26,592][pine01][DEBUG ] remote hostname: pine01 +[2020-03-03 20:33:26,643][pine01][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf +[2020-03-03 20:33:26,675][pine01][DEBUG ] create the mon path if it does not exist +[2020-03-03 20:33:26,713][pine01][DEBUG ] checking for done path: /var/lib/ceph/mon/ceph-pine01/done +[2020-03-03 20:33:26,735][pine01][DEBUG ] done path does not exist: /var/lib/ceph/mon/ceph-pine01/done +[2020-03-03 20:33:26,759][pine01][INFO ] creating keyring file: /var/lib/ceph/tmp/ceph-pine01.mon.keyring +[2020-03-03 20:33:26,759][pine01][DEBUG ] create the monitor keyring file +[2020-03-03 20:33:26,807][pine01][INFO ] Running command: ceph-mon --cluster ceph --mkfs -i pine01 --keyring /var/lib/ceph/tmp/ceph-pine01.mon.keyring --setuser 64045 --setgroup 64045 +[2020-03-03 20:33:29,481][pine01][INFO ] unlinking keyring file /var/lib/ceph/tmp/ceph-pine01.mon.keyring +[2020-03-03 20:33:29,518][pine01][DEBUG ] create a done file to avoid re-doing the mon deployment +[2020-03-03 20:33:29,545][pine01][DEBUG ] create the init path if it does not exist +[2020-03-03 20:33:29,593][pine01][INFO ] Running command: systemctl enable ceph.target +[2020-03-03 20:33:36,596][pine01][WARNING] No data was received after 7 seconds, disconnecting... +[2020-03-03 20:33:36,632][pine01][INFO ] Running command: systemctl enable ceph-mon@pine01 +[2020-03-03 20:33:43,634][pine01][WARNING] No data was received after 7 seconds, disconnecting... 
+[2020-03-03 20:33:43,671][pine01][INFO ] Running command: systemctl start ceph-mon@pine01 +[2020-03-03 20:33:50,674][pine01][WARNING] No data was received after 7 seconds, disconnecting... +[2020-03-03 20:33:52,683][pine01][INFO ] Running command: ceph --cluster=ceph --admin-daemon /var/run/ceph/ceph-mon.pine01.asok mon_status +[2020-03-03 20:33:53,449][pine01][ERROR ] admin_socket: exception getting command descriptions: [Errno 2] No such file or directory +[2020-03-03 20:33:53,449][pine01][WARNING] monitor: mon.pine01, might not be running yet +[2020-03-03 20:33:53,455][pine01][INFO ] Running command: ceph --cluster=ceph --admin-daemon /var/run/ceph/ceph-mon.pine01.asok mon_status +[2020-03-03 20:33:54,221][pine01][ERROR ] admin_socket: exception getting command descriptions: [Errno 2] No such file or directory +[2020-03-03 20:33:54,221][pine01][WARNING] monitor pine01 does not exist in monmap +[2020-03-03 20:33:54,222][pine01][WARNING] neither `public_addr` nor `public_network` keys are defined for monitors +[2020-03-03 20:33:54,222][pine01][WARNING] monitors may not be able to form quorum +[2020-03-03 20:33:54,222][ceph_deploy.mon][DEBUG ] detecting platform for host pine02 ... +[2020-03-03 20:33:55,805][pine02][DEBUG ] connected to host: root@pine02 +[2020-03-03 20:33:55,806][pine02][DEBUG ] detect platform information from remote host +[2020-03-03 20:33:55,948][pine02][DEBUG ] detect machine type +[2020-03-03 20:33:55,975][pine02][DEBUG ] find the location of an executable +[2020-03-03 20:33:55,981][ceph_deploy.mon][INFO ] distro info: debian 10.3 buster +[2020-03-03 20:33:55,982][pine02][DEBUG ] determining if provided host has same hostname in remote +[2020-03-03 20:33:55,982][pine02][DEBUG ] get remote short hostname +[2020-03-03 20:33:55,988][pine02][DEBUG ] deploying mon to pine02 +[2020-03-03 20:33:55,988][pine02][DEBUG ] get remote short hostname +[2020-03-03 20:33:55,994][pine02][DEBUG ] remote hostname: pine02 +[2020-03-03 20:33:56,006][pine02][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf +[2020-03-03 20:33:56,018][pine02][DEBUG ] create the mon path if it does not exist +[2020-03-03 20:33:56,024][pine02][DEBUG ] checking for done path: /var/lib/ceph/mon/ceph-pine02/done +[2020-03-03 20:33:56,030][pine02][DEBUG ] done path does not exist: /var/lib/ceph/mon/ceph-pine02/done +[2020-03-03 20:33:56,036][pine02][INFO ] creating keyring file: /var/lib/ceph/tmp/ceph-pine02.mon.keyring +[2020-03-03 20:33:56,036][pine02][DEBUG ] create the monitor keyring file +[2020-03-03 20:33:56,052][pine02][INFO ] Running command: ceph-mon --cluster ceph --mkfs -i pine02 --keyring /var/lib/ceph/tmp/ceph-pine02.mon.keyring --setuser 64045 --setgroup 64045 +[2020-03-03 20:33:56,520][pine02][INFO ] unlinking keyring file /var/lib/ceph/tmp/ceph-pine02.mon.keyring +[2020-03-03 20:33:56,527][pine02][DEBUG ] create a done file to avoid re-doing the mon deployment +[2020-03-03 20:33:56,532][pine02][DEBUG ] create the init path if it does not exist +[2020-03-03 20:33:56,546][pine02][INFO ] Running command: systemctl enable ceph.target +[2020-03-03 20:33:57,222][pine02][INFO ] Running command: systemctl enable ceph-mon@pine02 +[2020-03-03 20:33:57,898][pine02][INFO ] Running command: systemctl start ceph-mon@pine02 +[2020-03-03 20:33:59,976][pine02][INFO ] Running command: ceph --cluster=ceph --admin-daemon /var/run/ceph/ceph-mon.pine02.asok mon_status +[2020-03-03 20:34:00,792][pine02][DEBUG ] ******************************************************************************** +[2020-03-03 
20:34:00,792][pine02][DEBUG ] status for monitor: mon.pine02 +[2020-03-03 20:34:00,792][pine02][DEBUG ] { +[2020-03-03 20:34:00,792][pine02][DEBUG ] "election_epoch": 0, +[2020-03-03 20:34:00,792][pine02][DEBUG ] "extra_probe_peers": [ +[2020-03-03 20:34:00,792][pine02][DEBUG ] { +[2020-03-03 20:34:00,792][pine02][DEBUG ] "addrvec": [ +[2020-03-03 20:34:00,792][pine02][DEBUG ] { +[2020-03-03 20:34:00,793][pine02][DEBUG ] "addr": "192.168.10.160:3300", +[2020-03-03 20:34:00,793][pine02][DEBUG ] "nonce": 0, +[2020-03-03 20:34:00,793][pine02][DEBUG ] "type": "v2" +[2020-03-03 20:34:00,793][pine02][DEBUG ] }, +[2020-03-03 20:34:00,793][pine02][DEBUG ] { +[2020-03-03 20:34:00,793][pine02][DEBUG ] "addr": "192.168.10.160:6789", +[2020-03-03 20:34:00,793][pine02][DEBUG ] "nonce": 0, +[2020-03-03 20:34:00,793][pine02][DEBUG ] "type": "v1" +[2020-03-03 20:34:00,793][pine02][DEBUG ] } +[2020-03-03 20:34:00,793][pine02][DEBUG ] ] +[2020-03-03 20:34:00,793][pine02][DEBUG ] } +[2020-03-03 20:34:00,793][pine02][DEBUG ] ], +[2020-03-03 20:34:00,793][pine02][DEBUG ] "feature_map": { +[2020-03-03 20:34:00,793][pine02][DEBUG ] "mon": [ +[2020-03-03 20:34:00,793][pine02][DEBUG ] { +[2020-03-03 20:34:00,793][pine02][DEBUG ] "features": "0x3ffddff8ffacffff", +[2020-03-03 20:34:00,793][pine02][DEBUG ] "num": 1, +[2020-03-03 20:34:00,794][pine02][DEBUG ] "release": "luminous" +[2020-03-03 20:34:00,794][pine02][DEBUG ] } +[2020-03-03 20:34:00,794][pine02][DEBUG ] ] +[2020-03-03 20:34:00,794][pine02][DEBUG ] }, +[2020-03-03 20:34:00,794][pine02][DEBUG ] "features": { +[2020-03-03 20:34:00,794][pine02][DEBUG ] "quorum_con": "0", +[2020-03-03 20:34:00,794][pine02][DEBUG ] "quorum_mon": [], +[2020-03-03 20:34:00,794][pine02][DEBUG ] "required_con": "0", +[2020-03-03 20:34:00,794][pine02][DEBUG ] "required_mon": [] +[2020-03-03 20:34:00,794][pine02][DEBUG ] }, +[2020-03-03 20:34:00,794][pine02][DEBUG ] "monmap": { +[2020-03-03 20:34:00,794][pine02][DEBUG ] "created": "2020-03-03 20:33:56.229076", +[2020-03-03 20:34:00,794][pine02][DEBUG ] "epoch": 0, +[2020-03-03 20:34:00,794][pine02][DEBUG ] "features": { +[2020-03-03 20:34:00,794][pine02][DEBUG ] "optional": [], +[2020-03-03 20:34:00,794][pine02][DEBUG ] "persistent": [] +[2020-03-03 20:34:00,794][pine02][DEBUG ] }, +[2020-03-03 20:34:00,795][pine02][DEBUG ] "fsid": "29ef4020-303a-4b2e-aa24-a1e20e5ba21c", +[2020-03-03 20:34:00,795][pine02][DEBUG ] "min_mon_release": 0, +[2020-03-03 20:34:00,795][pine02][DEBUG ] "min_mon_release_name": "unknown", +[2020-03-03 20:34:00,795][pine02][DEBUG ] "modified": "2020-03-03 20:33:56.229076", +[2020-03-03 20:34:00,795][pine02][DEBUG ] "mons": [ +[2020-03-03 20:34:00,795][pine02][DEBUG ] { +[2020-03-03 20:34:00,795][pine02][DEBUG ] "addr": "[fd87:3937:d2b7::b5f]:6789/0", +[2020-03-03 20:34:00,795][pine02][DEBUG ] "name": "pine02", +[2020-03-03 20:34:00,795][pine02][DEBUG ] "public_addr": "[fd87:3937:d2b7::b5f]:6789/0", +[2020-03-03 20:34:00,795][pine02][DEBUG ] "public_addrs": { +[2020-03-03 20:34:00,795][pine02][DEBUG ] "addrvec": [ +[2020-03-03 20:34:00,795][pine02][DEBUG ] { +[2020-03-03 20:34:00,795][pine02][DEBUG ] "addr": "[fd87:3937:d2b7::b5f]:3300", +[2020-03-03 20:34:00,795][pine02][DEBUG ] "nonce": 0, +[2020-03-03 20:34:00,795][pine02][DEBUG ] "type": "v2" +[2020-03-03 20:34:00,796][pine02][DEBUG ] }, +[2020-03-03 20:34:00,796][pine02][DEBUG ] { +[2020-03-03 20:34:00,796][pine02][DEBUG ] "addr": "[fd87:3937:d2b7::b5f]:6789", +[2020-03-03 20:34:00,796][pine02][DEBUG ] "nonce": 0, +[2020-03-03 20:34:00,796][pine02][DEBUG ] 
"type": "v1" +[2020-03-03 20:34:00,796][pine02][DEBUG ] } +[2020-03-03 20:34:00,796][pine02][DEBUG ] ] +[2020-03-03 20:34:00,796][pine02][DEBUG ] }, +[2020-03-03 20:34:00,796][pine02][DEBUG ] "rank": 0 +[2020-03-03 20:34:00,796][pine02][DEBUG ] }, +[2020-03-03 20:34:00,796][pine02][DEBUG ] { +[2020-03-03 20:34:00,796][pine02][DEBUG ] "addr": "0.0.0.0:0/1", +[2020-03-03 20:34:00,796][pine02][DEBUG ] "name": "pine01", +[2020-03-03 20:34:00,796][pine02][DEBUG ] "public_addr": "0.0.0.0:0/1", +[2020-03-03 20:34:00,796][pine02][DEBUG ] "public_addrs": { +[2020-03-03 20:34:00,796][pine02][DEBUG ] "addrvec": [ +[2020-03-03 20:34:00,797][pine02][DEBUG ] { +[2020-03-03 20:34:00,797][pine02][DEBUG ] "addr": "0.0.0.0:0", +[2020-03-03 20:34:00,797][pine02][DEBUG ] "nonce": 1, +[2020-03-03 20:34:00,797][pine02][DEBUG ] "type": "v1" +[2020-03-03 20:34:00,797][pine02][DEBUG ] } +[2020-03-03 20:34:00,797][pine02][DEBUG ] ] +[2020-03-03 20:34:00,797][pine02][DEBUG ] }, +[2020-03-03 20:34:00,797][pine02][DEBUG ] "rank": 1 +[2020-03-03 20:34:00,797][pine02][DEBUG ] } +[2020-03-03 20:34:00,797][pine02][DEBUG ] ] +[2020-03-03 20:34:00,797][pine02][DEBUG ] }, +[2020-03-03 20:34:00,797][pine02][DEBUG ] "name": "pine02", +[2020-03-03 20:34:00,797][pine02][DEBUG ] "outside_quorum": [ +[2020-03-03 20:34:00,797][pine02][DEBUG ] "pine02" +[2020-03-03 20:34:00,797][pine02][DEBUG ] ], +[2020-03-03 20:34:00,797][pine02][DEBUG ] "quorum": [], +[2020-03-03 20:34:00,798][pine02][DEBUG ] "rank": 0, +[2020-03-03 20:34:00,798][pine02][DEBUG ] "state": "probing", +[2020-03-03 20:34:00,798][pine02][DEBUG ] "sync_provider": [] +[2020-03-03 20:34:00,798][pine02][DEBUG ] } +[2020-03-03 20:34:00,798][pine02][DEBUG ] ******************************************************************************** +[2020-03-03 20:34:00,798][pine02][INFO ] monitor: mon.pine02 is running +[2020-03-03 20:34:00,806][pine02][INFO ] Running command: ceph --cluster=ceph --admin-daemon /var/run/ceph/ceph-mon.pine02.asok mon_status +[2020-03-03 20:34:01,522][ceph_deploy.mon][INFO ] processing monitor mon.pine01 +[2020-03-03 20:34:53,559][ceph_deploy.conf][DEBUG ] found configuration file at: /home/do/.cephdeploy.conf +[2020-03-03 20:34:53,560][ceph_deploy.cli][INFO ] Invoked (2.0.1): /usr/bin/ceph-deploy --username root disk zap ebin02 /dev/sda +[2020-03-03 20:34:53,560][ceph_deploy.cli][INFO ] ceph-deploy options: +[2020-03-03 20:34:53,560][ceph_deploy.cli][INFO ] username : root +[2020-03-03 20:34:53,560][ceph_deploy.cli][INFO ] verbose : False +[2020-03-03 20:34:53,560][ceph_deploy.cli][INFO ] debug : False +[2020-03-03 20:34:53,560][ceph_deploy.cli][INFO ] overwrite_conf : False +[2020-03-03 20:34:53,560][ceph_deploy.cli][INFO ] subcommand : zap +[2020-03-03 20:34:53,560][ceph_deploy.cli][INFO ] quiet : False +[2020-03-03 20:34:53,560][ceph_deploy.cli][INFO ] cd_conf : +[2020-03-03 20:34:53,560][ceph_deploy.cli][INFO ] cluster : ceph +[2020-03-03 20:34:53,560][ceph_deploy.cli][INFO ] host : ebin02 +[2020-03-03 20:34:53,560][ceph_deploy.cli][INFO ] func : +[2020-03-03 20:34:53,560][ceph_deploy.cli][INFO ] ceph_conf : None +[2020-03-03 20:34:53,560][ceph_deploy.cli][INFO ] default_release : False +[2020-03-03 20:34:53,560][ceph_deploy.cli][INFO ] disk : ['/dev/sda'] +[2020-03-03 20:34:53,561][ceph_deploy.osd][DEBUG ] zapping /dev/sda on ebin02 +[2020-03-03 20:34:53,892][pine01][DEBUG ] connected to host: root@pine01 +[2020-03-03 20:34:53,892][pine01][DEBUG ] detect platform information from remote host +[2020-03-03 20:34:54,036][pine01][DEBUG ] detect 
machine type +[2020-03-03 20:34:54,056][pine01][DEBUG ] find the location of an executable +[2020-03-03 20:34:54,066][pine01][INFO ] Running command: ceph --cluster=ceph --admin-daemon /var/run/ceph/ceph-mon.pine01.asok mon_status +[2020-03-03 20:34:54,832][pine01][ERROR ] admin_socket: exception getting command descriptions: [Errno 2] No such file or directory +[2020-03-03 20:34:54,832][ceph_deploy.mon][WARNING] mon.pine01 monitor is not yet in quorum, tries left: 5 +[2020-03-03 20:34:54,832][ceph_deploy.mon][WARNING] waiting 5 seconds before retrying +[2020-03-03 20:34:55,858][ebin02][DEBUG ] connected to host: root@ebin02 +[2020-03-03 20:34:55,858][ebin02][DEBUG ] detect platform information from remote host +[2020-03-03 20:34:56,054][ebin02][DEBUG ] detect machine type +[2020-03-03 20:34:56,083][ebin02][DEBUG ] find the location of an executable +[2020-03-03 20:34:56,090][ceph_deploy.osd][INFO ] Distro info: debian 10.3 buster +[2020-03-03 20:34:56,091][ebin02][DEBUG ] zeroing last few blocks of device +[2020-03-03 20:34:56,097][ebin02][DEBUG ] find the location of an executable +[2020-03-03 20:34:56,114][ebin02][INFO ] Running command: /usr/sbin/ceph-volume lvm zap /dev/sda +[2020-03-03 20:34:59,842][pine01][INFO ] Running command: ceph --cluster=ceph --admin-daemon /var/run/ceph/ceph-mon.pine01.asok mon_status +[2020-03-03 20:35:00,608][pine01][ERROR ] admin_socket: exception getting command descriptions: [Errno 2] No such file or directory +[2020-03-03 20:35:00,608][ceph_deploy.mon][WARNING] mon.pine01 monitor is not yet in quorum, tries left: 4 +[2020-03-03 20:35:00,608][ceph_deploy.mon][WARNING] waiting 10 seconds before retrying +[2020-03-03 20:35:10,656][pine01][INFO ] Running command: ceph --cluster=ceph --admin-daemon /var/run/ceph/ceph-mon.pine01.asok mon_status +[2020-03-03 20:35:11,472][pine01][ERROR ] admin_socket: exception getting command descriptions: [Errno 2] No such file or directory +[2020-03-03 20:35:11,472][ceph_deploy.mon][WARNING] mon.pine01 monitor is not yet in quorum, tries left: 3 +[2020-03-03 20:35:11,472][ceph_deploy.mon][WARNING] waiting 10 seconds before retrying +[2020-03-03 20:35:21,514][pine01][INFO ] Running command: ceph --cluster=ceph --admin-daemon /var/run/ceph/ceph-mon.pine01.asok mon_status +[2020-03-03 20:35:22,330][pine01][ERROR ] admin_socket: exception getting command descriptions: [Errno 2] No such file or directory +[2020-03-03 20:35:22,330][ceph_deploy.mon][WARNING] mon.pine01 monitor is not yet in quorum, tries left: 2 +[2020-03-03 20:35:22,330][ceph_deploy.mon][WARNING] waiting 15 seconds before retrying +[2020-03-03 20:35:37,350][pine01][INFO ] Running command: ceph --cluster=ceph --admin-daemon /var/run/ceph/ceph-mon.pine01.asok mon_status +[2020-03-03 20:35:38,116][pine01][ERROR ] admin_socket: exception getting command descriptions: [Errno 2] No such file or directory +[2020-03-03 20:35:38,116][ceph_deploy.mon][WARNING] mon.pine01 monitor is not yet in quorum, tries left: 1 +[2020-03-03 20:35:38,116][ceph_deploy.mon][WARNING] waiting 20 seconds before retrying +[2020-03-03 20:35:43,876][ebin02][WARNING] --> Zapping: /dev/sda +[2020-03-03 20:35:43,876][ebin02][WARNING] Running command: /bin/dd if=/dev/zero of=/dev/ceph-e4e20c91-d359-4365-8866-89819da498da/osd-block-aecef4d2-31d5-4e48-bdb8-30b94fc461bb bs=1M count=10 +[2020-03-03 20:35:43,876][ebin02][WARNING] stderr: 10+0 records in +[2020-03-03 20:35:43,876][ebin02][WARNING] 10+0 records out +[2020-03-03 20:35:43,884][ebin02][WARNING] stderr: 10485760 bytes (10 MB, 10 MiB) copied, 
0.101732 s, 103 MB/s +[2020-03-03 20:35:43,887][ebin02][WARNING] --> --destroy was not specified, but zapping a whole device will remove the partition table +[2020-03-03 20:35:43,894][ebin02][WARNING] stderr: wipefs: error: /dev/sda: probing initialization failed: Device or resource busy +[2020-03-03 20:35:43,902][ebin02][WARNING] --> failed to wipefs device, will try again to workaround probable race condition +[2020-03-03 20:35:43,905][ebin02][WARNING] stderr: wipefs: error: /dev/sda: probing initialization failed: Device or resource busy +[2020-03-03 20:35:43,912][ebin02][WARNING] --> failed to wipefs device, will try again to workaround probable race condition +[2020-03-03 20:35:43,916][ebin02][WARNING] stderr: wipefs: error: /dev/sda: probing initialization failed: Device or resource busy +[2020-03-03 20:35:43,923][ebin02][WARNING] --> failed to wipefs device, will try again to workaround probable race condition +[2020-03-03 20:35:43,926][ebin02][WARNING] stderr: wipefs: error: /dev/sda: probing initialization failed: Device or resource busy +[2020-03-03 20:35:43,934][ebin02][WARNING] --> failed to wipefs device, will try again to workaround probable race condition +[2020-03-03 20:35:43,941][ebin02][WARNING] stderr: wipefs: error: /dev/sda: probing initialization failed: Device or resource busy +[2020-03-03 20:35:43,945][ebin02][WARNING] --> failed to wipefs device, will try again to workaround probable race condition +[2020-03-03 20:35:43,946][ebin02][WARNING] stderr: wipefs: error: /dev/sda: probing initialization failed: Device or resource busy +[2020-03-03 20:35:43,949][ebin02][WARNING] --> failed to wipefs device, will try again to workaround probable race condition +[2020-03-03 20:35:43,949][ebin02][WARNING] stderr: wipefs: error: /dev/sda: probing initialization failed: Device or resource busy +[2020-03-03 20:35:43,952][ebin02][WARNING] --> failed to wipefs device, will try again to workaround probable race condition +[2020-03-03 20:35:43,953][ebin02][WARNING] stderr: wipefs: error: /dev/sda: probing initialization failed: Device or resource busy +[2020-03-03 20:35:43,953][ebin02][WARNING] --> failed to wipefs device, will try again to workaround probable race condition +[2020-03-03 20:35:43,956][ebin02][WARNING] --> RuntimeError: could not complete wipefs on device: /dev/sda +[2020-03-03 20:35:44,120][ebin02][ERROR ] RuntimeError: command returned non-zero exit status: 1 +[2020-03-03 20:35:44,121][ceph_deploy][ERROR ] RuntimeError: Failed to execute command: /usr/sbin/ceph-volume lvm zap /dev/sda + +[2020-03-03 20:35:58,136][ceph_deploy.mon][INFO ] processing monitor mon.pine02 +[2020-03-03 20:35:59,627][pine02][DEBUG ] connected to host: root@pine02 +[2020-03-03 20:35:59,627][pine02][DEBUG ] detect platform information from remote host +[2020-03-03 20:35:59,749][pine02][DEBUG ] detect machine type +[2020-03-03 20:35:59,773][pine02][DEBUG ] find the location of an executable +[2020-03-03 20:35:59,786][pine02][INFO ] Running command: ceph --cluster=ceph --admin-daemon /var/run/ceph/ceph-mon.pine02.asok mon_status +[2020-03-03 20:36:00,552][ceph_deploy.mon][WARNING] mon.pine02 monitor is not yet in quorum, tries left: 5 +[2020-03-03 20:36:00,552][ceph_deploy.mon][WARNING] waiting 5 seconds before retrying +[2020-03-03 20:36:05,565][pine02][INFO ] Running command: ceph --cluster=ceph --admin-daemon /var/run/ceph/ceph-mon.pine02.asok mon_status +[2020-03-03 20:36:06,381][ceph_deploy.mon][WARNING] mon.pine02 monitor is not yet in quorum, tries left: 4 +[2020-03-03 
20:36:06,382][ceph_deploy.mon][WARNING] waiting 10 seconds before retrying +[2020-03-03 20:36:16,397][pine02][INFO ] Running command: ceph --cluster=ceph --admin-daemon /var/run/ceph/ceph-mon.pine02.asok mon_status +[2020-03-03 20:36:17,213][ceph_deploy.mon][WARNING] mon.pine02 monitor is not yet in quorum, tries left: 3 +[2020-03-03 20:36:17,213][ceph_deploy.mon][WARNING] waiting 10 seconds before retrying +[2020-03-03 20:36:22,225][ceph_deploy.conf][DEBUG ] found configuration file at: /home/do/.cephdeploy.conf +[2020-03-03 20:36:22,225][ceph_deploy.cli][INFO ] Invoked (2.0.1): /usr/bin/ceph-deploy --username root disk zap ebin02 /dev/sdb +[2020-03-03 20:36:22,225][ceph_deploy.cli][INFO ] ceph-deploy options: +[2020-03-03 20:36:22,225][ceph_deploy.cli][INFO ] username : root +[2020-03-03 20:36:22,225][ceph_deploy.cli][INFO ] verbose : False +[2020-03-03 20:36:22,225][ceph_deploy.cli][INFO ] debug : False +[2020-03-03 20:36:22,225][ceph_deploy.cli][INFO ] overwrite_conf : False +[2020-03-03 20:36:22,225][ceph_deploy.cli][INFO ] subcommand : zap +[2020-03-03 20:36:22,225][ceph_deploy.cli][INFO ] quiet : False +[2020-03-03 20:36:22,225][ceph_deploy.cli][INFO ] cd_conf : +[2020-03-03 20:36:22,225][ceph_deploy.cli][INFO ] cluster : ceph +[2020-03-03 20:36:22,225][ceph_deploy.cli][INFO ] host : ebin02 +[2020-03-03 20:36:22,226][ceph_deploy.cli][INFO ] func : +[2020-03-03 20:36:22,226][ceph_deploy.cli][INFO ] ceph_conf : None +[2020-03-03 20:36:22,226][ceph_deploy.cli][INFO ] default_release : False +[2020-03-03 20:36:22,226][ceph_deploy.cli][INFO ] disk : ['/dev/sdb'] +[2020-03-03 20:36:22,226][ceph_deploy.osd][DEBUG ] zapping /dev/sdb on ebin02 +[2020-03-03 20:36:24,630][ebin02][DEBUG ] connected to host: root@ebin02 +[2020-03-03 20:36:24,631][ebin02][DEBUG ] detect platform information from remote host +[2020-03-03 20:36:24,791][ebin02][DEBUG ] detect machine type +[2020-03-03 20:36:24,821][ebin02][DEBUG ] find the location of an executable +[2020-03-03 20:36:24,827][ceph_deploy.osd][INFO ] Distro info: debian 10.3 buster +[2020-03-03 20:36:24,827][ebin02][DEBUG ] zeroing last few blocks of device +[2020-03-03 20:36:24,835][ebin02][DEBUG ] find the location of an executable +[2020-03-03 20:36:24,852][ebin02][INFO ] Running command: /usr/sbin/ceph-volume lvm zap /dev/sdb +[2020-03-03 20:36:27,229][pine02][INFO ] Running command: ceph --cluster=ceph --admin-daemon /var/run/ceph/ceph-mon.pine02.asok mon_status +[2020-03-03 20:36:27,994][ceph_deploy.mon][WARNING] mon.pine02 monitor is not yet in quorum, tries left: 2 +[2020-03-03 20:36:27,995][ceph_deploy.mon][WARNING] waiting 15 seconds before retrying +[2020-03-03 20:36:43,057][pine02][INFO ] Running command: ceph --cluster=ceph --admin-daemon /var/run/ceph/ceph-mon.pine02.asok mon_status +[2020-03-03 20:36:43,874][ceph_deploy.mon][WARNING] mon.pine02 monitor is not yet in quorum, tries left: 1 +[2020-03-03 20:36:43,874][ceph_deploy.mon][WARNING] waiting 20 seconds before retrying +[2020-03-03 20:37:03,894][ceph_deploy.mon][ERROR ] Some monitors have still not reached quorum: +[2020-03-03 20:37:03,894][ceph_deploy.mon][ERROR ] pine01 +[2020-03-03 20:37:03,894][ceph_deploy.mon][ERROR ] pine02 +[2020-03-03 20:37:12,161][ebin02][WARNING] --> Zapping: /dev/sdb +[2020-03-03 20:37:12,161][ebin02][WARNING] Running command: /bin/dd if=/dev/zero of=/dev/ceph-23610a45-013b-443a-8b68-689a670ca012/osd-block-7bf796f1-dadd-470b-949b-7c5a61693a89 bs=1M count=10 +[2020-03-03 20:37:12,162][ebin02][WARNING] stderr: 10+0 records in +[2020-03-03 
20:37:12,162][ebin02][WARNING] 10+0 records out +[2020-03-03 20:37:12,162][ebin02][WARNING] 10485760 bytes (10 MB, 10 MiB) copied, 0.15268 s, 68.7 MB/s +[2020-03-03 20:37:12,166][ebin02][WARNING] --> --destroy was not specified, but zapping a whole device will remove the partition table +[2020-03-03 20:37:12,167][ebin02][WARNING] stderr: wipefs: error: /dev/sdb: probing initialization failed: Device or resource busy +[2020-03-03 20:37:12,167][ebin02][WARNING] --> failed to wipefs device, will try again to workaround probable race condition +[2020-03-03 20:37:12,168][ebin02][WARNING] stderr: wipefs: error: /dev/sdb: probing initialization failed: Device or resource busy +[2020-03-03 20:37:12,168][ebin02][WARNING] --> failed to wipefs device, will try again to workaround probable race condition +[2020-03-03 20:37:12,168][ebin02][WARNING] stderr: wipefs: error: /dev/sdb: probing initialization failed: Device or resource busy +[2020-03-03 20:37:12,176][ebin02][WARNING] --> failed to wipefs device, will try again to workaround probable race condition +[2020-03-03 20:37:12,176][ebin02][WARNING] stderr: wipefs: error: /dev/sdb: probing initialization failed: Device or resource busy +[2020-03-03 20:37:12,177][ebin02][WARNING] --> failed to wipefs device, will try again to workaround probable race condition +[2020-03-03 20:37:12,209][ebin02][WARNING] stderr: wipefs: error: /dev/sdb: probing initialization failed: Device or resource busy +[2020-03-03 20:37:12,209][ebin02][WARNING] --> failed to wipefs device, will try again to workaround probable race condition +[2020-03-03 20:37:12,217][ebin02][WARNING] stderr: wipefs: error: /dev/sdb: probing initialization failed: Device or resource busy +[2020-03-03 20:37:12,217][ebin02][WARNING] --> failed to wipefs device, will try again to workaround probable race condition +[2020-03-03 20:37:12,221][ebin02][WARNING] stderr: wipefs: error: /dev/sdb: probing initialization failed: Device or resource busy +[2020-03-03 20:37:12,221][ebin02][WARNING] --> failed to wipefs device, will try again to workaround probable race condition +[2020-03-03 20:37:12,221][ebin02][WARNING] stderr: wipefs: error: /dev/sdb: probing initialization failed: Device or resource busy +[2020-03-03 20:37:12,222][ebin02][WARNING] --> failed to wipefs device, will try again to workaround probable race condition +[2020-03-03 20:37:12,222][ebin02][WARNING] --> RuntimeError: could not complete wipefs on device: /dev/sdb +[2020-03-03 20:37:12,437][ebin02][ERROR ] RuntimeError: command returned non-zero exit status: 1 +[2020-03-03 20:37:12,438][ceph_deploy][ERROR ] RuntimeError: Failed to execute command: /usr/sbin/ceph-volume lvm zap /dev/sdb + +[2020-03-03 20:50:56,731][ceph_deploy.conf][DEBUG ] found configuration file at: /home/do/.cephdeploy.conf +[2020-03-03 20:50:56,731][ceph_deploy.cli][INFO ] Invoked (2.0.1): /usr/bin/ceph-deploy --username root mon create-initial +[2020-03-03 20:50:56,731][ceph_deploy.cli][INFO ] ceph-deploy options: +[2020-03-03 20:50:56,732][ceph_deploy.cli][INFO ] username : root +[2020-03-03 20:50:56,732][ceph_deploy.cli][INFO ] verbose : False +[2020-03-03 20:50:56,732][ceph_deploy.cli][INFO ] overwrite_conf : False +[2020-03-03 20:50:56,732][ceph_deploy.cli][INFO ] subcommand : create-initial +[2020-03-03 20:50:56,732][ceph_deploy.cli][INFO ] quiet : False +[2020-03-03 20:50:56,732][ceph_deploy.cli][INFO ] cd_conf : +[2020-03-03 20:50:56,732][ceph_deploy.cli][INFO ] cluster : ceph +[2020-03-03 20:50:56,732][ceph_deploy.cli][INFO ] func : +[2020-03-03 
20:50:56,732][ceph_deploy.cli][INFO ] ceph_conf : None +[2020-03-03 20:50:56,732][ceph_deploy.cli][INFO ] keyrings : None +[2020-03-03 20:50:56,732][ceph_deploy.cli][INFO ] default_release : False +[2020-03-03 20:50:56,733][ceph_deploy.mon][DEBUG ] Deploying mon, cluster ceph hosts pine01 pine02 +[2020-03-03 20:50:56,733][ceph_deploy.mon][DEBUG ] detecting platform for host pine01 ... +[2020-03-03 20:50:58,409][pine01][DEBUG ] connected to host: root@pine01 +[2020-03-03 20:50:58,411][pine01][DEBUG ] detect platform information from remote host +[2020-03-03 20:50:58,555][pine01][DEBUG ] detect machine type +[2020-03-03 20:50:58,578][pine01][DEBUG ] find the location of an executable +[2020-03-03 20:50:58,583][ceph_deploy.mon][INFO ] distro info: debian 10.3 buster +[2020-03-03 20:50:58,584][pine01][DEBUG ] determining if provided host has same hostname in remote +[2020-03-03 20:50:58,584][pine01][DEBUG ] get remote short hostname +[2020-03-03 20:50:58,589][pine01][DEBUG ] deploying mon to pine01 +[2020-03-03 20:50:58,590][pine01][DEBUG ] get remote short hostname +[2020-03-03 20:50:58,594][pine01][DEBUG ] remote hostname: pine01 +[2020-03-03 20:50:58,603][pine01][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf +[2020-03-03 20:50:58,614][pine01][DEBUG ] create the mon path if it does not exist +[2020-03-03 20:50:58,621][pine01][DEBUG ] checking for done path: /var/lib/ceph/mon/ceph-pine01/done +[2020-03-03 20:50:58,628][pine01][DEBUG ] create a done file to avoid re-doing the mon deployment +[2020-03-03 20:50:58,634][pine01][DEBUG ] create the init path if it does not exist +[2020-03-03 20:50:58,648][pine01][INFO ] Running command: systemctl enable ceph.target +[2020-03-03 20:50:59,445][pine01][INFO ] Running command: systemctl enable ceph-mon@pine01 +[2020-03-03 20:51:00,228][pine01][INFO ] Running command: systemctl start ceph-mon@pine01 +[2020-03-03 20:51:02,312][pine01][INFO ] Running command: ceph --cluster=ceph --admin-daemon /var/run/ceph/ceph-mon.pine01.asok mon_status +[2020-03-03 20:51:03,182][pine01][DEBUG ] ******************************************************************************** +[2020-03-03 20:51:03,183][pine01][DEBUG ] status for monitor: mon.pine01 +[2020-03-03 20:51:03,184][pine01][DEBUG ] { +[2020-03-03 20:51:03,184][pine01][DEBUG ] "election_epoch": 4, +[2020-03-03 20:51:03,185][pine01][DEBUG ] "extra_probe_peers": [ +[2020-03-03 20:51:03,185][pine01][DEBUG ] { +[2020-03-03 20:51:03,185][pine01][DEBUG ] "addrvec": [ +[2020-03-03 20:51:03,185][pine01][DEBUG ] { +[2020-03-03 20:51:03,185][pine01][DEBUG ] "addr": "[fd87:3937:d2b7::b5f]:3300", +[2020-03-03 20:51:03,186][pine01][DEBUG ] "nonce": 0, +[2020-03-03 20:51:03,186][pine01][DEBUG ] "type": "v2" +[2020-03-03 20:51:03,186][pine01][DEBUG ] }, +[2020-03-03 20:51:03,186][pine01][DEBUG ] { +[2020-03-03 20:51:03,186][pine01][DEBUG ] "addr": "[fd87:3937:d2b7::b5f]:6789", +[2020-03-03 20:51:03,187][pine01][DEBUG ] "nonce": 0, +[2020-03-03 20:51:03,187][pine01][DEBUG ] "type": "v1" +[2020-03-03 20:51:03,187][pine01][DEBUG ] } +[2020-03-03 20:51:03,187][pine01][DEBUG ] ] +[2020-03-03 20:51:03,187][pine01][DEBUG ] } +[2020-03-03 20:51:03,188][pine01][DEBUG ] ], +[2020-03-03 20:51:03,188][pine01][DEBUG ] "feature_map": { +[2020-03-03 20:51:03,188][pine01][DEBUG ] "mon": [ +[2020-03-03 20:51:03,188][pine01][DEBUG ] { +[2020-03-03 20:51:03,188][pine01][DEBUG ] "features": "0x3ffddff8ffacffff", +[2020-03-03 20:51:03,189][pine01][DEBUG ] "num": 1, +[2020-03-03 20:51:03,189][pine01][DEBUG ] "release": "luminous" 
+[2020-03-03 20:51:03,189][pine01][DEBUG ] } +[2020-03-03 20:51:03,189][pine01][DEBUG ] ] +[2020-03-03 20:51:03,189][pine01][DEBUG ] }, +[2020-03-03 20:51:03,190][pine01][DEBUG ] "features": { +[2020-03-03 20:51:03,190][pine01][DEBUG ] "quorum_con": "4611087854031667199", +[2020-03-03 20:51:03,190][pine01][DEBUG ] "quorum_mon": [ +[2020-03-03 20:51:03,190][pine01][DEBUG ] "kraken", +[2020-03-03 20:51:03,190][pine01][DEBUG ] "luminous", +[2020-03-03 20:51:03,191][pine01][DEBUG ] "mimic", +[2020-03-03 20:51:03,191][pine01][DEBUG ] "osdmap-prune", +[2020-03-03 20:51:03,191][pine01][DEBUG ] "nautilus" +[2020-03-03 20:51:03,191][pine01][DEBUG ] ], +[2020-03-03 20:51:03,191][pine01][DEBUG ] "required_con": "2449958747315912708", +[2020-03-03 20:51:03,192][pine01][DEBUG ] "required_mon": [ +[2020-03-03 20:51:03,192][pine01][DEBUG ] "kraken", +[2020-03-03 20:51:03,192][pine01][DEBUG ] "luminous", +[2020-03-03 20:51:03,192][pine01][DEBUG ] "mimic", +[2020-03-03 20:51:03,192][pine01][DEBUG ] "osdmap-prune", +[2020-03-03 20:51:03,193][pine01][DEBUG ] "nautilus" +[2020-03-03 20:51:03,193][pine01][DEBUG ] ] +[2020-03-03 20:51:03,193][pine01][DEBUG ] }, +[2020-03-03 20:51:03,193][pine01][DEBUG ] "monmap": { +[2020-03-03 20:51:03,193][pine01][DEBUG ] "created": "2020-03-03 20:33:27.003562", +[2020-03-03 20:51:03,194][pine01][DEBUG ] "epoch": 1, +[2020-03-03 20:51:03,194][pine01][DEBUG ] "features": { +[2020-03-03 20:51:03,194][pine01][DEBUG ] "optional": [], +[2020-03-03 20:51:03,194][pine01][DEBUG ] "persistent": [ +[2020-03-03 20:51:03,194][pine01][DEBUG ] "kraken", +[2020-03-03 20:51:03,195][pine01][DEBUG ] "luminous", +[2020-03-03 20:51:03,195][pine01][DEBUG ] "mimic", +[2020-03-03 20:51:03,195][pine01][DEBUG ] "osdmap-prune", +[2020-03-03 20:51:03,195][pine01][DEBUG ] "nautilus" +[2020-03-03 20:51:03,195][pine01][DEBUG ] ] +[2020-03-03 20:51:03,196][pine01][DEBUG ] }, +[2020-03-03 20:51:03,196][pine01][DEBUG ] "fsid": "29ef4020-303a-4b2e-aa24-a1e20e5ba21c", +[2020-03-03 20:51:03,196][pine01][DEBUG ] "min_mon_release": 14, +[2020-03-03 20:51:03,196][pine01][DEBUG ] "min_mon_release_name": "nautilus", +[2020-03-03 20:51:03,196][pine01][DEBUG ] "modified": "2020-03-03 20:33:27.003562", +[2020-03-03 20:51:03,197][pine01][DEBUG ] "mons": [ +[2020-03-03 20:51:03,197][pine01][DEBUG ] { +[2020-03-03 20:51:03,197][pine01][DEBUG ] "addr": "192.168.10.160:6789/0", +[2020-03-03 20:51:03,197][pine01][DEBUG ] "name": "pine01", +[2020-03-03 20:51:03,197][pine01][DEBUG ] "public_addr": "192.168.10.160:6789/0", +[2020-03-03 20:51:03,198][pine01][DEBUG ] "public_addrs": { +[2020-03-03 20:51:03,198][pine01][DEBUG ] "addrvec": [ +[2020-03-03 20:51:03,198][pine01][DEBUG ] { +[2020-03-03 20:51:03,198][pine01][DEBUG ] "addr": "192.168.10.160:3300", +[2020-03-03 20:51:03,198][pine01][DEBUG ] "nonce": 0, +[2020-03-03 20:51:03,198][pine01][DEBUG ] "type": "v2" +[2020-03-03 20:51:03,199][pine01][DEBUG ] }, +[2020-03-03 20:51:03,199][pine01][DEBUG ] { +[2020-03-03 20:51:03,199][pine01][DEBUG ] "addr": "192.168.10.160:6789", +[2020-03-03 20:51:03,199][pine01][DEBUG ] "nonce": 0, +[2020-03-03 20:51:03,199][pine01][DEBUG ] "type": "v1" +[2020-03-03 20:51:03,200][pine01][DEBUG ] } +[2020-03-03 20:51:03,200][pine01][DEBUG ] ] +[2020-03-03 20:51:03,200][pine01][DEBUG ] }, +[2020-03-03 20:51:03,200][pine01][DEBUG ] "rank": 0 +[2020-03-03 20:51:03,200][pine01][DEBUG ] }, +[2020-03-03 20:51:03,201][pine01][DEBUG ] { +[2020-03-03 20:51:03,201][pine01][DEBUG ] "addr": "[fd87:3937:d2b7::b5f]:6789/0", +[2020-03-03 
20:51:03,201][pine01][DEBUG ] "name": "pine02", +[2020-03-03 20:51:03,201][pine01][DEBUG ] "public_addr": "[fd87:3937:d2b7::b5f]:6789/0", +[2020-03-03 20:51:03,201][pine01][DEBUG ] "public_addrs": { +[2020-03-03 20:51:03,202][pine01][DEBUG ] "addrvec": [ +[2020-03-03 20:51:03,202][pine01][DEBUG ] { +[2020-03-03 20:51:03,202][pine01][DEBUG ] "addr": "[fd87:3937:d2b7::b5f]:3300", +[2020-03-03 20:51:03,202][pine01][DEBUG ] "nonce": 0, +[2020-03-03 20:51:03,202][pine01][DEBUG ] "type": "v2" +[2020-03-03 20:51:03,203][pine01][DEBUG ] }, +[2020-03-03 20:51:03,203][pine01][DEBUG ] { +[2020-03-03 20:51:03,203][pine01][DEBUG ] "addr": "[fd87:3937:d2b7::b5f]:6789", +[2020-03-03 20:51:03,203][pine01][DEBUG ] "nonce": 0, +[2020-03-03 20:51:03,203][pine01][DEBUG ] "type": "v1" +[2020-03-03 20:51:03,203][pine01][DEBUG ] } +[2020-03-03 20:51:03,204][pine01][DEBUG ] ] +[2020-03-03 20:51:03,204][pine01][DEBUG ] }, +[2020-03-03 20:51:03,204][pine01][DEBUG ] "rank": 1 +[2020-03-03 20:51:03,204][pine01][DEBUG ] } +[2020-03-03 20:51:03,204][pine01][DEBUG ] ] +[2020-03-03 20:51:03,205][pine01][DEBUG ] }, +[2020-03-03 20:51:03,205][pine01][DEBUG ] "name": "pine01", +[2020-03-03 20:51:03,205][pine01][DEBUG ] "outside_quorum": [], +[2020-03-03 20:51:03,205][pine01][DEBUG ] "quorum": [ +[2020-03-03 20:51:03,205][pine01][DEBUG ] 0, +[2020-03-03 20:51:03,205][pine01][DEBUG ] 1 +[2020-03-03 20:51:03,205][pine01][DEBUG ] ], +[2020-03-03 20:51:03,206][pine01][DEBUG ] "quorum_age": 34, +[2020-03-03 20:51:03,206][pine01][DEBUG ] "rank": 0, +[2020-03-03 20:51:03,206][pine01][DEBUG ] "state": "leader", +[2020-03-03 20:51:03,206][pine01][DEBUG ] "sync_provider": [] +[2020-03-03 20:51:03,206][pine01][DEBUG ] } +[2020-03-03 20:51:03,206][pine01][DEBUG ] ******************************************************************************** +[2020-03-03 20:51:03,206][pine01][INFO ] monitor: mon.pine01 is running +[2020-03-03 20:51:03,214][pine01][INFO ] Running command: ceph --cluster=ceph --admin-daemon /var/run/ceph/ceph-mon.pine01.asok mon_status +[2020-03-03 20:51:04,083][ceph_deploy.mon][DEBUG ] detecting platform for host pine02 ... 
+[2020-03-03 20:51:05,662][pine02][DEBUG ] connected to host: root@pine02 +[2020-03-03 20:51:05,664][pine02][DEBUG ] detect platform information from remote host +[2020-03-03 20:51:05,785][pine02][DEBUG ] detect machine type +[2020-03-03 20:51:05,812][pine02][DEBUG ] find the location of an executable +[2020-03-03 20:51:05,819][ceph_deploy.mon][INFO ] distro info: debian 10.3 buster +[2020-03-03 20:51:05,819][pine02][DEBUG ] determining if provided host has same hostname in remote +[2020-03-03 20:51:05,820][pine02][DEBUG ] get remote short hostname +[2020-03-03 20:51:05,826][pine02][DEBUG ] deploying mon to pine02 +[2020-03-03 20:51:05,827][pine02][DEBUG ] get remote short hostname +[2020-03-03 20:51:05,834][pine02][DEBUG ] remote hostname: pine02 +[2020-03-03 20:51:05,846][pine02][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf +[2020-03-03 20:51:05,858][pine02][DEBUG ] create the mon path if it does not exist +[2020-03-03 20:51:05,867][pine02][DEBUG ] checking for done path: /var/lib/ceph/mon/ceph-pine02/done +[2020-03-03 20:51:05,874][pine02][DEBUG ] create a done file to avoid re-doing the mon deployment +[2020-03-03 20:51:05,880][pine02][DEBUG ] create the init path if it does not exist +[2020-03-03 20:51:05,898][pine02][INFO ] Running command: systemctl enable ceph.target +[2020-03-03 20:51:06,586][pine02][INFO ] Running command: systemctl enable ceph-mon@pine02 +[2020-03-03 20:51:07,269][pine02][INFO ] Running command: systemctl start ceph-mon@pine02 +[2020-03-03 20:51:09,354][pine02][INFO ] Running command: ceph --cluster=ceph --admin-daemon /var/run/ceph/ceph-mon.pine02.asok mon_status +[2020-03-03 20:51:10,123][pine02][DEBUG ] ******************************************************************************** +[2020-03-03 20:51:10,123][pine02][DEBUG ] status for monitor: mon.pine02 +[2020-03-03 20:51:10,123][pine02][DEBUG ] { +[2020-03-03 20:51:10,123][pine02][DEBUG ] "election_epoch": 4, +[2020-03-03 20:51:10,123][pine02][DEBUG ] "extra_probe_peers": [ +[2020-03-03 20:51:10,123][pine02][DEBUG ] { +[2020-03-03 20:51:10,123][pine02][DEBUG ] "addrvec": [ +[2020-03-03 20:51:10,123][pine02][DEBUG ] { +[2020-03-03 20:51:10,124][pine02][DEBUG ] "addr": "192.168.10.160:3300", +[2020-03-03 20:51:10,124][pine02][DEBUG ] "nonce": 0, +[2020-03-03 20:51:10,124][pine02][DEBUG ] "type": "v2" +[2020-03-03 20:51:10,124][pine02][DEBUG ] }, +[2020-03-03 20:51:10,124][pine02][DEBUG ] { +[2020-03-03 20:51:10,124][pine02][DEBUG ] "addr": "192.168.10.160:6789", +[2020-03-03 20:51:10,124][pine02][DEBUG ] "nonce": 0, +[2020-03-03 20:51:10,124][pine02][DEBUG ] "type": "v1" +[2020-03-03 20:51:10,124][pine02][DEBUG ] } +[2020-03-03 20:51:10,124][pine02][DEBUG ] ] +[2020-03-03 20:51:10,124][pine02][DEBUG ] } +[2020-03-03 20:51:10,124][pine02][DEBUG ] ], +[2020-03-03 20:51:10,124][pine02][DEBUG ] "feature_map": { +[2020-03-03 20:51:10,124][pine02][DEBUG ] "mon": [ +[2020-03-03 20:51:10,124][pine02][DEBUG ] { +[2020-03-03 20:51:10,124][pine02][DEBUG ] "features": "0x3ffddff8ffacffff", +[2020-03-03 20:51:10,124][pine02][DEBUG ] "num": 1, +[2020-03-03 20:51:10,124][pine02][DEBUG ] "release": "luminous" +[2020-03-03 20:51:10,124][pine02][DEBUG ] } +[2020-03-03 20:51:10,124][pine02][DEBUG ] ] +[2020-03-03 20:51:10,124][pine02][DEBUG ] }, +[2020-03-03 20:51:10,124][pine02][DEBUG ] "features": { +[2020-03-03 20:51:10,125][pine02][DEBUG ] "quorum_con": "4611087854031667199", +[2020-03-03 20:51:10,125][pine02][DEBUG ] "quorum_mon": [ +[2020-03-03 20:51:10,125][pine02][DEBUG ] "kraken", +[2020-03-03 
20:51:10,125][pine02][DEBUG ] "luminous", +[2020-03-03 20:51:10,125][pine02][DEBUG ] "mimic", +[2020-03-03 20:51:10,125][pine02][DEBUG ] "osdmap-prune", +[2020-03-03 20:51:10,125][pine02][DEBUG ] "nautilus" +[2020-03-03 20:51:10,125][pine02][DEBUG ] ], +[2020-03-03 20:51:10,125][pine02][DEBUG ] "required_con": "2449958747315912708", +[2020-03-03 20:51:10,125][pine02][DEBUG ] "required_mon": [ +[2020-03-03 20:51:10,125][pine02][DEBUG ] "kraken", +[2020-03-03 20:51:10,125][pine02][DEBUG ] "luminous", +[2020-03-03 20:51:10,125][pine02][DEBUG ] "mimic", +[2020-03-03 20:51:10,125][pine02][DEBUG ] "osdmap-prune", +[2020-03-03 20:51:10,125][pine02][DEBUG ] "nautilus" +[2020-03-03 20:51:10,125][pine02][DEBUG ] ] +[2020-03-03 20:51:10,125][pine02][DEBUG ] }, +[2020-03-03 20:51:10,125][pine02][DEBUG ] "monmap": { +[2020-03-03 20:51:10,125][pine02][DEBUG ] "created": "2020-03-03 20:33:27.003562", +[2020-03-03 20:51:10,125][pine02][DEBUG ] "epoch": 1, +[2020-03-03 20:51:10,125][pine02][DEBUG ] "features": { +[2020-03-03 20:51:10,125][pine02][DEBUG ] "optional": [], +[2020-03-03 20:51:10,126][pine02][DEBUG ] "persistent": [ +[2020-03-03 20:51:10,126][pine02][DEBUG ] "kraken", +[2020-03-03 20:51:10,126][pine02][DEBUG ] "luminous", +[2020-03-03 20:51:10,126][pine02][DEBUG ] "mimic", +[2020-03-03 20:51:10,126][pine02][DEBUG ] "osdmap-prune", +[2020-03-03 20:51:10,126][pine02][DEBUG ] "nautilus" +[2020-03-03 20:51:10,126][pine02][DEBUG ] ] +[2020-03-03 20:51:10,126][pine02][DEBUG ] }, +[2020-03-03 20:51:10,126][pine02][DEBUG ] "fsid": "29ef4020-303a-4b2e-aa24-a1e20e5ba21c", +[2020-03-03 20:51:10,126][pine02][DEBUG ] "min_mon_release": 14, +[2020-03-03 20:51:10,126][pine02][DEBUG ] "min_mon_release_name": "nautilus", +[2020-03-03 20:51:10,126][pine02][DEBUG ] "modified": "2020-03-03 20:33:27.003562", +[2020-03-03 20:51:10,126][pine02][DEBUG ] "mons": [ +[2020-03-03 20:51:10,126][pine02][DEBUG ] { +[2020-03-03 20:51:10,126][pine02][DEBUG ] "addr": "192.168.10.160:6789/0", +[2020-03-03 20:51:10,126][pine02][DEBUG ] "name": "pine01", +[2020-03-03 20:51:10,126][pine02][DEBUG ] "public_addr": "192.168.10.160:6789/0", +[2020-03-03 20:51:10,126][pine02][DEBUG ] "public_addrs": { +[2020-03-03 20:51:10,126][pine02][DEBUG ] "addrvec": [ +[2020-03-03 20:51:10,126][pine02][DEBUG ] { +[2020-03-03 20:51:10,126][pine02][DEBUG ] "addr": "192.168.10.160:3300", +[2020-03-03 20:51:10,127][pine02][DEBUG ] "nonce": 0, +[2020-03-03 20:51:10,127][pine02][DEBUG ] "type": "v2" +[2020-03-03 20:51:10,127][pine02][DEBUG ] }, +[2020-03-03 20:51:10,127][pine02][DEBUG ] { +[2020-03-03 20:51:10,127][pine02][DEBUG ] "addr": "192.168.10.160:6789", +[2020-03-03 20:51:10,127][pine02][DEBUG ] "nonce": 0, +[2020-03-03 20:51:10,127][pine02][DEBUG ] "type": "v1" +[2020-03-03 20:51:10,127][pine02][DEBUG ] } +[2020-03-03 20:51:10,127][pine02][DEBUG ] ] +[2020-03-03 20:51:10,127][pine02][DEBUG ] }, +[2020-03-03 20:51:10,127][pine02][DEBUG ] "rank": 0 +[2020-03-03 20:51:10,127][pine02][DEBUG ] }, +[2020-03-03 20:51:10,127][pine02][DEBUG ] { +[2020-03-03 20:51:10,127][pine02][DEBUG ] "addr": "[fd87:3937:d2b7::b5f]:6789/0", +[2020-03-03 20:51:10,127][pine02][DEBUG ] "name": "pine02", +[2020-03-03 20:51:10,127][pine02][DEBUG ] "public_addr": "[fd87:3937:d2b7::b5f]:6789/0", +[2020-03-03 20:51:10,127][pine02][DEBUG ] "public_addrs": { +[2020-03-03 20:51:10,127][pine02][DEBUG ] "addrvec": [ +[2020-03-03 20:51:10,127][pine02][DEBUG ] { +[2020-03-03 20:51:10,127][pine02][DEBUG ] "addr": "[fd87:3937:d2b7::b5f]:3300", +[2020-03-03 20:51:10,127][pine02][DEBUG ] 
"nonce": 0, +[2020-03-03 20:51:10,127][pine02][DEBUG ] "type": "v2" +[2020-03-03 20:51:10,128][pine02][DEBUG ] }, +[2020-03-03 20:51:10,128][pine02][DEBUG ] { +[2020-03-03 20:51:10,128][pine02][DEBUG ] "addr": "[fd87:3937:d2b7::b5f]:6789", +[2020-03-03 20:51:10,128][pine02][DEBUG ] "nonce": 0, +[2020-03-03 20:51:10,128][pine02][DEBUG ] "type": "v1" +[2020-03-03 20:51:10,128][pine02][DEBUG ] } +[2020-03-03 20:51:10,128][pine02][DEBUG ] ] +[2020-03-03 20:51:10,128][pine02][DEBUG ] }, +[2020-03-03 20:51:10,128][pine02][DEBUG ] "rank": 1 +[2020-03-03 20:51:10,128][pine02][DEBUG ] } +[2020-03-03 20:51:10,128][pine02][DEBUG ] ] +[2020-03-03 20:51:10,128][pine02][DEBUG ] }, +[2020-03-03 20:51:10,128][pine02][DEBUG ] "name": "pine02", +[2020-03-03 20:51:10,128][pine02][DEBUG ] "outside_quorum": [], +[2020-03-03 20:51:10,128][pine02][DEBUG ] "quorum": [ +[2020-03-03 20:51:10,128][pine02][DEBUG ] 0, +[2020-03-03 20:51:10,128][pine02][DEBUG ] 1 +[2020-03-03 20:51:10,128][pine02][DEBUG ] ], +[2020-03-03 20:51:10,128][pine02][DEBUG ] "quorum_age": 41, +[2020-03-03 20:51:10,128][pine02][DEBUG ] "rank": 1, +[2020-03-03 20:51:10,128][pine02][DEBUG ] "state": "peon", +[2020-03-03 20:51:10,128][pine02][DEBUG ] "sync_provider": [] +[2020-03-03 20:51:10,129][pine02][DEBUG ] } +[2020-03-03 20:51:10,129][pine02][DEBUG ] ******************************************************************************** +[2020-03-03 20:51:10,129][pine02][INFO ] monitor: mon.pine02 is running +[2020-03-03 20:51:10,137][pine02][INFO ] Running command: ceph --cluster=ceph --admin-daemon /var/run/ceph/ceph-mon.pine02.asok mon_status +[2020-03-03 20:51:10,857][ceph_deploy.mon][INFO ] processing monitor mon.pine01 +[2020-03-03 20:51:12,552][pine01][DEBUG ] connected to host: root@pine01 +[2020-03-03 20:51:12,554][pine01][DEBUG ] detect platform information from remote host +[2020-03-03 20:51:12,698][pine01][DEBUG ] detect machine type +[2020-03-03 20:51:12,722][pine01][DEBUG ] find the location of an executable +[2020-03-03 20:51:12,737][pine01][INFO ] Running command: ceph --cluster=ceph --admin-daemon /var/run/ceph/ceph-mon.pine01.asok mon_status +[2020-03-03 20:51:13,608][ceph_deploy.mon][INFO ] mon.pine01 monitor has reached quorum! +[2020-03-03 20:51:13,608][ceph_deploy.mon][INFO ] processing monitor mon.pine02 +[2020-03-03 20:51:15,209][pine02][DEBUG ] connected to host: root@pine02 +[2020-03-03 20:51:15,211][pine02][DEBUG ] detect platform information from remote host +[2020-03-03 20:51:15,329][pine02][DEBUG ] detect machine type +[2020-03-03 20:51:15,358][pine02][DEBUG ] find the location of an executable +[2020-03-03 20:51:15,374][pine02][INFO ] Running command: ceph --cluster=ceph --admin-daemon /var/run/ceph/ceph-mon.pine02.asok mon_status +[2020-03-03 20:51:16,145][ceph_deploy.mon][INFO ] mon.pine02 monitor has reached quorum! +[2020-03-03 20:51:16,146][ceph_deploy.mon][INFO ] all initial monitors are running and have formed quorum +[2020-03-03 20:51:16,146][ceph_deploy.mon][INFO ] Running gatherkeys... 
+[2020-03-03 20:51:16,152][ceph_deploy.gatherkeys][INFO ] Storing keys in temp directory /tmp/tmp525mzS +[2020-03-03 20:51:17,853][pine01][DEBUG ] connected to host: root@pine01 +[2020-03-03 20:51:17,855][pine01][DEBUG ] detect platform information from remote host +[2020-03-03 20:51:17,998][pine01][DEBUG ] detect machine type +[2020-03-03 20:51:18,022][pine01][DEBUG ] get remote short hostname +[2020-03-03 20:51:18,027][pine01][DEBUG ] fetch remote file +[2020-03-03 20:51:18,041][pine01][INFO ] Running command: /usr/bin/ceph --connect-timeout=25 --cluster=ceph --admin-daemon=/var/run/ceph/ceph-mon.pine01.asok mon_status +[2020-03-03 20:51:18,971][pine01][INFO ] Running command: /usr/bin/ceph --connect-timeout=25 --cluster=ceph --name mon. --keyring=/var/lib/ceph/mon/ceph-pine01/keyring auth get client.admin +[2020-03-03 20:51:21,003][pine01][INFO ] Running command: /usr/bin/ceph --connect-timeout=25 --cluster=ceph --name mon. --keyring=/var/lib/ceph/mon/ceph-pine01/keyring auth get client.bootstrap-mds +[2020-03-03 20:51:23,036][pine01][INFO ] Running command: /usr/bin/ceph --connect-timeout=25 --cluster=ceph --name mon. --keyring=/var/lib/ceph/mon/ceph-pine01/keyring auth get client.bootstrap-mgr +[2020-03-03 20:51:25,070][pine01][INFO ] Running command: /usr/bin/ceph --connect-timeout=25 --cluster=ceph --name mon. --keyring=/var/lib/ceph/mon/ceph-pine01/keyring auth get client.bootstrap-osd +[2020-03-03 20:51:27,100][pine01][INFO ] Running command: /usr/bin/ceph --connect-timeout=25 --cluster=ceph --name mon. --keyring=/var/lib/ceph/mon/ceph-pine01/keyring auth get client.bootstrap-rgw +[2020-03-03 20:51:29,125][ceph_deploy.gatherkeys][INFO ] Storing ceph.client.admin.keyring +[2020-03-03 20:51:29,126][ceph_deploy.gatherkeys][INFO ] Storing ceph.bootstrap-mds.keyring +[2020-03-03 20:51:29,126][ceph_deploy.gatherkeys][INFO ] Storing ceph.bootstrap-mgr.keyring +[2020-03-03 20:51:29,127][ceph_deploy.gatherkeys][INFO ] keyring 'ceph.mon.keyring' already exists +[2020-03-03 20:51:29,127][ceph_deploy.gatherkeys][INFO ] Storing ceph.bootstrap-osd.keyring +[2020-03-03 20:51:29,128][ceph_deploy.gatherkeys][INFO ] Storing ceph.bootstrap-rgw.keyring +[2020-03-03 20:51:29,129][ceph_deploy.gatherkeys][INFO ] Destroy temp directory /tmp/tmp525mzS +[2020-03-03 20:52:25,195][ceph_deploy.conf][DEBUG ] found configuration file at: /home/do/.cephdeploy.conf +[2020-03-03 20:52:25,195][ceph_deploy.cli][INFO ] Invoked (2.0.1): /usr/bin/ceph-deploy --username root admin pine01 pine02 riot01 +[2020-03-03 20:52:25,195][ceph_deploy.cli][INFO ] ceph-deploy options: +[2020-03-03 20:52:25,195][ceph_deploy.cli][INFO ] username : root +[2020-03-03 20:52:25,195][ceph_deploy.cli][INFO ] verbose : False +[2020-03-03 20:52:25,195][ceph_deploy.cli][INFO ] overwrite_conf : False +[2020-03-03 20:52:25,195][ceph_deploy.cli][INFO ] quiet : False +[2020-03-03 20:52:25,195][ceph_deploy.cli][INFO ] cd_conf : +[2020-03-03 20:52:25,195][ceph_deploy.cli][INFO ] cluster : ceph +[2020-03-03 20:52:25,195][ceph_deploy.cli][INFO ] client : ['pine01', 'pine02', 'riot01'] +[2020-03-03 20:52:25,195][ceph_deploy.cli][INFO ] func : +[2020-03-03 20:52:25,195][ceph_deploy.cli][INFO ] ceph_conf : None +[2020-03-03 20:52:25,196][ceph_deploy.cli][INFO ] default_release : False +[2020-03-03 20:52:25,196][ceph_deploy.admin][DEBUG ] Pushing admin keys and conf to pine01 +[2020-03-03 20:52:26,822][pine01][DEBUG ] connected to host: root@pine01 +[2020-03-03 20:52:26,824][pine01][DEBUG ] detect platform information from remote host +[2020-03-03 
20:52:26,969][pine01][DEBUG ] detect machine type +[2020-03-03 20:52:26,993][pine01][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf +[2020-03-03 20:52:27,012][ceph_deploy.admin][DEBUG ] Pushing admin keys and conf to pine02 +[2020-03-03 20:52:28,624][pine02][DEBUG ] connected to host: root@pine02 +[2020-03-03 20:52:28,626][pine02][DEBUG ] detect platform information from remote host +[2020-03-03 20:52:28,747][pine02][DEBUG ] detect machine type +[2020-03-03 20:52:28,775][pine02][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf +[2020-03-03 20:52:28,796][ceph_deploy.admin][DEBUG ] Pushing admin keys and conf to riot01 +[2020-03-03 20:52:30,321][riot01][DEBUG ] connected to host: root@riot01 +[2020-03-03 20:52:30,323][riot01][DEBUG ] detect platform information from remote host +[2020-03-03 20:52:30,493][riot01][DEBUG ] detect machine type +[2020-03-03 20:52:30,535][riot01][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf +[2020-03-03 20:52:52,470][ceph_deploy.conf][DEBUG ] found configuration file at: /home/do/.cephdeploy.conf +[2020-03-03 20:52:52,470][ceph_deploy.cli][INFO ] Invoked (2.0.1): /usr/bin/ceph-deploy --username root mgr create riot01 +[2020-03-03 20:52:52,470][ceph_deploy.cli][INFO ] ceph-deploy options: +[2020-03-03 20:52:52,470][ceph_deploy.cli][INFO ] username : root +[2020-03-03 20:52:52,470][ceph_deploy.cli][INFO ] verbose : False +[2020-03-03 20:52:52,470][ceph_deploy.cli][INFO ] mgr : [('riot01', 'riot01')] +[2020-03-03 20:52:52,470][ceph_deploy.cli][INFO ] overwrite_conf : False +[2020-03-03 20:52:52,470][ceph_deploy.cli][INFO ] subcommand : create +[2020-03-03 20:52:52,470][ceph_deploy.cli][INFO ] quiet : False +[2020-03-03 20:52:52,470][ceph_deploy.cli][INFO ] cd_conf : +[2020-03-03 20:52:52,470][ceph_deploy.cli][INFO ] cluster : ceph +[2020-03-03 20:52:52,470][ceph_deploy.cli][INFO ] func : +[2020-03-03 20:52:52,470][ceph_deploy.cli][INFO ] ceph_conf : None +[2020-03-03 20:52:52,470][ceph_deploy.cli][INFO ] default_release : False +[2020-03-03 20:52:52,470][ceph_deploy.mgr][DEBUG ] Deploying mgr, cluster ceph hosts riot01:riot01 +[2020-03-03 20:52:53,954][riot01][DEBUG ] connected to host: root@riot01 +[2020-03-03 20:52:53,955][riot01][DEBUG ] detect platform information from remote host +[2020-03-03 20:52:54,125][riot01][DEBUG ] detect machine type +[2020-03-03 20:52:54,166][ceph_deploy.mgr][INFO ] Distro info: debian 10.3 buster +[2020-03-03 20:52:54,166][ceph_deploy.mgr][DEBUG ] remote host will use systemd +[2020-03-03 20:52:54,168][ceph_deploy.mgr][DEBUG ] deploying mgr bootstrap to riot01 +[2020-03-03 20:52:54,168][riot01][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf +[2020-03-03 20:52:54,198][riot01][WARNING] mgr keyring does not exist yet, creating one +[2020-03-03 20:52:54,198][riot01][DEBUG ] create a keyring file +[2020-03-03 20:52:54,213][riot01][DEBUG ] create path recursively if it doesn't exist +[2020-03-03 20:52:54,231][riot01][INFO ] Running command: ceph --cluster ceph --name client.bootstrap-mgr --keyring /var/lib/ceph/bootstrap-mgr/ceph.keyring auth get-or-create mgr.riot01 mon allow profile mgr osd allow * mds allow * -o /var/lib/ceph/mgr/ceph-riot01/keyring +[2020-03-03 20:52:55,113][riot01][ERROR ] Traceback (most recent call last): +[2020-03-03 20:52:55,113][riot01][ERROR ] File "/usr/bin/ceph", line 1266, in +[2020-03-03 20:52:55,113][riot01][ERROR ] retval = main() +[2020-03-03 20:52:55,114][riot01][ERROR ] File "/usr/bin/ceph", line 979, in main +[2020-03-03 
20:52:55,114][riot01][ERROR ] conffile=conffile) +[2020-03-03 20:52:55,114][riot01][ERROR ] File "/usr/lib/python3/dist-packages/ceph_argparse.py", line 1319, in run_in_thread +[2020-03-03 20:52:55,114][riot01][ERROR ] raise Exception("timed out") +[2020-03-03 20:52:55,114][riot01][ERROR ] Exception: timed out +[2020-03-03 20:52:55,115][riot01][ERROR ] exit code from command was: 1 +[2020-03-03 20:52:55,115][ceph_deploy.mgr][ERROR ] could not create mgr +[2020-03-03 20:52:55,115][ceph_deploy][ERROR ] GenericError: Failed to create 1 MGRs + +[2020-03-03 20:53:22,331][ceph_deploy.conf][DEBUG ] found configuration file at: /home/do/.cephdeploy.conf +[2020-03-03 20:53:22,332][ceph_deploy.cli][INFO ] Invoked (2.0.1): /usr/bin/ceph-deploy --username root mgr create pine01 pine02 +[2020-03-03 20:53:22,332][ceph_deploy.cli][INFO ] ceph-deploy options: +[2020-03-03 20:53:22,332][ceph_deploy.cli][INFO ] username : root +[2020-03-03 20:53:22,332][ceph_deploy.cli][INFO ] verbose : False +[2020-03-03 20:53:22,332][ceph_deploy.cli][INFO ] mgr : [('pine01', 'pine01'), ('pine02', 'pine02')] +[2020-03-03 20:53:22,332][ceph_deploy.cli][INFO ] overwrite_conf : False +[2020-03-03 20:53:22,332][ceph_deploy.cli][INFO ] subcommand : create +[2020-03-03 20:53:22,332][ceph_deploy.cli][INFO ] quiet : False +[2020-03-03 20:53:22,332][ceph_deploy.cli][INFO ] cd_conf : +[2020-03-03 20:53:22,332][ceph_deploy.cli][INFO ] cluster : ceph +[2020-03-03 20:53:22,332][ceph_deploy.cli][INFO ] func : +[2020-03-03 20:53:22,332][ceph_deploy.cli][INFO ] ceph_conf : None +[2020-03-03 20:53:22,332][ceph_deploy.cli][INFO ] default_release : False +[2020-03-03 20:53:22,332][ceph_deploy.mgr][DEBUG ] Deploying mgr, cluster ceph hosts pine01:pine01 pine02:pine02 +[2020-03-03 20:53:23,942][pine01][DEBUG ] connected to host: root@pine01 +[2020-03-03 20:53:23,944][pine01][DEBUG ] detect platform information from remote host +[2020-03-03 20:53:24,089][pine01][DEBUG ] detect machine type +[2020-03-03 20:53:24,113][ceph_deploy.mgr][INFO ] Distro info: debian 10.3 buster +[2020-03-03 20:53:24,113][ceph_deploy.mgr][DEBUG ] remote host will use systemd +[2020-03-03 20:53:24,115][ceph_deploy.mgr][DEBUG ] deploying mgr bootstrap to pine01 +[2020-03-03 20:53:24,115][pine01][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf +[2020-03-03 20:53:24,130][pine01][WARNING] mgr keyring does not exist yet, creating one +[2020-03-03 20:53:24,131][pine01][DEBUG ] create a keyring file +[2020-03-03 20:53:24,145][pine01][DEBUG ] create path recursively if it doesn't exist +[2020-03-03 20:53:24,158][pine01][INFO ] Running command: ceph --cluster ceph --name client.bootstrap-mgr --keyring /var/lib/ceph/bootstrap-mgr/ceph.keyring auth get-or-create mgr.pine01 mon allow profile mgr osd allow * mds allow * -o /var/lib/ceph/mgr/ceph-pine01/keyring +[2020-03-03 20:53:26,313][pine01][INFO ] Running command: systemctl enable ceph-mgr@pine01 +[2020-03-03 20:53:27,099][pine01][INFO ] Running command: systemctl start ceph-mgr@pine01 +[2020-03-03 20:53:27,180][pine01][INFO ] Running command: systemctl enable ceph.target +[2020-03-03 20:53:29,616][pine02][DEBUG ] connected to host: root@pine02 +[2020-03-03 20:53:29,618][pine02][DEBUG ] detect platform information from remote host +[2020-03-03 20:53:29,736][pine02][DEBUG ] detect machine type +[2020-03-03 20:53:29,762][ceph_deploy.mgr][INFO ] Distro info: debian 10.3 buster +[2020-03-03 20:53:29,763][ceph_deploy.mgr][DEBUG ] remote host will use systemd +[2020-03-03 20:53:29,763][ceph_deploy.mgr][DEBUG ] 
deploying mgr bootstrap to pine02 +[2020-03-03 20:53:29,764][pine02][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf +[2020-03-03 20:53:29,783][pine02][WARNING] mgr keyring does not exist yet, creating one +[2020-03-03 20:53:29,783][pine02][DEBUG ] create a keyring file +[2020-03-03 20:53:29,796][pine02][DEBUG ] create path recursively if it doesn't exist +[2020-03-03 20:53:29,812][pine02][INFO ] Running command: ceph --cluster ceph --name client.bootstrap-mgr --keyring /var/lib/ceph/bootstrap-mgr/ceph.keyring auth get-or-create mgr.pine02 mon allow profile mgr osd allow * mds allow * -o /var/lib/ceph/mgr/ceph-pine02/keyring +[2020-03-03 20:53:31,614][pine02][INFO ] Running command: systemctl enable ceph-mgr@pine02 +[2020-03-03 20:53:32,298][pine02][INFO ] Running command: systemctl start ceph-mgr@pine02 +[2020-03-03 20:53:32,379][pine02][INFO ] Running command: systemctl enable ceph.target +[2020-03-03 20:53:44,061][ceph_deploy.conf][DEBUG ] found configuration file at: /home/do/.cephdeploy.conf +[2020-03-03 20:53:44,061][ceph_deploy.cli][INFO ] Invoked (2.0.1): /usr/bin/ceph-deploy --username root osd list ebin01 ebin02 +[2020-03-03 20:53:44,062][ceph_deploy.cli][INFO ] ceph-deploy options: +[2020-03-03 20:53:44,062][ceph_deploy.cli][INFO ] username : root +[2020-03-03 20:53:44,062][ceph_deploy.cli][INFO ] verbose : False +[2020-03-03 20:53:44,062][ceph_deploy.cli][INFO ] debug : False +[2020-03-03 20:53:44,062][ceph_deploy.cli][INFO ] overwrite_conf : False +[2020-03-03 20:53:44,062][ceph_deploy.cli][INFO ] subcommand : list +[2020-03-03 20:53:44,062][ceph_deploy.cli][INFO ] quiet : False +[2020-03-03 20:53:44,062][ceph_deploy.cli][INFO ] cd_conf : +[2020-03-03 20:53:44,062][ceph_deploy.cli][INFO ] cluster : ceph +[2020-03-03 20:53:44,062][ceph_deploy.cli][INFO ] host : ['ebin01', 'ebin02'] +[2020-03-03 20:53:44,062][ceph_deploy.cli][INFO ] func : +[2020-03-03 20:53:44,062][ceph_deploy.cli][INFO ] ceph_conf : None +[2020-03-03 20:53:44,062][ceph_deploy.cli][INFO ] default_release : False +[2020-03-03 20:53:46,390][ebin01][DEBUG ] connected to host: root@ebin01 +[2020-03-03 20:53:46,391][ebin01][DEBUG ] detect platform information from remote host +[2020-03-03 20:53:46,580][ebin01][DEBUG ] detect machine type +[2020-03-03 20:53:46,611][ebin01][DEBUG ] find the location of an executable +[2020-03-03 20:53:46,619][ceph_deploy.osd][INFO ] Distro info: debian 10.3 buster +[2020-03-03 20:53:46,620][ceph_deploy.osd][DEBUG ] Listing disks on ebin01... 
+[2020-03-03 20:53:46,620][ebin01][DEBUG ] find the location of an executable +[2020-03-03 20:53:46,639][ebin01][INFO ] Running command: /usr/sbin/ceph-volume lvm list +[2020-03-03 20:53:48,881][ebin01][DEBUG ] +[2020-03-03 20:53:48,882][ebin01][DEBUG ] +[2020-03-03 20:53:48,882][ebin01][DEBUG ] ====== osd.0 ======= +[2020-03-03 20:53:48,883][ebin01][DEBUG ] +[2020-03-03 20:53:48,883][ebin01][DEBUG ] [block] /dev/ceph-0ab81ebc-c87d-4f64-9dd7-a434ee256a64/osd-block-59a20cc8-f77c-4851-a06d-45e57a2562d3 +[2020-03-03 20:53:48,883][ebin01][DEBUG ] +[2020-03-03 20:53:48,883][ebin01][DEBUG ] block device /dev/ceph-0ab81ebc-c87d-4f64-9dd7-a434ee256a64/osd-block-59a20cc8-f77c-4851-a06d-45e57a2562d3 +[2020-03-03 20:53:48,884][ebin01][DEBUG ] block uuid KWoO4Y-Qu5f-fmaq-LY5F-CJwm-Sh9J-rYKPfg +[2020-03-03 20:53:48,884][ebin01][DEBUG ] cephx lockbox secret +[2020-03-03 20:53:48,884][ebin01][DEBUG ] cluster fsid 18a73a87-beb2-4997-95dd-2a42fb758b13 +[2020-03-03 20:53:48,884][ebin01][DEBUG ] cluster name ceph +[2020-03-03 20:53:48,885][ebin01][DEBUG ] crush device class None +[2020-03-03 20:53:48,885][ebin01][DEBUG ] encrypted 0 +[2020-03-03 20:53:48,885][ebin01][DEBUG ] osd fsid 59a20cc8-f77c-4851-a06d-45e57a2562d3 +[2020-03-03 20:53:48,885][ebin01][DEBUG ] osd id 0 +[2020-03-03 20:53:48,885][ebin01][DEBUG ] type block +[2020-03-03 20:53:48,886][ebin01][DEBUG ] vdo 0 +[2020-03-03 20:53:48,886][ebin01][DEBUG ] devices /dev/sda +[2020-03-03 20:53:51,323][ebin02][DEBUG ] connected to host: root@ebin02 +[2020-03-03 20:53:51,325][ebin02][DEBUG ] detect platform information from remote host +[2020-03-03 20:53:51,483][ebin02][DEBUG ] detect machine type +[2020-03-03 20:53:51,515][ebin02][DEBUG ] find the location of an executable +[2020-03-03 20:53:51,523][ceph_deploy.osd][INFO ] Distro info: debian 10.3 buster +[2020-03-03 20:53:51,524][ceph_deploy.osd][DEBUG ] Listing disks on ebin02... 
+[2020-03-03 20:53:51,524][ebin02][DEBUG ] find the location of an executable +[2020-03-03 20:53:51,543][ebin02][INFO ] Running command: /usr/sbin/ceph-volume lvm list +[2020-03-03 20:53:53,475][ebin02][WARNING] No valid Ceph devices found +[2020-03-03 20:53:53,641][ebin02][ERROR ] RuntimeError: command returned non-zero exit status: 1 +[2020-03-03 20:53:53,642][ceph_deploy][ERROR ] RuntimeError: Failed to execute command: /usr/sbin/ceph-volume lvm list + +[2020-03-03 20:54:01,325][ceph_deploy.conf][DEBUG ] found configuration file at: /home/do/.cephdeploy.conf +[2020-03-03 20:54:01,325][ceph_deploy.cli][INFO ] Invoked (2.0.1): /usr/bin/ceph-deploy --username root disk zap ebin02 /dev/sda +[2020-03-03 20:54:01,325][ceph_deploy.cli][INFO ] ceph-deploy options: +[2020-03-03 20:54:01,325][ceph_deploy.cli][INFO ] username : root +[2020-03-03 20:54:01,325][ceph_deploy.cli][INFO ] verbose : False +[2020-03-03 20:54:01,325][ceph_deploy.cli][INFO ] debug : False +[2020-03-03 20:54:01,325][ceph_deploy.cli][INFO ] overwrite_conf : False +[2020-03-03 20:54:01,325][ceph_deploy.cli][INFO ] subcommand : zap +[2020-03-03 20:54:01,325][ceph_deploy.cli][INFO ] quiet : False +[2020-03-03 20:54:01,325][ceph_deploy.cli][INFO ] cd_conf : +[2020-03-03 20:54:01,325][ceph_deploy.cli][INFO ] cluster : ceph +[2020-03-03 20:54:01,325][ceph_deploy.cli][INFO ] host : ebin02 +[2020-03-03 20:54:01,325][ceph_deploy.cli][INFO ] func : +[2020-03-03 20:54:01,325][ceph_deploy.cli][INFO ] ceph_conf : None +[2020-03-03 20:54:01,325][ceph_deploy.cli][INFO ] default_release : False +[2020-03-03 20:54:01,326][ceph_deploy.cli][INFO ] disk : ['/dev/sda'] +[2020-03-03 20:54:01,326][ceph_deploy.osd][DEBUG ] zapping /dev/sda on ebin02 +[2020-03-03 20:54:03,608][ebin02][DEBUG ] connected to host: root@ebin02 +[2020-03-03 20:54:03,610][ebin02][DEBUG ] detect platform information from remote host +[2020-03-03 20:54:03,769][ebin02][DEBUG ] detect machine type +[2020-03-03 20:54:03,800][ebin02][DEBUG ] find the location of an executable +[2020-03-03 20:54:03,808][ceph_deploy.osd][INFO ] Distro info: debian 10.3 buster +[2020-03-03 20:54:03,808][ebin02][DEBUG ] zeroing last few blocks of device +[2020-03-03 20:54:03,815][ebin02][DEBUG ] find the location of an executable +[2020-03-03 20:54:03,836][ebin02][INFO ] Running command: /usr/sbin/ceph-volume lvm zap /dev/sda +[2020-03-03 20:54:47,536][ebin02][WARNING] --> Zapping: /dev/sda +[2020-03-03 20:54:47,537][ebin02][WARNING] Running command: /bin/dd if=/dev/zero of=/dev/ceph-e4e20c91-d359-4365-8866-89819da498da/osd-block-aecef4d2-31d5-4e48-bdb8-30b94fc461bb bs=1M count=10 +[2020-03-03 20:54:47,538][ebin02][WARNING] stderr: 10+0 records in +[2020-03-03 20:54:47,538][ebin02][WARNING] 10+0 records out +[2020-03-03 20:54:47,538][ebin02][WARNING] 10485760 bytes (10 MB, 10 MiB) copied, 0.118145 s, 88.8 MB/s +[2020-03-03 20:54:47,538][ebin02][WARNING] --> --destroy was not specified, but zapping a whole device will remove the partition table +[2020-03-03 20:54:47,542][ebin02][WARNING] stderr: wipefs: error: /dev/sda: probing initialization failed: Device or resource busy +[2020-03-03 20:54:47,546][ebin02][WARNING] --> failed to wipefs device, will try again to workaround probable race condition +[2020-03-03 20:54:47,550][ebin02][WARNING] stderr: wipefs: error: /dev/sda: probing initialization failed: Device or resource busy +[2020-03-03 20:54:47,557][ebin02][WARNING] --> failed to wipefs device, will try again to workaround probable race condition +[2020-03-03 20:54:47,559][ebin02][WARNING] 
stderr: wipefs: error: /dev/sda: probing initialization failed: Device or resource busy +[2020-03-03 20:54:47,567][ebin02][WARNING] --> failed to wipefs device, will try again to workaround probable race condition +[2020-03-03 20:54:47,571][ebin02][WARNING] stderr: wipefs: error: /dev/sda: probing initialization failed: Device or resource busy +[2020-03-03 20:54:47,574][ebin02][WARNING] --> failed to wipefs device, will try again to workaround probable race condition +[2020-03-03 20:54:47,578][ebin02][WARNING] stderr: wipefs: error: /dev/sda: probing initialization failed: Device or resource busy +[2020-03-03 20:54:47,582][ebin02][WARNING] --> failed to wipefs device, will try again to workaround probable race condition +[2020-03-03 20:54:47,586][ebin02][WARNING] stderr: wipefs: error: /dev/sda: probing initialization failed: Device or resource busy +[2020-03-03 20:54:47,594][ebin02][WARNING] --> failed to wipefs device, will try again to workaround probable race condition +[2020-03-03 20:54:47,595][ebin02][WARNING] stderr: wipefs: error: /dev/sda: probing initialization failed: Device or resource busy +[2020-03-03 20:54:47,596][ebin02][WARNING] --> failed to wipefs device, will try again to workaround probable race condition +[2020-03-03 20:54:47,598][ebin02][WARNING] stderr: wipefs: error: /dev/sda: probing initialization failed: Device or resource busy +[2020-03-03 20:54:47,601][ebin02][WARNING] --> failed to wipefs device, will try again to workaround probable race condition +[2020-03-03 20:54:47,602][ebin02][WARNING] --> RuntimeError: could not complete wipefs on device: /dev/sda +[2020-03-03 20:54:47,767][ebin02][ERROR ] RuntimeError: command returned non-zero exit status: 1 +[2020-03-03 20:54:47,768][ceph_deploy][ERROR ] RuntimeError: Failed to execute command: /usr/sbin/ceph-volume lvm zap /dev/sda + +[2020-03-03 20:55:52,509][ceph_deploy.conf][DEBUG ] found configuration file at: /home/do/.cephdeploy.conf +[2020-03-03 20:55:52,510][ceph_deploy.cli][INFO ] Invoked (2.0.1): /usr/bin/ceph-deploy --username root osd create --data /dev/sda ebin01 +[2020-03-03 20:55:52,510][ceph_deploy.cli][INFO ] ceph-deploy options: +[2020-03-03 20:55:52,510][ceph_deploy.cli][INFO ] verbose : False +[2020-03-03 20:55:52,510][ceph_deploy.cli][INFO ] bluestore : None +[2020-03-03 20:55:52,510][ceph_deploy.cli][INFO ] cd_conf : +[2020-03-03 20:55:52,510][ceph_deploy.cli][INFO ] cluster : ceph +[2020-03-03 20:55:52,510][ceph_deploy.cli][INFO ] fs_type : xfs +[2020-03-03 20:55:52,510][ceph_deploy.cli][INFO ] block_wal : None +[2020-03-03 20:55:52,510][ceph_deploy.cli][INFO ] default_release : False +[2020-03-03 20:55:52,510][ceph_deploy.cli][INFO ] username : root +[2020-03-03 20:55:52,510][ceph_deploy.cli][INFO ] journal : None +[2020-03-03 20:55:52,510][ceph_deploy.cli][INFO ] subcommand : create +[2020-03-03 20:55:52,510][ceph_deploy.cli][INFO ] host : ebin01 +[2020-03-03 20:55:52,510][ceph_deploy.cli][INFO ] filestore : None +[2020-03-03 20:55:52,510][ceph_deploy.cli][INFO ] func : +[2020-03-03 20:55:52,510][ceph_deploy.cli][INFO ] ceph_conf : None +[2020-03-03 20:55:52,510][ceph_deploy.cli][INFO ] zap_disk : False +[2020-03-03 20:55:52,510][ceph_deploy.cli][INFO ] data : /dev/sda +[2020-03-03 20:55:52,510][ceph_deploy.cli][INFO ] block_db : None +[2020-03-03 20:55:52,510][ceph_deploy.cli][INFO ] dmcrypt : False +[2020-03-03 20:55:52,510][ceph_deploy.cli][INFO ] overwrite_conf : False +[2020-03-03 20:55:52,510][ceph_deploy.cli][INFO ] dmcrypt_key_dir : /etc/ceph/dmcrypt-keys +[2020-03-03 
20:55:52,510][ceph_deploy.cli][INFO ] quiet : False +[2020-03-03 20:55:52,510][ceph_deploy.cli][INFO ] debug : False +[2020-03-03 20:55:52,511][ceph_deploy.osd][DEBUG ] Creating OSD on cluster ceph with data device /dev/sda +[2020-03-03 20:55:54,747][ebin01][DEBUG ] connected to host: root@ebin01 +[2020-03-03 20:55:54,748][ebin01][DEBUG ] detect platform information from remote host +[2020-03-03 20:55:54,908][ebin01][DEBUG ] detect machine type +[2020-03-03 20:55:54,941][ebin01][DEBUG ] find the location of an executable +[2020-03-03 20:55:54,949][ceph_deploy.osd][INFO ] Distro info: debian 10.3 buster +[2020-03-03 20:55:54,949][ceph_deploy.osd][DEBUG ] Deploying osd to ebin01 +[2020-03-03 20:55:54,950][ebin01][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf +[2020-03-03 20:55:54,970][ebin01][WARNING] osd keyring does not exist yet, creating one +[2020-03-03 20:55:54,971][ebin01][DEBUG ] create a keyring file +[2020-03-03 20:55:54,986][ebin01][DEBUG ] find the location of an executable +[2020-03-03 20:55:55,003][ebin01][INFO ] Running command: /usr/sbin/ceph-volume --cluster ceph lvm create --bluestore --data /dev/sda +[2020-03-03 20:56:08,938][ebin01][WARNING] Running command: /usr/bin/ceph-authtool --gen-print-key +[2020-03-03 20:56:08,938][ebin01][WARNING] Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring -i - osd new 6f0f2140-9f0f-4de3-b45a-4aed93f84322 +[2020-03-03 20:56:08,941][ebin01][WARNING] Running command: /sbin/vgcreate -s 1G --force --yes ceph-d3f8da3f-65ab-4e91-afa5-fd3722fc9874 /dev/sda +[2020-03-03 20:56:08,942][ebin01][WARNING] stdout: Physical volume "/dev/sda" successfully created. +[2020-03-03 20:56:08,945][ebin01][WARNING] stdout: Volume group "ceph-d3f8da3f-65ab-4e91-afa5-fd3722fc9874" successfully created +[2020-03-03 20:56:08,945][ebin01][WARNING] Running command: /sbin/lvcreate --yes -l 100%FREE -n osd-block-6f0f2140-9f0f-4de3-b45a-4aed93f84322 ceph-d3f8da3f-65ab-4e91-afa5-fd3722fc9874 +[2020-03-03 20:56:08,945][ebin01][WARNING] stdout: Logical volume "osd-block-6f0f2140-9f0f-4de3-b45a-4aed93f84322" created. 
+[2020-03-03 20:56:08,949][ebin01][WARNING] Running command: /usr/bin/ceph-authtool --gen-print-key +[2020-03-03 20:56:08,949][ebin01][WARNING] Running command: /bin/mount -t tmpfs tmpfs /var/lib/ceph/osd/ceph-0 +[2020-03-03 20:56:08,950][ebin01][WARNING] --> Absolute path not found for executable: selinuxenabled +[2020-03-03 20:56:08,953][ebin01][WARNING] --> Ensure $PATH environment variable contains common executable locations +[2020-03-03 20:56:08,953][ebin01][WARNING] Running command: /bin/chown -R ceph:ceph /dev/ceph-d3f8da3f-65ab-4e91-afa5-fd3722fc9874/osd-block-6f0f2140-9f0f-4de3-b45a-4aed93f84322 +[2020-03-03 20:56:08,954][ebin01][WARNING] stderr: /bin/chown: cannot access '/dev/ceph-d3f8da3f-65ab-4e91-afa5-fd3722fc9874/osd-block-6f0f2140-9f0f-4de3-b45a-4aed93f84322': No such file or directory +[2020-03-03 20:56:08,957][ebin01][WARNING] --> Was unable to complete a new OSD, will rollback changes +[2020-03-03 20:56:08,958][ebin01][WARNING] Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring osd purge-new osd.0 --yes-i-really-mean-it +[2020-03-03 20:56:08,959][ebin01][WARNING] stderr: 2020-03-03 20:56:06.766 7fa33831e0 -1 auth: unable to find a keyring on /etc/ceph/ceph.client.bootstrap-osd.keyring,/etc/ceph/ceph.keyring,/etc/ceph/keyring,/etc/ceph/keyring.bin,: (2) No such file or directory +[2020-03-03 20:56:08,963][ebin01][WARNING] 2020-03-03 20:56:06.766 7fa33831e0 -1 AuthRegistry(0x7f9c0814b8) no keyring found at /etc/ceph/ceph.client.bootstrap-osd.keyring,/etc/ceph/ceph.keyring,/etc/ceph/keyring,/etc/ceph/keyring.bin,, disabling cephx +[2020-03-03 20:56:08,963][ebin01][WARNING] stderr: purged osd.0 +[2020-03-03 20:56:08,964][ebin01][WARNING] --> RuntimeError: command returned non-zero exit status: 1 +[2020-03-03 20:56:09,128][ebin01][ERROR ] RuntimeError: command returned non-zero exit status: 1 +[2020-03-03 20:56:09,128][ceph_deploy.osd][ERROR ] Failed to execute command: /usr/sbin/ceph-volume --cluster ceph lvm create --bluestore --data /dev/sda +[2020-03-03 20:56:09,128][ceph_deploy][ERROR ] GenericError: Failed to create 1 OSDs + +[2020-03-03 20:59:09,657][ceph_deploy.conf][DEBUG ] found configuration file at: /home/do/.cephdeploy.conf +[2020-03-03 20:59:09,658][ceph_deploy.cli][INFO ] Invoked (2.0.1): /usr/bin/ceph-deploy --username root osd create --data /dev/sda ebin01 +[2020-03-03 20:59:09,658][ceph_deploy.cli][INFO ] ceph-deploy options: +[2020-03-03 20:59:09,658][ceph_deploy.cli][INFO ] verbose : False +[2020-03-03 20:59:09,658][ceph_deploy.cli][INFO ] bluestore : None +[2020-03-03 20:59:09,658][ceph_deploy.cli][INFO ] cd_conf : +[2020-03-03 20:59:09,658][ceph_deploy.cli][INFO ] cluster : ceph +[2020-03-03 20:59:09,658][ceph_deploy.cli][INFO ] fs_type : xfs +[2020-03-03 20:59:09,658][ceph_deploy.cli][INFO ] block_wal : None +[2020-03-03 20:59:09,658][ceph_deploy.cli][INFO ] default_release : False +[2020-03-03 20:59:09,658][ceph_deploy.cli][INFO ] username : root +[2020-03-03 20:59:09,658][ceph_deploy.cli][INFO ] journal : None +[2020-03-03 20:59:09,658][ceph_deploy.cli][INFO ] subcommand : create +[2020-03-03 20:59:09,658][ceph_deploy.cli][INFO ] host : ebin01 +[2020-03-03 20:59:09,658][ceph_deploy.cli][INFO ] filestore : None +[2020-03-03 20:59:09,658][ceph_deploy.cli][INFO ] func : +[2020-03-03 20:59:09,659][ceph_deploy.cli][INFO ] ceph_conf : None +[2020-03-03 20:59:09,659][ceph_deploy.cli][INFO ] zap_disk : False +[2020-03-03 20:59:09,659][ceph_deploy.cli][INFO ] data : /dev/sda 
+[2020-03-03 20:59:09,659][ceph_deploy.cli][INFO ] block_db : None +[2020-03-03 20:59:09,659][ceph_deploy.cli][INFO ] dmcrypt : False +[2020-03-03 20:59:09,659][ceph_deploy.cli][INFO ] overwrite_conf : False +[2020-03-03 20:59:09,659][ceph_deploy.cli][INFO ] dmcrypt_key_dir : /etc/ceph/dmcrypt-keys +[2020-03-03 20:59:09,659][ceph_deploy.cli][INFO ] quiet : False +[2020-03-03 20:59:09,659][ceph_deploy.cli][INFO ] debug : False +[2020-03-03 20:59:09,659][ceph_deploy.osd][DEBUG ] Creating OSD on cluster ceph with data device /dev/sda +[2020-03-03 20:59:30,781][ebin01][DEBUG ] connected to host: root@ebin01 +[2020-03-03 20:59:30,783][ebin01][DEBUG ] detect platform information from remote host +[2020-03-03 20:59:30,991][ebin01][DEBUG ] detect machine type +[2020-03-03 20:59:31,023][ebin01][DEBUG ] find the location of an executable +[2020-03-03 20:59:31,031][ceph_deploy.osd][INFO ] Distro info: debian 10.3 buster +[2020-03-03 20:59:31,032][ceph_deploy.osd][DEBUG ] Deploying osd to ebin01 +[2020-03-03 20:59:31,033][ebin01][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf +[2020-03-03 20:59:31,459][ebin01][DEBUG ] find the location of an executable +[2020-03-03 20:59:31,481][ebin01][INFO ] Running command: /usr/sbin/ceph-volume --cluster ceph lvm create --bluestore --data /dev/sda +[2020-03-03 20:59:42,105][ebin01][WARNING] Running command: /usr/bin/ceph-authtool --gen-print-key +[2020-03-03 20:59:42,106][ebin01][WARNING] Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring -i - osd new 72de8b10-759d-4eba-b5b0-db5de0f903e0 +[2020-03-03 20:59:42,106][ebin01][WARNING] Running command: /sbin/vgcreate -s 1G --force --yes ceph-cc5335d3-3c90-46b8-9d21-c510e671aa48 /dev/sda +[2020-03-03 20:59:42,106][ebin01][WARNING] stderr: Physical volume '/dev/sda' is already in volume group 'ceph-d3f8da3f-65ab-4e91-afa5-fd3722fc9874' +[2020-03-03 20:59:42,106][ebin01][WARNING] Unable to add physical volume '/dev/sda' to volume group 'ceph-d3f8da3f-65ab-4e91-afa5-fd3722fc9874' +[2020-03-03 20:59:42,106][ebin01][WARNING] /dev/sda: physical volume not initialized. 
+[2020-03-03 20:59:42,106][ebin01][WARNING] --> Was unable to complete a new OSD, will rollback changes +[2020-03-03 20:59:42,106][ebin01][WARNING] Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring osd purge-new osd.0 --yes-i-really-mean-it +[2020-03-03 20:59:42,110][ebin01][WARNING] stderr: 2020-03-03 20:59:39.906 7fad7d91e0 -1 auth: unable to find a keyring on /etc/ceph/ceph.client.bootstrap-osd.keyring,/etc/ceph/ceph.keyring,/etc/ceph/keyring,/etc/ceph/keyring.bin,: (2) No such file or directory +[2020-03-03 20:59:42,126][ebin01][WARNING] 2020-03-03 20:59:39.906 7fad7d91e0 -1 AuthRegistry(0x7fa80814b8) no keyring found at /etc/ceph/ceph.client.bootstrap-osd.keyring,/etc/ceph/ceph.keyring,/etc/ceph/keyring,/etc/ceph/keyring.bin,, disabling cephx +[2020-03-03 20:59:42,126][ebin01][WARNING] stderr: purged osd.0 +[2020-03-03 20:59:42,126][ebin01][WARNING] --> RuntimeError: command returned non-zero exit status: 5 +[2020-03-03 20:59:42,291][ebin01][ERROR ] RuntimeError: command returned non-zero exit status: 1 +[2020-03-03 20:59:42,291][ceph_deploy.osd][ERROR ] Failed to execute command: /usr/sbin/ceph-volume --cluster ceph lvm create --bluestore --data /dev/sda +[2020-03-03 20:59:42,292][ceph_deploy][ERROR ] GenericError: Failed to create 1 OSDs + +[2020-03-03 21:01:10,466][ceph_deploy.conf][DEBUG ] found configuration file at: /home/do/.cephdeploy.conf +[2020-03-03 21:01:10,466][ceph_deploy.cli][INFO ] Invoked (2.0.1): /usr/bin/ceph-deploy --username root osd create --data /dev/sda ebin01 +[2020-03-03 21:01:10,466][ceph_deploy.cli][INFO ] ceph-deploy options: +[2020-03-03 21:01:10,466][ceph_deploy.cli][INFO ] verbose : False +[2020-03-03 21:01:10,466][ceph_deploy.cli][INFO ] bluestore : None +[2020-03-03 21:01:10,466][ceph_deploy.cli][INFO ] cd_conf : +[2020-03-03 21:01:10,466][ceph_deploy.cli][INFO ] cluster : ceph +[2020-03-03 21:01:10,466][ceph_deploy.cli][INFO ] fs_type : xfs +[2020-03-03 21:01:10,466][ceph_deploy.cli][INFO ] block_wal : None +[2020-03-03 21:01:10,466][ceph_deploy.cli][INFO ] default_release : False +[2020-03-03 21:01:10,466][ceph_deploy.cli][INFO ] username : root +[2020-03-03 21:01:10,466][ceph_deploy.cli][INFO ] journal : None +[2020-03-03 21:01:10,466][ceph_deploy.cli][INFO ] subcommand : create +[2020-03-03 21:01:10,466][ceph_deploy.cli][INFO ] host : ebin01 +[2020-03-03 21:01:10,466][ceph_deploy.cli][INFO ] filestore : None +[2020-03-03 21:01:10,467][ceph_deploy.cli][INFO ] func : +[2020-03-03 21:01:10,467][ceph_deploy.cli][INFO ] ceph_conf : None +[2020-03-03 21:01:10,467][ceph_deploy.cli][INFO ] zap_disk : False +[2020-03-03 21:01:10,467][ceph_deploy.cli][INFO ] data : /dev/sda +[2020-03-03 21:01:10,467][ceph_deploy.cli][INFO ] block_db : None +[2020-03-03 21:01:10,467][ceph_deploy.cli][INFO ] dmcrypt : False +[2020-03-03 21:01:10,467][ceph_deploy.cli][INFO ] overwrite_conf : False +[2020-03-03 21:01:10,467][ceph_deploy.cli][INFO ] dmcrypt_key_dir : /etc/ceph/dmcrypt-keys +[2020-03-03 21:01:10,467][ceph_deploy.cli][INFO ] quiet : False +[2020-03-03 21:01:10,467][ceph_deploy.cli][INFO ] debug : False +[2020-03-03 21:01:10,467][ceph_deploy.osd][DEBUG ] Creating OSD on cluster ceph with data device /dev/sda +[2020-03-03 21:01:12,702][ebin01][DEBUG ] connected to host: root@ebin01 +[2020-03-03 21:01:12,704][ebin01][DEBUG ] detect platform information from remote host +[2020-03-03 21:01:12,862][ebin01][DEBUG ] detect machine type +[2020-03-03 21:01:12,894][ebin01][DEBUG ] find the location of 
an executable +[2020-03-03 21:01:12,902][ceph_deploy.osd][INFO ] Distro info: debian 10.3 buster +[2020-03-03 21:01:12,902][ceph_deploy.osd][DEBUG ] Deploying osd to ebin01 +[2020-03-03 21:01:12,903][ebin01][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf +[2020-03-03 21:01:12,927][ebin01][DEBUG ] find the location of an executable +[2020-03-03 21:01:12,946][ebin01][INFO ] Running command: /usr/sbin/ceph-volume --cluster ceph lvm create --bluestore --data /dev/sda +[2020-03-03 21:01:43,207][ebin01][WARNING] Running command: /usr/bin/ceph-authtool --gen-print-key +[2020-03-03 21:01:43,208][ebin01][WARNING] Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring -i - osd new ff3a69c1-930c-4128-a640-2f85d0f0a860 +[2020-03-03 21:01:43,209][ebin01][WARNING] Running command: /sbin/vgcreate -s 1G --force --yes ceph-3da4301b-1b3e-41f3-943d-475852aa6757 /dev/sda +[2020-03-03 21:01:43,209][ebin01][WARNING] stdout: Physical volume "/dev/sda" successfully created. +[2020-03-03 21:01:43,209][ebin01][WARNING] stdout: Volume group "ceph-3da4301b-1b3e-41f3-943d-475852aa6757" successfully created +[2020-03-03 21:01:43,210][ebin01][WARNING] Running command: /sbin/lvcreate --yes -l 100%FREE -n osd-block-ff3a69c1-930c-4128-a640-2f85d0f0a860 ceph-3da4301b-1b3e-41f3-943d-475852aa6757 +[2020-03-03 21:01:43,211][ebin01][WARNING] stdout: Logical volume "osd-block-ff3a69c1-930c-4128-a640-2f85d0f0a860" created. +[2020-03-03 21:01:43,211][ebin01][WARNING] Running command: /usr/bin/ceph-authtool --gen-print-key +[2020-03-03 21:01:43,211][ebin01][WARNING] Running command: /bin/mount -t tmpfs tmpfs /var/lib/ceph/osd/ceph-0 +[2020-03-03 21:01:43,212][ebin01][WARNING] --> Absolute path not found for executable: selinuxenabled +[2020-03-03 21:01:43,212][ebin01][WARNING] --> Ensure $PATH environment variable contains common executable locations +[2020-03-03 21:01:43,212][ebin01][WARNING] Running command: /bin/chown -h ceph:ceph /dev/ceph-3da4301b-1b3e-41f3-943d-475852aa6757/osd-block-ff3a69c1-930c-4128-a640-2f85d0f0a860 +[2020-03-03 21:01:43,213][ebin01][WARNING] Running command: /bin/chown -R ceph:ceph /dev/dm-0 +[2020-03-03 21:01:43,213][ebin01][WARNING] Running command: /bin/ln -s /dev/ceph-3da4301b-1b3e-41f3-943d-475852aa6757/osd-block-ff3a69c1-930c-4128-a640-2f85d0f0a860 /var/lib/ceph/osd/ceph-0/block +[2020-03-03 21:01:43,213][ebin01][WARNING] Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring mon getmap -o /var/lib/ceph/osd/ceph-0/activate.monmap +[2020-03-03 21:01:43,214][ebin01][WARNING] stderr: 2020-03-03 21:01:30.006 7faab2a1e0 -1 auth: unable to find a keyring on /etc/ceph/ceph.client.bootstrap-osd.keyring,/etc/ceph/ceph.keyring,/etc/ceph/keyring,/etc/ceph/keyring.bin,: (2) No such file or directory +[2020-03-03 21:01:43,215][ebin01][WARNING] 2020-03-03 21:01:30.006 7faab2a1e0 -1 AuthRegistry(0x7fa40814b8) no keyring found at /etc/ceph/ceph.client.bootstrap-osd.keyring,/etc/ceph/ceph.keyring,/etc/ceph/keyring,/etc/ceph/keyring.bin,, disabling cephx +[2020-03-03 21:01:43,215][ebin01][WARNING] stderr: got monmap epoch 1 +[2020-03-03 21:01:43,215][ebin01][WARNING] Running command: /usr/bin/ceph-authtool /var/lib/ceph/osd/ceph-0/keyring --create-keyring --name osd.0 --add-key AQCMt15e8JhxJBAAUg1f+oCXJZTYVqS2tM3g8w== +[2020-03-03 21:01:43,216][ebin01][WARNING] stdout: creating /var/lib/ceph/osd/ceph-0/keyring +[2020-03-03 21:01:43,216][ebin01][WARNING] added 
entity osd.0 auth(key=AQCMt15e8JhxJBAAUg1f+oCXJZTYVqS2tM3g8w==) +[2020-03-03 21:01:43,216][ebin01][WARNING] Running command: /bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-0/keyring +[2020-03-03 21:01:43,216][ebin01][WARNING] Running command: /bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-0/ +[2020-03-03 21:01:43,217][ebin01][WARNING] Running command: /usr/bin/ceph-osd --cluster ceph --osd-objectstore bluestore --mkfs -i 0 --monmap /var/lib/ceph/osd/ceph-0/activate.monmap --keyfile - --osd-data /var/lib/ceph/osd/ceph-0/ --osd-uuid ff3a69c1-930c-4128-a640-2f85d0f0a860 --setuser ceph --setgroup ceph +[2020-03-03 21:01:43,217][ebin01][WARNING] --> ceph-volume lvm prepare successful for: /dev/sda +[2020-03-03 21:01:43,217][ebin01][WARNING] Running command: /bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-0 +[2020-03-03 21:01:43,218][ebin01][WARNING] Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-3da4301b-1b3e-41f3-943d-475852aa6757/osd-block-ff3a69c1-930c-4128-a640-2f85d0f0a860 --path /var/lib/ceph/osd/ceph-0 --no-mon-config +[2020-03-03 21:01:43,218][ebin01][WARNING] Running command: /bin/ln -snf /dev/ceph-3da4301b-1b3e-41f3-943d-475852aa6757/osd-block-ff3a69c1-930c-4128-a640-2f85d0f0a860 /var/lib/ceph/osd/ceph-0/block +[2020-03-03 21:01:43,219][ebin01][WARNING] Running command: /bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-0/block +[2020-03-03 21:01:43,220][ebin01][WARNING] Running command: /bin/chown -R ceph:ceph /dev/dm-0 +[2020-03-03 21:01:43,221][ebin01][WARNING] Running command: /bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-0 +[2020-03-03 21:01:43,225][ebin01][WARNING] Running command: /bin/systemctl enable ceph-volume@lvm-0-ff3a69c1-930c-4128-a640-2f85d0f0a860 +[2020-03-03 21:01:43,232][ebin01][WARNING] stderr: Created symlink /etc/systemd/system/multi-user.target.wants/ceph-volume@lvm-0-ff3a69c1-930c-4128-a640-2f85d0f0a860.service → /lib/systemd/system/ceph-volume@.service. +[2020-03-03 21:01:43,234][ebin01][WARNING] Running command: /bin/systemctl enable --runtime ceph-osd@0 +[2020-03-03 21:01:43,238][ebin01][WARNING] stderr: Created symlink /run/systemd/system/ceph-osd.target.wants/ceph-osd@0.service → /lib/systemd/system/ceph-osd@.service. +[2020-03-03 21:01:43,254][ebin01][WARNING] Running command: /bin/systemctl start ceph-osd@0 +[2020-03-03 21:01:43,254][ebin01][WARNING] --> ceph-volume lvm activate successful for osd ID: 0 +[2020-03-03 21:01:43,254][ebin01][WARNING] --> ceph-volume lvm create successful for: /dev/sda +[2020-03-03 21:01:48,424][ebin01][INFO ] checking OSD status... +[2020-03-03 21:01:48,424][ebin01][DEBUG ] find the location of an executable +[2020-03-03 21:01:48,443][ebin01][INFO ] Running command: /usr/bin/ceph --cluster=ceph osd stat --format=json +[2020-03-03 21:01:49,614][ceph_deploy.osd][DEBUG ] Host ebin01 is now ready for osd use. 
+[2020-03-03 21:01:56,256][ceph_deploy.conf][DEBUG ] found configuration file at: /home/do/.cephdeploy.conf +[2020-03-03 21:01:56,256][ceph_deploy.cli][INFO ] Invoked (2.0.1): /usr/bin/ceph-deploy --username root osd create --data /dev/sdb ebin01 +[2020-03-03 21:01:56,256][ceph_deploy.cli][INFO ] ceph-deploy options: +[2020-03-03 21:01:56,257][ceph_deploy.cli][INFO ] verbose : False +[2020-03-03 21:01:56,257][ceph_deploy.cli][INFO ] bluestore : None +[2020-03-03 21:01:56,257][ceph_deploy.cli][INFO ] cd_conf : +[2020-03-03 21:01:56,257][ceph_deploy.cli][INFO ] cluster : ceph +[2020-03-03 21:01:56,257][ceph_deploy.cli][INFO ] fs_type : xfs +[2020-03-03 21:01:56,257][ceph_deploy.cli][INFO ] block_wal : None +[2020-03-03 21:01:56,257][ceph_deploy.cli][INFO ] default_release : False +[2020-03-03 21:01:56,257][ceph_deploy.cli][INFO ] username : root +[2020-03-03 21:01:56,257][ceph_deploy.cli][INFO ] journal : None +[2020-03-03 21:01:56,257][ceph_deploy.cli][INFO ] subcommand : create +[2020-03-03 21:01:56,257][ceph_deploy.cli][INFO ] host : ebin01 +[2020-03-03 21:01:56,257][ceph_deploy.cli][INFO ] filestore : None +[2020-03-03 21:01:56,257][ceph_deploy.cli][INFO ] func : +[2020-03-03 21:01:56,257][ceph_deploy.cli][INFO ] ceph_conf : None +[2020-03-03 21:01:56,257][ceph_deploy.cli][INFO ] zap_disk : False +[2020-03-03 21:01:56,257][ceph_deploy.cli][INFO ] data : /dev/sdb +[2020-03-03 21:01:56,257][ceph_deploy.cli][INFO ] block_db : None +[2020-03-03 21:01:56,257][ceph_deploy.cli][INFO ] dmcrypt : False +[2020-03-03 21:01:56,257][ceph_deploy.cli][INFO ] overwrite_conf : False +[2020-03-03 21:01:56,257][ceph_deploy.cli][INFO ] dmcrypt_key_dir : /etc/ceph/dmcrypt-keys +[2020-03-03 21:01:56,257][ceph_deploy.cli][INFO ] quiet : False +[2020-03-03 21:01:56,257][ceph_deploy.cli][INFO ] debug : False +[2020-03-03 21:01:56,258][ceph_deploy.osd][DEBUG ] Creating OSD on cluster ceph with data device /dev/sdb +[2020-03-03 21:01:58,457][ebin01][DEBUG ] connected to host: root@ebin01 +[2020-03-03 21:01:58,458][ebin01][DEBUG ] detect platform information from remote host +[2020-03-03 21:01:58,620][ebin01][DEBUG ] detect machine type +[2020-03-03 21:01:58,654][ebin01][DEBUG ] find the location of an executable +[2020-03-03 21:01:58,662][ceph_deploy.osd][INFO ] Distro info: debian 10.3 buster +[2020-03-03 21:01:58,662][ceph_deploy.osd][DEBUG ] Deploying osd to ebin01 +[2020-03-03 21:01:58,663][ebin01][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf +[2020-03-03 21:01:58,685][ebin01][DEBUG ] find the location of an executable +[2020-03-03 21:01:58,705][ebin01][INFO ] Running command: /usr/sbin/ceph-volume --cluster ceph lvm create --bluestore --data /dev/sdb +[2020-03-03 21:02:19,575][ebin01][WARNING] Running command: /usr/bin/ceph-authtool --gen-print-key +[2020-03-03 21:02:19,576][ebin01][WARNING] Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring -i - osd new 9afbd9a8-8bf7-4357-a6b0-4be056f5cdab +[2020-03-03 21:02:19,576][ebin01][WARNING] Running command: /sbin/vgcreate -s 1G --force --yes ceph-df3d495c-1c7a-4717-80b0-8af7e1e6cd5e /dev/sdb +[2020-03-03 21:02:19,576][ebin01][WARNING] stdout: Physical volume "/dev/sdb" successfully created. 
+[2020-03-03 21:02:19,577][ebin01][WARNING] stdout: Volume group "ceph-df3d495c-1c7a-4717-80b0-8af7e1e6cd5e" successfully created +[2020-03-03 21:02:19,577][ebin01][WARNING] Running command: /sbin/lvcreate --yes -l 100%FREE -n osd-block-9afbd9a8-8bf7-4357-a6b0-4be056f5cdab ceph-df3d495c-1c7a-4717-80b0-8af7e1e6cd5e +[2020-03-03 21:02:19,577][ebin01][WARNING] stdout: Logical volume "osd-block-9afbd9a8-8bf7-4357-a6b0-4be056f5cdab" created. +[2020-03-03 21:02:19,578][ebin01][WARNING] Running command: /usr/bin/ceph-authtool --gen-print-key +[2020-03-03 21:02:19,578][ebin01][WARNING] Running command: /bin/mount -t tmpfs tmpfs /var/lib/ceph/osd/ceph-1 +[2020-03-03 21:02:19,578][ebin01][WARNING] --> Absolute path not found for executable: selinuxenabled +[2020-03-03 21:02:19,579][ebin01][WARNING] --> Ensure $PATH environment variable contains common executable locations +[2020-03-03 21:02:19,579][ebin01][WARNING] Running command: /bin/chown -h ceph:ceph /dev/ceph-df3d495c-1c7a-4717-80b0-8af7e1e6cd5e/osd-block-9afbd9a8-8bf7-4357-a6b0-4be056f5cdab +[2020-03-03 21:02:19,579][ebin01][WARNING] Running command: /bin/chown -R ceph:ceph /dev/dm-1 +[2020-03-03 21:02:19,579][ebin01][WARNING] Running command: /bin/ln -s /dev/ceph-df3d495c-1c7a-4717-80b0-8af7e1e6cd5e/osd-block-9afbd9a8-8bf7-4357-a6b0-4be056f5cdab /var/lib/ceph/osd/ceph-1/block +[2020-03-03 21:02:19,579][ebin01][WARNING] Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring mon getmap -o /var/lib/ceph/osd/ceph-1/activate.monmap +[2020-03-03 21:02:19,580][ebin01][WARNING] stderr: 2020-03-03 21:02:10.675 7f80e691e0 -1 auth: unable to find a keyring on /etc/ceph/ceph.client.bootstrap-osd.keyring,/etc/ceph/ceph.keyring,/etc/ceph/keyring,/etc/ceph/keyring.bin,: (2) No such file or directory +[2020-03-03 21:02:19,580][ebin01][WARNING] 2020-03-03 21:02:10.675 7f80e691e0 -1 AuthRegistry(0x7f7c0814b8) no keyring found at /etc/ceph/ceph.client.bootstrap-osd.keyring,/etc/ceph/ceph.keyring,/etc/ceph/keyring,/etc/ceph/keyring.bin,, disabling cephx +[2020-03-03 21:02:19,580][ebin01][WARNING] stderr: got monmap epoch 1 +[2020-03-03 21:02:19,581][ebin01][WARNING] Running command: /usr/bin/ceph-authtool /var/lib/ceph/osd/ceph-1/keyring --create-keyring --name osd.1 --add-key AQC5t15ez86/DhAA0HFc9ug+CBI3Su1TcgHLNg== +[2020-03-03 21:02:19,581][ebin01][WARNING] stdout: creating /var/lib/ceph/osd/ceph-1/keyring +[2020-03-03 21:02:19,581][ebin01][WARNING] added entity osd.1 auth(key=AQC5t15ez86/DhAA0HFc9ug+CBI3Su1TcgHLNg==) +[2020-03-03 21:02:19,581][ebin01][WARNING] Running command: /bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-1/keyring +[2020-03-03 21:02:19,581][ebin01][WARNING] Running command: /bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-1/ +[2020-03-03 21:02:19,582][ebin01][WARNING] Running command: /usr/bin/ceph-osd --cluster ceph --osd-objectstore bluestore --mkfs -i 1 --monmap /var/lib/ceph/osd/ceph-1/activate.monmap --keyfile - --osd-data /var/lib/ceph/osd/ceph-1/ --osd-uuid 9afbd9a8-8bf7-4357-a6b0-4be056f5cdab --setuser ceph --setgroup ceph +[2020-03-03 21:02:19,585][ebin01][WARNING] --> ceph-volume lvm prepare successful for: /dev/sdb +[2020-03-03 21:02:19,585][ebin01][WARNING] Running command: /bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-1 +[2020-03-03 21:02:19,587][ebin01][WARNING] Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-df3d495c-1c7a-4717-80b0-8af7e1e6cd5e/osd-block-9afbd9a8-8bf7-4357-a6b0-4be056f5cdab --path 
/var/lib/ceph/osd/ceph-1 --no-mon-config +[2020-03-03 21:02:19,591][ebin01][WARNING] Running command: /bin/ln -snf /dev/ceph-df3d495c-1c7a-4717-80b0-8af7e1e6cd5e/osd-block-9afbd9a8-8bf7-4357-a6b0-4be056f5cdab /var/lib/ceph/osd/ceph-1/block +[2020-03-03 21:02:19,593][ebin01][WARNING] Running command: /bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-1/block +[2020-03-03 21:02:19,593][ebin01][WARNING] Running command: /bin/chown -R ceph:ceph /dev/dm-1 +[2020-03-03 21:02:19,593][ebin01][WARNING] Running command: /bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-1 +[2020-03-03 21:02:19,597][ebin01][WARNING] Running command: /bin/systemctl enable ceph-volume@lvm-1-9afbd9a8-8bf7-4357-a6b0-4be056f5cdab +[2020-03-03 21:02:19,597][ebin01][WARNING] stderr: Created symlink /etc/systemd/system/multi-user.target.wants/ceph-volume@lvm-1-9afbd9a8-8bf7-4357-a6b0-4be056f5cdab.service → /lib/systemd/system/ceph-volume@.service. +[2020-03-03 21:02:19,605][ebin01][WARNING] Running command: /bin/systemctl enable --runtime ceph-osd@1 +[2020-03-03 21:02:19,621][ebin01][WARNING] stderr: Created symlink /run/systemd/system/ceph-osd.target.wants/ceph-osd@1.service → /lib/systemd/system/ceph-osd@.service. +[2020-03-03 21:02:19,622][ebin01][WARNING] Running command: /bin/systemctl start ceph-osd@1 +[2020-03-03 21:02:19,622][ebin01][WARNING] --> ceph-volume lvm activate successful for osd ID: 1 +[2020-03-03 21:02:19,622][ebin01][WARNING] --> ceph-volume lvm create successful for: /dev/sdb +[2020-03-03 21:02:24,792][ebin01][INFO ] checking OSD status... +[2020-03-03 21:02:24,792][ebin01][DEBUG ] find the location of an executable +[2020-03-03 21:02:24,807][ebin01][INFO ] Running command: /usr/bin/ceph --cluster=ceph osd stat --format=json +[2020-03-03 21:02:25,924][ceph_deploy.osd][DEBUG ] Host ebin01 is now ready for osd use. 
+[2020-03-03 21:02:31,005][ceph_deploy.conf][DEBUG ] found configuration file at: /home/do/.cephdeploy.conf +[2020-03-03 21:02:31,005][ceph_deploy.cli][INFO ] Invoked (2.0.1): /usr/bin/ceph-deploy --username root osd create --data /dev/sda ebin02 +[2020-03-03 21:02:31,006][ceph_deploy.cli][INFO ] ceph-deploy options: +[2020-03-03 21:02:31,006][ceph_deploy.cli][INFO ] verbose : False +[2020-03-03 21:02:31,006][ceph_deploy.cli][INFO ] bluestore : None +[2020-03-03 21:02:31,006][ceph_deploy.cli][INFO ] cd_conf : +[2020-03-03 21:02:31,006][ceph_deploy.cli][INFO ] cluster : ceph +[2020-03-03 21:02:31,006][ceph_deploy.cli][INFO ] fs_type : xfs +[2020-03-03 21:02:31,006][ceph_deploy.cli][INFO ] block_wal : None +[2020-03-03 21:02:31,006][ceph_deploy.cli][INFO ] default_release : False +[2020-03-03 21:02:31,006][ceph_deploy.cli][INFO ] username : root +[2020-03-03 21:02:31,006][ceph_deploy.cli][INFO ] journal : None +[2020-03-03 21:02:31,006][ceph_deploy.cli][INFO ] subcommand : create +[2020-03-03 21:02:31,006][ceph_deploy.cli][INFO ] host : ebin02 +[2020-03-03 21:02:31,006][ceph_deploy.cli][INFO ] filestore : None +[2020-03-03 21:02:31,006][ceph_deploy.cli][INFO ] func : +[2020-03-03 21:02:31,006][ceph_deploy.cli][INFO ] ceph_conf : None +[2020-03-03 21:02:31,006][ceph_deploy.cli][INFO ] zap_disk : False +[2020-03-03 21:02:31,006][ceph_deploy.cli][INFO ] data : /dev/sda +[2020-03-03 21:02:31,006][ceph_deploy.cli][INFO ] block_db : None +[2020-03-03 21:02:31,007][ceph_deploy.cli][INFO ] dmcrypt : False +[2020-03-03 21:02:31,007][ceph_deploy.cli][INFO ] overwrite_conf : False +[2020-03-03 21:02:31,007][ceph_deploy.cli][INFO ] dmcrypt_key_dir : /etc/ceph/dmcrypt-keys +[2020-03-03 21:02:31,007][ceph_deploy.cli][INFO ] quiet : False +[2020-03-03 21:02:31,007][ceph_deploy.cli][INFO ] debug : False +[2020-03-03 21:02:31,007][ceph_deploy.osd][DEBUG ] Creating OSD on cluster ceph with data device /dev/sda +[2020-03-03 21:02:33,374][ebin02][DEBUG ] connected to host: root@ebin02 +[2020-03-03 21:02:33,374][ebin02][DEBUG ] detect platform information from remote host +[2020-03-03 21:02:33,587][ebin02][DEBUG ] detect machine type +[2020-03-03 21:02:33,618][ebin02][DEBUG ] find the location of an executable +[2020-03-03 21:02:33,624][ceph_deploy.osd][INFO ] Distro info: debian 10.3 buster +[2020-03-03 21:02:33,625][ceph_deploy.osd][DEBUG ] Deploying osd to ebin02 +[2020-03-03 21:02:33,625][ebin02][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf +[2020-03-03 21:02:33,652][ebin02][WARNING] osd keyring does not exist yet, creating one +[2020-03-03 21:02:33,652][ebin02][DEBUG ] create a keyring file +[2020-03-03 21:02:33,669][ebin02][DEBUG ] find the location of an executable +[2020-03-03 21:02:33,686][ebin02][INFO ] Running command: /usr/sbin/ceph-volume --cluster ceph lvm create --bluestore --data /dev/sda +[2020-03-03 21:03:02,359][ebin02][WARNING] Running command: /usr/bin/ceph-authtool --gen-print-key +[2020-03-03 21:03:02,359][ebin02][WARNING] Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring -i - osd new 61771fa8-6d0f-48ec-a03b-6bf891b8eac3 +[2020-03-03 21:03:02,359][ebin02][WARNING] Running command: /sbin/vgcreate -s 1G --force --yes ceph-4be08c0e-6cb3-4a39-a6fe-7de4eab46f44 /dev/sda +[2020-03-03 21:03:02,359][ebin02][WARNING] stdout: Physical volume "/dev/sda" successfully created. 
+[2020-03-03 21:03:02,359][ebin02][WARNING] stdout: Volume group "ceph-4be08c0e-6cb3-4a39-a6fe-7de4eab46f44" successfully created +[2020-03-03 21:03:02,423][ebin02][WARNING] Running command: /sbin/lvcreate --yes -l 100%FREE -n osd-block-61771fa8-6d0f-48ec-a03b-6bf891b8eac3 ceph-4be08c0e-6cb3-4a39-a6fe-7de4eab46f44 +[2020-03-03 21:03:02,423][ebin02][WARNING] stdout: Logical volume "osd-block-61771fa8-6d0f-48ec-a03b-6bf891b8eac3" created. +[2020-03-03 21:03:02,423][ebin02][WARNING] Running command: /usr/bin/ceph-authtool --gen-print-key +[2020-03-03 21:03:02,423][ebin02][WARNING] Running command: /bin/mount -t tmpfs tmpfs /var/lib/ceph/osd/ceph-2 +[2020-03-03 21:03:02,423][ebin02][WARNING] --> Absolute path not found for executable: selinuxenabled +[2020-03-03 21:03:02,423][ebin02][WARNING] --> Ensure $PATH environment variable contains common executable locations +[2020-03-03 21:03:02,423][ebin02][WARNING] Running command: /bin/chown -h ceph:ceph /dev/ceph-4be08c0e-6cb3-4a39-a6fe-7de4eab46f44/osd-block-61771fa8-6d0f-48ec-a03b-6bf891b8eac3 +[2020-03-03 21:03:02,423][ebin02][WARNING] Running command: /bin/chown -R ceph:ceph /dev/dm-0 +[2020-03-03 21:03:02,423][ebin02][WARNING] Running command: /bin/ln -s /dev/ceph-4be08c0e-6cb3-4a39-a6fe-7de4eab46f44/osd-block-61771fa8-6d0f-48ec-a03b-6bf891b8eac3 /var/lib/ceph/osd/ceph-2/block +[2020-03-03 21:03:02,423][ebin02][WARNING] Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring mon getmap -o /var/lib/ceph/osd/ceph-2/activate.monmap +[2020-03-03 21:03:02,423][ebin02][WARNING] stderr: 2020-03-03 21:02:45.230 7fa24141e0 -1 auth: unable to find a keyring on /etc/ceph/ceph.client.bootstrap-osd.keyring,/etc/ceph/ceph.keyring,/etc/ceph/keyring,/etc/ceph/keyring.bin,: (2) No such file or directory +[2020-03-03 21:03:02,423][ebin02][WARNING] 2020-03-03 21:02:45.230 7fa24141e0 -1 AuthRegistry(0x7f9c0814b8) no keyring found at /etc/ceph/ceph.client.bootstrap-osd.keyring,/etc/ceph/ceph.keyring,/etc/ceph/keyring,/etc/ceph/keyring.bin,, disabling cephx +[2020-03-03 21:03:02,423][ebin02][WARNING] stderr: got monmap epoch 1 +[2020-03-03 21:03:02,424][ebin02][WARNING] Running command: /usr/bin/ceph-authtool /var/lib/ceph/osd/ceph-2/keyring --create-keyring --name osd.2 --add-key AQDct15eaGUICxAAFnRSbfeV7uCY91xN8+qhbA== +[2020-03-03 21:03:02,424][ebin02][WARNING] stdout: creating /var/lib/ceph/osd/ceph-2/keyring +[2020-03-03 21:03:02,424][ebin02][WARNING] added entity osd.2 auth(key=AQDct15eaGUICxAAFnRSbfeV7uCY91xN8+qhbA==) +[2020-03-03 21:03:02,424][ebin02][WARNING] Running command: /bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-2/keyring +[2020-03-03 21:03:02,424][ebin02][WARNING] Running command: /bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-2/ +[2020-03-03 21:03:02,424][ebin02][WARNING] Running command: /usr/bin/ceph-osd --cluster ceph --osd-objectstore bluestore --mkfs -i 2 --monmap /var/lib/ceph/osd/ceph-2/activate.monmap --keyfile - --osd-data /var/lib/ceph/osd/ceph-2/ --osd-uuid 61771fa8-6d0f-48ec-a03b-6bf891b8eac3 --setuser ceph --setgroup ceph +[2020-03-03 21:03:02,424][ebin02][WARNING] --> ceph-volume lvm prepare successful for: /dev/sda +[2020-03-03 21:03:02,424][ebin02][WARNING] Running command: /bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-2 +[2020-03-03 21:03:02,424][ebin02][WARNING] Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-4be08c0e-6cb3-4a39-a6fe-7de4eab46f44/osd-block-61771fa8-6d0f-48ec-a03b-6bf891b8eac3 --path 
/var/lib/ceph/osd/ceph-2 --no-mon-config +[2020-03-03 21:03:02,424][ebin02][WARNING] Running command: /bin/ln -snf /dev/ceph-4be08c0e-6cb3-4a39-a6fe-7de4eab46f44/osd-block-61771fa8-6d0f-48ec-a03b-6bf891b8eac3 /var/lib/ceph/osd/ceph-2/block +[2020-03-03 21:03:02,424][ebin02][WARNING] Running command: /bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-2/block +[2020-03-03 21:03:02,424][ebin02][WARNING] Running command: /bin/chown -R ceph:ceph /dev/dm-0 +[2020-03-03 21:03:02,424][ebin02][WARNING] Running command: /bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-2 +[2020-03-03 21:03:02,424][ebin02][WARNING] Running command: /bin/systemctl enable ceph-volume@lvm-2-61771fa8-6d0f-48ec-a03b-6bf891b8eac3 +[2020-03-03 21:03:02,424][ebin02][WARNING] stderr: Created symlink /etc/systemd/system/multi-user.target.wants/ceph-volume@lvm-2-61771fa8-6d0f-48ec-a03b-6bf891b8eac3.service → /lib/systemd/system/ceph-volume@.service. +[2020-03-03 21:03:02,424][ebin02][WARNING] Running command: /bin/systemctl enable --runtime ceph-osd@2 +[2020-03-03 21:03:02,424][ebin02][WARNING] stderr: Created symlink /run/systemd/system/ceph-osd.target.wants/ceph-osd@2.service → /lib/systemd/system/ceph-osd@.service. +[2020-03-03 21:03:02,425][ebin02][WARNING] Running command: /bin/systemctl start ceph-osd@2 +[2020-03-03 21:03:02,425][ebin02][WARNING] --> ceph-volume lvm activate successful for osd ID: 2 +[2020-03-03 21:03:02,425][ebin02][WARNING] --> ceph-volume lvm create successful for: /dev/sda +[2020-03-03 21:03:07,428][ebin02][INFO ] checking OSD status... +[2020-03-03 21:03:07,428][ebin02][DEBUG ] find the location of an executable +[2020-03-03 21:03:07,476][ebin02][INFO ] Running command: /usr/bin/ceph --cluster=ceph osd stat --format=json +[2020-03-03 21:03:08,492][ceph_deploy.osd][DEBUG ] Host ebin02 is now ready for osd use. 
+[2020-03-03 21:03:12,755][ceph_deploy.conf][DEBUG ] found configuration file at: /home/do/.cephdeploy.conf +[2020-03-03 21:03:12,756][ceph_deploy.cli][INFO ] Invoked (2.0.1): /usr/bin/ceph-deploy --username root osd create --data /dev/sdb ebin02 +[2020-03-03 21:03:12,756][ceph_deploy.cli][INFO ] ceph-deploy options: +[2020-03-03 21:03:12,756][ceph_deploy.cli][INFO ] verbose : False +[2020-03-03 21:03:12,756][ceph_deploy.cli][INFO ] bluestore : None +[2020-03-03 21:03:12,756][ceph_deploy.cli][INFO ] cd_conf : +[2020-03-03 21:03:12,756][ceph_deploy.cli][INFO ] cluster : ceph +[2020-03-03 21:03:12,756][ceph_deploy.cli][INFO ] fs_type : xfs +[2020-03-03 21:03:12,756][ceph_deploy.cli][INFO ] block_wal : None +[2020-03-03 21:03:12,756][ceph_deploy.cli][INFO ] default_release : False +[2020-03-03 21:03:12,756][ceph_deploy.cli][INFO ] username : root +[2020-03-03 21:03:12,756][ceph_deploy.cli][INFO ] journal : None +[2020-03-03 21:03:12,756][ceph_deploy.cli][INFO ] subcommand : create +[2020-03-03 21:03:12,756][ceph_deploy.cli][INFO ] host : ebin02 +[2020-03-03 21:03:12,756][ceph_deploy.cli][INFO ] filestore : None +[2020-03-03 21:03:12,756][ceph_deploy.cli][INFO ] func : +[2020-03-03 21:03:12,756][ceph_deploy.cli][INFO ] ceph_conf : None +[2020-03-03 21:03:12,756][ceph_deploy.cli][INFO ] zap_disk : False +[2020-03-03 21:03:12,756][ceph_deploy.cli][INFO ] data : /dev/sdb +[2020-03-03 21:03:12,757][ceph_deploy.cli][INFO ] block_db : None +[2020-03-03 21:03:12,757][ceph_deploy.cli][INFO ] dmcrypt : False +[2020-03-03 21:03:12,757][ceph_deploy.cli][INFO ] overwrite_conf : False +[2020-03-03 21:03:12,757][ceph_deploy.cli][INFO ] dmcrypt_key_dir : /etc/ceph/dmcrypt-keys +[2020-03-03 21:03:12,757][ceph_deploy.cli][INFO ] quiet : False +[2020-03-03 21:03:12,757][ceph_deploy.cli][INFO ] debug : False +[2020-03-03 21:03:12,757][ceph_deploy.osd][DEBUG ] Creating OSD on cluster ceph with data device /dev/sdb +[2020-03-03 21:03:17,258][ebin02][DEBUG ] connected to host: root@ebin02 +[2020-03-03 21:03:17,259][ebin02][DEBUG ] detect platform information from remote host +[2020-03-03 21:03:17,421][ebin02][DEBUG ] detect machine type +[2020-03-03 21:03:17,449][ebin02][DEBUG ] find the location of an executable +[2020-03-03 21:03:17,456][ceph_deploy.osd][INFO ] Distro info: debian 10.3 buster +[2020-03-03 21:03:17,456][ceph_deploy.osd][DEBUG ] Deploying osd to ebin02 +[2020-03-03 21:03:17,456][ebin02][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf +[2020-03-03 21:03:17,476][ebin02][DEBUG ] find the location of an executable +[2020-03-03 21:03:17,494][ebin02][INFO ] Running command: /usr/sbin/ceph-volume --cluster ceph lvm create --bluestore --data /dev/sdb +[2020-03-03 21:03:39,202][ebin02][WARNING] Running command: /usr/bin/ceph-authtool --gen-print-key +[2020-03-03 21:03:39,202][ebin02][WARNING] Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring -i - osd new 45215366-fe82-4438-ae61-0644cfa457c3 +[2020-03-03 21:03:39,202][ebin02][WARNING] Running command: /sbin/vgcreate -s 1G --force --yes ceph-0e6f6cfe-1fe2-46e2-9adf-982e50384074 /dev/sdb +[2020-03-03 21:03:39,202][ebin02][WARNING] stdout: Physical volume "/dev/sdb" successfully created. 
+[2020-03-03 21:03:39,202][ebin02][WARNING] stdout: Volume group "ceph-0e6f6cfe-1fe2-46e2-9adf-982e50384074" successfully created +[2020-03-03 21:03:39,203][ebin02][WARNING] Running command: /sbin/lvcreate --yes -l 100%FREE -n osd-block-45215366-fe82-4438-ae61-0644cfa457c3 ceph-0e6f6cfe-1fe2-46e2-9adf-982e50384074 +[2020-03-03 21:03:39,203][ebin02][WARNING] stdout: Logical volume "osd-block-45215366-fe82-4438-ae61-0644cfa457c3" created. +[2020-03-03 21:03:39,203][ebin02][WARNING] Running command: /usr/bin/ceph-authtool --gen-print-key +[2020-03-03 21:03:39,203][ebin02][WARNING] Running command: /bin/mount -t tmpfs tmpfs /var/lib/ceph/osd/ceph-3 +[2020-03-03 21:03:39,203][ebin02][WARNING] --> Absolute path not found for executable: selinuxenabled +[2020-03-03 21:03:39,210][ebin02][WARNING] --> Ensure $PATH environment variable contains common executable locations +[2020-03-03 21:03:39,210][ebin02][WARNING] Running command: /bin/chown -h ceph:ceph /dev/ceph-0e6f6cfe-1fe2-46e2-9adf-982e50384074/osd-block-45215366-fe82-4438-ae61-0644cfa457c3 +[2020-03-03 21:03:39,213][ebin02][WARNING] Running command: /bin/chown -R ceph:ceph /dev/dm-1 +[2020-03-03 21:03:39,214][ebin02][WARNING] Running command: /bin/ln -s /dev/ceph-0e6f6cfe-1fe2-46e2-9adf-982e50384074/osd-block-45215366-fe82-4438-ae61-0644cfa457c3 /var/lib/ceph/osd/ceph-3/block +[2020-03-03 21:03:39,217][ebin02][WARNING] Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring mon getmap -o /var/lib/ceph/osd/ceph-3/activate.monmap +[2020-03-03 21:03:39,232][ebin02][WARNING] stderr: 2020-03-03 21:03:30.031 7fb233a1e0 -1 auth: unable to find a keyring on /etc/ceph/ceph.client.bootstrap-osd.keyring,/etc/ceph/ceph.keyring,/etc/ceph/keyring,/etc/ceph/keyring.bin,: (2) No such file or directory +[2020-03-03 21:03:39,240][ebin02][WARNING] 2020-03-03 21:03:30.031 7fb233a1e0 -1 AuthRegistry(0x7fac0814b8) no keyring found at /etc/ceph/ceph.client.bootstrap-osd.keyring,/etc/ceph/ceph.keyring,/etc/ceph/keyring,/etc/ceph/keyring.bin,, disabling cephx +[2020-03-03 21:03:39,240][ebin02][WARNING] stderr: got monmap epoch 1 +[2020-03-03 21:03:39,241][ebin02][WARNING] Running command: /usr/bin/ceph-authtool /var/lib/ceph/osd/ceph-3/keyring --create-keyring --name osd.3 --add-key AQAHuF5eltB9OxAAgUzbsJRUPnWIZWS+yMHIaQ== +[2020-03-03 21:03:39,241][ebin02][WARNING] stdout: creating /var/lib/ceph/osd/ceph-3/keyring +[2020-03-03 21:03:39,244][ebin02][WARNING] added entity osd.3 auth(key=AQAHuF5eltB9OxAAgUzbsJRUPnWIZWS+yMHIaQ==) +[2020-03-03 21:03:39,244][ebin02][WARNING] Running command: /bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-3/keyring +[2020-03-03 21:03:39,248][ebin02][WARNING] Running command: /bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-3/ +[2020-03-03 21:03:39,249][ebin02][WARNING] Running command: /usr/bin/ceph-osd --cluster ceph --osd-objectstore bluestore --mkfs -i 3 --monmap /var/lib/ceph/osd/ceph-3/activate.monmap --keyfile - --osd-data /var/lib/ceph/osd/ceph-3/ --osd-uuid 45215366-fe82-4438-ae61-0644cfa457c3 --setuser ceph --setgroup ceph +[2020-03-03 21:03:39,252][ebin02][WARNING] --> ceph-volume lvm prepare successful for: /dev/sdb +[2020-03-03 21:03:39,255][ebin02][WARNING] Running command: /bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-3 +[2020-03-03 21:03:39,255][ebin02][WARNING] Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-0e6f6cfe-1fe2-46e2-9adf-982e50384074/osd-block-45215366-fe82-4438-ae61-0644cfa457c3 --path 
/var/lib/ceph/osd/ceph-3 --no-mon-config +[2020-03-03 21:03:39,263][ebin02][WARNING] Running command: /bin/ln -snf /dev/ceph-0e6f6cfe-1fe2-46e2-9adf-982e50384074/osd-block-45215366-fe82-4438-ae61-0644cfa457c3 /var/lib/ceph/osd/ceph-3/block +[2020-03-03 21:03:39,264][ebin02][WARNING] Running command: /bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-3/block +[2020-03-03 21:03:39,264][ebin02][WARNING] Running command: /bin/chown -R ceph:ceph /dev/dm-1 +[2020-03-03 21:03:39,264][ebin02][WARNING] Running command: /bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-3 +[2020-03-03 21:03:39,272][ebin02][WARNING] Running command: /bin/systemctl enable ceph-volume@lvm-3-45215366-fe82-4438-ae61-0644cfa457c3 +[2020-03-03 21:03:39,272][ebin02][WARNING] stderr: Created symlink /etc/systemd/system/multi-user.target.wants/ceph-volume@lvm-3-45215366-fe82-4438-ae61-0644cfa457c3.service → /lib/systemd/system/ceph-volume@.service. +[2020-03-03 21:03:39,273][ebin02][WARNING] Running command: /bin/systemctl enable --runtime ceph-osd@3 +[2020-03-03 21:03:39,276][ebin02][WARNING] stderr: Created symlink /run/systemd/system/ceph-osd.target.wants/ceph-osd@3.service → /lib/systemd/system/ceph-osd@.service. +[2020-03-03 21:03:39,279][ebin02][WARNING] Running command: /bin/systemctl start ceph-osd@3 +[2020-03-03 21:03:39,279][ebin02][WARNING] --> ceph-volume lvm activate successful for osd ID: 3 +[2020-03-03 21:03:39,283][ebin02][WARNING] --> ceph-volume lvm create successful for: /dev/sdb +[2020-03-03 21:03:44,448][ebin02][INFO ] checking OSD status... +[2020-03-03 21:03:44,448][ebin02][DEBUG ] find the location of an executable +[2020-03-03 21:03:45,369][ebin02][INFO ] Running command: /usr/bin/ceph --cluster=ceph osd stat --format=json +[2020-03-03 21:03:47,037][ceph_deploy.osd][DEBUG ] Host ebin02 is now ready for osd use. 
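The second ceph-volume run above raises the same two warnings as the first (no selinuxenabled binary found in $PATH, and "no keyring found ... disabling cephx" while fetching the monmap); both appear non-fatal here, since the call explicitly passes the bootstrap-osd keyring, reports "got monmap epoch 1", and the run ends with "lvm create successful". To confirm osd.3 really came up one could inspect the units and LVM metadata the log says were created (a sketch; run on ebin02):

    systemctl is-enabled ceph-osd@3 ceph-volume@lvm-3-45215366-fe82-4438-ae61-0644cfa457c3
    systemctl status ceph-osd@3 --no-pager
    ceph-volume lvm list /dev/sdb    # shows the osd-block LV and the OSD id bound to this device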
+[2020-03-03 21:09:46,171][ceph_deploy.conf][DEBUG ] found configuration file at: /home/do/.cephdeploy.conf +[2020-03-03 21:09:46,172][ceph_deploy.cli][INFO ] Invoked (2.0.1): /usr/bin/ceph-deploy --username root config push riot01 pine01 pine02 ebin01 ebin02 +[2020-03-03 21:09:46,172][ceph_deploy.cli][INFO ] ceph-deploy options: +[2020-03-03 21:09:46,172][ceph_deploy.cli][INFO ] username : root +[2020-03-03 21:09:46,172][ceph_deploy.cli][INFO ] verbose : False +[2020-03-03 21:09:46,172][ceph_deploy.cli][INFO ] overwrite_conf : False +[2020-03-03 21:09:46,172][ceph_deploy.cli][INFO ] subcommand : push +[2020-03-03 21:09:46,172][ceph_deploy.cli][INFO ] quiet : False +[2020-03-03 21:09:46,172][ceph_deploy.cli][INFO ] cd_conf : +[2020-03-03 21:09:46,172][ceph_deploy.cli][INFO ] cluster : ceph +[2020-03-03 21:09:46,172][ceph_deploy.cli][INFO ] client : ['riot01', 'pine01', 'pine02', 'ebin01', 'ebin02'] +[2020-03-03 21:09:46,172][ceph_deploy.cli][INFO ] func : +[2020-03-03 21:09:46,172][ceph_deploy.cli][INFO ] ceph_conf : None +[2020-03-03 21:09:46,172][ceph_deploy.cli][INFO ] default_release : False +[2020-03-03 21:09:46,172][ceph_deploy.config][DEBUG ] Pushing config to riot01 +[2020-03-03 21:09:47,710][riot01][DEBUG ] connected to host: root@riot01 +[2020-03-03 21:09:47,712][riot01][DEBUG ] detect platform information from remote host +[2020-03-03 21:09:47,881][riot01][DEBUG ] detect machine type +[2020-03-03 21:09:47,919][riot01][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf +[2020-03-03 21:09:47,938][ceph_deploy.config][ERROR ] RuntimeError: config file /etc/ceph/ceph.conf exists with different content; use --overwrite-conf to overwrite +[2020-03-03 21:09:47,939][ceph_deploy.config][DEBUG ] Pushing config to pine01 +[2020-03-03 21:09:49,664][pine01][DEBUG ] connected to host: root@pine01 +[2020-03-03 21:09:49,666][pine01][DEBUG ] detect platform information from remote host +[2020-03-03 21:09:49,819][pine01][DEBUG ] detect machine type +[2020-03-03 21:09:49,843][pine01][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf +[2020-03-03 21:09:49,855][ceph_deploy.config][ERROR ] RuntimeError: config file /etc/ceph/ceph.conf exists with different content; use --overwrite-conf to overwrite +[2020-03-03 21:09:49,856][ceph_deploy.config][DEBUG ] Pushing config to pine02 +[2020-03-03 21:09:51,466][pine02][DEBUG ] connected to host: root@pine02 +[2020-03-03 21:09:51,468][pine02][DEBUG ] detect platform information from remote host +[2020-03-03 21:09:51,588][pine02][DEBUG ] detect machine type +[2020-03-03 21:09:51,612][pine02][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf +[2020-03-03 21:09:51,624][ceph_deploy.config][ERROR ] RuntimeError: config file /etc/ceph/ceph.conf exists with different content; use --overwrite-conf to overwrite +[2020-03-03 21:09:51,624][ceph_deploy.config][DEBUG ] Pushing config to ebin01 +[2020-03-03 21:09:53,661][ebin01][DEBUG ] connected to host: root@ebin01 +[2020-03-03 21:09:53,663][ebin01][DEBUG ] detect platform information from remote host +[2020-03-03 21:09:53,821][ebin01][DEBUG ] detect machine type +[2020-03-03 21:09:53,854][ebin01][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf +[2020-03-03 21:09:53,869][ceph_deploy.config][ERROR ] RuntimeError: config file /etc/ceph/ceph.conf exists with different content; use --overwrite-conf to overwrite +[2020-03-03 21:09:53,870][ceph_deploy.config][DEBUG ] Pushing config to ebin02 +[2020-03-03 21:09:56,045][ebin02][DEBUG ] connected to host: root@ebin02 
+[2020-03-03 21:09:56,047][ebin02][DEBUG ] detect platform information from remote host +[2020-03-03 21:09:56,207][ebin02][DEBUG ] detect machine type +[2020-03-03 21:09:56,239][ebin02][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf +[2020-03-03 21:09:56,260][ceph_deploy.config][ERROR ] RuntimeError: config file /etc/ceph/ceph.conf exists with different content; use --overwrite-conf to overwrite +[2020-03-03 21:09:56,261][ceph_deploy][ERROR ] GenericError: Failed to config 5 hosts + +[2020-03-03 21:10:05,807][ceph_deploy.conf][DEBUG ] found configuration file at: /home/do/.cephdeploy.conf +[2020-03-03 21:10:05,807][ceph_deploy.cli][INFO ] Invoked (2.0.1): /usr/bin/ceph-deploy --username root --overwrite-conf config push riot01 pine01 pine02 ebin01 ebin02 +[2020-03-03 21:10:05,807][ceph_deploy.cli][INFO ] ceph-deploy options: +[2020-03-03 21:10:05,807][ceph_deploy.cli][INFO ] username : root +[2020-03-03 21:10:05,807][ceph_deploy.cli][INFO ] verbose : False +[2020-03-03 21:10:05,807][ceph_deploy.cli][INFO ] overwrite_conf : True +[2020-03-03 21:10:05,807][ceph_deploy.cli][INFO ] subcommand : push +[2020-03-03 21:10:05,807][ceph_deploy.cli][INFO ] quiet : False +[2020-03-03 21:10:05,807][ceph_deploy.cli][INFO ] cd_conf : +[2020-03-03 21:10:05,807][ceph_deploy.cli][INFO ] cluster : ceph +[2020-03-03 21:10:05,807][ceph_deploy.cli][INFO ] client : ['riot01', 'pine01', 'pine02', 'ebin01', 'ebin02'] +[2020-03-03 21:10:05,808][ceph_deploy.cli][INFO ] func : +[2020-03-03 21:10:05,808][ceph_deploy.cli][INFO ] ceph_conf : None +[2020-03-03 21:10:05,808][ceph_deploy.cli][INFO ] default_release : False +[2020-03-03 21:10:05,808][ceph_deploy.config][DEBUG ] Pushing config to riot01 +[2020-03-03 21:10:07,333][riot01][DEBUG ] connected to host: root@riot01 +[2020-03-03 21:10:07,335][riot01][DEBUG ] detect platform information from remote host +[2020-03-03 21:10:07,504][riot01][DEBUG ] detect machine type +[2020-03-03 21:10:07,545][riot01][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf +[2020-03-03 21:10:07,567][ceph_deploy.config][DEBUG ] Pushing config to pine01 +[2020-03-03 21:10:09,292][pine01][DEBUG ] connected to host: root@pine01 +[2020-03-03 21:10:09,294][pine01][DEBUG ] detect platform information from remote host +[2020-03-03 21:10:09,438][pine01][DEBUG ] detect machine type +[2020-03-03 21:10:09,462][pine01][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf +[2020-03-03 21:10:09,473][ceph_deploy.config][DEBUG ] Pushing config to pine02 +[2020-03-03 21:10:11,090][pine02][DEBUG ] connected to host: root@pine02 +[2020-03-03 21:10:11,092][pine02][DEBUG ] detect platform information from remote host +[2020-03-03 21:10:11,213][pine02][DEBUG ] detect machine type +[2020-03-03 21:10:11,239][pine02][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf +[2020-03-03 21:10:11,251][ceph_deploy.config][DEBUG ] Pushing config to ebin01 +[2020-03-03 21:10:13,468][ebin01][DEBUG ] connected to host: root@ebin01 +[2020-03-03 21:10:13,470][ebin01][DEBUG ] detect platform information from remote host +[2020-03-03 21:10:13,632][ebin01][DEBUG ] detect machine type +[2020-03-03 21:10:13,663][ebin01][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf +[2020-03-03 21:10:13,679][ceph_deploy.config][DEBUG ] Pushing config to ebin02 +[2020-03-03 21:10:15,896][ebin02][DEBUG ] connected to host: root@ebin02 +[2020-03-03 21:10:15,898][ebin02][DEBUG ] detect platform information from remote host +[2020-03-03 21:10:16,060][ebin02][DEBUG ] detect machine type 
+[2020-03-03 21:10:16,092][ebin02][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf +[2020-03-03 21:13:47,640][ceph_deploy.conf][DEBUG ] found configuration file at: /home/do/.cephdeploy.conf +[2020-03-03 21:13:47,641][ceph_deploy.cli][INFO ] Invoked (2.0.1): /usr/bin/ceph-deploy --username root --overwrite-conf config push riot01 pine01 pine02 ebin01 ebin02 +[2020-03-03 21:13:47,641][ceph_deploy.cli][INFO ] ceph-deploy options: +[2020-03-03 21:13:47,641][ceph_deploy.cli][INFO ] username : root +[2020-03-03 21:13:47,641][ceph_deploy.cli][INFO ] verbose : False +[2020-03-03 21:13:47,641][ceph_deploy.cli][INFO ] overwrite_conf : True +[2020-03-03 21:13:47,641][ceph_deploy.cli][INFO ] subcommand : push +[2020-03-03 21:13:47,641][ceph_deploy.cli][INFO ] quiet : False +[2020-03-03 21:13:47,641][ceph_deploy.cli][INFO ] cd_conf : +[2020-03-03 21:13:47,641][ceph_deploy.cli][INFO ] cluster : ceph +[2020-03-03 21:13:47,641][ceph_deploy.cli][INFO ] client : ['riot01', 'pine01', 'pine02', 'ebin01', 'ebin02'] +[2020-03-03 21:13:47,641][ceph_deploy.cli][INFO ] func : +[2020-03-03 21:13:47,641][ceph_deploy.cli][INFO ] ceph_conf : None +[2020-03-03 21:13:47,641][ceph_deploy.cli][INFO ] default_release : False +[2020-03-03 21:13:47,641][ceph_deploy.config][DEBUG ] Pushing config to riot01 +[2020-03-03 21:13:49,162][riot01][DEBUG ] connected to host: root@riot01 +[2020-03-03 21:13:49,164][riot01][DEBUG ] detect platform information from remote host +[2020-03-03 21:13:49,337][riot01][DEBUG ] detect machine type +[2020-03-03 21:13:49,382][riot01][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf +[2020-03-03 21:13:49,403][ceph_deploy.config][DEBUG ] Pushing config to pine01 +[2020-03-03 21:13:51,105][pine01][DEBUG ] connected to host: root@pine01 +[2020-03-03 21:13:51,107][pine01][DEBUG ] detect platform information from remote host +[2020-03-03 21:13:51,252][pine01][DEBUG ] detect machine type +[2020-03-03 21:13:51,277][pine01][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf +[2020-03-03 21:13:51,289][ceph_deploy.config][DEBUG ] Pushing config to pine02 +[2020-03-03 21:13:52,910][pine02][DEBUG ] connected to host: root@pine02 +[2020-03-03 21:13:52,912][pine02][DEBUG ] detect platform information from remote host +[2020-03-03 21:13:53,045][pine02][DEBUG ] detect machine type +[2020-03-03 21:13:53,075][pine02][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf +[2020-03-03 21:13:53,087][ceph_deploy.config][DEBUG ] Pushing config to ebin01 +[2020-03-03 21:13:55,242][ebin01][DEBUG ] connected to host: root@ebin01 +[2020-03-03 21:13:55,244][ebin01][DEBUG ] detect platform information from remote host +[2020-03-03 21:13:55,403][ebin01][DEBUG ] detect machine type +[2020-03-03 21:13:55,436][ebin01][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf +[2020-03-03 21:13:55,451][ceph_deploy.config][DEBUG ] Pushing config to ebin02 +[2020-03-03 21:13:57,594][ebin02][DEBUG ] connected to host: root@ebin02 +[2020-03-03 21:13:57,596][ebin02][DEBUG ] detect platform information from remote host +[2020-03-03 21:13:57,756][ebin02][DEBUG ] detect machine type +[2020-03-03 21:13:57,790][ebin02][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf +[2020-03-03 21:20:14,746][ceph_deploy.conf][DEBUG ] found configuration file at: /home/do/.cephdeploy.conf +[2020-03-03 21:20:14,746][ceph_deploy.cli][INFO ] Invoked (2.0.1): /usr/bin/ceph-deploy --username root --overwrite-conf config push riot01 pine01 pine02 ebin01 ebin02 +[2020-03-03 
21:20:14,747][ceph_deploy.cli][INFO ] ceph-deploy options: +[2020-03-03 21:20:14,747][ceph_deploy.cli][INFO ] username : root +[2020-03-03 21:20:14,747][ceph_deploy.cli][INFO ] verbose : False +[2020-03-03 21:20:14,747][ceph_deploy.cli][INFO ] overwrite_conf : True +[2020-03-03 21:20:14,747][ceph_deploy.cli][INFO ] subcommand : push +[2020-03-03 21:20:14,747][ceph_deploy.cli][INFO ] quiet : False +[2020-03-03 21:20:14,747][ceph_deploy.cli][INFO ] cd_conf : +[2020-03-03 21:20:14,747][ceph_deploy.cli][INFO ] cluster : ceph +[2020-03-03 21:20:14,747][ceph_deploy.cli][INFO ] client : ['riot01', 'pine01', 'pine02', 'ebin01', 'ebin02'] +[2020-03-03 21:20:14,747][ceph_deploy.cli][INFO ] func : +[2020-03-03 21:20:14,747][ceph_deploy.cli][INFO ] ceph_conf : None +[2020-03-03 21:20:14,747][ceph_deploy.cli][INFO ] default_release : False +[2020-03-03 21:20:14,747][ceph_deploy.config][DEBUG ] Pushing config to riot01 +[2020-03-03 21:20:16,271][riot01][DEBUG ] connected to host: root@riot01 +[2020-03-03 21:20:16,273][riot01][DEBUG ] detect platform information from remote host +[2020-03-03 21:20:16,442][riot01][DEBUG ] detect machine type +[2020-03-03 21:20:16,482][riot01][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf +[2020-03-03 21:20:16,503][ceph_deploy.config][DEBUG ] Pushing config to pine01 +[2020-03-03 21:20:18,237][pine01][DEBUG ] connected to host: root@pine01 +[2020-03-03 21:20:18,239][pine01][DEBUG ] detect platform information from remote host +[2020-03-03 21:20:18,395][pine01][DEBUG ] detect machine type +[2020-03-03 21:20:18,424][pine01][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf +[2020-03-03 21:20:18,436][ceph_deploy.config][DEBUG ] Pushing config to pine02 +[2020-03-03 21:20:20,597][pine02][DEBUG ] connected to host: root@pine02 +[2020-03-03 21:20:20,597][pine02][DEBUG ] detect platform information from remote host +[2020-03-03 21:20:20,760][pine02][DEBUG ] detect machine type +[2020-03-03 21:20:20,787][pine02][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf +[2020-03-03 21:20:20,800][ceph_deploy.config][DEBUG ] Pushing config to ebin01 +[2020-03-03 21:20:23,224][ebin01][DEBUG ] connected to host: root@ebin01 +[2020-03-03 21:20:23,225][ebin01][DEBUG ] detect platform information from remote host +[2020-03-03 21:20:23,539][ebin01][DEBUG ] detect machine type +[2020-03-03 21:20:23,585][ebin01][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf +[2020-03-03 21:20:23,988][ceph_deploy.config][DEBUG ] Pushing config to ebin02 +[2020-03-03 21:20:27,984][ebin02][DEBUG ] connected to host: root@ebin02 +[2020-03-03 21:20:27,986][ebin02][DEBUG ] detect platform information from remote host +[2020-03-03 21:20:28,333][ebin02][DEBUG ] detect machine type +[2020-03-03 21:20:28,379][ebin02][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf +[2020-03-03 21:27:12,191][ceph_deploy.conf][DEBUG ] found configuration file at: /home/do/.cephdeploy.conf +[2020-03-03 21:27:12,192][ceph_deploy.cli][INFO ] Invoked (2.0.1): /usr/bin/ceph-deploy --username root --overwrite-conf config push riot01 pine01 pine02 ebin01 ebin02 +[2020-03-03 21:27:12,192][ceph_deploy.cli][INFO ] ceph-deploy options: +[2020-03-03 21:27:12,192][ceph_deploy.cli][INFO ] username : root +[2020-03-03 21:27:12,192][ceph_deploy.cli][INFO ] verbose : False +[2020-03-03 21:27:12,192][ceph_deploy.cli][INFO ] overwrite_conf : True +[2020-03-03 21:27:12,192][ceph_deploy.cli][INFO ] subcommand : push +[2020-03-03 21:27:12,192][ceph_deploy.cli][INFO ] quiet : False 
+[2020-03-03 21:27:12,192][ceph_deploy.cli][INFO ] cd_conf : +[2020-03-03 21:27:12,192][ceph_deploy.cli][INFO ] cluster : ceph +[2020-03-03 21:27:12,192][ceph_deploy.cli][INFO ] client : ['riot01', 'pine01', 'pine02', 'ebin01', 'ebin02'] +[2020-03-03 21:27:12,192][ceph_deploy.cli][INFO ] func : +[2020-03-03 21:27:12,192][ceph_deploy.cli][INFO ] ceph_conf : None +[2020-03-03 21:27:12,192][ceph_deploy.cli][INFO ] default_release : False +[2020-03-03 21:27:12,192][ceph_deploy.config][DEBUG ] Pushing config to riot01 +[2020-03-03 21:27:13,700][riot01][DEBUG ] connected to host: root@riot01 +[2020-03-03 21:27:13,702][riot01][DEBUG ] detect platform information from remote host +[2020-03-03 21:27:13,873][riot01][DEBUG ] detect machine type +[2020-03-03 21:27:13,914][riot01][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf +[2020-03-03 21:27:13,936][ceph_deploy.config][DEBUG ] Pushing config to pine01 +[2020-03-03 21:27:15,625][pine01][DEBUG ] connected to host: root@pine01 +[2020-03-03 21:27:15,627][pine01][DEBUG ] detect platform information from remote host +[2020-03-03 21:27:15,773][pine01][DEBUG ] detect machine type +[2020-03-03 21:27:15,797][pine01][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf +[2020-03-03 21:27:15,809][ceph_deploy.config][DEBUG ] Pushing config to pine02 +[2020-03-03 21:27:17,363][pine02][DEBUG ] connected to host: root@pine02 +[2020-03-03 21:27:17,365][pine02][DEBUG ] detect platform information from remote host +[2020-03-03 21:27:17,485][pine02][DEBUG ] detect machine type +[2020-03-03 21:27:17,511][pine02][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf +[2020-03-03 21:27:17,525][ceph_deploy.config][DEBUG ] Pushing config to ebin01 +[2020-03-03 21:27:19,746][ebin01][DEBUG ] connected to host: root@ebin01 +[2020-03-03 21:27:19,748][ebin01][DEBUG ] detect platform information from remote host +[2020-03-03 21:27:19,908][ebin01][DEBUG ] detect machine type +[2020-03-03 21:27:19,940][ebin01][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf +[2020-03-03 21:27:19,955][ceph_deploy.config][DEBUG ] Pushing config to ebin02 +[2020-03-03 21:27:22,456][ebin02][DEBUG ] connected to host: root@ebin02 +[2020-03-03 21:27:22,458][ebin02][DEBUG ] detect platform information from remote host +[2020-03-03 21:27:22,691][ebin02][DEBUG ] detect machine type +[2020-03-03 21:27:22,732][ebin02][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf +[2020-03-03 21:42:10,621][ceph_deploy.conf][DEBUG ] found configuration file at: /home/do/.cephdeploy.conf +[2020-03-03 21:42:10,622][ceph_deploy.cli][INFO ] Invoked (2.0.1): /usr/bin/ceph-deploy --username root rgw create riot01 +[2020-03-03 21:42:10,622][ceph_deploy.cli][INFO ] ceph-deploy options: +[2020-03-03 21:42:10,622][ceph_deploy.cli][INFO ] username : root +[2020-03-03 21:42:10,622][ceph_deploy.cli][INFO ] verbose : False +[2020-03-03 21:42:10,622][ceph_deploy.cli][INFO ] rgw : [('riot01', 'rgw.riot01')] +[2020-03-03 21:42:10,622][ceph_deploy.cli][INFO ] overwrite_conf : False +[2020-03-03 21:42:10,622][ceph_deploy.cli][INFO ] subcommand : create +[2020-03-03 21:42:10,622][ceph_deploy.cli][INFO ] quiet : False +[2020-03-03 21:42:10,622][ceph_deploy.cli][INFO ] cd_conf : +[2020-03-03 21:42:10,622][ceph_deploy.cli][INFO ] cluster : ceph +[2020-03-03 21:42:10,622][ceph_deploy.cli][INFO ] func : +[2020-03-03 21:42:10,622][ceph_deploy.cli][INFO ] ceph_conf : None +[2020-03-03 21:42:10,622][ceph_deploy.cli][INFO ] default_release : False +[2020-03-03 
21:42:10,622][ceph_deploy.rgw][DEBUG ] Deploying rgw, cluster ceph hosts riot01:rgw.riot01 +[2020-03-03 21:42:12,097][riot01][DEBUG ] connected to host: root@riot01 +[2020-03-03 21:42:12,099][riot01][DEBUG ] detect platform information from remote host +[2020-03-03 21:42:12,274][riot01][DEBUG ] detect machine type +[2020-03-03 21:42:12,313][ceph_deploy.rgw][INFO ] Distro info: debian 10.3 buster +[2020-03-03 21:42:12,314][ceph_deploy.rgw][DEBUG ] remote host will use systemd +[2020-03-03 21:42:12,316][ceph_deploy.rgw][DEBUG ] deploying rgw bootstrap to riot01 +[2020-03-03 21:42:12,316][riot01][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf +[2020-03-03 21:42:12,343][riot01][WARNING] rgw keyring does not exist yet, creating one +[2020-03-03 21:42:12,343][riot01][DEBUG ] create a keyring file +[2020-03-03 21:42:12,361][riot01][DEBUG ] create path recursively if it doesn't exist +[2020-03-03 21:42:12,378][riot01][INFO ] Running command: ceph --cluster ceph --name client.bootstrap-rgw --keyring /var/lib/ceph/bootstrap-rgw/ceph.keyring auth get-or-create client.rgw.riot01 osd allow rwx mon allow rw -o /var/lib/ceph/radosgw/ceph-rgw.riot01/keyring +[2020-03-03 21:42:13,262][riot01][ERROR ] Traceback (most recent call last): +[2020-03-03 21:42:13,262][riot01][ERROR ] File "/usr/bin/ceph", line 1266, in +[2020-03-03 21:42:13,263][riot01][ERROR ] retval = main() +[2020-03-03 21:42:13,263][riot01][ERROR ] File "/usr/bin/ceph", line 979, in main +[2020-03-03 21:42:13,263][riot01][ERROR ] conffile=conffile) +[2020-03-03 21:42:13,263][riot01][ERROR ] File "/usr/lib/python3/dist-packages/ceph_argparse.py", line 1319, in run_in_thread +[2020-03-03 21:42:13,264][riot01][ERROR ] raise Exception("timed out") +[2020-03-03 21:42:13,264][riot01][ERROR ] Exception: timed out +[2020-03-03 21:42:13,264][riot01][ERROR ] exit code from command was: 1 +[2020-03-03 21:42:13,264][ceph_deploy.rgw][ERROR ] could not create rgw +[2020-03-03 21:42:13,265][ceph_deploy][ERROR ] GenericError: Failed to create 1 RGWs + +[2020-03-03 21:43:55,513][ceph_deploy.conf][DEBUG ] found configuration file at: /home/do/.cephdeploy.conf +[2020-03-03 21:43:55,513][ceph_deploy.cli][INFO ] Invoked (2.0.1): /usr/bin/ceph-deploy --username root rgw create riot01 +[2020-03-03 21:43:55,513][ceph_deploy.cli][INFO ] ceph-deploy options: +[2020-03-03 21:43:55,513][ceph_deploy.cli][INFO ] username : root +[2020-03-03 21:43:55,513][ceph_deploy.cli][INFO ] verbose : False +[2020-03-03 21:43:55,513][ceph_deploy.cli][INFO ] rgw : [('riot01', 'rgw.riot01')] +[2020-03-03 21:43:55,513][ceph_deploy.cli][INFO ] overwrite_conf : False +[2020-03-03 21:43:55,513][ceph_deploy.cli][INFO ] subcommand : create +[2020-03-03 21:43:55,513][ceph_deploy.cli][INFO ] quiet : False +[2020-03-03 21:43:55,513][ceph_deploy.cli][INFO ] cd_conf : +[2020-03-03 21:43:55,513][ceph_deploy.cli][INFO ] cluster : ceph +[2020-03-03 21:43:55,513][ceph_deploy.cli][INFO ] func : +[2020-03-03 21:43:55,513][ceph_deploy.cli][INFO ] ceph_conf : None +[2020-03-03 21:43:55,513][ceph_deploy.cli][INFO ] default_release : False +[2020-03-03 21:43:55,514][ceph_deploy.rgw][DEBUG ] Deploying rgw, cluster ceph hosts riot01:rgw.riot01 +[2020-03-03 21:43:57,024][riot01][DEBUG ] connected to host: root@riot01 +[2020-03-03 21:43:57,026][riot01][DEBUG ] detect platform information from remote host +[2020-03-03 21:43:57,196][riot01][DEBUG ] detect machine type +[2020-03-03 21:43:57,236][ceph_deploy.rgw][INFO ] Distro info: debian 10.3 buster +[2020-03-03 
21:43:57,236][ceph_deploy.rgw][DEBUG ] remote host will use systemd +[2020-03-03 21:43:57,238][ceph_deploy.rgw][DEBUG ] deploying rgw bootstrap to riot01 +[2020-03-03 21:43:57,238][riot01][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf +[2020-03-03 21:43:57,268][riot01][DEBUG ] create path recursively if it doesn't exist +[2020-03-03 21:43:57,284][riot01][INFO ] Running command: ceph --cluster ceph --name client.bootstrap-rgw --keyring /var/lib/ceph/bootstrap-rgw/ceph.keyring auth get-or-create client.rgw.riot01 osd allow rwx mon allow rw -o /var/lib/ceph/radosgw/ceph-rgw.riot01/keyring +[2020-03-03 21:43:58,167][riot01][ERROR ] Traceback (most recent call last): +[2020-03-03 21:43:58,168][riot01][ERROR ] File "/usr/bin/ceph", line 1266, in +[2020-03-03 21:43:58,168][riot01][ERROR ] retval = main() +[2020-03-03 21:43:58,168][riot01][ERROR ] File "/usr/bin/ceph", line 979, in main +[2020-03-03 21:43:58,168][riot01][ERROR ] conffile=conffile) +[2020-03-03 21:43:58,168][riot01][ERROR ] File "/usr/lib/python3/dist-packages/ceph_argparse.py", line 1319, in run_in_thread +[2020-03-03 21:43:58,169][riot01][ERROR ] raise Exception("timed out") +[2020-03-03 21:43:58,169][riot01][ERROR ] Exception: timed out +[2020-03-03 21:43:58,169][riot01][ERROR ] exit code from command was: 1 +[2020-03-03 21:43:58,169][ceph_deploy.rgw][ERROR ] could not create rgw +[2020-03-03 21:43:58,170][ceph_deploy][ERROR ] GenericError: Failed to create 1 RGWs + +[2020-03-03 21:49:04,635][ceph_deploy.conf][DEBUG ] found configuration file at: /home/do/.cephdeploy.conf +[2020-03-03 21:49:04,635][ceph_deploy.cli][INFO ] Invoked (2.0.1): /usr/bin/ceph-deploy --username root admin riot01 +[2020-03-03 21:49:04,635][ceph_deploy.cli][INFO ] ceph-deploy options: +[2020-03-03 21:49:04,635][ceph_deploy.cli][INFO ] username : root +[2020-03-03 21:49:04,635][ceph_deploy.cli][INFO ] verbose : False +[2020-03-03 21:49:04,635][ceph_deploy.cli][INFO ] overwrite_conf : False +[2020-03-03 21:49:04,635][ceph_deploy.cli][INFO ] quiet : False +[2020-03-03 21:49:04,635][ceph_deploy.cli][INFO ] cd_conf : +[2020-03-03 21:49:04,635][ceph_deploy.cli][INFO ] cluster : ceph +[2020-03-03 21:49:04,635][ceph_deploy.cli][INFO ] client : ['riot01'] +[2020-03-03 21:49:04,635][ceph_deploy.cli][INFO ] func : +[2020-03-03 21:49:04,635][ceph_deploy.cli][INFO ] ceph_conf : None +[2020-03-03 21:49:04,636][ceph_deploy.cli][INFO ] default_release : False +[2020-03-03 21:49:04,636][ceph_deploy.admin][DEBUG ] Pushing admin keys and conf to riot01 +[2020-03-03 21:49:06,134][riot01][DEBUG ] connected to host: root@riot01 +[2020-03-03 21:49:06,137][riot01][DEBUG ] detect platform information from remote host +[2020-03-03 21:49:06,305][riot01][DEBUG ] detect machine type +[2020-03-03 21:49:06,342][riot01][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf +[2020-03-03 21:49:14,206][ceph_deploy.conf][DEBUG ] found configuration file at: /home/do/.cephdeploy.conf +[2020-03-03 21:49:14,206][ceph_deploy.cli][INFO ] Invoked (2.0.1): /usr/bin/ceph-deploy --username root rgw create riot01 +[2020-03-03 21:49:14,206][ceph_deploy.cli][INFO ] ceph-deploy options: +[2020-03-03 21:49:14,206][ceph_deploy.cli][INFO ] username : root +[2020-03-03 21:49:14,206][ceph_deploy.cli][INFO ] verbose : False +[2020-03-03 21:49:14,206][ceph_deploy.cli][INFO ] rgw : [('riot01', 'rgw.riot01')] +[2020-03-03 21:49:14,206][ceph_deploy.cli][INFO ] overwrite_conf : False +[2020-03-03 21:49:14,206][ceph_deploy.cli][INFO ] subcommand : create +[2020-03-03 
21:49:14,206][ceph_deploy.cli][INFO ] quiet : False +[2020-03-03 21:49:14,207][ceph_deploy.cli][INFO ] cd_conf : +[2020-03-03 21:49:14,207][ceph_deploy.cli][INFO ] cluster : ceph +[2020-03-03 21:49:14,207][ceph_deploy.cli][INFO ] func : +[2020-03-03 21:49:14,207][ceph_deploy.cli][INFO ] ceph_conf : None +[2020-03-03 21:49:14,207][ceph_deploy.cli][INFO ] default_release : False +[2020-03-03 21:49:14,207][ceph_deploy.rgw][DEBUG ] Deploying rgw, cluster ceph hosts riot01:rgw.riot01 +[2020-03-03 21:49:15,751][riot01][DEBUG ] connected to host: root@riot01 +[2020-03-03 21:49:15,753][riot01][DEBUG ] detect platform information from remote host +[2020-03-03 21:49:15,923][riot01][DEBUG ] detect machine type +[2020-03-03 21:49:15,964][ceph_deploy.rgw][INFO ] Distro info: debian 10.3 buster +[2020-03-03 21:49:15,965][ceph_deploy.rgw][DEBUG ] remote host will use systemd +[2020-03-03 21:49:15,966][ceph_deploy.rgw][DEBUG ] deploying rgw bootstrap to riot01 +[2020-03-03 21:49:15,967][riot01][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf +[2020-03-03 21:49:15,995][riot01][DEBUG ] create path recursively if it doesn't exist +[2020-03-03 21:49:16,010][riot01][INFO ] Running command: ceph --cluster ceph --name client.bootstrap-rgw --keyring /var/lib/ceph/bootstrap-rgw/ceph.keyring auth get-or-create client.rgw.riot01 osd allow rwx mon allow rw -o /var/lib/ceph/radosgw/ceph-rgw.riot01/keyring +[2020-03-03 21:49:16,891][riot01][ERROR ] Traceback (most recent call last): +[2020-03-03 21:49:16,892][riot01][ERROR ] File "/usr/bin/ceph", line 1266, in +[2020-03-03 21:49:16,892][riot01][ERROR ] retval = main() +[2020-03-03 21:49:16,892][riot01][ERROR ] File "/usr/bin/ceph", line 979, in main +[2020-03-03 21:49:16,892][riot01][ERROR ] conffile=conffile) +[2020-03-03 21:49:16,893][riot01][ERROR ] File "/usr/lib/python3/dist-packages/ceph_argparse.py", line 1319, in run_in_thread +[2020-03-03 21:49:16,893][riot01][ERROR ] raise Exception("timed out") +[2020-03-03 21:49:16,893][riot01][ERROR ] Exception: timed out +[2020-03-03 21:49:16,893][riot01][ERROR ] exit code from command was: 1 +[2020-03-03 21:49:16,893][ceph_deploy.rgw][ERROR ] could not create rgw +[2020-03-03 21:49:16,894][ceph_deploy][ERROR ] GenericError: Failed to create 1 RGWs + +[2020-03-03 22:03:53,501][ceph_deploy.conf][DEBUG ] found configuration file at: /home/do/.cephdeploy.conf +[2020-03-03 22:03:53,502][ceph_deploy.cli][INFO ] Invoked (2.0.1): /usr/bin/ceph-deploy --username root admin lenny +[2020-03-03 22:03:53,502][ceph_deploy.cli][INFO ] ceph-deploy options: +[2020-03-03 22:03:53,502][ceph_deploy.cli][INFO ] username : root +[2020-03-03 22:03:53,502][ceph_deploy.cli][INFO ] verbose : False +[2020-03-03 22:03:53,502][ceph_deploy.cli][INFO ] overwrite_conf : False +[2020-03-03 22:03:53,502][ceph_deploy.cli][INFO ] quiet : False +[2020-03-03 22:03:53,502][ceph_deploy.cli][INFO ] cd_conf : +[2020-03-03 22:03:53,502][ceph_deploy.cli][INFO ] cluster : ceph +[2020-03-03 22:03:53,502][ceph_deploy.cli][INFO ] client : ['lenny'] +[2020-03-03 22:03:53,502][ceph_deploy.cli][INFO ] func : +[2020-03-03 22:03:53,502][ceph_deploy.cli][INFO ] ceph_conf : None +[2020-03-03 22:03:53,502][ceph_deploy.cli][INFO ] default_release : False +[2020-03-03 22:03:53,502][ceph_deploy.admin][DEBUG ] Pushing admin keys and conf to lenny +[2020-03-03 22:03:53,842][lenny][DEBUG ] connected to host: root@lenny +[2020-03-03 22:03:53,843][lenny][DEBUG ] detect platform information from remote host +[2020-03-03 22:03:53,859][lenny][DEBUG ] detect machine type 
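All three rgw create riot01 attempts above fail the same way: the auth get-or-create call for client.rgw.riot01 dies with "Exception: timed out", meaning the ceph CLI on riot01 never got an answer from a monitor, and pushing the admin key to riot01 beforehand (the 21:49:04 run) did not change that. A typical first step when chasing such a timeout is to repeat a simple monitor query by hand on riot01 with a short connect timeout and to check that the monitor addresses and keyring it depends on are actually usable (a sketch, not commands from the original session; it assumes the 21:49:06 admin push left /etc/ceph/ceph.client.admin.keyring on riot01):

    ceph --cluster ceph --connect-timeout 10 -s      # should answer quickly instead of hanging
    grep mon_host /etc/ceph/ceph.conf                # these addresses must be reachable from riot01
    ls -l /var/lib/ceph/bootstrap-rgw/ceph.keyring   # keyring the failing get-or-create relies on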
+[2020-03-03 22:03:53,862][lenny][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf
+[2020-03-03 22:03:53,864][ceph_deploy.admin][ERROR ] RuntimeError: config file /etc/ceph/ceph.conf exists with different content; use --overwrite-conf to overwrite
+[2020-03-03 22:03:53,864][ceph_deploy][ERROR ] GenericError: Failed to configure 1 admin hosts
+
+[2020-03-03 22:04:06,808][ceph_deploy.conf][DEBUG ] found configuration file at: /home/do/.cephdeploy.conf
+[2020-03-03 22:04:06,808][ceph_deploy.cli][INFO ] Invoked (2.0.1): /usr/bin/ceph-deploy --username root --overwrite-conf config push lenny
+[2020-03-03 22:04:06,808][ceph_deploy.cli][INFO ] ceph-deploy options:
+[2020-03-03 22:04:06,809][ceph_deploy.cli][INFO ] username : root
+[2020-03-03 22:04:06,809][ceph_deploy.cli][INFO ] verbose : False
+[2020-03-03 22:04:06,809][ceph_deploy.cli][INFO ] overwrite_conf : True
+[2020-03-03 22:04:06,809][ceph_deploy.cli][INFO ] subcommand : push
+[2020-03-03 22:04:06,809][ceph_deploy.cli][INFO ] quiet : False
+[2020-03-03 22:04:06,809][ceph_deploy.cli][INFO ] cd_conf :
+[2020-03-03 22:04:06,809][ceph_deploy.cli][INFO ] cluster : ceph
+[2020-03-03 22:04:06,809][ceph_deploy.cli][INFO ] client : ['lenny']
+[2020-03-03 22:04:06,809][ceph_deploy.cli][INFO ] func :
+[2020-03-03 22:04:06,809][ceph_deploy.cli][INFO ] ceph_conf : None
+[2020-03-03 22:04:06,809][ceph_deploy.cli][INFO ] default_release : False
+[2020-03-03 22:04:06,809][ceph_deploy.config][DEBUG ] Pushing config to lenny
+[2020-03-03 22:04:07,221][lenny][DEBUG ] connected to host: root@lenny
+[2020-03-03 22:04:07,221][lenny][DEBUG ] detect platform information from remote host
+[2020-03-03 22:04:07,236][lenny][DEBUG ] detect machine type
+[2020-03-03 22:04:07,240][lenny][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf
+[2020-03-03 22:04:29,625][ceph_deploy.conf][DEBUG ] found configuration file at: /home/do/.cephdeploy.conf
+[2020-03-03 22:04:29,625][ceph_deploy.cli][INFO ] Invoked (2.0.1): /usr/bin/ceph-deploy --username root admin lenny
+[2020-03-03 22:04:29,625][ceph_deploy.cli][INFO ] ceph-deploy options:
+[2020-03-03 22:04:29,625][ceph_deploy.cli][INFO ] username : root
+[2020-03-03 22:04:29,625][ceph_deploy.cli][INFO ] verbose : False
+[2020-03-03 22:04:29,625][ceph_deploy.cli][INFO ] overwrite_conf : False
+[2020-03-03 22:04:29,625][ceph_deploy.cli][INFO ] quiet : False
+[2020-03-03 22:04:29,625][ceph_deploy.cli][INFO ] cd_conf :
+[2020-03-03 22:04:29,625][ceph_deploy.cli][INFO ] cluster : ceph
+[2020-03-03 22:04:29,625][ceph_deploy.cli][INFO ] client : ['lenny']
+[2020-03-03 22:04:29,625][ceph_deploy.cli][INFO ] func :
+[2020-03-03 22:04:29,625][ceph_deploy.cli][INFO ] ceph_conf : None
+[2020-03-03 22:04:29,625][ceph_deploy.cli][INFO ] default_release : False
+[2020-03-03 22:04:29,625][ceph_deploy.admin][DEBUG ] Pushing admin keys and conf to lenny
+[2020-03-03 22:04:29,982][lenny][DEBUG ] connected to host: root@lenny
+[2020-03-03 22:04:29,982][lenny][DEBUG ] detect platform information from remote host
+[2020-03-03 22:04:29,997][lenny][DEBUG ] detect machine type
+[2020-03-03 22:04:30,000][lenny][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf
diff --git a/ceph.bootstrap-mds.keyring b/ceph.bootstrap-mds.keyring
new file mode 100644
index 0000000..10c0b2e
--- /dev/null
+++ b/ceph.bootstrap-mds.keyring
@@ -0,0 +1,3 @@
+[client.bootstrap-mds]
+ key = AQAEtV5epuXwLhAALp4VAsSfVhs5XujznXBmUg==
+ caps mon = "allow profile bootstrap-mds"
diff --git a/ceph.bootstrap-mgr.keyring b/ceph.bootstrap-mgr.keyring
new file mode 100644
index 0000000..f006a28
--- /dev/null
+++ b/ceph.bootstrap-mgr.keyring
@@ -0,0 +1,3 @@
+[client.bootstrap-mgr]
+ key = AQAEtV5eSV3xLhAAiwSninS8+gXUiK6AqcJ0jw==
+ caps mon = "allow profile bootstrap-mgr"
diff --git a/ceph.bootstrap-osd.keyring b/ceph.bootstrap-osd.keyring
new file mode 100644
index 0000000..cf0fa85
--- /dev/null
+++ b/ceph.bootstrap-osd.keyring
@@ -0,0 +1,3 @@
+[client.bootstrap-osd]
+ key = AQAEtV5eaM7xLhAAOghmZDAD2g6s1knUyNNEjA==
+ caps mon = "allow profile bootstrap-osd"
diff --git a/ceph.bootstrap-rgw.keyring b/ceph.bootstrap-rgw.keyring
new file mode 100644
index 0000000..887eeba
--- /dev/null
+++ b/ceph.bootstrap-rgw.keyring
@@ -0,0 +1,3 @@
+[client.bootstrap-rgw]
+ key = AQAEtV5erUHzLhAAhhM/4/kbeOMzogETWZcs+A==
+ caps mon = "allow profile bootstrap-rgw"
diff --git a/ceph.client.admin.keyring b/ceph.client.admin.keyring
new file mode 100644
index 0000000..86910ad
--- /dev/null
+++ b/ceph.client.admin.keyring
@@ -0,0 +1,6 @@
+[client.admin]
+ key = AQAEtV5eTjjwLhAAjhBblIIarKdfZ01Y5wTkbg==
+ caps mds = "allow *"
+ caps mgr = "allow *"
+ caps mon = "allow *"
+ caps osd = "allow *"
diff --git a/ceph.conf b/ceph.conf
new file mode 100644
index 0000000..b7adb85
--- /dev/null
+++ b/ceph.conf
@@ -0,0 +1,11 @@
+[global]
+fsid = 29ef4020-303a-4b2e-aa24-a1e20e5ba21c
+#ms_bind_ipv6 = true
+mon_initial_members = pine01, pine02
+mon_host = 192.168.10.160,192.168.10.161
+auth_cluster_required = cephx
+auth_service_required = cephx
+auth_client_required = cephx
+public network = 192.168.10.0/24
+osd pool default pg num = 144
+osd pool default pgp num = 144
diff --git a/ceph.mon.keyring b/ceph.mon.keyring
new file mode 100644
index 0000000..9b91ac1
--- /dev/null
+++ b/ceph.mon.keyring
@@ -0,0 +1,3 @@
+[mon.]
+key = AQD7sF5eAAAAABAAJjvyccVMIhCSMPcPIihGCg==
+caps mon = allow *
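The committed ceph.conf sets PG-count defaults (144 is not a power of two, which newer Ceph releases flag with a health warning) but no pool replication defaults. If cluster-wide replication defaults were wanted, they would normally be expressed either in the same [global] section or per pool at runtime; the values below are illustrative only and are not part of the committed file:

    # ceph.conf ([global]) -- illustrative, not in the file above
    osd pool default size = 2
    osd pool default min size = 1

    # or per existing pool, at runtime (POOL is a placeholder)
    ceph osd pool set POOL size 2
    ceph osd pool set POOL min_size 1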