[ceph@node0 cluster]$ ceph-deploy new node1 node2 node3
[ceph_deploy.conf][DEBUG ] found configuration file at: /home/ceph/.cephdeploy.conf
[ceph_deploy.cli][INFO ] Invoked (1.5.34): /usr/bin/ceph-deploy new node1 node2 node3
[ceph_deploy.cli][INFO ] ceph-deploy options:
[ceph_deploy.cli][INFO ] username : None
[ceph_deploy.cli][INFO ] func : <function new at 0x29f2b18>
[ceph_deploy.cli][INFO ] verbose : False
[ceph_deploy.cli][INFO ] overwrite_conf : False
[ceph_deploy.cli][INFO ] quiet : False
[ceph_deploy.cli][INFO ] cd_conf : <ceph_deploy.conf.cephdeploy.Conf instance at 0x2a15a70>
[ceph_deploy.cli][INFO ] cluster : ceph
[ceph_deploy.cli][INFO ] ssh_copykey : True
[ceph_deploy.cli][INFO ] mon : ['node1', 'node2', 'node3']
[ceph_deploy.cli][INFO ] public_network : None
[ceph_deploy.cli][INFO ] ceph_conf : None
[ceph_deploy.cli][INFO ] cluster_network : None
[ceph_deploy.cli][INFO ] default_release : False
[ceph_deploy.cli][INFO ] fsid : None
[ceph_deploy.new][DEBUG ] Creating new cluster named ceph
[ceph_deploy.new][INFO ] making sure passwordless SSH succeeds
[node1][DEBUG ] connected to host: node0
[node1][INFO ] Running command: ssh -CT -o BatchMode=yes node1
[node1][DEBUG ] connection detected need for sudo
[node1][DEBUG ] connected to host: node1
[node1][DEBUG ] detect platform information from remote host
[node1][DEBUG ] detect machine type
[node1][DEBUG ] find the location of an executable
[node1][INFO ] Running command: sudo /usr/sbin/ip link show
[node1][INFO ] Running command: sudo /usr/sbin/ip addr show
[node1][DEBUG ] IP addresses found: ['192.168.92.101', '192.168.1.102', '192.168.122.1']
[ceph_deploy.new][DEBUG ] Resolving host node1
[ceph_deploy.new][DEBUG ] Monitor node1 at 192.168.92.101
[ceph_deploy.new][INFO ] making sure passwordless SSH succeeds
[node2][DEBUG ] connected to host: node0
[node2][INFO ] Running command: ssh -CT -o BatchMode=yes node2
[node2][DEBUG ] connection detected need for sudo
[node2][DEBUG ] connected to host: node2
[node2][DEBUG ] detect platform information from remote host
[node2][DEBUG ] detect machine type
[node2][DEBUG ] find the location of an executable
[node2][INFO ] Running command: sudo /usr/sbin/ip link show
[node2][INFO ] Running command: sudo /usr/sbin/ip addr show
[node2][DEBUG ] IP addresses found: ['192.168.1.103', '192.168.122.1', '192.168.92.102']
[ceph_deploy.new][DEBUG ] Resolving host node2
[ceph_deploy.new][DEBUG ] Monitor node2 at 192.168.92.102
[ceph_deploy.new][INFO ] making sure passwordless SSH succeeds
[node3][DEBUG ] connected to host: node0
[node3][INFO ] Running command: ssh -CT -o BatchMode=yes node3
[node3][DEBUG ] connection detected need for sudo
[node3][DEBUG ] connected to host: node3
[node3][DEBUG ] detect platform information from remote host
[node3][DEBUG ] detect machine type
[node3][DEBUG ] find the location of an executable
[node3][INFO ] Running command: sudo /usr/sbin/ip link show
[node3][INFO ] Running command: sudo /usr/sbin/ip addr show
[node3][DEBUG ] IP addresses found: ['192.168.122.1', '192.168.1.104', '192.168.92.103']
[ceph_deploy.new][DEBUG ] Resolving host node3
[ceph_deploy.new][DEBUG ] Monitor node3 at 192.168.92.103
[ceph_deploy.new][DEBUG ] Monitor initial members are ['node1', 'node2', 'node3']
[ceph_deploy.new][DEBUG ] Monitor addrs are ['192.168.92.101', '192.168.92.102', '192.168.92.103']
[ceph_deploy.new][DEBUG ] Creating a random mon key...
[ceph_deploy.new][DEBUG ] Writing monitor keyring to ceph.mon.keyring...
[ceph_deploy.new][DEBUG ] Writing initial config to ceph.conf...
[ceph@node0 cluster]$
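At this point ceph-deploy has dropped ceph.conf and ceph.mon.keyring into the working directory. The generated ceph.conf is normally just a [global] section assembled from the monitor information resolved above; a sketch of what it should contain for this run (the fsid matches the one reported later by ceph-mon --mkfs, and the cephx lines are the jewel-era defaults):

[global]
fsid = 4f8f6c46-9f67-4475-9cb5-52cafecb3e4c
mon_initial_members = node1, node2, node3
mon_host = 192.168.92.101,192.168.92.102,192.168.92.103
auth_cluster_required = cephx
auth_service_required = cephx
auth_client_required = cephx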
[ceph@node0 cluster]$ ceph-deploy install node0 node1 node2 node3
[ceph_deploy.conf][DEBUG ] found configuration file at: /home/ceph/.cephdeploy.conf
[ceph_deploy.cli][INFO ] Invoked (1.5.34): /usr/bin/ceph-deploy install node0 node1 node2 node3
[ceph_deploy.cli][INFO ] ceph-deploy options:
[ceph_deploy.cli][INFO ] verbose : False
[ceph_deploy.cli][INFO ] testing : None
[ceph_deploy.cli][INFO ] cd_conf : <ceph_deploy.conf.cephdeploy.Conf instance at 0x2ae0560>
[ceph_deploy.cli][INFO ] cluster : ceph
[ceph_deploy.cli][INFO ] dev_commit : None
[ceph_deploy.cli][INFO ] install_mds : False
[ceph_deploy.cli][INFO ] stable : None
[ceph_deploy.cli][INFO ] default_release : False
[ceph_deploy.cli][INFO ] username : None
[ceph_deploy.cli][INFO ] adjust_repos : True
[ceph_deploy.cli][INFO ] func : <function install at 0x2a53668>
[ceph_deploy.cli][INFO ] install_all : False
[ceph_deploy.cli][INFO ] repo : False
[ceph_deploy.cli][INFO ] host : ['node0', 'node1', 'node2', 'node3']
[ceph_deploy.cli][INFO ] install_rgw : False
[ceph_deploy.cli][INFO ] install_tests : False
[ceph_deploy.cli][INFO ] repo_url : None
[ceph_deploy.cli][INFO ] ceph_conf : None
[ceph_deploy.cli][INFO ] install_osd : False
[ceph_deploy.cli][INFO ] version_kind : stable
[ceph_deploy.cli][INFO ] install_common : False
[ceph_deploy.cli][INFO ] overwrite_conf : False
[ceph_deploy.cli][INFO ] quiet : False
[ceph_deploy.cli][INFO ] dev : master
[ceph_deploy.cli][INFO ] local_mirror : None
[ceph_deploy.cli][INFO ] release : None
[ceph_deploy.cli][INFO ] install_mon : False
[ceph_deploy.cli][INFO ] gpg_url : None
[ceph_deploy.install][DEBUG ] Installing stable version jewel on cluster ceph hosts node0 node1 node2 node3
[ceph_deploy.install][DEBUG ] Detecting platform for host node0 ...
[node0][DEBUG ] connection detected need for sudo
[node0][DEBUG ] connected to host: node0
[node0][DEBUG ] detect platform information from remote host
[node0][DEBUG ] detect machine type
[ceph_deploy.install][INFO ] Distro info: CentOS Linux 7.2.1511 Core
[node0][INFO ] installing Ceph on node0
[node0][INFO ] Running command: sudo yum clean all
[node0][DEBUG ] Loaded plugins: fastestmirror, langpacks, priorities
[node0][DEBUG ] Cleaning repos: Ceph Ceph-noarch base ceph-source epel extras updates
[node0][DEBUG ] Cleaning up everything
[node0][DEBUG ] Cleaning up list of fastest mirrors
[node0][INFO ] Running command: sudo yum -y install epel-release
[node0][DEBUG ] Loaded plugins: fastestmirror, langpacks, priorities
[node0][DEBUG ] Determining fastest mirrors
[node0][DEBUG ] * epel: mirror01.idc.hinet.net
[node0][DEBUG ] 25 packages excluded due to repository priority protections
[node0][DEBUG ] Package epel-release-7-7.noarch already installed and latest version
[node0][DEBUG ] Nothing to do
[node0][INFO ] Running command: sudo yum -y install yum-plugin-priorities
[node0][DEBUG ] Loaded plugins: fastestmirror, langpacks, priorities
[node0][DEBUG ] Loading mirror speeds from cached hostfile
[node0][DEBUG ] * epel: mirror01.idc.hinet.net
[node0][DEBUG ] 25 packages excluded due to repository priority protections
[node0][DEBUG ] Package yum-plugin-priorities-1.1.31-34.el7.noarch already installed and latest version
[node0][DEBUG ] Nothing to do
[node0][DEBUG ] Configure Yum priorities to include obsoletes
[node0][WARNIN] check_obsoletes has been enabled for Yum priorities plugin
[node0][INFO ] Running command: sudo rpm --import https:
[node0][INFO ] Running command: sudo rpm -Uvh --replacepkgs https:
[node0][DEBUG ] Retrieving https:
[node0][DEBUG ] Preparing... ########################################
[node0][DEBUG ] Updating / installing...
[node0][DEBUG ] ceph-release-1-1.el7 ########################################
[node0][WARNIN] ensuring that /etc/yum.repos.d/ceph.repo contains a high priority
[node0][WARNIN] altered ceph.repo priorities to contain: priority=1
[node0][INFO ] Running command: sudo yum -y install ceph ceph-radosgw
[node0][DEBUG ] Loaded plugins: fastestmirror, langpacks, priorities
[node0][DEBUG ] Loading mirror speeds from cached hostfile
[node0][DEBUG ] * epel: mirror01.idc.hinet.net
[node0][DEBUG ] 25 packages excluded due to repository priority protections
[node0][DEBUG ] Package 1:ceph-10.2.2-0.el7.x86_64 already installed and latest version
[node0][DEBUG ] Package 1:ceph-radosgw-10.2.2-0.el7.x86_64 already installed and latest version
[node0][DEBUG ] Nothing to do
[node0][INFO ] Running command: sudo ceph --version
[node0][DEBUG ] ceph version 10.2.2 (45107e21c568dd033c2f0a3107dec8f0b0e58374)
….
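The same installation sequence repeats on node1, node2 and node3 (output trimmed above). A quick sanity check that every host ended up on the same release is a small loop over the nodes from the admin box, assuming the passwordless SSH already set up for the ceph user:

for h in node0 node1 node2 node3; do
    echo -n "$h: "
    ssh $h ceph --version        # all four should report 10.2.2 (jewel)
done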
[ceph@node0 cluster]$ ceph-deploy mon create-initial
[ceph_deploy.conf][DEBUG ] found configuration file at: /home/ceph/.cephdeploy.conf
[ceph_deploy.cli][INFO ] Invoked (1.5.34): /usr/bin/ceph-deploy mon create-initial
[ceph_deploy.cli][INFO ] ceph-deploy options:
[ceph_deploy.cli][INFO ] username : None
[ceph_deploy.cli][INFO ] verbose : False
[ceph_deploy.cli][INFO ] overwrite_conf : False
[ceph_deploy.cli][INFO ] subcommand : create-initial
[ceph_deploy.cli][INFO ] quiet : False
[ceph_deploy.cli][INFO ] cd_conf : <ceph_deploy.conf.cephdeploy.Conf instance at 0x7fbe46804cb0>
[ceph_deploy.cli][INFO ] cluster : ceph
[ceph_deploy.cli][INFO ] func : <function mon at 0x7fbe467f6aa0>
[ceph_deploy.cli][INFO ] ceph_conf : None
[ceph_deploy.cli][INFO ] default_release : False
[ceph_deploy.cli][INFO ] keyrings : None
[ceph_deploy.mon][DEBUG ] Deploying mon, cluster ceph hosts node1 node2 node3
[ceph_deploy.mon][DEBUG ] detecting platform for host node1 ...
[node1][DEBUG ] connection detected need for sudo
[node1][DEBUG ] connected to host: node1
[node1][DEBUG ] detect platform information from remote host
[node1][DEBUG ] detect machine type
[node1][DEBUG ] find the location of an executable
[ceph_deploy.mon][INFO ] distro info: CentOS Linux 7.2.1511 Core
[node1][DEBUG ] determining if provided host has same hostname in remote
[node1][DEBUG ] get remote short hostname
[node1][DEBUG ] deploying mon to node1
[node1][DEBUG ] get remote short hostname
[node1][DEBUG ] remote hostname: node1
[node1][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf
[node1][DEBUG ] create the mon path if it does not exist
[node1][DEBUG ] checking for done path: /var/lib/ceph/mon/ceph-node1/done
[node1][DEBUG ] done path does not exist: /var/lib/ceph/mon/ceph-node1/done
[node1][INFO ] creating keyring file: /var/lib/ceph/tmp/ceph-node1.mon.keyring
[node1][DEBUG ] create the monitor keyring file
[node1][INFO ] Running command: sudo ceph-mon --cluster ceph --mkfs -i node1 --keyring /var/lib/ceph/tmp/ceph-node1.mon.keyring --setuser 1001 --setgroup 1001
[node1][DEBUG ] ceph-mon: mon.noname-a 192.168.92.101:6789/0 is local, renaming to mon.node1
[node1][DEBUG ] ceph-mon: set fsid to 4f8f6c46-9f67-4475-9cb5-52cafecb3e4c
[node1][DEBUG ] ceph-mon: created monfs at /var/lib/ceph/mon/ceph-node1 for mon.node1
[node1][INFO ] unlinking keyring file /var/lib/ceph/tmp/ceph-node1.mon.keyring
[node1][DEBUG ] create a done file to avoid re-doing the mon deployment
[node1][DEBUG ] create the init path if it does not exist
[node1][INFO ] Running command: sudo systemctl enable ceph.target
[node1][INFO ] Running command: sudo systemctl enable ceph-mon@node1
[node1][WARNIN] Created symlink from /etc/systemd/system/ceph-mon.target.wants/ceph-mon@node1.service to /usr/lib/systemd/system/ceph-mon@.service.
[node1][INFO ] Running command: sudo systemctl start ceph-mon@node1
[node1][INFO ] Running command: sudo ceph --cluster=ceph --admin-daemon /var/run/ceph/ceph-mon.node1.asok mon_status
[node1][DEBUG ] ********************************************************************************
[node1][DEBUG ] status for monitor: mon.node1
[node1][DEBUG ] {
[node1][DEBUG ] "election_epoch": 0,
[node1][DEBUG ] "extra_probe_peers": [
[node1][DEBUG ] "192.168.92.102:6789/0",
[node1][DEBUG ] "192.168.92.103:6789/0"
[node1][DEBUG ] ],
[node1][DEBUG ] "monmap": {
[node1][DEBUG ] "created": "2016-06-24 14:43:29.944474",
[node1][DEBUG ] "epoch": 0,
[node1][DEBUG ] "fsid": "4f8f6c46-9f67-4475-9cb5-52cafecb3e4c",
[node1][DEBUG ] "modified": "2016-06-24 14:43:29.944474",
[node1][DEBUG ] "mons": [
[node1][DEBUG ] {
[node1][DEBUG ] "addr": "192.168.92.101:6789/0",
[node1][DEBUG ] "name": "node1",
[node1][DEBUG ] "rank": 0
[node1][DEBUG ] },
[node1][DEBUG ] {
[node1][DEBUG ] "addr": "0.0.0.0:0/1",
[node1][DEBUG ] "name": "node2",
[node1][DEBUG ] "rank": 1
[node1][DEBUG ] },
[node1][DEBUG ] {
[node1][DEBUG ] "addr": "0.0.0.0:0/2",
[node1][DEBUG ] "name": "node3",
[node1][DEBUG ] "rank": 2
[node1][DEBUG ] }
[node1][DEBUG ] ]
[node1][DEBUG ] },
[node1][DEBUG ] "name": "node1",
[node1][DEBUG ] "outside_quorum": [
[node1][DEBUG ] "node1"
[node1][DEBUG ] ],
[node1][DEBUG ] "quorum": [],
[node1][DEBUG ] "rank": 0,
[node1][DEBUG ] "state": "probing",
[node1][DEBUG ] "sync_provider": []
[node1][DEBUG ] }
[node1][DEBUG ] ********************************************************************************
[node1][INFO ] monitor: mon.node1 is running
[node1][INFO ] Running command: sudo ceph --cluster=ceph --admin-daemon /var/run/ceph/ceph-mon.node1.asok mon_status
[ceph_deploy.mon][DEBUG ] detecting platform for host node2 ...
[node2][DEBUG ] connection detected need for sudo
[node2][DEBUG ] connected to host: node2
[node2][DEBUG ] detect platform information from remote host
[node2][DEBUG ] detect machine type
[node2][DEBUG ] find the location of an executable
[ceph_deploy.mon][INFO ] distro info: CentOS Linux 7.2.1511 Core
[node2][DEBUG ] determining if provided host has same hostname in remote
[node2][DEBUG ] get remote short hostname
[node2][DEBUG ] deploying mon to node2
[node2][DEBUG ] get remote short hostname
[node2][DEBUG ] remote hostname: node2
[node2][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf
[node2][DEBUG ] create the mon path if it does not exist
[node2][DEBUG ] checking for done path: /var/lib/ceph/mon/ceph-node2/done
[node2][DEBUG ] done path does not exist: /var/lib/ceph/mon/ceph-node2/done
[node2][INFO ] creating keyring file: /var/lib/ceph/tmp/ceph-node2.mon.keyring
[node2][DEBUG ] create the monitor keyring file
[node2][INFO ] Running command: sudo ceph-mon --cluster ceph --mkfs -i node2 --keyring /var/lib/ceph/tmp/ceph-node2.mon.keyring --setuser 1001 --setgroup 1001
[node2][DEBUG ] ceph-mon: mon.noname-b 192.168.92.102:6789/0 is local, renaming to mon.node2
[node2][DEBUG ] ceph-mon: set fsid to 4f8f6c46-9f67-4475-9cb5-52cafecb3e4c
[node2][DEBUG ] ceph-mon: created monfs at /var/lib/ceph/mon/ceph-node2 for mon.node2
[node2][INFO ] unlinking keyring file /var/lib/ceph/tmp/ceph-node2.mon.keyring
[node2][DEBUG ] create a done file to avoid re-doing the mon deployment
[node2][DEBUG ] create the init path if it does not exist
[node2][INFO ] Running command: sudo systemctl enable ceph.target
[node2][INFO ] Running command: sudo systemctl enable ceph-mon@node2
[node2][WARNIN] Created symlink from /etc/systemd/system/ceph-mon.target.wants/ceph-mon@node2.service to /usr/lib/systemd/system/ceph-mon@.service.
[node2][INFO ] Running command: sudo systemctl start ceph-mon@node2
[node2][INFO ] Running command: sudo ceph --cluster=ceph --admin-daemon /var/run/ceph/ceph-mon.node2.asok mon_status
[node2][DEBUG ] ********************************************************************************
[node2][DEBUG ] status for monitor: mon.node2
[node2][DEBUG ] {
[node2][DEBUG ] "election_epoch": 1,
[node2][DEBUG ] "extra_probe_peers": [
[node2][DEBUG ] "192.168.92.101:6789/0",
[node2][DEBUG ] "192.168.92.103:6789/0"
[node2][DEBUG ] ],
[node2][DEBUG ] "monmap": {
[node2][DEBUG ] "created": "2016-06-24 14:43:34.865908",
[node2][DEBUG ] "epoch": 0,
[node2][DEBUG ] "fsid": "4f8f6c46-9f67-4475-9cb5-52cafecb3e4c",
[node2][DEBUG ] "modified": "2016-06-24 14:43:34.865908",
[node2][DEBUG ] "mons": [
[node2][DEBUG ] {
[node2][DEBUG ] "addr": "192.168.92.101:6789/0",
[node2][DEBUG ] "name": "node1",
[node2][DEBUG ] "rank": 0
[node2][DEBUG ] },
[node2][DEBUG ] {
[node2][DEBUG ] "addr": "192.168.92.102:6789/0",
[node2][DEBUG ] "name": "node2",
[node2][DEBUG ] "rank": 1
[node2][DEBUG ] },
[node2][DEBUG ] {
[node2][DEBUG ] "addr": "0.0.0.0:0/2",
[node2][DEBUG ] "name": "node3",
[node2][DEBUG ] "rank": 2
[node2][DEBUG ] }
[node2][DEBUG ] ]
[node2][DEBUG ] },
[node2][DEBUG ] "name": "node2",
[node2][DEBUG ] "outside_quorum": [],
[node2][DEBUG ] "quorum": [],
[node2][DEBUG ] "rank": 1,
[node2][DEBUG ] "state": "electing",
[node2][DEBUG ] "sync_provider": []
[node2][DEBUG ] }
[node2][DEBUG ] ********************************************************************************
[node2][INFO ] monitor: mon.node2 is running
[node2][INFO ] Running command: sudo ceph --cluster=ceph --admin-daemon /var/run/ceph/ceph-mon.node2.asok mon_status
[ceph_deploy.mon][DEBUG ] detecting platform for host node3 ...
[node3][DEBUG ] connection detected need for sudo
[node3][DEBUG ] connected to host: node3
[node3][DEBUG ] detect platform information from remote host
[node3][DEBUG ] detect machine type
[node3][DEBUG ] find the location of an executable
[ceph_deploy.mon][INFO ] distro info: CentOS Linux 7.2.1511 Core
[node3][DEBUG ] determining if provided host has same hostname in remote
[node3][DEBUG ] get remote short hostname
[node3][DEBUG ] deploying mon to node3
[node3][DEBUG ] get remote short hostname
[node3][DEBUG ] remote hostname: node3
[node3][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf
[node3][DEBUG ] create the mon path if it does not exist
[node3][DEBUG ] checking for done path: /var/lib/ceph/mon/ceph-node3/done
[node3][DEBUG ] done path does not exist: /var/lib/ceph/mon/ceph-node3/done
[node3][INFO ] creating keyring file: /var/lib/ceph/tmp/ceph-node3.mon.keyring
[node3][DEBUG ] create the monitor keyring file
[node3][INFO ] Running command: sudo ceph-mon --cluster ceph --mkfs -i node3 --keyring /var/lib/ceph/tmp/ceph-node3.mon.keyring --setuser 1001 --setgroup 1001
[node3][DEBUG ] ceph-mon: mon.noname-c 192.168.92.103:6789/0 is local, renaming to mon.node3
[node3][DEBUG ] ceph-mon: set fsid to 4f8f6c46-9f67-4475-9cb5-52cafecb3e4c
[node3][DEBUG ] ceph-mon: created monfs at /var/lib/ceph/mon/ceph-node3 for mon.node3
[node3][INFO ] unlinking keyring file /var/lib/ceph/tmp/ceph-node3.mon.keyring
[node3][DEBUG ] create a done file to avoid re-doing the mon deployment
[node3][DEBUG ] create the init path if it does not exist
[node3][INFO ] Running command: sudo systemctl enable ceph.target
[node3][INFO ] Running command: sudo systemctl enable ceph-mon@node3
[node3][WARNIN] Created symlink from /etc/systemd/system/ceph-mon.target.wants/ceph-mon@node3.service to /usr/lib/systemd/system/ceph-mon@.service.
[node3][INFO ] Running command: sudo systemctl start ceph-mon@node3
[node3][INFO ] Running command: sudo ceph --cluster=ceph --admin-daemon /var/run/ceph/ceph-mon.node3.asok mon_status
[node3][DEBUG ] ********************************************************************************
[node3][DEBUG ] status for monitor: mon.node3
[node3][DEBUG ] {
[node3][DEBUG ] "election_epoch": 1,
[node3][DEBUG ] "extra_probe_peers": [
[node3][DEBUG ] "192.168.92.101:6789/0",
[node3][DEBUG ] "192.168.92.102:6789/0"
[node3][DEBUG ] ],
[node3][DEBUG ] "monmap": {
[node3][DEBUG ] "created": "2016-06-24 14:43:39.800046",
[node3][DEBUG ] "epoch": 0,
[node3][DEBUG ] "fsid": "4f8f6c46-9f67-4475-9cb5-52cafecb3e4c",
[node3][DEBUG ] "modified": "2016-06-24 14:43:39.800046",
[node3][DEBUG ] "mons": [
[node3][DEBUG ] {
[node3][DEBUG ] "addr": "192.168.92.101:6789/0",
[node3][DEBUG ] "name": "node1",
[node3][DEBUG ] "rank": 0
[node3][DEBUG ] },
[node3][DEBUG ] {
[node3][DEBUG ] "addr": "192.168.92.102:6789/0",
[node3][DEBUG ] "name": "node2",
[node3][DEBUG ] "rank": 1
[node3][DEBUG ] },
[node3][DEBUG ] {
[node3][DEBUG ] "addr": "192.168.92.103:6789/0",
[node3][DEBUG ] "name": "node3",
[node3][DEBUG ] "rank": 2
[node3][DEBUG ] }
[node3][DEBUG ] ]
[node3][DEBUG ] },
[node3][DEBUG ] "name": "node3",
[node3][DEBUG ] "outside_quorum": [],
[node3][DEBUG ] "quorum": [],
[node3][DEBUG ] "rank": 2,
[node3][DEBUG ] "state": "electing",
[node3][DEBUG ] "sync_provider": []
[node3][DEBUG ] }
[node3][DEBUG ] ********************************************************************************
[node3][INFO ] monitor: mon.node3 is running
[node3][INFO ] Running command: sudo ceph --cluster=ceph --admin-daemon /var/run/ceph/ceph-mon.node3.asok mon_status
[ceph_deploy.mon][INFO ] processing monitor mon.node1
[node1][DEBUG ] connection detected need for sudo
[node1][DEBUG ] connected to host: node1
[node1][DEBUG ] detect platform information from remote host
[node1][DEBUG ] detect machine type
[node1][DEBUG ] find the location of an executable
[node1][INFO ] Running command: sudo ceph --cluster=ceph --admin-daemon /var/run/ceph/ceph-mon.node1.asok mon_status
[ceph_deploy.mon][WARNIN] mon.node1 monitor is not yet in quorum, tries left: 5
[ceph_deploy.mon][WARNIN] waiting 5 seconds before retrying
[node1][INFO ] Running command: sudo ceph --cluster=ceph --admin-daemon /var/run/ceph/ceph-mon.node1.asok mon_status
[ceph_deploy.mon][WARNIN] mon.node1 monitor is not yet in quorum, tries left: 4
[ceph_deploy.mon][WARNIN] waiting 10 seconds before retrying
[node1][INFO ] Running command: sudo ceph --cluster=ceph --admin-daemon /var/run/ceph/ceph-mon.node1.asok mon_status
[ceph_deploy.mon][INFO ] mon.node1 monitor has reached quorum!
[ceph_deploy.mon][INFO ] processing monitor mon.node2
[node2][DEBUG ] connection detected need for sudo
[node2][DEBUG ] connected to host: node2
[node2][DEBUG ] detect platform information from remote host
[node2][DEBUG ] detect machine type
[node2][DEBUG ] find the location of an executable
[node2][INFO ] Running command: sudo ceph --cluster=ceph --admin-daemon /var/run/ceph/ceph-mon.node2.asok mon_status
[ceph_deploy.mon][INFO ] mon.node2 monitor has reached quorum!
[ceph_deploy.mon][INFO ] processing monitor mon.node3
[node3][DEBUG ] connection detected need for sudo
[node3][DEBUG ] connected to host: node3
[node3][DEBUG ] detect platform information from remote host
[node3][DEBUG ] detect machine type
[node3][DEBUG ] find the location of an executable
[node3][INFO ] Running command: sudo ceph --cluster=ceph --admin-daemon /var/run/ceph/ceph-mon.node3.asok mon_status
[ceph_deploy.mon][INFO ] mon.node3 monitor has reached quorum!
[ceph_deploy.mon][INFO ] all initial monitors are running and have formed quorum
[ceph_deploy.mon][INFO ] Running gatherkeys...
[ceph_deploy.gatherkeys][INFO ] Storing keys in temp directory /tmp/tmp5_jcSr
[node1][DEBUG ] connection detected need for sudo
[node1][DEBUG ] connected to host: node1
[node1][DEBUG ] detect platform information from remote host
[node1][DEBUG ] detect machine type
[node1][DEBUG ] get remote short hostname
[node1][DEBUG ] fetch remote file
[node1][INFO ] Running command: sudo /usr/bin/ceph --connect-timeout=25 --cluster=ceph --admin-daemon=/var/run/ceph/ceph-mon.node1.asok mon_status
[node1][INFO ] Running command: sudo /usr/bin/ceph --connect-timeout=25 --cluster=ceph --name mon. --keyring=/var/lib/ceph/mon/ceph-node1/keyring auth get-or-create client.admin osd allow * mds allow * mon allow *
[node1][INFO ] Running command: sudo /usr/bin/ceph --connect-timeout=25 --cluster=ceph --name mon. --keyring=/var/lib/ceph/mon/ceph-node1/keyring auth get-or-create client.bootstrap-mds mon allow profile bootstrap-mds
[node1][INFO ] Running command: sudo /usr/bin/ceph --connect-timeout=25 --cluster=ceph --name mon. --keyring=/var/lib/ceph/mon/ceph-node1/keyring auth get-or-create client.bootstrap-osd mon allow profile bootstrap-osd
[node1][INFO ] Running command: sudo /usr/bin/ceph --connect-timeout=25 --cluster=ceph --name mon. --keyring=/var/lib/ceph/mon/ceph-node1/keyring auth get-or-create client.bootstrap-rgw mon allow profile bootstrap-rgw
[ceph_deploy.gatherkeys][INFO ] Storing ceph.client.admin.keyring
[ceph_deploy.gatherkeys][INFO ] Storing ceph.bootstrap-mds.keyring
[ceph_deploy.gatherkeys][INFO ] keyring 'ceph.mon.keyring' already exists
[ceph_deploy.gatherkeys][INFO ] Storing ceph.bootstrap-osd.keyring
[ceph_deploy.gatherkeys][INFO ] Storing ceph.bootstrap-rgw.keyring
[ceph_deploy.gatherkeys][INFO ] Destroy temp directory /tmp/tmp5_jcSr
[ceph@node0 cluster]$
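With quorum formed and the keys gathered, the working directory now holds ceph.conf plus the admin and bootstrap keyrings. These are usually pushed out with ceph-deploy admin before any OSDs are touched; a sketch of that step (not captured in the run above):

ceph-deploy admin node0 node1 node2 node3
sudo chmod +r /etc/ceph/ceph.client.admin.keyring   # let the ceph user read the admin key
ceph -s                                             # expect a health warning until OSDs exist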
[ceph@node0 cluster]$ ceph-deploy osd prepare node1:/dev/sdb
[ceph_deploy.conf][DEBUG ] found configuration file at: /home/ceph/.cephdeploy.conf
[ceph_deploy.cli][INFO ] Invoked (1.5.34): /usr/bin/ceph-deploy osd prepare node1:/dev/sdb
[ceph_deploy.cli][INFO ] ceph-deploy options:
[ceph_deploy.cli][INFO ] username : None
[ceph_deploy.cli][INFO ] disk : [('node1', '/dev/sdb', None)]
[ceph_deploy.cli][INFO ] dmcrypt : False
[ceph_deploy.cli][INFO ] verbose : False
[ceph_deploy.cli][INFO ] bluestore : None
[ceph_deploy.cli][INFO ] overwrite_conf : False
[ceph_deploy.cli][INFO ] subcommand : prepare
[ceph_deploy.cli][INFO ] dmcrypt_key_dir : /etc/ceph/dmcrypt-keys
[ceph_deploy.cli][INFO ] quiet : False
[ceph_deploy.cli][INFO ] cd_conf : <ceph_deploy.conf.cephdeploy.Conf instance at 0x10b4d88>
[ceph_deploy.cli][INFO ] cluster : ceph
[ceph_deploy.cli][INFO ] fs_type : xfs
[ceph_deploy.cli][INFO ] func : <function osd at 0x10a9398>
[ceph_deploy.cli][INFO ] ceph_conf : None
[ceph_deploy.cli][INFO ] default_release : False
[ceph_deploy.cli][INFO ] zap_disk : False
[ceph_deploy.osd][DEBUG ] Preparing cluster ceph disks node1:/dev/sdb:
[node1][DEBUG ] connection detected need for sudo
[node1][DEBUG ] connected to host: node1
[node1][DEBUG ] detect platform information from remote host
[node1][DEBUG ] detect machine type
[node1][DEBUG ] find the location of an executable
[ceph_deploy.osd][INFO ] Distro info: CentOS Linux 7.2.1511 Core
[ceph_deploy.osd][DEBUG ] Deploying osd to node1
[node1][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf
[ceph_deploy.osd][DEBUG ] Preparing host node1 disk /dev/sdb journal None activate False
[node1][DEBUG ] find the location of an executable
[node1][INFO ] Running command: sudo /usr/sbin/ceph-disk -v prepare --cluster ceph --fs-type xfs -- /dev/sdb
[node1][WARNIN] command: Running command: /usr/bin/ceph-osd --cluster=ceph --show-config-value=fsid
[node1][WARNIN] command: Running command: /usr/bin/ceph-osd --check-allows-journal -i 0 --cluster ceph
[node1][WARNIN] command: Running command: /usr/bin/ceph-osd --check-wants-journal -i 0 --cluster ceph
[node1][WARNIN] command: Running command: /usr/bin/ceph-osd --check-needs-journal -i 0 --cluster ceph
[node1][WARNIN] get_dm_uuid: get_dm_uuid /dev/sdb uuid path is /sys/dev/block/8:16/dm/uuid
[node1][WARNIN] set_type: Will colocate journal with data on /dev/sdb
[node1][WARNIN] command: Running command: /usr/bin/ceph-osd --cluster=ceph --show-config-value=osd_journal_size
[node1][WARNIN] get_dm_uuid: get_dm_uuid /dev/sdb uuid path is /sys/dev/block/8:16/dm/uuid
[node1][WARNIN] get_dm_uuid: get_dm_uuid /dev/sdb uuid path is /sys/dev/block/8:16/dm/uuid
[node1][WARNIN] get_dm_uuid: get_dm_uuid /dev/sdb uuid path is /sys/dev/block/8:16/dm/uuid
[node1][WARNIN] command: Running command: /usr/bin/ceph-conf --cluster=ceph --name=osd. --lookup osd_mkfs_options_xfs
[node1][WARNIN] command: Running command: /usr/bin/ceph-conf --cluster=ceph --name=osd. --lookup osd_fs_mkfs_options_xfs
[node1][WARNIN] command: Running command: /usr/bin/ceph-conf --cluster=ceph --name=osd. --lookup osd_mount_options_xfs
[node1][WARNIN] command: Running command: /usr/bin/ceph-conf --cluster=ceph --name=osd. --lookup osd_fs_mount_options_xfs
[node1][WARNIN] get_dm_uuid: get_dm_uuid /dev/sdb uuid path is /sys/dev/block/8:16/dm/uuid
[node1][WARNIN] get_dm_uuid: get_dm_uuid /dev/sdb uuid path is /sys/dev/block/8:16/dm/uuid
[node1][WARNIN] ptype_tobe_for_name: name = journal
[node1][WARNIN] get_dm_uuid: get_dm_uuid /dev/sdb uuid path is /sys/dev/block/8:16/dm/uuid
[node1][WARNIN] create_partition: Creating journal partition num 2 size 5120 on /dev/sdb
[node1][WARNIN] command_check_call: Running command: /sbin/sgdisk --new=2:0:+5120M --change-name=2:ceph journal --partition-guid=2:75a991e0-d8e1-414f-825d-1635edc8fbe5 --typecode=2:45b0969e-9b03-4f30-b4c6-b4b80ceff106 --mbrtogpt -- /dev/sdb
[node1][DEBUG ] The operation has completed successfully.
[node1][WARNIN] update_partition: Calling partprobe on created device /dev/sdb
[node1][WARNIN] command_check_call: Running command: /usr/bin/udevadm settle --timeout=600
[node1][WARNIN] command: Running command: /sbin/partprobe /dev/sdb
[node1][WARNIN] command_check_call: Running command: /usr/bin/udevadm settle --timeout=600
[node1][WARNIN] get_dm_uuid: get_dm_uuid /dev/sdb uuid path is /sys/dev/block/8:16/dm/uuid
[node1][WARNIN] get_dm_uuid: get_dm_uuid /dev/sdb uuid path is /sys/dev/block/8:16/dm/uuid
[node1][WARNIN] get_dm_uuid: get_dm_uuid /dev/sdb2 uuid path is /sys/dev/block/8:18/dm/uuid
[node1][WARNIN] prepare_device: Journal is GPT partition /dev/disk/by-partuuid/75a991e0-d8e1-414f-825d-1635edc8fbe5
[node1][WARNIN] prepare_device: Journal is GPT partition /dev/disk/by-partuuid/75a991e0-d8e1-414f-825d-1635edc8fbe5
[node1][WARNIN] get_dm_uuid: get_dm_uuid /dev/sdb uuid path is /sys/dev/block/8:16/dm/uuid
[node1][WARNIN] set_data_partition: Creating osd partition on /dev/sdb
[node1][WARNIN] get_dm_uuid: get_dm_uuid /dev/sdb uuid path is /sys/dev/block/8:16/dm/uuid
[node1][WARNIN] ptype_tobe_for_name: name = data
[node1][WARNIN] get_dm_uuid: get_dm_uuid /dev/sdb uuid path is /sys/dev/block/8:16/dm/uuid
[node1][WARNIN] create_partition: Creating data partition num 1 size 0 on /dev/sdb
[node1][WARNIN] command_check_call: Running command: /sbin/sgdisk --largest-new=1 --change-name=1:ceph data --partition-guid=1:25ae7735-d1ca-45d5-9d98-63567b424248 --typecode=1:89c57f98-2fe5-4dc0-89c1-f3ad0ceff2be --mbrtogpt -- /dev/sdb
[node1][DEBUG ] The operation has completed successfully.
[node1][WARNIN] update_partition: Calling partprobe on created device /dev/sdb
[node1][WARNIN] command_check_call: Running command: /usr/bin/udevadm settle --timeout=600
[node1][WARNIN] command: Running command: /sbin/partprobe /dev/sdb
[node1][WARNIN] command_check_call: Running command: /usr/bin/udevadm settle --timeout=600
[node1][WARNIN] get_dm_uuid: get_dm_uuid /dev/sdb uuid path is /sys/dev/block/8:16/dm/uuid
[node1][WARNIN] get_dm_uuid: get_dm_uuid /dev/sdb uuid path is /sys/dev/block/8:16/dm/uuid
[node1][WARNIN] get_dm_uuid: get_dm_uuid /dev/sdb1 uuid path is /sys/dev/block/8:17/dm/uuid
[node1][WARNIN] populate_data_path_device: Creating xfs fs on /dev/sdb1
[node1][WARNIN] command_check_call: Running command: /sbin/mkfs -t xfs -f -i size=2048 -- /dev/sdb1
[node1][DEBUG ] meta-data=/dev/sdb1 isize=2048 agcount=4, agsize=982975 blks
[node1][DEBUG ] = sectsz=512 attr=2, projid32bit=1
[node1][DEBUG ] = crc=0 finobt=0
[node1][DEBUG ] data = bsize=4096 blocks=3931899, imaxpct=25
[node1][DEBUG ] = sunit=0 swidth=0 blks
[node1][DEBUG ] naming =version 2 bsize=4096 ascii-ci=0 ftype=0
[node1][DEBUG ] log =internal log bsize=4096 blocks=2560, version=2
[node1][DEBUG ] = sectsz=512 sunit=0 blks, lazy-count=1
[node1][DEBUG ] realtime =none extsz=4096 blocks=0, rtextents=0
[node1][WARNIN] mount: Mounting /dev/sdb1 on /var/lib/ceph/tmp/mnt.6_6ywP with options noatime,inode64
[node1][WARNIN] command_check_call: Running command: /usr/bin/mount -t xfs -o noatime,inode64 -- /dev/sdb1 /var/lib/ceph/tmp/mnt.6_6ywP
[node1][WARNIN] command: Running command: /sbin/restorecon /var/lib/ceph/tmp/mnt.6_6ywP
[node1][WARNIN] populate_data_path: Preparing osd data dir /var/lib/ceph/tmp/mnt.6_6ywP
[node1][WARNIN] command: Running command: /sbin/restorecon -R /var/lib/ceph/tmp/mnt.6_6ywP/ceph_fsid.14578.tmp
[node1][WARNIN] command: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/tmp/mnt.6_6ywP/ceph_fsid.14578.tmp
[node1][WARNIN] command: Running command: /sbin/restorecon -R /var/lib/ceph/tmp/mnt.6_6ywP/fsid.14578.tmp
[node1][WARNIN] command: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/tmp/mnt.6_6ywP/fsid.14578.tmp
[node1][WARNIN] command: Running command: /sbin/restorecon -R /var/lib/ceph/tmp/mnt.6_6ywP/magic.14578.tmp
[node1][WARNIN] command: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/tmp/mnt.6_6ywP/magic.14578.tmp
[node1][WARNIN] command: Running command: /sbin/restorecon -R /var/lib/ceph/tmp/mnt.6_6ywP/journal_uuid.14578.tmp
[node1][WARNIN] command: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/tmp/mnt.6_6ywP/journal_uuid.14578.tmp
[node1][WARNIN] adjust_symlink: Creating symlink /var/lib/ceph/tmp/mnt.6_6ywP/journal -> /dev/disk/by-partuuid/75a991e0-d8e1-414f-825d-1635edc8fbe5
[node1][WARNIN] command: Running command: /sbin/restorecon -R /var/lib/ceph/tmp/mnt.6_6ywP
[node1][WARNIN] command: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/tmp/mnt.6_6ywP
[node1][WARNIN] unmount: Unmounting /var/lib/ceph/tmp/mnt.6_6ywP
[node1][WARNIN] command_check_call: Running command: /bin/umount -- /var/lib/ceph/tmp/mnt.6_6ywP
[node1][WARNIN] get_dm_uuid: get_dm_uuid /dev/sdb uuid path is /sys/dev/block/8:16/dm/uuid
[node1][WARNIN] command_check_call: Running command: /sbin/sgdisk --typecode=1:4fbd7e29-9d25-41b8-afd0-062c0ceff05d -- /dev/sdb
[node1][DEBUG ] The operation has completed successfully.
[node1][WARNIN] update_partition: Calling partprobe on prepared device /dev/sdb
[node1][WARNIN] command_check_call: Running command: /usr/bin/udevadm settle --timeout=600
[node1][WARNIN] command: Running command: /sbin/partprobe /dev/sdb
[node1][WARNIN] command_check_call: Running command: /usr/bin/udevadm settle --timeout=600
[node1][WARNIN] command_check_call: Running command: /usr/bin/udevadm trigger --action=add --sysname-match sdb1
.........................................................
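The rest of the prepare output is omitted. The usual follow-up is to prepare the disks on the other two nodes and activate all three OSDs, assuming the same /dev/sdb layout as node1:

ceph-deploy osd prepare node2:/dev/sdb node3:/dev/sdb
ceph-deploy osd activate node1:/dev/sdb1 node2:/dev/sdb1 node3:/dev/sdb1
ceph -s   # should reach HEALTH_OK once all three OSDs are up and in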