Ceph version: 14.2.x
Installation
yum install ceph-osd
Error
ceph-volume lvm prepare --data /dev/sdc --bluestore
Running command: /bin/ceph-authtool --gen-print-key
Running command: /bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring -i - osd new da6e54a7-9a64-42a3-8382-cc5986a28b17
stderr: [errno 95] error connecting to the cluster
--> RuntimeError: Unable to create a new OSD id
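errno 95 here usually means ceph-volume could not authenticate to the monitors, typically because /var/lib/ceph/bootstrap-osd/ceph.keyring is missing or stale. A quick check (assuming the default cluster name ceph) is to confirm the keyring exists and can actually reach the monitors:
ls -l /var/lib/ceph/bootstrap-osd/ceph.keyring
ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring -s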
Fix: create a new ceph.keyring for the bootstrap-osd user. The key can be taken from the output of ceph auth get client.bootstrap-osd:
echo "[client.bootstrap-osd]" >> /var/lib/ceph/bootstrap-osd/ceph.keyring
echo " key = AQBlk8ZeFGKYIxAAcearGaypb5xKheYVO0Dxyg==">> /var/lib/ceph/bootstrap-osd/ceph.keyring
cat /var/lib/ceph/bootstrap-osd/ceph.keyring
[client.bootstrap-osd]
key = AQBlk8ZeFGKYIxAAcearGaypb5xKheYVO0Dxyg==
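If the node already has an admin keyring, the same file can be produced in one step instead of echoing lines by hand (a sketch, assuming client.admin works locally):
ceph auth get client.bootstrap-osd -o /var/lib/ceph/bootstrap-osd/ceph.keyring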
Create the OSD
# ceph-volume lvm prepare --bluestore --data /dev/sdc
Running command: /bin/ceph-authtool --gen-print-key
Running command: /bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring -i - osd new d8830d3e-55ea-47bc-b54a-aad3857df82d
Running command: /usr/sbin/vgcreate -s 1G --force --yes ceph-e0b5b8d9-0471-46b3-9cbf-f940153946a3 /dev/sdc
stdout: Physical volume "/dev/sdc" successfully created.
stdout: Volume group "ceph-e0b5b8d9-0471-46b3-9cbf-f940153946a3" successfully created
Running command: /usr/sbin/lvcreate --yes -l 100%FREE -n osd-block-d8830d3e-55ea-47bc-b54a-aad3857df82d ceph-e0b5b8d9-0471-46b3-9cbf-f940153946a3
stdout: Logical volume "osd-block-d8830d3e-55ea-47bc-b54a-aad3857df82d" created.
Running command: /bin/ceph-authtool --gen-print-key
Running command: /bin/mount -t tmpfs tmpfs /var/lib/ceph/osd/ceph-2
Running command: /usr/sbin/restorecon /var/lib/ceph/osd/ceph-2
Running command: /bin/chown -h ceph:ceph /dev/ceph-e0b5b8d9-0471-46b3-9cbf-f940153946a3/osd-block-d8830d3e-55ea-47bc-b54a-aad3857df82d
Running command: /bin/chown -R ceph:ceph /dev/dm-1
Running command: /bin/ln -s /dev/ceph-e0b5b8d9-0471-46b3-9cbf-f940153946a3/osd-block-d8830d3e-55ea-47bc-b54a-aad3857df82d /var/lib/ceph/osd/ceph-2/block
Running command: /bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring mon getmap -o /var/lib/ceph/osd/ceph-2/activate.monmap
stderr: got monmap epoch 1
Running command: /bin/ceph-authtool /var/lib/ceph/osd/ceph-2/keyring --create-keyring --name osd.2 --add-key AQCnP0JdKEtIEhAA5d0X8RrtF1dxxWrit8Cfjg==
stdout: creating /var/lib/ceph/osd/ceph-2/keyring
added entity osd.2 auth(key=AQCnP0JdKEtIEhAA5d0X8RrtF1dxxWrit8Cfjg==)
Running command: /bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-2/keyring
Running command: /bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-2/
Running command: /bin/ceph-osd --cluster ceph --osd-objectstore bluestore --mkfs -i 2 --monmap /var/lib/ceph/osd/ceph-2/activate.monmap --keyfile - --osd-data /var/lib/ceph/osd/ceph-2/ --osd-uuid d8830d3e-55ea-47bc-b54a-aad3857df82d --setuser ceph --setgroup ceph
--> ceph-volume lvm prepare successful for: /dev/sdc
Activate
ceph-volume lvm activate --bluestore 2 d8830d3e-55ea-47bc-b54a-aad3857df82d
(The output below was captured from a different run, for osd.7; the sequence of steps is identical.)
Running command: ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/vg_data0/lv_data0 --path /var/lib/ceph/osd/ceph-7
Running command: ln -snf /dev/vg_data0/lv_data0 /var/lib/ceph/osd/ceph-7/block
Running command: chown -h ceph:ceph /var/lib/ceph/osd/ceph-7/block
Running command: chown -R ceph:ceph /dev/dm-0
Running command: chown -R ceph:ceph /var/lib/ceph/osd/ceph-7
Running command: systemctl enable ceph-volume@lvm-7-6a8f5bd2-dc3b-47d0-9f03-fc2129e3318f
stderr: Created symlink from /etc/systemd/system/multi-user.target.wants/ceph-volume@lvm-7-6a8f5bd2-dc3b-47d0-9f03-fc2129e3318f.service to /usr/lib/systemd/system/ceph-volume@.service.
Running command: systemctl enable --runtime ceph-osd@7
stderr: Created symlink from /run/systemd/system/ceph-osd.target.wants/ceph-osd@7.service to /usr/lib/systemd/system/ceph-osd@.service.
Running command: systemctl start ceph-osd@7
--> ceph-volume lvm activate successful for osd ID: 7
# ceph osd tree
ID CLASS WEIGHT TYPE NAME STATUS REWEIGHT PRI-AFF
-1 0.03018 root default
-3 0.03018 host localhost
1 hdd 0.01559 osd.1 up 1.00000 1.00000
2 hdd 0.01459 osd.2 up 1.00000 1.00000
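Besides checking the tree, it is worth confirming that the daemon's systemd unit is actually running and the cluster reports the OSD as up:
systemctl is-active ceph-osd@2
ceph -s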
Create in one step with ceph-volume lvm create
ceph-volume lvm create --data /dev/sdc --bluestore
Running command: /bin/ceph-authtool --gen-print-key
Running command: /bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring -i - osd new c257b41e-2cb6-43e1-8932-6c2a3a4026ea
Running command: /usr/sbin/vgcreate -s 1G --force --yes ceph-4f1b231b-a24b-4197-90cf-d8ef6ec189ac /dev/sdc
stdout: Physical volume "/dev/sdc" successfully created.
stdout: Volume group "ceph-4f1b231b-a24b-4197-90cf-d8ef6ec189ac" successfully created
Running command: /usr/sbin/lvcreate --yes -l 100%FREE -n osd-block-c257b41e-2cb6-43e1-8932-6c2a3a4026ea ceph-4f1b231b-a24b-4197-90cf-d8ef6ec189ac
stdout: Logical volume "osd-block-c257b41e-2cb6-43e1-8932-6c2a3a4026ea" created.
Running command: /bin/ceph-authtool --gen-print-key
Running command: /bin/mount -t tmpfs tmpfs /var/lib/ceph/osd/ceph-1
Running command: /usr/sbin/restorecon /var/lib/ceph/osd/ceph-1
Running command: /bin/chown -h ceph:ceph /dev/ceph-4f1b231b-a24b-4197-90cf-d8ef6ec189ac/osd-block-c257b41e-2cb6-43e1-8932-6c2a3a4026ea
Running command: /bin/chown -R ceph:ceph /dev/dm-1
Running command: /bin/ln -s /dev/ceph-4f1b231b-a24b-4197-90cf-d8ef6ec189ac/osd-block-c257b41e-2cb6-43e1-8932-6c2a3a4026ea /var/lib/ceph/osd/ceph-1/block
Running command: /bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring mon getmap -o /var/lib/ceph/osd/ceph-1/activate.monmap
stderr: got monmap epoch 1
Running command: /bin/ceph-authtool /var/lib/ceph/osd/ceph-1/keyring --create-keyring --name osd.1 --add-key AQBNp0Ndma5OChAAhEmdNOPTWnE1D/0lqqJ/Tw==
stdout: creating /var/lib/ceph/osd/ceph-1/keyring
stdout: added entity osd.1 auth(key=AQBNp0Ndma5OChAAhEmdNOPTWnE1D/0lqqJ/Tw==)
Running command: /bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-1/keyring
Running command: /bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-1/
Running command: /bin/ceph-osd --cluster ceph --osd-objectstore bluestore --mkfs -i 1 --monmap /var/lib/ceph/osd/ceph-1/activate.monmap --keyfile - --osd-data /var/lib/ceph/osd/ceph-1/ --osd-uuid c257b41e-2cb6-43e1-8932-6c2a3a4026ea --setuser ceph --setgroup ceph
--> ceph-volume lvm prepare successful for: /dev/sdc
Running command: /bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-1
Running command: /bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-4f1b231b-a24b-4197-90cf-d8ef6ec189ac/osd-block-c257b41e-2cb6-43e1-8932-6c2a3a4026ea --path /var/lib/ceph/osd/ceph-1 --no-mon-config
Running command: /bin/ln -snf /dev/ceph-4f1b231b-a24b-4197-90cf-d8ef6ec189ac/osd-block-c257b41e-2cb6-43e1-8932-6c2a3a4026ea /var/lib/ceph/osd/ceph-1/block
Running command: /bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-1/block
Running command: /bin/chown -R ceph:ceph /dev/dm-1
Running command: /bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-1
Running command: /bin/systemctl enable ceph-volume@lvm-1-c257b41e-2cb6-43e1-8932-6c2a3a4026ea
stderr: Created symlink from /etc/systemd/system/multi-user.target.wants/ceph-volume@lvm-1-c257b41e-2cb6-43e1-8932-6c2a3a4026ea.service to /usr/lib/systemd/system/ceph-volume@.service.
Running command: /bin/systemctl enable --runtime ceph-osd@1
stderr: Created symlink from /run/systemd/system/ceph-osd.target.wants/ceph-osd@1.service to /usr/lib/systemd/system/ceph-osd@.service.
Running command: /bin/systemctl start ceph-osd@1
--> ceph-volume lvm activate successful for osd ID: 1
--> ceph-volume lvm create successful for: /dev/sdc
ceph-volume lvm activate
Running this command requires the OSD id and the OSD uuid (fsid).
- View the OSD uuid:
ceph osd dump
- Activate the OSD:
ceph-volume lvm activate --bluestore 0 0263644D-0BF1-4D6D-BC34-28BD98AE3BC8
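If only the id is known, the uuid can be pulled out of the dump; on 14.2.x it is the last field of each osd line (a sketch for osd.0):
ceph osd dump | awk '/^osd.0 /{print $NF}'
The same value is also shown as the osd fsid in the ceph-volume lvm list output.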
You can also activate everything at once; OSDs that are already running are simply skipped:
ceph-volume lvm activate --all
--> OSD ID 9 FSID 9078fbcc-1faf-49a8-ba5a-a51a7f81f91a process is active. Skipping activation
ceph-volume lvm create
This command combines prepare and activate: as soon as it finishes, the OSD joins the cluster and data starts rebalancing onto it. Running the two commands separately lets you delay activation and avoid that immediate rebalance. If the one-step create is preferred anyway, cluster flags can hold back the data movement, as sketched below.
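A sketch of that flag-based approach (noin and norebalance are standard cluster flags; clear them once you are ready for data to move):
ceph osd set noin
ceph osd set norebalance
ceph-volume lvm create --data /dev/sdc --bluestore
ceph osd unset noin
ceph osd unset norebalance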
ceph-volume lvm list
Lists the LVM-based OSDs on the node.
- Output in JSON format with --format=json:
ceph-volume lvm list --format=json
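The JSON form is handy for scripting. For example, printing each OSD id with its LV path (a sketch assuming jq is installed; ceph-volume keys the JSON by OSD id, and each entry carries an lv_path field):
ceph-volume lvm list --format=json | jq -r 'to_entries[] | "\(.key) \(.value[0].lv_path)"'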
ceph-volume lvm zap
Wipes an OSD that was created with LVM.
- A logical volume:
ceph-volume lvm zap {vg name/lv name}
- A block device (or partition):
ceph-volume lvm zap /dev/sdc1
To rebuild the OSD, also destroy the underlying VG/LV:
ceph-volume lvm zap /dev/sdc --destroy
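When rebuilding, the old OSD usually has to be removed from the cluster map first; a typical sequence on 14.2.x (osd id 2 and /dev/sdc are examples):
ceph osd out 2
systemctl stop ceph-osd@2
ceph osd purge 2 --yes-i-really-mean-it
ceph-volume lvm zap /dev/sdc --destroy
ceph-volume lvm create --data /dev/sdc --bluestore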
If a logical volume refuses to be removed:
dmsetup remove {lv name}
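If the mapping name is not obvious, list the active device-mapper targets first; the name is the VG and LV names joined, with dashes inside each name doubled:
dmsetup ls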