New Ceph nodes

Jeff Moe, 6 years ago
commit c13f257b6f (parent c6c5751e57), branch master

@@ -105,10 +105,17 @@ ceph osd crush rule create-replicated fast default host nvme
# Then do this to have the pool use the new rule:
ceph osd pool set nvmepool crush_rule fast
#ceph osd pool set nvmepool crush_rule fast
# nope
# Create a crush rule for the slow pool (spinning platter disks) from the command line:
ceph osd crush rule create-replicated slow default host hdd
# Then add a pool with the "slow" crush rule in the web GUI
# Then add the new pool under Datacenter --> Storage. Use RBD, and enable krbd.
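# Not in the original notes -- a quick sanity check I would add here, assuming
# the "nvmepool" name from above (the slow pool name is whatever was chosen in the GUI):
ceph osd crush rule ls
ceph osd pool get nvmepool crush_rule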
##############################
# To change a disk encryption password, check which disk has crypto (LUKS), then:
#cryptsetup -y luksAddKey /dev/sdb1
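# Not in the original notes: a hedged follow-up, assuming /dev/sdb1 is the LUKS
# device from the line above -- dump the key slots, then drop the old passphrase
# once the new one is confirmed working:
#cryptsetup luksDump /dev/sdb1
#cryptsetup luksRemoveKey /dev/sdb1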

@@ -212,14 +212,14 @@ cd /etc ; git add . ; git commit -a -m 'Install Ceph'
# On all nodes, enable the rbd kernel module:
echo rbd >> /etc/modules
modprobe rbd
cd /etc ; git commit -m 'Use rbd kernel module' /etc/modules
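# My addition, not in the original: confirm the module actually loaded on each node:
lsmod | grep rbd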
# Run this on just sf-001
# Doesn't need to be run again:
# pveceph init --network 10.99.99.0/24
cd /etc ; git add . ; git commit -m 'Ceph init' ceph/ pve/ceph.conf
# cd /etc ; git add . ; git commit -m 'Ceph init' ceph/ pve/ceph.conf
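# My addition, not in the original: to confirm the init wrote the expected
# network, check the generated config on sf-001:
cat /etc/pve/ceph.conf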
# With new nodes, I think the rest can be done via the web GUI XXX
# XXX It doesn't appear to set up Ceph symbolic links when adding
@@ -229,6 +229,7 @@ cd /etc/ceph/
ln -s /etc/pve/ceph.conf ceph.conf
# It also needs a key file; copy it onto the new nodes (sf-005 and newer):
scp -p sf-001:/etc/ceph/ceph.client.admin.keyring .
cd /etc ; git commit -a -m 'Add ceph config'
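# My addition, not in the original: with the symlink and keyring in place,
# the new node should be able to reach the cluster:
ceph -s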
# Reboot...
# on sf-002 sf-003 sf-004 sf-005 sf-006 sf-007
@@ -287,6 +288,9 @@ gdisk /dev/sdd
gdisk /dev/sde
gdisk /dev/sdf
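# Not in the original notes -- an assumed alternative if gdisk was only used to
# wipe the partition tables; sgdisk (same package) does it non-interactively:
#sgdisk --zap-all /dev/sdd
#sgdisk --zap-all /dev/sde
#sgdisk --zap-all /dev/sdf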
# sf-006, sf-007
# Added via web interface
# Add OSDs
# Do this via the web GUI, not the command line.
cd /etc ; git add . ; git commit -m 'Add Ceph OSDs via web gui'
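# My addition, not in the original: afterwards the new OSDs should show up
# under their hosts:
ceph osd tree
ceph osd df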
