10.68.68.2 sf-002-migrate
10.68.68.3 sf-003-migrate
10.68.68.4 sf-004-migrate
10.99.99.1 sf-001-file
10.99.99.2 sf-002-file
10.99.99.3 sf-003-file
10.99.99.4 sf-004-file
EOF

cd /etc ; git commit -m 'Add hosts' hosts
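
# (Sketch, not in the original notes: if the other nodes need the same
# /etc/hosts, one way is to push it out from sf-001; assumes root ssh
# between nodes already works.)
#for n in sf-002 sf-003 sf-004
#do scp /etc/hosts root@$n:/etc/hosts
#done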

pvecm status
pvecm nodes
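
# (Sketch, optional: per-node corosync ring/link status, handy with the two
# rings configured above.)
#corosync-cfgtool -s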

# Reboot nodes?

cd /etc ; git add . ; git commit -a -m 'Setup Proxmox cluster'


# After Cluster is Configured
# ===========================

# Now you only have to log into one Proxmox node to control them all.


# XXX Ceph
echo "Testing file pings"
for i in sf-001-file sf-002-file sf-003-file sf-004-file
do ping -q -c1 $i
done
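
# (Optional, same pattern over ssh; assumes root keys are distributed.)
#for i in sf-001-file sf-002-file sf-003-file sf-004-file
#do ssh $i hostname
#done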


# Install Ceph
# Run this on all nodes:
pveceph install
cd /etc ; git add . ; git commit -a -m 'Install Ceph'
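
# (Sketch: the install could also be driven from sf-001 over ssh instead of
# logging into each node, assuming root ssh between nodes works.)
#for n in sf-002 sf-003 sf-004
#do ssh $n pveceph install
#done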

# Run this on just sf-001:
pveceph init --network 10.99.99.0/24
cd /etc ; git add . ; git commit -m 'Ceph init' ceph/ pve/ceph.conf

# On sf-002 sf-003 sf-004:
cd /etc ; git add . ; git commit -m 'Ceph init' pve/ceph.conf

# DON'T RUN SIMULTANEOUSLY XXX
# Run on sf-001:
pveceph createmon
cd /etc ; git add . ; git commit -m 'Ceph createmon'

# Do the other mons via the web GUI
cd /etc ; git add . ; git commit -m 'Add Ceph mons via web gui'
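
# (Sketch: quick check that all the mons joined quorum; standard ceph
# commands, runnable on any node with /etc/ceph/ceph.conf in place.)
#ceph mon stat
#ceph -s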


# Looks like this auto-creates. Not needed:
#pveceph createmgr
#cd /etc ; git add . ; git commit -m 'Ceph createmgr'


# Create a GPT partition table on each Ceph drive, with the correct device name:
# gdisk /dev/sd[X]
# Then 'w' to write the new GPT table

# sf-001
gdisk /dev/sdb
gdisk /dev/sdc
gdisk /dev/sdd

# sf-002
gdisk /dev/sdb
gdisk /dev/sdc
gdisk /dev/sdd

# sf-003
# Create blank GPT?
gdisk /dev/nvme0n1
gdisk /dev/nvme1n1
gdisk /dev/nvme2n1
gdisk /dev/nvme3n1

# sf-004
gdisk /dev/nvme0n1
gdisk /dev/nvme1n1
gdisk /dev/nvme2n1
gdisk /dev/nvme3n1
gdisk /dev/nvme4n1
gdisk /dev/nvme5n1
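
# (Sketch, not in the original notes: sgdisk from the same gdisk package can
# write a fresh blank GPT non-interactively, avoiding the prompts; e.g. on
# sf-004:)
#for d in /dev/nvme0n1 /dev/nvme1n1 /dev/nvme2n1 /dev/nvme3n1 /dev/nvme4n1 /dev/nvme5n1
#do sgdisk -o $d
#done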


# Add OSDs
# Do this via the web GUI, not the command line.
cd /etc ; git add . ; git commit -m 'Add Ceph OSDs via web gui'
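
# (Sketch: the CLI equivalent would be pveceph createosd, run per drive on
# the node that owns it, e.g.:)
#pveceph createosd /dev/sdb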


# Via Web GUI, add pools:
# Name: sharkpool
# Size: 3 (default)
# Min Size: 2 (default)
# Crush Rule: replicated_rule (default)
# pg_num: 64 (default) XXX try 256
# Add storages: unchecked (default)
cd /etc ; git add . ; git commit -m 'Add Ceph pools via web gui'
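
# (Worked estimate, assuming the 16 OSD drives partitioned above: the usual
# rule of thumb is (OSDs * 100) / size, rounded to a power of two, so
# 16 * 100 / 3 ~= 533 -> 512, or 256 to stay conservative -- hence the
# "try 256" note. CLI sketch of the same pool:)
#ceph osd pool create sharkpool 256 256 replicated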


## Add keyring so it can be a PVE Ceph client
# Not needed ???
## Just on sf-001
#mkdir /etc/pve/priv/ceph
#cp /etc/pve/priv/ceph.client.admin.keyring /etc/pve/priv/ceph/my-ceph-storage.keyring

# In the web GUI, go to:
# Datacenter -> Storage
# and add an RBD (PVE) storage with container/images content.
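
# (Sketch: CLI equivalent via pvesm; the storage id "sharkstorage" is a
# placeholder, the pool name matches the one created above.)
#pvesm add rbd sharkstorage --pool sharkpool --content images,rootdir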


# XXX
# Reinstall if things go bad:
/etc/init.d/ceph stop
apt -y purge ceph ceph-base ceph-mgr ceph-mon ceph-osd
apt -y autoremove --purge
mv /var/lib/ceph/ /var/lib/foobar-ceph
rm -rf /etc/pve/ceph* /etc/ceph/ceph.conf /etc/pve/priv/ceph* \
    /etc/systemd/system/ceph-mon.target.wants \
    /etc/ceph/ceph.client.admin.keyring
apt -y --reinstall install ceph-common
# rm stuff that isn't in ceph-common:
find /etc -name "*ceph*"
pveceph install
# XXX


# MISC
# Datacenter --> Permissions --> Users
# Add user with Realm "Proxmox VE authentication server".
# Give user root permissions: Datacenter --> Permissions --> Add --> User Permission.
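
# (Sketch: the pveum CLI equivalent; "someuser" is a placeholder.)
#pveum useradd someuser@pve
#pveum passwd someuser@pve
#pveum aclmod / -user someuser@pve -role Administrator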

# Add servers:
# 208.67.222.222 208.67.220.220 37.235.1.174
#
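# (Assumption: these look like public DNS resolvers (OpenDNS, FreeDNS); per
# node they can be set in the GUI under Node -> System -> DNS, or directly:)
#for ns in 208.67.222.222 208.67.220.220 37.235.1.174
#do echo "nameserver $ns" >> /etc/resolv.conf
#done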