Add new sf-005 metal Proxmox node

master
Jeff Moe 6 years ago
parent de46783cd5
commit 1b6aa7415a

@ -1,6 +1,21 @@
##############################################################################
Adding a new metal server node for Proxmox.
Allocate IP on firewall and set up port forwarding for ssh.
Run bootstrap file such as forksand-sf-005-bootstrap from
../sharkfork-bootstrap/forksand-sf-005-bootstrap
##############################################################################
Log into web interface:
Set up tunnel:
ssh -N -C -L 18025:localhost:8006 sf-005
Then in web browser go to:
https://localhost:18025/
Log in.
Set up Proxmox subscription key.
See:
../sharkfork-bootstrap/forksand-sf-proxmox
##############################################################################

@ -12,12 +12,14 @@ exit 0
# ssh -N -C -L 18022:localhost:8006 sf-002
# ssh -N -C -L 18023:localhost:8006 sf-003
# ssh -N -C -L 18024:localhost:8006 sf-004
# ssh -N -C -L 18025:localhost:8006 sf-005
# Only logging into one at a time is possible. Session management :| XXX
# firefox https://localhost:18021
# firefox https://localhost:18022
# firefox https://localhost:18023
# firefox https://localhost:18024
# firefox https://localhost:18025
#
# Login as root user via PAM
# Set up Enterprise Key, if used
@ -26,11 +28,10 @@ exit 0
# Snapshot /etc in git so the pre-network-change state is recoverable.
cd /etc ; git add . ; git commit -a -m 'Initial Proxmox configuration'
#
#
# XXX Set up vmbr0 via web interface.
#
# Network
# Bridges ? XXX
# Set up static IPs on each interface in Proxmox web gui
# See this screenshot for an example from sf-004:
# forksand-it-manual/source/resources/images/proxmox-network.png
#
# Commit the resulting interface configuration to the /etc git repo.
cd /etc ; git add . ; git commit -a -m 'Configure ethernet addresses'
@ -41,6 +42,7 @@ cd /etc ; git add . ; git commit -a -m 'Configure ethernet addresses'
# Configure Corosync
# Set up hosts
# XXX MAKE SURE NEW NODES GET ADDED TO EXISTING SERVER /etc/hosts
# XXX Update existing hosts with new nodes!
cat > /etc/hosts <<EOF
127.0.0.1 localhost
@ -53,81 +55,108 @@ ff02::2 ip6-allrouters
10.22.22.2 sf-002.forksand.com sf-002
10.22.22.3 sf-003.forksand.com sf-003
10.22.22.4 sf-004.forksand.com sf-004
10.22.22.5 sf-005.forksand.com sf-005
10.22.22.6 sf-006.forksand.com sf-006
10.22.22.7 sf-007.forksand.com sf-007
10.3.1.1 sf-001-coro1
10.3.1.2 sf-002-coro1
10.3.1.3 sf-003-coro1
10.3.1.4 sf-004-coro1
10.3.1.5 sf-005-coro1
10.3.1.6 sf-006-coro1
10.3.1.7 sf-007-coro1
10.3.2.1 sf-001-coro2
10.3.2.2 sf-002-coro2
10.3.2.3 sf-003-coro2
10.3.2.4 sf-004-coro2
10.3.2.5 sf-005-coro2
10.3.2.6 sf-006-coro2
10.3.2.7 sf-007-coro2
10.68.68.1 sf-001-migrate
10.68.68.2 sf-002-migrate
10.68.68.3 sf-003-migrate
10.68.68.4 sf-004-migrate
10.68.68.5 sf-005-migrate
10.68.68.6 sf-006-migrate
10.68.68.7 sf-007-migrate
10.99.99.1 sf-001-file
10.99.99.2 sf-002-file
10.99.99.3 sf-003-file
10.99.99.4 sf-004-file
10.99.99.5 sf-005-file
10.99.99.6 sf-006-file
10.99.99.7 sf-007-file
EOF
cd /etc ; git commit -m 'Add hosts' hosts
# Test cluster ping
# Verify every cluster network (coro1, coro2, migrate, DMZ) can reach
# all nodes by hostname before attempting to join the cluster.  A
# failure here means /etc/hosts or the interface config is wrong.
# (Removed stale pre-sf-005 duplicate `for` lines left over from the
# diff; two consecutive `for` headers are a bash syntax error.)
echo "Testing coro1 pings"
#for i in sf-001-coro1 sf-002-coro1 sf-003-coro1 sf-004-coro1 sf-005-coro1 sf-006-coro1 sf-007-coro1
for i in sf-001-coro1 sf-002-coro1 sf-003-coro1 sf-004-coro1 sf-005-coro1
do ping -q -c1 "$i"
done
echo "Testing coro2 pings"
#for i in sf-001-coro2 sf-002-coro2 sf-003-coro2 sf-004-coro2 sf-005-coro2 sf-006-coro2 sf-007-coro2
for i in sf-001-coro2 sf-002-coro2 sf-003-coro2 sf-004-coro2 sf-005-coro2
do ping -q -c1 "$i"
done
echo "Testing migrate pings"
#for i in sf-001-migrate sf-002-migrate sf-003-migrate sf-004-migrate sf-005-migrate sf-006-migrate sf-007-migrate
for i in sf-001-migrate sf-002-migrate sf-003-migrate sf-004-migrate sf-005-migrate
do ping -q -c1 "$i"
done
echo "Testing DMZ pings"
#for i in sf-001 sf-002 sf-003 sf-004 sf-005 sf-006 sf-007
for i in sf-001 sf-002 sf-003 sf-004 sf-005
do ping -q -c1 "$i"
done
# XXX set up ssh keys
# Add them to:
# (stale uncommented duplicate of the next line removed — the commit
# intended this to be a comment, not a live command)
#cat /root/.ssh/id_rsa.pub
# /etc/pve/priv/authorized_keys
# XXX Note! Just write to one host (e.g. sf-001), not all simultaneously or this file can break (no locking) and will lock out nodes!
# XXX Note! this may not be needed for adding nodes. It can be done via web interface "Cluster Join"
cd /etc ; git commit -m 'Set up ssh keys' /etc/pve/priv/authorized_keys
# Test ssh
# Verify passwordless root ssh works to every node on each cluster
# network (required by `pvecm add --use_ssh`).
# (Removed stale pre-sf-005 duplicate `for` lines left over from the
# diff; two consecutive `for` headers are a bash syntax error.)
echo "Testing coro1 ssh"
#for i in sf-001-coro1 sf-002-coro1 sf-003-coro1 sf-004-coro1 sf-005-coro1 sf-006-coro1 sf-007-coro1
for i in sf-001-coro1 sf-002-coro1 sf-003-coro1 sf-004-coro1 sf-005-coro1
do ssh "$i" hostname
done
echo "Testing coro2 ssh"
#for i in sf-001-coro2 sf-002-coro2 sf-003-coro2 sf-004-coro2 sf-005-coro2 sf-006-coro2 sf-007-coro2
for i in sf-001-coro2 sf-002-coro2 sf-003-coro2 sf-004-coro2 sf-005-coro2
do ssh "$i" hostname
done
echo "Testing migrate ssh"
#for i in sf-001-migrate sf-002-migrate sf-003-migrate sf-004-migrate sf-005-migrate sf-006-migrate sf-007-migrate
for i in sf-001-migrate sf-002-migrate sf-003-migrate sf-004-migrate sf-005-migrate
do ssh "$i" hostname
done
# ssh via IP
# Same checks as above but by raw IP, so a broken /etc/hosts entry
# cannot mask an ssh/host-key problem.
# (Removed stale pre-sf-005 duplicate `for` lines left over from the
# diff; two consecutive `for` headers are a bash syntax error.)
echo "Testing coro1 ssh by IP"
#for i in 10.3.1.1 10.3.1.2 10.3.1.3 10.3.1.4 10.3.1.5 10.3.1.6 10.3.1.7
for i in 10.3.1.1 10.3.1.2 10.3.1.3 10.3.1.4 10.3.1.5
do ssh "$i" hostname
done
echo "Testing coro2 ssh by IP"
#for i in 10.3.2.1 10.3.2.2 10.3.2.3 10.3.2.4 10.3.2.5 10.3.2.6 10.3.2.7
for i in 10.3.2.1 10.3.2.2 10.3.2.3 10.3.2.4 10.3.2.5
do ssh "$i" hostname
done
echo "Testing migrate ssh by IP"
#for i in 10.68.68.1 10.68.68.2 10.68.68.3 10.68.68.4 10.68.68.5 10.68.68.6 10.68.68.7
for i in 10.68.68.1 10.68.68.2 10.68.68.3 10.68.68.4 10.68.68.5
do ssh "$i" hostname
done
# Add new metal node to Proxmox cluster.
# Run this on JUST ONE NODE, sf-001, to get the cluster started.
# Doesn't need to be run again:
# (stale live duplicate of this command removed — re-running
# `pvecm create` against an existing cluster is destructive)
# pvecm create sharkfork -bindnet0_addr 10.3.1.1 -ring0_addr 10.3.1.1 -bindnet1_addr 10.3.2.1 -ring1_addr 10.3.2.1
# Run on sf-002 — join via sf-001's ring0 address, with this node's
# own corosync ring addresses.
pvecm add 10.3.1.1 --ring0_addr 10.3.1.2 --ring1_addr 10.3.2.2 --use_ssh
@ -136,6 +165,31 @@ pvecm add 10.3.1.1 --ring0_addr 10.3.1.3 --ring1_addr 10.3.2.3 --use_ssh
# Run on sf-004
pvecm add 10.3.1.1 --ring0_addr 10.3.1.4 --ring1_addr 10.3.2.4 --use_ssh
# This can be done via web interface
# Each node joins via sf-001's ring0 address and supplies its own
# corosync ring0/ring1 addresses (last octet = host number).
# Run on sf-005
pvecm add 10.3.1.1 --ring0_addr 10.3.1.5 --ring1_addr 10.3.2.5 --use_ssh
# Run on sf-006
pvecm add 10.3.1.1 --ring0_addr 10.3.1.6 --ring1_addr 10.3.2.6 --use_ssh
# Run on sf-007
pvecm add 10.3.1.1 --ring0_addr 10.3.1.7 --ring1_addr 10.3.2.7 --use_ssh
# Alternative: join the cluster via the web GUI instead of pvecm.
# Go to an existing node, such as sf-002.
# Click "Join Information"
# Copy that.
# Log into new node Proxmox GUI, such as sf-005.
# Go to "Cluster Join" and paste the info from sf-002.
# XXX Don't take defaults for Corosync!
# For Corosync use the 10.3.1.0 and 10.3.2.0 networks, with the
# last number the host's number. For example sf-005:
# Corosync 1: 10.3.1.5
# Corosync 2: 10.3.2.5
# For sf-007
# Corosync 1: 10.3.1.7
# Corosync 2: 10.3.2.7
#
# Then enter root password for existing node, hit ok, and it works!
# Then add to Ceph.
# Check status — new node should appear in the member list.
pvecm status
pvecm nodes
@ -150,26 +204,36 @@ cd /etc ; git add . ; git commit -a -m 'Setup Proxmox cluster'
# XXX Ceph
# Verify the Ceph/file network (10.99.99.0/24) before installing.
# (Removed the stale pre-sf-005 duplicate `for` line left over from
# the diff; two consecutive `for` headers are a bash syntax error.)
echo "Testing file pings"
for i in sf-001-file sf-002-file sf-003-file sf-004-file sf-005-file sf-006-file sf-007-file
do ping -q -c1 "$i"
done
# Install Ceph
# Run this on all nodes. Needed on new nodes.
pveceph install
cd /etc ; git add . ; git commit -a -m 'Install Ceph'
# All nodes, enable rbd kernel module:
echo rbd >> /etc/modules
cd /etc ; git commit -m 'Use rbd kernel module' /etc/modules
# Reboot...
# With new nodes, I think rest can be done via web gui XXX
# Run this on just sf-001
# Doesn't need to be run again:
# (stale live duplicate removed — re-running init on an existing
# Ceph cluster must be avoided)
# pveceph init --network 10.99.99.0/24
cd /etc ; git add . ; git commit -m 'Ceph init' ceph/ pve/ceph.conf
# on sf-002 sf-003 sf-004 sf-005 sf-006 sf-007
cd /etc ; git add . ; git commit -m 'Ceph init' pve/ceph.conf
# DONT RUN SIMULTANEOUSLY XXX
# Run on sf-001
# Does not need to be run again:
# (stale live duplicate removed for the same reason)
# pveceph createmon
cd /etc ; git commit -m 'Ceph createmon'
# Do the other ones via web gui
#
#
@ -210,11 +274,20 @@ gdisk /dev/nvme3n1
gdisk /dev/nvme4n1
gdisk /dev/nvme5n1
# sf-005
# Note /dev/sda is /boot and encrypted /
# gdisk is interactive — presumably each data disk gets a fresh GPT
# for Ceph OSD use; confirm the exact partitioning steps before
# running, as this destroys existing data on the disk.
gdisk /dev/sdb
gdisk /dev/sdc
gdisk /dev/sdd
gdisk /dev/sde
gdisk /dev/sdf
# Add OSDs
# Do this via web, not command line.
cd /etc ; git add . ; git commit -m 'Add Ceph OSDs via web gui'
# Via Web GUI, add pools:
# XXX Pools have changed since this doc.
# Name: sharkpool
# Size: 3 (default)
# Min Size: 2 (default)
@ -237,7 +310,7 @@ cd /etc ; git add . ; git commit -m 'Add Ceph pools via web gui'
#
# XXX
# Reinstall if things go bad:
# (stale live duplicate of this command removed — it purges Ceph and
# deletes its state, and must only ever be run deliberately)
# /etc/init.d/ceph stop ; apt -y purge ceph ceph-base ceph-mgr ceph-mon ceph-osd ; apt -y autoremove --purge ; mv /var/lib/ceph/ /var/lib/foobar-ceph ; rm -rf /etc/pve/ceph* /etc/ceph/ceph.conf /etc/pve/priv/ceph* /etc/systemd/system/ceph-mon.target.wants /etc/systemd/system/ceph-mon.target.wants/ceph-mon@sf-001.service /etc/ceph/ceph.client.admin.keyring
apt -y --reinstall install ceph-common
# rm stuff that isn't in ceph-common

Binary file not shown.

After

Width:  |  Height:  |  Size: 189 KiB

Loading…
Cancel
Save