#!/bin/bash
# forksand-sf-proxmox
# GPLv3+
#
# This script does some initial setup and config.
# Sets up Proxmox.

# XXX DRAFT XXX

# This file is a runbook of notes, not a script to execute end to end.
# Bail out immediately so an accidental run does nothing.
exit 0
# Run this on workstation to reach each node's web UI over an ssh tunnel:
# ssh -N -C -L 18021:localhost:8006 sf-001
# ssh -N -C -L 18022:localhost:8006 sf-002
# ssh -N -C -L 18023:localhost:8006 sf-003
# ssh -N -C -L 18024:localhost:8006 sf-004
# ssh -N -C -L 18025:localhost:8006 sf-005

# Only logging into one at a time is possible. Session management :| XXX
# firefox https://localhost:18021
# firefox https://localhost:18022
# firefox https://localhost:18023
# firefox https://localhost:18024
# firefox https://localhost:18025
#
# Login as root user via PAM
# Set up Enterprise Key, if used
#
# Snapshot the pristine /etc in git before changing anything.
# && (not ;) so git never runs in the wrong directory if cd fails.
cd /etc && git add . && git commit -a -m 'Initial Proxmox configuration'

# Network
# Set up static IPs on each interface in Proxmox web gui
# See this screenshot for an example from sf-004:
# forksand-it-manual/source/resources/images/proxmox-network.png

cd /etc && git add . && git commit -a -m 'Configure ethernet addresses'

# Reboot!
# Configure Corosync
# Set up hosts
# XXX MAKE SURE NEW NODES GET ADDED TO EXISTING SERVER /etc/hosts
# XXX Update existing hosts with new nodes!
# Quoted 'EOF' delimiter: nothing in the host table is shell-expanded.
cat > /etc/hosts <<'EOF'
127.0.0.1 localhost

# The following lines are desirable for IPv6 capable hosts
::1 localhost ip6-localhost ip6-loopback
ff02::1 ip6-allnodes
ff02::2 ip6-allrouters

10.22.22.1 sf-001.forksand.com sf-001
10.22.22.2 sf-002.forksand.com sf-002
10.22.22.3 sf-003.forksand.com sf-003
10.22.22.4 sf-004.forksand.com sf-004
10.22.22.5 sf-005.forksand.com sf-005
10.22.22.6 sf-006.forksand.com sf-006
10.22.22.7 sf-007.forksand.com sf-007
10.3.1.1 sf-001-coro1
10.3.1.2 sf-002-coro1
10.3.1.3 sf-003-coro1
10.3.1.4 sf-004-coro1
10.3.1.5 sf-005-coro1
10.3.1.6 sf-006-coro1
10.3.1.7 sf-007-coro1
10.3.2.1 sf-001-coro2
10.3.2.2 sf-002-coro2
10.3.2.3 sf-003-coro2
10.3.2.4 sf-004-coro2
10.3.2.5 sf-005-coro2
10.3.2.6 sf-006-coro2
10.3.2.7 sf-007-coro2
10.68.68.1 sf-001-migrate
10.68.68.2 sf-002-migrate
10.68.68.3 sf-003-migrate
10.68.68.4 sf-004-migrate
10.68.68.5 sf-005-migrate
10.68.68.6 sf-006-migrate
10.68.68.7 sf-007-migrate
10.99.99.1 sf-001-file
10.99.99.2 sf-002-file
10.99.99.3 sf-003-file
10.99.99.4 sf-004-file
10.99.99.5 sf-005-file
10.99.99.6 sf-006-file
10.99.99.7 sf-007-file
EOF

cd /etc && git commit -m 'Add hosts' hosts
# Test cluster ping
# One ping per host; a failure here means /etc/hosts or the network is wrong.
echo "Testing coro1 pings"
for i in sf-001-coro1 sf-002-coro1 sf-003-coro1 sf-004-coro1 sf-005-coro1 sf-006-coro1 sf-007-coro1
do ping -q -c1 "$i"
done

echo "Testing coro2 pings"
for i in sf-001-coro2 sf-002-coro2 sf-003-coro2 sf-004-coro2 sf-005-coro2 sf-006-coro2 sf-007-coro2
do ping -q -c1 "$i"
done

echo "Testing migrate pings"
for i in sf-001-migrate sf-002-migrate sf-003-migrate sf-004-migrate sf-005-migrate sf-006-migrate sf-007-migrate
do ping -q -c1 "$i"
done

echo "Testing DMZ pings"
for i in sf-001 sf-002 sf-003 sf-004 sf-005 sf-006 sf-007
do ping -q -c1 "$i"
done
# XXX set up ssh keys
# Add them to:
#cat /root/.ssh/id_rsa.pub
# /etc/pve/priv/authorized_keys
# XXX Note! Just write to one host (e.g. sf-001), not all simultaneously or this file can break (no locking) and will lock out nodes!
# XXX Note! this may not be needed for adding nodes. It can be done via web interface "Cluster Join"
cd /etc && git commit -m 'Set up ssh keys' /etc/pve/priv/authorized_keys
# Test ssh
# Each ssh should print the remote hostname without prompting.
echo "Testing coro1 ssh"
#for i in sf-001-coro1 sf-002-coro1 sf-003-coro1 sf-004-coro1 sf-005-coro1 sf-006-coro1 sf-007-coro1
for i in sf-001-coro1 sf-002-coro1 sf-003-coro1 sf-004-coro1 sf-005-coro1
do ssh "$i" hostname
done

echo "Testing coro2 ssh"
#for i in sf-001-coro2 sf-002-coro2 sf-003-coro2 sf-004-coro2 sf-005-coro2 sf-006-coro2 sf-007-coro2
for i in sf-001-coro2 sf-002-coro2 sf-003-coro2 sf-004-coro2 sf-005-coro2
do ssh "$i" hostname
done

echo "Testing migrate ssh"
#for i in sf-001-migrate sf-002-migrate sf-003-migrate sf-004-migrate sf-005-migrate sf-006-migrate sf-007-migrate
for i in sf-001-migrate sf-002-migrate sf-003-migrate sf-004-migrate sf-005-migrate
do ssh "$i" hostname
done

# ssh via IP
echo "Testing coro1 ssh by IP"
#for i in 10.3.1.1 10.3.1.2 10.3.1.3 10.3.1.4 10.3.1.5 10.3.1.6 10.3.1.7
for i in 10.3.1.1 10.3.1.2 10.3.1.3 10.3.1.4 10.3.1.5
do ssh "$i" hostname
done

echo "Testing coro2 ssh by IP"
#for i in 10.3.2.1 10.3.2.2 10.3.2.3 10.3.2.4 10.3.2.5 10.3.2.6 10.3.2.7
for i in 10.3.2.1 10.3.2.2 10.3.2.3 10.3.2.4 10.3.2.5
do ssh "$i" hostname
done

echo "Testing migrate ssh by IP"
#for i in 10.68.68.1 10.68.68.2 10.68.68.3 10.68.68.4 10.68.68.5 10.68.68.6 10.68.68.7
for i in 10.68.68.1 10.68.68.2 10.68.68.3 10.68.68.4 10.68.68.5
do ssh "$i" hostname
done
# Add new metal node to Proxmox cluster.
# Run this on JUST ONE NODE, sf-001, to get the cluster started.
# Doesn't need to be run again:
# pvecm create sharkfork -bindnet0_addr 10.3.1.1 -ring0_addr 10.3.1.1 -bindnet1_addr 10.3.2.1 -ring1_addr 10.3.2.1

# Each node joins via sf-001, using its own corosync ring addresses.
# Run on sf-002
pvecm add 10.3.1.1 --ring0_addr 10.3.1.2 --ring1_addr 10.3.2.2 --use_ssh

# Run on sf-003
pvecm add 10.3.1.1 --ring0_addr 10.3.1.3 --ring1_addr 10.3.2.3 --use_ssh

# Run on sf-004
pvecm add 10.3.1.1 --ring0_addr 10.3.1.4 --ring1_addr 10.3.2.4 --use_ssh

# This can be done via web interface
# Run on sf-005
pvecm add 10.3.1.1 --ring0_addr 10.3.1.5 --ring1_addr 10.3.2.5 --use_ssh

# Run on sf-006
pvecm add 10.3.1.1 --ring0_addr 10.3.1.6 --ring1_addr 10.3.2.6 --use_ssh

# Run on sf-007
pvecm add 10.3.1.1 --ring0_addr 10.3.1.7 --ring1_addr 10.3.2.7 --use_ssh

# Web-interface alternative to "pvecm add":
# Go to an existing node, such as sf-002.
# Click "Join Information"
# Copy that.
# Log into new node Proxmox GUI, such as sf-005.
# Go to "Cluster Join" and paste the info from sf-002.
# XXX Don't take defaults for Corosync!
# For Corosync use the 10.3.1.0 and 10.3.2.0 networks, with the
# last number the host's number. For example sf-005:
# Corosync 1: 10.3.1.5
# Corosync 2: 10.3.2.5
# For sf-007
# Corosync 1: 10.3.1.7
# Corosync 2: 10.3.2.7
#
# Then enter root password for existing node, hit ok, and it workz!
# Then add to Ceph.

# Check status
pvecm status
pvecm nodes

cd /etc && git add . && git commit -a -m 'Setup Proxmox cluster'
# After Cluster is Configured
# ===========================

# Now you only have to log into one Proxmox node to control them all.

# XXX Ceph
# Verify the Ceph/file network (10.99.99.0/24) before installing Ceph.
echo "Testing file pings"
for i in sf-001-file sf-002-file sf-003-file sf-004-file sf-005-file sf-006-file sf-007-file
do ping -q -c1 "$i"
done
# Install Ceph
# Run this on all nodes. Needed on new nodes.
pveceph install

cd /etc && git add . && git commit -a -m 'Install Ceph'

# All nodes, enable rbd kernel module:
# Append to /etc/modules so it loads on boot, and load it now.
echo rbd >> /etc/modules
modprobe rbd

cd /etc && git commit -m 'Use rbd kernel module' /etc/modules
# Run this on just sf-001
# Doesn't need to be run again:
# pveceph init --network 10.99.99.0/24
# cd /etc ; git add . ; git commit -m 'Ceph init' ceph/ pve/ceph.conf

# With new nodes, I think rest can be done via web gui XXX
# XXX It doesn't appear to set up Ceph symbolic links when adding
# nodes added after initial setup (e.g. sf-005).
# So run this, on nodes sf-005 and subsequent nodes:
# && so the symlink is never created in the wrong directory if cd fails.
cd /etc/ceph/ &&
ln -s /etc/pve/ceph.conf ceph.conf

# It also needs a key file, copy onto new nodes, sf-005 and newer:
scp -p sf-001:/etc/ceph/ceph.client.admin.keyring .

cd /etc && git commit -a -m 'Add ceph config'

# Reboot...

# on sf-002 sf-003 sf-004 sf-005 sf-006 sf-007
cd /etc && git add . && git commit -a -m 'Ceph setup'

# DONT RUN SIMULTANEOUSLY XXX
# Run on sf-001
# Does not need to be run again:
# pveceph createmon
cd /etc && git add . && git commit -m 'Ceph createmon'

# Do the other ones via web gui
cd /etc && git add . && git commit -m 'Add Ceph mons via web gui'

# Looks like this auto creates. Not needed
#pveceph createmgr
#cd /etc ; git add . ; git commit -m 'Ceph createmgr'
# Create GPT partition table on each Ceph drive, with correct device name:
# gdisk /dev/sd[X]
# This can be done via web interface now too. :)
# NOTE: gdisk is interactive; in each session type 'w' to write the new
# GPT table. Device names below are per-node — run only that node's set.

# sf-001
# Then 'w' to write new GPT table
gdisk /dev/sdb
gdisk /dev/sdc
gdisk /dev/sdd

# sf-002
gdisk /dev/sdb
gdisk /dev/sdc
gdisk /dev/sdd

# sf-003
# Create blank GPT ?
gdisk /dev/nvme0n1
gdisk /dev/nvme1n1
gdisk /dev/nvme2n1
gdisk /dev/nvme3n1

# sf-004
gdisk /dev/nvme0n1
gdisk /dev/nvme1n1
gdisk /dev/nvme2n1
gdisk /dev/nvme3n1
gdisk /dev/nvme4n1
gdisk /dev/nvme5n1

# sf-005
# Note /dev/sda is /boot and encrypted /
gdisk /dev/sdb
gdisk /dev/sdc
gdisk /dev/sdd
gdisk /dev/sde
gdisk /dev/sdf

# sf-006, sf-007
# Added via web interface

# Add OSDs
# Do this via web, not command line.
cd /etc && git add . && git commit -m 'Add Ceph OSDs via web gui'
# Via Web GUI, add pools:
# XXX Pools have changed since this doc.
# Name: sharkpool
# Size: 3 (default)
# Min Size: 2 (default)
# Crush Rule: replicated rule (default)
# pg_num: 64 (default) XXX try 256
# Add storages: unchecked (default)
cd /etc && git add . && git commit -m 'Add Ceph pools via web gui'

## Add keyring so it can be PVE ceph client
# Not needed ???
## Just on sf-001
#mkdir /etc/pve/priv/ceph
#cp /etc/pve/priv/ceph.client.admin.keyring /etc/pve/priv/ceph/my-ceph-storage.keyring

# In web GUI, go to:
# Datacenter -> storage
# and add an RBD (PVE) container/images
#
# XXX
# Reinstall if things go bad (destructive — wipes local Ceph state):
# /etc/init.d/ceph stop ; apt -y purge ceph ceph-base ceph-mgr ceph-mon ceph-osd ; apt -y autoremove --purge ; mv /var/lib/ceph/ /var/lib/foobar-ceph ; rm -rf /etc/pve/ceph* /etc/ceph/ceph.conf /etc/pve/priv/ceph* /etc/systemd/system/ceph-mon.target.wants /etc/systemd/system/ceph-mon.target.wants/ceph-mon@sf-001.service /etc/ceph/ceph.client.admin.keyring

apt -y --reinstall install ceph-common

# rm stuff that isn't in ceph-common
find /etc -name "*ceph*"

pveceph install
# XXX

# MISC
# Data Center --> Permissions --> Users
# Add user with Realm Proxmox VE authentication server.
# Give user root permissions: Datacenter --> Permissions --> Add --> User permission.
# Path: / User: j Role: Administrator
# XXX Or create admin group, add perms to that...
# Permissions --> Authentication. Set Proxmox VE authentication server to default.

# Storage
# Datacenter --> Storage --> Edit local. Enable all content (add VZDump)
#
# DNS
# sf-003 (host) --> System --> DNS
# Add servers:
# 208.67.222.222 208.67.220.220 37.235.1.174
#
# XXX Add remote syslogging, monitoring, backups, etc.
#