#!/bin/bash
# forksand-sf-proxmox
# GPLv3+
# This script does some initial setup and config
# Sets up Proxmox.
# XXX DRAFT XXX
# Safety guard: this file is a command crib sheet, not a runnable script.
# Remove this exit only when the steps below are meant to run top to bottom.
exit 0
# Run this on workstation (one SSH tunnel per node to the PVE web UI on 8006):
# ssh -N -C -L 18021:localhost:8006 sf-001
# ssh -N -C -L 18022:localhost:8006 sf-002
# ssh -N -C -L 18023:localhost:8006 sf-003
# ssh -N -C -L 18024:localhost:8006 sf-004
# Only logging into one at a time is possible. Session management :| XXX
# firefox https://localhost:18021
# firefox https://localhost:18022
# firefox https://localhost:18023
# firefox https://localhost:18024
#
# Login as root user via PAM
# Set up Enterprise Key, if used
#
#
# Snapshot the freshly-installed /etc in git (assumes /etc is already a git
# repo, e.g. via etckeeper — TODO confirm). '&&' instead of ';' so git never
# runs in the wrong directory if the cd fails (SC2164).
cd /etc && git add . && git commit -a -m 'Initial Proxmox configuration'
#
#
# XXX Set up vmbr0 via web interface.
#
# Network
# Bridges ? XXX
# Set up static IPs on each interface in Proxmox web gui
#
# '&&' so git never runs outside /etc if the cd fails (SC2164).
cd /etc && git add . && git commit -a -m 'Configure ethernet addresses'
# Reboot!
# Configure Corosync
# Set up hosts
# XXX MAKE SURE NEW NODES GET ADDED TO EXISTING SERVER /etc/hosts
# Overwrites /etc/hosts with the full cluster map: DMZ (10.22.22.x),
# corosync rings (10.3.1.x / 10.3.2.x), migration (10.68.68.x) and
# Ceph/file (10.99.99.x) networks. The quoted 'EOF' delimiter keeps the
# body literal so nothing can ever be variable-expanded by accident.
cat > /etc/hosts <<'EOF'
127.0.0.1 localhost
# The following lines are desirable for IPv6 capable hosts
::1 localhost ip6-localhost ip6-loopback
ff02::1 ip6-allnodes
ff02::2 ip6-allrouters
10.22.22.1 sf-001.forksand.com sf-001
10.22.22.2 sf-002.forksand.com sf-002
10.22.22.3 sf-003.forksand.com sf-003
10.22.22.4 sf-004.forksand.com sf-004
10.3.1.1 sf-001-coro1
10.3.1.2 sf-002-coro1
10.3.1.3 sf-003-coro1
10.3.1.4 sf-004-coro1
10.3.2.1 sf-001-coro2
10.3.2.2 sf-002-coro2
10.3.2.3 sf-003-coro2
10.3.2.4 sf-004-coro2
10.68.68.1 sf-001-migrate
10.68.68.2 sf-002-migrate
10.68.68.3 sf-003-migrate
10.68.68.4 sf-004-migrate
10.99.99.1 sf-001-file
10.99.99.2 sf-002-file
10.99.99.3 sf-003-file
10.99.99.4 sf-004-file
EOF
# '&&' so git never runs outside /etc if the cd fails (SC2164).
cd /etc && git commit -m 'Add hosts' hosts
# Test cluster ping: one ping per host on each cluster network.
#######################################
# Ping every host given on the command line once.
# Arguments: $1 - network label for the banner; $2.. - hostnames
# Outputs:   banner to stdout, then ping output per host
#######################################
ping_group() {
  local label=$1
  shift
  echo "Testing ${label} pings"
  # Quoted "$host" avoids word-splitting/globbing (SC2086); a failed
  # ping prints its own error and the loop carries on to the next host.
  local host
  for host in "$@"; do
    ping -q -c1 "$host"
  done
}
ping_group coro1 sf-001-coro1 sf-002-coro1 sf-003-coro1 sf-004-coro1
ping_group coro2 sf-001-coro2 sf-002-coro2 sf-003-coro2 sf-004-coro2
ping_group migrate sf-001-migrate sf-002-migrate sf-003-migrate sf-004-migrate
ping_group DMZ sf-001 sf-002 sf-003 sf-004
# XXX set up ssh keys
# Add them to:
cat /root/.ssh/id_rsa.pub
# /etc/pve/priv/authorized_keys
# NOTE(review): /etc/pve is the pmxcfs cluster filesystem — presumably NOT
# tracked by the /etc git repo; verify this commit actually records anything.
# '&&' so git never runs outside /etc if the cd fails (SC2164).
cd /etc && git commit -m 'Set up ssh keys' /etc/pve/priv/authorized_keys
# Test ssh: run 'hostname' on every node over each cluster network,
# first by name, then by raw IP (catches /etc/hosts vs. routing issues).
#######################################
# ssh to every target given on the command line and run 'hostname'.
# Arguments: $1 - banner label; $2.. - hostnames or IPs
# Outputs:   banner to stdout, then remote hostname per target
#######################################
ssh_group() {
  local label=$1
  shift
  echo "Testing ${label}"
  # Quoted "$target" avoids word-splitting (SC2086); a failed ssh
  # prints its own error and the loop carries on.
  local target
  for target in "$@"; do
    ssh "$target" hostname
  done
}
ssh_group "coro1 ssh" sf-001-coro1 sf-002-coro1 sf-003-coro1 sf-004-coro1
ssh_group "coro2 ssh" sf-001-coro2 sf-002-coro2 sf-003-coro2 sf-004-coro2
ssh_group "migrate ssh" sf-001-migrate sf-002-migrate sf-003-migrate sf-004-migrate
# ssh via IP
ssh_group "coro1 ssh by IP" 10.3.1.1 10.3.1.2 10.3.1.3 10.3.1.4
ssh_group "coro2 ssh by IP" 10.3.2.1 10.3.2.2 10.3.2.3 10.3.2.4
ssh_group "migrate ssh by IP" 10.68.68.1 10.68.68.2 10.68.68.3 10.68.68.4
# Run this on JUST ONE NODE, sf-001, to get the cluster started:
# Two corosync rings (ring0 on 10.3.1.x, ring1 on 10.3.2.x) give the
# cluster a redundant heartbeat network. NOTE(review): flag spelling
# (-bindnet0_addr vs --ring0_addr) is the legacy pvecm syntax — confirm
# against the pvecm version actually installed before running.
pvecm create sharkfork -bindnet0_addr 10.3.1.1 -ring0_addr 10.3.1.1 -bindnet1_addr 10.3.2.1 -ring1_addr 10.3.2.1
# Run on sf-002
# Each joining node points at sf-001's ring0 address and declares its own
# ring addresses; --use_ssh joins over ssh rather than the PVE API.
pvecm add 10.3.1.1 --ring0_addr 10.3.1.2 --ring1_addr 10.3.2.2 --use_ssh
# Run on sf-003
pvecm add 10.3.1.1 --ring0_addr 10.3.1.3 --ring1_addr 10.3.2.3 --use_ssh
# Run on sf-004
pvecm add 10.3.1.1 --ring0_addr 10.3.1.4 --ring1_addr 10.3.2.4 --use_ssh
# Check status
pvecm status
pvecm nodes
cd /etc ; git add . ; git commit -a -m 'Setup Proxmox cluster'
# After Cluster is Configured
# ===========================
# Now you only have to log into one Proxmox node to control them all.
# XXX Ceph
echo "Testing file pings"
# Quoted "$i" avoids word-splitting (SC2086).
for i in sf-001-file sf-002-file sf-003-file sf-004-file
do ping -q -c1 "$i"
done
# Install Ceph
# Run this on all nodes:
pveceph install
# All git steps below use '&&' so git never runs outside /etc if the
# cd fails (SC2164).
cd /etc && git add . && git commit -a -m 'Install Ceph'
# Run this on just sf-001 (10.99.99.0/24 is the Ceph/file network)
pveceph init --network 10.99.99.0/24
cd /etc && git add . && git commit -m 'Ceph init' ceph/ pve/ceph.conf
# on sf-002 sf-003 sf-004
cd /etc && git add . && git commit -m 'Ceph init' pve/ceph.conf
# DONT RUN SIMULTANEOUSLY XXX
# Run on sf-001
pveceph createmon
cd /etc && git add . && git commit -m 'Ceph createmon'
# Do the other ones via web gui
#
cd /etc && git add . && git commit -m 'Add Ceph mons via web gui'
# Looks like this auto creates. Not needed
#pveceph createmgr
#cd /etc ; git add . ; git commit -m 'Ceph createmgr'
# Create GPT partition table on each Ceph drive, with correct device name:
# gdisk /dev/sd[X]
# WARNING: gdisk is interactive and destructive. Each group below belongs
# to the host named in the comment above it — run a group ONLY on that
# host; device names differ per machine (SATA sdX vs NVMe nvmeXn1).
# sf-001
# Then 'w' to write new GPT table
gdisk /dev/sdb
gdisk /dev/sdc
gdisk /dev/sdd
# sf-002
gdisk /dev/sdb
gdisk /dev/sdc
gdisk /dev/sdd
# sf-003
# Create blank GPT ?
gdisk /dev/nvme0n1
gdisk /dev/nvme1n1
gdisk /dev/nvme2n1
gdisk /dev/nvme3n1
# sf-004
gdisk /dev/nvme0n1
gdisk /dev/nvme1n1
gdisk /dev/nvme2n1
gdisk /dev/nvme3n1
gdisk /dev/nvme4n1
gdisk /dev/nvme5n1
# Add OSDs
# Do this via web, not command line.
# '&&' so git never runs outside /etc if the cd fails (SC2164).
cd /etc && git add . && git commit -m 'Add Ceph OSDs via web gui'
# Via Web GUI, add pools:
# Name: sharkpool
# Size: 3 (default)
# Min Size: 2 (default)
# Crush Rule: replicated rule (default)
# pg_num: 64 (default) XXX try 256
# Add storages: unchecked (default)
# '&&' so git never runs outside /etc if the cd fails (SC2164).
cd /etc && git add . && git commit -m 'Add Ceph pools via web gui'
## Add keyring so it can be PVE ceph client
# Not needed ???
## Just on sf-001
#mkdir /etc/pve/priv/ceph
#cp /etc/pve/priv/ceph.client.admin.keyring /etc/pve/priv/ceph/my-ceph-storage.keyring
# In web GUI, go to:
# Datacenter -> storage
# and add an RBD (PVE) storage for container/VM images
#
# XXX
# Reinstall if things go bad: tear Ceph out completely, then reinstall.
# Steps run unconditionally in sequence (the original chained them with
# ';'), so a failed stop or purge does not abort the rest of the cleanup.
/etc/init.d/ceph stop
apt -y purge ceph ceph-base ceph-mgr ceph-mon ceph-osd
apt -y autoremove --purge
mv /var/lib/ceph/ /var/lib/foobar-ceph
rm -rf /etc/pve/ceph* /etc/ceph/ceph.conf /etc/pve/priv/ceph* /etc/systemd/system/ceph-mon.target.wants /etc/systemd/system/ceph-mon.target.wants/ceph-mon@sf-001.service /etc/ceph/ceph.client.admin.keyring
apt -y --reinstall install ceph-common
# rm stuff that isn't in ceph-common
find /etc -name "*ceph*"
pveceph install
# XXX
# MISC
# Datacenter --> Permissions --> Users
# Add user with Realm Proxmox VE authentication server.
# Give user root permissions: Datacenter --> Permissions --> Add --> User permission.
# Path: / User: j Role: Administrator
# XXX Or create admin group, add perms to that...
# Permissions --> Authentication. Set Proxmox VE authentication server to default.
# Storage
# Datacenter --> Storage --> Edit local. Enable all content (add VZDump)
#
# DNS
# sf-003 (host) --> System --> DNS
# Add servers:
# 208.67.222.222 208.67.220.220 37.235.1.174
#