The cluster has three nodes, each running both monitor and storage (OSD) services.
Every node has one disk for the OS and three disks for storage.
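For reference, these are the VMs and addresses used throughout (roles inferred from the commands below):
sd-ceph-1  192.168.0.111  (also the deploy/admin node)
sd-ceph-2  192.168.0.112
sd-ceph-3  192.168.0.113
host       192.168.0.110  (CephFS client)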
vde_switch -n 20 -mod 660 -group users -daemon --mgmt /tmp/swmgmt0 -s /run/vde.ctl0
vde_plug2tap -d -s /run/vde.ctl0 tap0
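The two commands above create the virtual switch and attach a tap device to it. For the hypervisor host to reach the VMs (e.g. for the ssh commands used later), the tap interface needs an address on the same subnet; a minimal sketch, assuming 192.168.0.1 is free on that network:
ip link set dev tap0 up
ip addr add 192.168.0.1/24 dev tap0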
qemu-img create -f qcow2 ubuntu-18.04.2-orig.qcow2 10G
After installing Ubuntu on the VM, install Ceph (apt-get -y install ceph-deploy ceph-common ceph-mds) before cloning it to create the other nodes.
qemu-system-x86_64 -name ubuntu-18.04 -m 2G -cpu host -smp sockets=1,cores=2,threads=1 --enable-kvm -cdrom ubuntu-18.04.2-live-server-amd64.iso -drive file=ubuntu-18.04.2-orig.qcow2,if=virtio -device virtio-net-pci,netdev=net0,mac=92:b7:41:5f:a3:a1 -netdev vde,id=net0,sock=/run/vde.ctl0 -monitor unix:/tmp/mon,server,nowait -vga qxl -spice port=5940,password=051ec34c -daemonize
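The installer console is exposed over SPICE on port 5940 with the password given above; assuming a SPICE client such as remote-viewer (from the virt-viewer package) is installed, connect with:
remote-viewer spice://127.0.0.1:5940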
Once the OS has been installed, prepare the VM so that each clone needs only minimal adaptation.
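The sed commands used on the clones below rewrite 192.168.0.111 in /etc/netplan/50-cloud-init.yaml, so the base image should carry a static address in that file. A minimal sketch of it, assuming the NIC shows up as ens3 and 192.168.0.1 is the gateway (both are assumptions; check with ip a):
network:
  version: 2
  ethernets:
    ens3:
      addresses: [192.168.0.111/24]
      gateway4: 192.168.0.1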
qemu-img convert -O qcow2 ubuntu-18.04.2-orig.qcow2 ubuntu-18.04.2-1.qcow2
qemu-img convert -O qcow2 ubuntu-18.04.2-orig.qcow2 ubuntu-18.04.2-2.qcow2
qemu-img convert -O qcow2 ubuntu-18.04.2-orig.qcow2 ubuntu-18.04.2-3.qcow2
qemu-img convert -O qcow2 ubuntu-18.04.2-orig.qcow2 ubuntu-18.04.2-host.qcow2
Create the three storage disks for each node:
for n in 1 2 3; do
  for d in 1 2 3; do
    qemu-img create -f qcow2 ceph$n-disk$d.qcow2 20G
  done
done
qemu-system-x86_64 -name host -m 1G -cpu host -smp sockets=1,cores=1,threads=1 --enable-kvm -drive file=ubuntu-18.04.2-host.qcow2,if=virtio -device virtio-net-pci,netdev=net0,mac=92:b7:41:5f:a3:a4 -netdev vde,id=net0,sock=/run/vde.ctl0 -monitor unix:/tmp/mon4,server,nowait -display none -daemonize
Each clone boots with the original address 192.168.0.111, so start the clones one at a time and reconfigure each before starting the next. Rename this one and move it to its own address:
ssh 192.168.0.111 'echo testing-vm > /etc/hostname; sed -i s/192.168.0.111/192.168.0.110/ /etc/netplan/50-cloud-init.yaml ;sync;reboot'
To verify the change:
ssh 192.168.0.110 'uname -n;ip a |grep "/24";ip r|grep default'
qemu-system-x86_64 -name ceph2 -m 1G -cpu host -smp sockets=1,cores=1,threads=1 --enable-kvm -drive file=ubuntu-18.04.2-2.qcow2,if=virtio -drive file=ceph2-disk1.qcow2,if=virtio -drive file=ceph2-disk2.qcow2,if=virtio -drive file=ceph2-disk3.qcow2,if=virtio -device virtio-net-pci,netdev=net0,mac=92:b7:41:5f:a3:a2 -netdev vde,id=net0,sock=/run/vde.ctl0 -monitor unix:/tmp/mon2,server,nowait -display none -daemonize
ssh 192.168.0.111 'echo sd-ceph-2 > /etc/hostname; sed -i s/192.168.0.111/192.168.0.112/ /etc/netplan/50-cloud-init.yaml ;sync;reboot'
qemu-system-x86_64 -name ceph3 -m 1G -cpu host -smp sockets=1,cores=1,threads=1 --enable-kvm -drive file=ubuntu-18.04.2-3.qcow2,if=virtio -drive file=ceph3-disk1.qcow2,if=virtio -drive file=ceph3-disk2.qcow2,if=virtio -drive file=ceph3-disk3.qcow2,if=virtio -device virtio-net-pci,netdev=net0,mac=92:b7:41:5f:a3:a3 -netdev vde,id=net0,sock=/run/vde.ctl0 -monitor unix:/tmp/mon3,server,nowait -display none -daemonize
ssh 192.168.0.111 'echo sd-ceph-3 > /etc/hostname; sed -i s/192.168.0.111/192.168.0.113/ /etc/netplan/50-cloud-init.yaml ;sync;reboot'
qemu-system-x86_64 -name ceph1 -m 1G -cpu host -smp sockets=1,cores=1,threads=1 --enable-kvm -drive file=ubuntu-18.04.2-1.qcow2,if=virtio -drive file=ceph1-disk1.qcow2,if=virtio -drive file=ceph1-disk2.qcow2,if=virtio -drive file=ceph1-disk3.qcow2,if=virtio -device virtio-net-pci,netdev=net0,mac=92:b7:41:5f:a3:a1 -netdev vde,id=net0,sock=/run/vde.ctl0 -monitor unix:/tmp/mon1,server,nowait -display none -daemonize
Node ceph1 does not need any hostname or address adjustment. To get a graphical console on it, drop "-display none" from its command line and add "-vga qxl -spice port=5940,password=xxxxxx" instead, as shown below.
From sd-ceph-1, distribute the SSH key to the other nodes:
ssh-copy-id sd-ceph-2
ssh-copy-id sd-ceph-3
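ssh-copy-id and ceph-deploy address the nodes by name, so these names must resolve; assuming no local DNS, adding entries like these to /etc/hosts on every node works:
192.168.0.111 sd-ceph-1
192.168.0.112 sd-ceph-2
192.168.0.113 sd-ceph-3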
cd /etc/ceph
ceph-deploy new sd-ceph-1 sd-ceph-2 sd-ceph-3
Add this line at the end of the generated ceph.conf:
public_network = 192.168.0.0/24
ceph-deploy install sd-ceph-1 sd-ceph-2 sd-ceph-3
ceph-deploy mon create-initial
On node sd-ceph-1:
ceph-deploy osd create sd-ceph-1:vdb
ceph-deploy osd create sd-ceph-1:vdc
ceph-deploy osd create sd-ceph-1:vdd
On node sd-ceph-2:
ceph-deploy osd create sd-ceph-2:vdb
ceph-deploy osd create sd-ceph-2:vdc
ceph-deploy osd create sd-ceph-2:vdd
On node sd-ceph-3:
ceph-deploy osd create sd-ceph-3:vdb
ceph-deploy osd create sd-ceph-3:vdc
ceph-deploy osd create sd-ceph-3:vdd
Check the cluster status:
ceph -w
ceph health detail
ceph osd tree
If an OSD is down (e.g. OSD 7), restart it on the node that hosts it:
systemctl start -l ceph-osd@7
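If it is not obvious which node hosts the failed OSD, ceph can report its location:
ceph osd find 7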
Deploy the manager and the metadata servers:
ceph-deploy --overwrite-conf mgr create sd-ceph-1
ceph-deploy mds create sd-ceph-2
ceph-deploy mds create sd-ceph-3
Create a test pool (here called massi) with 128 placement groups:
ceph osd pool create massi 128
ceph osd lspools
Deleting a pool requires enabling mon_allow_pool_delete on the monitors first:
ceph tell mon.\* injectargs '--mon-allow-pool-delete=true'
ceph osd pool delete massi massi --yes-i-really-really-mean-it
If ceph-deploy complains that the local configuration file differs from the deployed one, back it up and rerun the command with --overwrite-conf:
cp /etc/ceph/ceph.conf /etc/ceph/ceph.conf-orig
ceph-deploy --overwrite-conf mds create sd-ceph-1
Check the placement-group counts of a pool (e.g. rbd):
ceph osd pool get rbd pg_num
ceph osd pool get rbd pgp_num
Create the data and metadata pools for CephFS, then the filesystem itself:
ceph osd pool create cephfs_data 128
ceph osd pool create cephfs_metadata 128
ceph fs new cephfs cephfs_metadata cephfs_data
ceph fs ls
ceph mds stat
ceph osd lspools
ceph df detail
Now set up a client machine to mount CephFS. Boot the host VM:
qemu-system-x86_64 -name host -m 1G -cpu host -smp sockets=1,cores=1,threads=1 --enable-kvm -drive file=ubuntu-18.04.2-host.qcow2,if=virtio -device virtio-net-pci,netdev=net0,mac=92:b7:41:5f:a3:a4 -netdev vde,id=net0,sock=/run/vde.ctl0 -monitor unix:/tmp/mon4,server,nowait -display none -daemonize
Inside it, set the hostname, give it its own address in /etc/netplan/50-cloud-init.yaml, and remove cloud-init so the changes survive reboots:
echo bionic > /etc/hostname
edit /etc/netplan/50-cloud-init.yaml
apt-get remove cloud-init
apt-get -y install ceph-deploy ceph-common ceph-mds
mkdir /media/ceph
chmod 777 /media/ceph
Read the admin key from one of the cluster nodes and use it as the mount secret:
ssh 192.168.0.111 'cat /etc/ceph/ceph.client.admin.keyring'
mount.ceph 192.168.0.111,192.168.0.112,192.168.0.113:/ /media/ceph/ -o name=admin,secret=AQBfWEBdN1RaLhAAjDx1MzhXGwfuh/0l32ZQog==
df -h /media/ceph/
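To make the mount persistent, an fstab entry along these lines works; using secretfile= keeps the key out of fstab (the path /etc/ceph/admin.secret is an arbitrary choice, and the file must contain only the key itself):
192.168.0.111,192.168.0.112,192.168.0.113:/ /media/ceph ceph name=admin,secretfile=/etc/ceph/admin.secret,_netdev,noatime 0 0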
To shut the whole cluster down cleanly, first stop rebalancing:
ceph osd set noout; ceph osd set nobackfill; ceph osd set norecover
Then power off the nodes, adapting the loop below as needed:
for i in {2..3}; do ssh 192.168.0.11$i 'poweroff';sleep 10;done
After powering the nodes back on, clear the flags:
ceph osd unset noout; ceph osd unset nobackfill; ceph osd unset norecover
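Once the flags are cleared, watch the status until the cluster reports HEALTH_OK again:
ceph -s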