A solution for VirtualBox support:
$ sudo apt update
$ sudo apt upgrade
$ sudo apt install mc samba ssh
$ sudo reboot
$ sudo sh -c 'echo "deb [arch=amd64] http://download.virtualbox.org/virtualbox/debian $(lsb_release -sc) contrib" >> /etc/apt/sources.list.d/virtualbox.list'
$ wget -q https://www.virtualbox.org/download/oracle_vbox_2016.asc -O- | sudo apt-key add -
$ sudo apt update
$ sudo apt install virtualbox-6.0
#$ sudo apt autoremove --purge virtualbox-6.0
$ sudo usermod -a -G vboxusers $USER
$ sudo /sbin/vboxconfig
vboxdrv.sh: Stopping VirtualBox services.
vboxdrv.sh: Starting VirtualBox services.
vboxdrv.sh: Building VirtualBox kernel modules.
$ sudo reboot
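After the reboot, it is worth checking that the kernel modules were built and loaded (the exact version string will differ):
$ lsmod | grep vboxdrv
$ VBoxManage --version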
$ sudo apt-get install bridge-utils -y
$ ip a
Note the name of the physical interface and its current DHCP address; here it is enp0s31f6 with 192.168.0.5.
$ sudo nano /etc/netplan/01-netcfg.yaml
# This file describes the network interfaces available on your system
# For more information, see netplan(5).
network:
  version: 2
  renderer: networkd
  ethernets:
    enp0s31f6:
      dhcp4: no  # the port is enslaved to br0 below, so it must not run DHCP itself
  bridges:
    br0:
      interfaces: [enp0s31f6]
      addresses: [192.168.0.7/24]
      gateway4: 192.168.0.1
      mtu: 1500
      nameservers:
        addresses: [8.8.8.8]
      parameters:
        stp: true
        forward-delay: 4
      dhcp4: no
      dhcp6: no
Save.
$ sudo netplan generate
$ sudo netplan --debug apply
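Check that the bridge came up with its static address:
$ networkctl status br0
$ brctl show br0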
$ cat /proc/cpuinfo | grep MHz
$ sudo systemctl stop ondemand
$ sudo systemctl disable ondemand
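Disabling ondemand leaves the scaling governor wherever it was; to pin all CPUs to the performance governor explicitly (a sketch writing straight to sysfs; paths can differ between kernels):
$ echo performance | sudo tee /sys/devices/system/cpu/cpu*/cpufreq/scaling_governor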
$ nano tuning.sh
#!/bin/bash
# disable ipv6
sudo /bin/su -c "echo 'net.ipv6.conf.all.disable_ipv6 = 1' >> /etc/sysctl.conf"
sudo /bin/su -c "echo 'net.ipv6.conf.default.disable_ipv6 = 1' >> /etc/sysctl.conf"
sudo /bin/su -c "echo 'net.ipv6.conf.lo.disable_ipv6 = 1' >> /etc/sysctl.conf"
sudo /bin/su -c "echo '#net.ipv4.ip_forward = 1' >> /etc/sysctl.conf"
# postgresql tuning
sudo /bin/su -c "echo 'vm.swappiness=1' >> /etc/sysctl.conf"
sudo /bin/su -c "echo 'kernel.sched_migration_cost_ns = 5000000' >> /etc/sysctl.conf"
sudo /bin/su -c "echo 'kernel.sched_autogroup_enabled = 0' >> /etc/sysctl.conf"
sudo /bin/su -c "echo 'vm.dirty_background_bytes = 67108864' >> /etc/sysctl.conf"
sudo /bin/su -c "echo 'vm.dirty_bytes = 536870912' >> /etc/sysctl.conf"
sudo /bin/su -c "echo 'vm.zone_reclaim_mode = 0' >> /etc/sysctl.conf"
#lxd
sudo /bin/su -c "echo 'fs.inotify.max_queued_events = 1048576' >> /etc/sysctl.conf"
sudo /bin/su -c "echo 'fs.inotify.max_user_instances = 1048576' >> /etc/sysctl.conf"
sudo /bin/su -c "echo 'fs.inotify.max_user_watches = 1048576' >> /etc/sysctl.conf"
sudo sysctl -p
Save.
$ sudo sh tuning.sh
$ sudo reboot
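After the reboot, the values can be spot-checked:
$ sysctl vm.swappiness net.ipv6.conf.all.disable_ipv6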
$ mkdir archive
$ ls -l /dev/disk/by-uuid
$ sudo nano /etc/fstab
Add:
UUID=5238c225-69c3-4c28-a633-5177867f0696 /home/user/archive ext4 defaults 0 0
Save.
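Before rebooting, it is safer to test the new entry in place:
$ sudo mount -a
$ df -h /home/user/archive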
$ sudo reboot
1. On the host, create the udev rules file (take its contents from a machine where the HASP drivers are installed):
$ sudo nano /etc/udev/rules.d/80-hasp.rules
# HASP rules
ACTION=="add|change|bind", SUBSYSTEM=="usb", ATTRS{idVendor}=="0529", ATTRS{idProduct}=="0001", MODE="664", ENV{HASP}="1", SYMLINK+="aks/hasp/%k", RUN+="/usr/sbin/aksusbd_x86_64 -c $root/aks/hasp/$kernel"
ACTION=="remove", ENV{HASP}=="1", RUN+="/usr/sbin/aksusbd_x86_64 -r $root/aks/hasp/$kernel"
# SENTINEL rules
ACTION=="add|change|bind", SUBSYSTEM=="usb", ATTRS{idVendor}=="0529", ATTRS{idProduct}=="0003", KERNEL!="hiddev*", MODE="666", GROUP="plugdev", ENV{SENTINELHID}="1", SYMLINK+="aks/sentinelhid/%k"
# SENTINEL rule for a second server key; add one more entry:
ACTION=="add|change|bind", SUBSYSTEM=="usb", ATTRS{idVendor}=="0529", ATTRS{idProduct}=="0004", KERNEL!="hiddev*", MODE="666", GROUP="plugdev", ENV{SENTINELHID}="1", SYMLINK+="aks/sentinelhid/%k"
$ sudo reboot
$ ls -alFhR /dev/aks/
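When keys are added or replugged later, the rules can usually be re-applied without a reboot:
$ sudo udevadm control --reload-rules
$ sudo udevadm trigger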
$ sudo lxd init
Would you like to use LXD clustering? (yes/no) [default=no]:
Do you want to configure a new storage pool? (yes/no) [default=yes]:
Name of the new storage pool [default=default]:
Name of the storage backend to use (btrfs, dir, lvm) [default=btrfs]: dir
Would you like to connect to a MAAS server? (yes/no) [default=no]:
Would you like to create a new local network bridge? (yes/no) [default=yes]: no
Would you like to configure LXD to use an existing bridge or host interface? (yes/no) [default=no]: yes
Name of the existing bridge or host interface: br0
Would you like LXD to be available over the network? (yes/no) [default=no]:
Would you like stale cached images to be updated automatically? (yes/no) [default=yes]:
Would you like a YAML "lxd init" preseed to be printed? (yes/no) [default=no]:
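The same answers can be captured as a preseed and replayed on another host with lxd init --preseed (a sketch matching the dialog above):
config: {}
storage_pools:
- name: default
  driver: dir
profiles:
- name: default
  devices:
    eth0:
      name: eth0
      nictype: bridged
      parent: br0
      type: nic
    root:
      path: /
      pool: default
      type: disk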
Create an lxduser account for working with the container consoles without sudo access:
$ sudo adduser lxduser
$ sudo adduser lxduser lxd
We will work as this user from here on.
$ sudo su - lxduser
Restore the gitlab container:
$ lxc image import gitlab-2019-12-06.tar.gz --alias gitlab-backup
$ lxc launch gitlab-backup gitlab
$ lxc image list
$ lxc image delete gitlab-backup
Restore the vbox container:
$ lxc image import vbox.tar.gz --alias vbox-backup
Image imported with fingerprint: 4cc93c869cbc7686c61eabed84579b8386f4633ac1e0f7b7493c87274d000
$ lxc launch vbox-backup vbox
$ lxc stop vbox
Check on the host that the VirtualBox device nodes exist (note the major:minor numbers 10,53 to 10,55; they are whitelisted in raw.lxc below):
user@lxd:~$ ls -la /dev | grep vbox
crw-rw---- 1 root vboxusers 10, 55 Dec 5 16:18 vboxdrv
crw-rw-rw- 1 root root 10, 54 Dec 5 16:18 vboxdrvu
crw-rw---- 1 root vboxusers 10, 53 Dec 5 16:18 vboxnetctl
drwxr-x--- 3 root vboxusers 60 Dec 5 16:18 vboxusb
#$ lxc start vbox
$ lxc config device add vbox vboxdrv unix-char path=/dev/vboxdrv mode=0777
$ lxc config device add vbox vboxdrvu unix-char path=/dev/vboxdrvu mode=0777
$ lxc config device add vbox vboxnetctl unix-char path=/dev/vboxnetctl mode=0777
#$ lxc config set vbox linux.kernel_modules overlay
#$ lxc config set vbox security.privileged true
$ printf 'lxc.cgroup.devices.allow = c 10:53 rwm\nlxc.cgroup.devices.allow = c 10:54 rwm\nlxc.cgroup.devices.allow = c 10:55 rwm' | lxc config set vbox raw.lxc -
(the raw.lxc entries must be newline-separated, hence the \n in printf)
Verify:
$ lxc config edit vbox
......
image.release: bionic
raw.lxc: |-
  lxc.cgroup.devices.allow = c 10:53 rwm
  lxc.cgroup.devices.allow = c 10:54 rwm
  lxc.cgroup.devices.allow = c 10:55 rwm
.......
$ lxc start vbox
$ lxc image import test-2019-11-12.tar.gz --alias test-backup
Image imported with fingerprint: f442e1c60b9c1353f3d665a61e67bb7c13d383fe65af530fbe7f482699233
$ lxc launch test-backup srv1c
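The USB bus/device paths used below (001/003, 001/004) can be looked up first; the numbers will differ from host to host:
$ lsusb | grep -i 0529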
$ lxc config device add srv1c hasp3 unix-char path=/dev/bus/usb/001/003 mode=0777
$ lxc config device add srv1c hasp4 unix-char path=/dev/bus/usb/001/004 mode=0777
$ lxc config device add srv1c aks2 unix-char path=/dev/aks/hasp/1-3 mode=0777
$ lxc config device add srv1c aks3 unix-char path=/dev/aks/hasp/1-4 mode=0777
#$ lxc config set srv1c security.privileged true
$ printf 'lxc.cgroup.devices.allow = c 198:* rwm' | lxc config set srv1c raw.lxc -
$ lxc config edit srv1c
$ lxc restart srv1c
$ lxc exec srv1c -- rm -rf /home/usr1cv8/.server1c-8.3.15.1700
$ lxc restart srv1c
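To confirm the 1C server came back up inside the container, check its service (a sketch; srv1cv83 is the usual unit name for 1C:Enterprise 8.3, adjust to your version):
$ lxc exec srv1c -- systemctl status srv1cv83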
Since this is a production server, let's set up backups.
Setting up rsync on Ubuntu 16.04 / 18.04
$ sudo nano /etc/default/rsync
#RSYNC_ENABLE=false
RSYNC_ENABLE=true
Save.
$ sudo nano /etc/rsyncd.conf
[backup]
# destination directory to copy
path = /home/user/archive
# hosts you allow to access
hosts allow = 192.168.0.0/255.255.255.0
hosts deny = *
list = true
uid = user
gid = user
read only = false
Save.
$ sudo mkdir -p /home/user/archive
$ sudo systemctl enable rsync
$ sudo systemctl start rsync
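A quick check from another Linux machine on the LAN (the first command lists the available modules, the second pushes a hypothetical test file):
$ rsync 192.168.0.5::
$ rsync -avz ./testfile 192.168.0.5::backup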
On Windows 10:
You can wrap everything in a single script and add it to Task Scheduler:
ping -n 1 -w 1000 192.168.0.5|Find "TTL=">nul
If %ErrorLevel%==0 (
CALL d:\Copy\rsync\rsync -avz /cygdrive/D/Copy/Archiv 192.168.0.5::backup --progress
) Else (
Echo Offline
)
$ sudo crontab -e
@reboot fstrim / -v
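On systemd-based releases the bundled fstrim.timer trims on a weekly schedule and can be used instead of the @reboot entry:
$ sudo systemctl enable --now fstrim.timer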
$ nano backup_lxd.sh
#!/usr/bin/env bash
set -ex
BACKUP_DIR=/home/user/archive/lxd/backup
HOSTS=$(lxc list -c n --format csv)
for HOST in ${HOSTS}
do
BACKUP_NAME=${HOST}-$(date +"%Y-%m-%d")
lxc snapshot ${HOST} auto-backup
lxc publish ${HOST}/auto-backup --alias ${BACKUP_NAME}
# lxc image export appends .tar.gz to the target name itself, so pass the name without an extension:
#lxc image export ${BACKUP_NAME} ${BACKUP_DIR}/${BACKUP_NAME}.tar.gz
lxc image export ${BACKUP_NAME} ${BACKUP_DIR}/${BACKUP_NAME}
lxc image delete ${BACKUP_NAME}
lxc delete ${HOST}/auto-backup
done
Save.
$ sh backup_lxd.sh
+ date +%Y-%m-%d
+ BACKUP_NAME=ud1804-2019-11-10
+ lxc snapshot ud1804 auto-backup
+ lxc publish ud1804/auto-backup --alias ud1804-2019-11-10
Container published with fingerprint: b900ce0db69f0a57afd52af682f8a47229b5ea8fcdfa2a566af2034a6f6766a2
+ lxc image export ud1804-2019-11-10 /home/user/backup/ud1804-2019-11-10.tar.gz
Image exported successfully!
+ lxc image delete ud1804-2019-11-10
+ lxc delete ud1804/auto-backup
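To run the backup automatically, a crontab entry along these lines could be added for a user in the lxd group (hypothetical schedule: every Sunday at 02:00):
$ crontab -e
0 2 * * 0 /home/user/backup_lxd.sh >> /home/user/archive/lxd/backup/backup.log 2>&1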
Restoring on another machine (note the doubled extension: the old export command passed a name already ending in .tar.gz, and lxc image export appended another one):
$ lxc image import ud1804-2019-11-10.tar.gz.tar.gz --alias ud1804-backup
Image imported with fingerprint: b900ce0db69f0a57afd52af682f8a47229b5ea8fcdfa2a566af2034a6f6766a2
$ lxc launch ud1804-backup ud1804
#lxc image delete ud1804-backup
Once the archives are verified, clean up leftover snapshots and published images:
$ lxc info gitlab
$ lxc delete gitlab/auto-backup
$ lxc image list
$ lxc image delete gitlab-2019-12-06