Saturday, 13 December 2014

Foreman RHEV-H autoinstall

DEFAULT ovirt
TIMEOUT 20
PROMPT 0
LABEL ovirt
KERNEL boot/vmlinuz0
APPEND rootflags=loop initrd=boot/initrd0.img root=live:/rhevh-6.5-20140930.1.el6ev.iso BOOTIF=link storage_init rootfstype=auto ro liveimg check local_boot_trigger=<%= foreman_url("built") %>  management_server=rhevm.example.com:443 rhevm_admin_password=$1$1OIs7Iry$7iD0YeFzWMlphfu7ar1 adminpw=$1$1OIs7Iry$7iD0YeFMlphf7Or1 ssh_pwauth=1 hostname=<%= @host %> ip=<%=@host.ip %> netmask=<%=@host.subnet.mask %> gateway=<%=@host.subnet.gateway %> dns=<%=[@host.subnet.dns_primary,@host.subnet.dns_secondary].reject{|n| n.blank?}.join(',')%> ntp=ntp.ix.ru RD_NO_LVM rd_NO_MULTIPATH rootflags=ro crashkernel=128M elevator=deadline reinstall max_loop=256 rd_NO_LUKS rd_NO_MD rd_NO_DM
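
This PXELinux template is served by Foreman to kickstart RHEV-H 6.5 from the bundled ISO. For a hypothetical host node01.example.com (192.168.1.10/24, gateway 192.168.1.1, two DNS servers) the ERB snippets above would render to kernel arguments roughly like the following; the Foreman URL and all addresses here are purely illustrative:

local_boot_trigger=http://foreman.example.com/unattended/built hostname=node01.example.com ip=192.168.1.10 netmask=255.255.255.0 gateway=192.168.1.1 dns=192.168.1.2,192.168.1.3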

Tuesday, 5 August 2014

How to find the Host ID (WWPN) when working over FC

Via the systool utility, which is part of the sysfsutils package (CentOS 6):

[root@c1 ~]# systool -c fc_host -v | grep port_name
    port_name           = "0x5001438026682306"
[root@c1 ~]#
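
If sysfsutils is not installed, it is one yum command away; the same WWPN can also be read straight from sysfs without any extra packages:

yum -y install sysfsutils
cat /sys/class/fc_host/host*/port_name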

Multipath acting up

If we see multipath acting up, printing something like this:
multipath -ll
mpathr (3600c0ff00012e06d0000000000000000) dm-38
size=5.5T features='1 queue_if_no_path' hwhandler='0' wp=rw
mpathg (360000000000000000000000000000000) dm-8 ,
size=9.3G features='1 queue_if_no_path' hwhandler='0' wp=rw
`-+- policy='round-robin 0' prio=0 status=enabled
  |- #:#:#:# -   #:# failed faulty running
  |- #:#:#:# -   #:# failed faulty running
  |- #:#:#:# -   #:# failed faulty running
  |- #:#:#:# -   #:# failed faulty running
  |- #:#:#:# -   #:# failed faulty running
  |- #:#:#:# -   #:# failed faulty running
  |- #:#:#:# -   #:# failed faulty running
  |- #:#:#:# -   #:# failed faulty running
  |- #:#:#:# -   #:# failed faulty running
  |- #:#:#:# -   #:# failed faulty running
  |- #:#:#:# -   #:# failed faulty running
  |- #:#:#:# -   #:# failed faulty running
  |- #:#:#:# -   #:# failed faulty running
  |- #:#:#:# -   #:# failed faulty running
  |- #:#:#:# -   #:# failed faulty running
  |- #:#:#:# -   #:# failed faulty running
  |- #:#:#:# -   #:# failed faulty running
  |- #:#:#:# -   #:# failed faulty running
  |- #:#:#:# -   #:# failed faulty running
  |- #:#:#:# -   #:# failed faulty running
  |- #:#:#:# -   #:# failed faulty running
  `- #:#:#:# -   #:# failed faulty running
[root@c1 ~]# multipath -F
Aug 05 18:24:13 | mpathr: map in use
Aug 05 18:24:13 | mpathg: map in use


and we are absolutely sure that all of these LUNs were deleted on the storage side long ago and physically no longer exist there, then:

[root@c1 ~]# rm /etc/multipath/bindings 
rm: remove regular file `/etc/multipath/bindings'? y
[root@c1 ~]# rm /etc/multipath/wwids 
rm: remove regular file `/etc/multipath/wwids'? y
[root@c1 ~]# /etc/init.d/multipathd restart
ok
Stopping multipathd daemon:                                [  OK  ]
Starting multipathd daemon:                                [  OK  ]
[root@c1 ~]# multipath -F     # flush all unused multipath maps
[root@c1 ~]# multipath -v0    # quiet rescan: maps are recreated only for LUNs that still exist
[root@c1 ~]# multipath -v4    # the same scan at maximum verbosity, to see what is going on

[root@c1 ~]# multipath -ll
[root@c1 ~]# 
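
If this happens regularly, the whole cleanup can be wrapped into a small script. A minimal sketch assuming stock CentOS 6 paths, to be run only when you are sure the LUNs really are gone on the storage side:

#!/bin/bash
# flush stale multipath state (CentOS 6 / device-mapper-multipath)
rm -f /etc/multipath/bindings /etc/multipath/wwids   # drop cached WWID and alias mappings
/etc/init.d/multipathd restart                       # restart the daemon with a clean state
multipath -F                                         # flush all unused multipath maps
multipath -v0                                        # quiet rescan; recreate maps for LUNs that still exist
multipath -ll                                        # the stale maps should no longer be listed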

Friday, 28 March 2014

Apache Prefork and Worker

The prefork MPM uses multiple child processes with one thread each; each process handles one connection at a time.
The worker MPM uses multiple child processes with several threads each; each thread handles one connection at a time.
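
On RHEL/CentOS 6 the MPM is selected in /etc/sysconfig/httpd and tuned via the matching <IfModule> block in httpd.conf. The snippet below mirrors the stock defaults and is only a starting point, not a recommendation:

# /etc/sysconfig/httpd -- uncomment to switch from the default prefork to worker
#HTTPD=/usr/sbin/httpd.worker

# /etc/httpd/conf/httpd.conf
<IfModule prefork.c>
    StartServers          8
    MinSpareServers       5
    MaxSpareServers      20
    ServerLimit         256
    MaxClients          256
    MaxRequestsPerChild 4000
</IfModule>

<IfModule worker.c>
    StartServers          4
    MaxClients          300
    MinSpareThreads      25
    MaxSpareThreads      75
    ThreadsPerChild      25
    MaxRequestsPerChild   0
</IfModule>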

Friday, 21 March 2014

Useful git aliases

git config --global alias.st status
git config --global alias.ci 'commit -v'
git config --global alias.br branch
git config --global alias.co checkout
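
These commands simply write an [alias] section into ~/.gitconfig, so the same result can be had by editing the file directly:

[alias]
        st = status
        ci = commit -v
        br = branch
        co = checkout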

Monday, 17 March 2014

BIND's statistics channel and SELinux


Mar 17 18:58:19 dns01 named[12144]: /etc/named.conf:39: couldn't allocate statistics channel 127.0.0.1#8053: permission denied
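
The message points at a statistics-channels block in named.conf (line 39 here), which typically looks something like this:

statistics-channels {
        inet 127.0.0.1 port 8053 allow { 127.0.0.1; };
};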

# setenforce 0
# semanage port -a -t dns_port_t -p tcp 8053
# setenforce 1

# /etc/init.d/named restart
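
To check that the port label stuck and that the channel answers (it serves statistics as XML over plain HTTP):

# semanage port -l | grep dns_port_t
# curl http://127.0.0.1:8053/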

Tuesday, 18 February 2014

Incremental backups with tar

rm -rf /tmp/*

mkdir /tmp/test_filesystem/{a,b,c} -p

echo 1 > /tmp/test_filesystem/a/file1
echo 2 > /tmp/test_filesystem/b/file2
echo 3 > /tmp/test_filesystem/b/file3

# level 0 (full) backup: --listed-incremental creates the snapshot file /tmp/test.snar;
# --no-check-device makes tar ignore device numbers when deciding what has changed
tar cvzf /tmp/archive.1.tar.gz --no-check-device --listed-incremental=/tmp/test.snar /tmp/test_filesystem

echo 4 > /tmp/test_filesystem/b/file4
echo 5 > /tmp/test_filesystem/b/file5

# level 1: only the new files (file4, file5) end up in the archive
tar cvzf /tmp/archive.2.tar.gz --no-check-device --listed-incremental=/tmp/test.snar /tmp/test_filesystem

rm /tmp/test_filesystem/a/file1 -f

# level 2: the snapshot notes that file1 is gone, so an --incremental restore will delete it
tar cvzf /tmp/archive.3.tar.gz --no-check-device --listed-incremental=/tmp/test.snar /tmp/test_filesystem

mkdir /tmp/test-extract



To restore the filesystem state, extract the archives in ascending order of their numbers.

The full archive goes first:
tar xvf /tmp/archive.1.tar.gz -C /tmp/test-extract

Then the incremental ones, with the corresponding option:
tar xvf /tmp/archive.2.tar.gz --incremental -C /tmp/test-extract
tar xvf /tmp/archive.3.tar.gz --incremental -C /tmp/test-extract
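
A quick sanity check: GNU tar strips the leading / from absolute paths, so the restored tree ends up under /tmp/test-extract/tmp/test_filesystem; since archive 3 recorded the removal of file1, the diff should come back empty:

diff -r /tmp/test_filesystem /tmp/test-extract/tmp/test_filesystem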




Sunday, 2 February 2014

Syncing repositories


yum -y install yum-utils createrepo
yum repolist
repo id        repo name        
base           CentOS-6 - Base  
extras         CentOS-6 - Extras
updates        CentOS-6 - Updates

mkdir -p /media/repos/base
cd /media/repos/base
reposync -r base
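
By default reposync drops the packages into a subdirectory named after the repo id, so the createrepo package installed above is what turns the mirror into a usable repository (a sketch; the path assumes the layout used here):

createrepo /media/repos/base/base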

When partprobe doesn't help

We have a disk with one partition already created.
That partition is mounted.

[root@node01 ~]# mount | grep sdb
/dev/sdb1 on /mnt/int_p1 type ext4 (rw)

Suppose we need to create a new partition on the same disk:

[root@node01 ~]# fdisk -cu /dev/sdb
Command (m for help): n
Command action
   e   extended
   p   primary partition (1-4)
p
Partition number (1-4): 2
First sector (104448-10485759, default 104448): 
Using default value 104448
Last sector, +sectors or +size{K,M,G} (104448-10485759, default 10485759): +100M

Command (m for help): p

Disk /dev/sdb: 5368 MB, 5368709120 bytes
128 heads, 57 sectors/track, 1437 cylinders, total 10485760 sectors
Units = sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
Disk identifier: 0x522f1a6e

   Device Boot      Start         End      Blocks   Id  System
/dev/sdb1            2048      104447       51200   83  Linux
/dev/sdb2          104448      309247      102400   83  Linux
Command (m for help): w
The partition table has been altered!

Calling ioctl() to re-read partition table.

WARNING: Re-reading the partition table failed with error 16: Device or resource busy.
The kernel still uses the old table. The new table will be used at
the next reboot or after you run partprobe(8) or kpartx(8)
Syncing disks.

The new partition is not visible to the kernel, and therefore not to the OS either.
Let's try the utilities suggested by the warning:
[root@node01 ~]#  partprobe /dev/sdb
Warning: WARNING: the kernel failed to re-read the partition table on /dev/sdb (Device or resource busy).  As a result, it may not reflect all of your changes until after reboot.
[root@node01 ~]# mkfs.ext4 /dev/sdb2
mke2fs 1.41.12 (17-May-2010)
Could not stat /dev/sdb2 --- No such file or directory

The device apparently does not exist; did you specify it correctly?

[root@node01 ~]# kpartx -a /dev/sdb
device-mapper: reload ioctl on sdb1 failed: Invalid argument
create/reload failed on sdb1
device-mapper: reload ioctl on sdb2 failed: Invalid argument
create/reload failed on sdb2

And finally, only partx saves us, even though it prints an error:
[root@node01 ~]# partx -a /dev/sdb
BLKPG: Device or resource busy
error adding partition 1
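
The error concerns partition 1, which the kernel already knows about (it is mounted); partition 2 does get registered, which can be confirmed before formatting it:

[root@node01 ~]# grep sdb /proc/partitions    # sdb2 should now be listed
[root@node01 ~]# ls /dev/sdb2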

[root@node01 ~]# mkfs.ext4 /dev/sdb2
mke2fs 1.41.12 (17-May-2010)
Filesystem label=
OS type: Linux
Block size=1024 (log=0)
Fragment size=1024 (log=0)
Stride=0 blocks, Stripe width=0 blocks
25688 inodes, 102400 blocks
5120 blocks (5.00%) reserved for the super user
First data block=1
Maximum filesystem blocks=67371008
13 block groups
8192 blocks per group, 8192 fragments per group
1976 inodes per group
Superblock backups stored on blocks: 
8193, 24577, 40961, 57345, 73729

Writing inode tables: done                            
Creating journal (4096 blocks): done
Writing superblocks and filesystem accounting information: done
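
From here the new filesystem can be mounted as usual; /mnt/int_p2 below is just an illustrative mount point that follows the naming used for sdb1:

[root@node01 ~]# mkdir -p /mnt/int_p2
[root@node01 ~]# mount /dev/sdb2 /mnt/int_p2
[root@node01 ~]# mount | grep sdb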