[root@test /]# lsblk
NAME        MAJ:MIN RM  SIZE RO TYPE MOUNTPOINT
nvme0n1     259:0    0  200G  0 disk
└─nvme0n1p1 259:1    0  100G  0 part /
 
[root@test /]# growpart /dev/nvme0n1 1
CHANGED: partition=1 start=2048 old: size=209713119 end=209715167 new: size=419428319 end=419430367
 
[root@test /]# lsblk
NAME        MAJ:MIN RM  SIZE RO TYPE MOUNTPOINT
nvme0n1     259:0    0  200G  0 disk
└─nvme0n1p1 259:1    0  200G  0 part /
 
[root@test /]# xfs_growfs -d /
meta-data=/dev/nvme0n1p1         isize=512    agcount=51, agsize=524224 blks
         =                       sectsz=512   attr=2, projid32bit=1
         =                       crc=1        finobt=0 spinodes=0
data     =                       bsize=4096   blocks=26214139, imaxpct=25
         =                       sunit=0      swidth=0 blks
naming   =version 2              bsize=4096   ascii-ci=0 ftype=1
log      =internal               bsize=4096   blocks=2560, version=2
         =                       sectsz=512   sunit=0 blks, lazy-count=1
realtime =none                   extsz=4096   blocks=0, rtextents=0
data blocks changed from 26214139 to 52428539
 
[root@test /]# df -h
Filesystem      Size  Used Avail Use% Mounted on
devtmpfs        7.6G     0  7.6G   0% /dev
tmpfs           7.7G     0  7.7G   0% /dev/shm
tmpfs           7.7G  401M  7.3G   6% /run
tmpfs           7.7G     0  7.7G   0% /sys/fs/cgroup
/dev/nvme0n1p1  200G   83G  118G  42% /
tmpfs           1.6G     0  1.6G   0% /run/user/1001
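For reference, xfs_growfs only works on XFS. If the root filesystem were ext4 instead, the same online grow would be done with resize2fs (a minimal sketch, assuming the same /dev/nvme0n1p1 layout):

[root@test /]# growpart /dev/nvme0n1 1
[root@test /]# resize2fs /dev/nvme0n1p1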

Check the dataset list

zfs list
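With no arguments zfs list shows every dataset. It also accepts a dataset name, plus -r for its children and -t to filter by type (illustrative, using the pool names that appear below):

zfs list -r datapool
zfs list -t snapshot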

 

Change a ZFS mount point (move the old dataset aside, then give the new dataset the original path)

zfs set mountpoint=/oradata_bk datapool/oradata
zfs set mountpoint=/oradata purevol/oradata

 

Verify

zfs get mountpoint datapool/oradata

 

Mount

zfs mount datapool/oradata
zfs mount purevol/oradata
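When many datasets are involved, one command mounts everything with a non-legacy mountpoint:

zfs mount -a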

 

Delete a ZFS dataset

zfs destroy datapool/Arch
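A plain destroy refuses if the dataset still has snapshots or child datasets; adding -r removes those as well, so double-check the target name first:

zfs destroy -r datapool/Arch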

 

zpool list

NAME      SIZE  ALLOC   FREE  CAP  DEDUP  HEALTH  ALTROOT
dbpool    556G  6.38G   550G   1%  1.00x  ONLINE  -
purevol  11.9T  16.4G  11.9T   0%  1.00x  ONLINE  -
rpool     556G   179G   377G  32%  1.00x  ONLINE  -
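zpool list -v breaks the same figures down per vdev, which helps match the capacity numbers to the disks shown in zpool status below:

zpool list -v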

 

zpool status

root@dbserver1 # zpool status
  pool: datapool
 state: ONLINE
  scan: none requested
config:

        NAME                                     STATE     READ WRITE CKSUM
        datapool                                 ONLINE       0     0     0
          c0t624A9370B130E0A67E0B480800011011d0  ONLINE       0     0     0

errors: No known data errors

  pool: dbpool
 state: ONLINE
  scan: none requested
config:

        NAME                       STATE     READ WRITE CKSUM
        dbpool                     ONLINE       0     0     0
          mirror-0                 ONLINE       0     0     0
            c0t5000CCA02F613828d0  ONLINE       0     0     0
            c0t5000CCA02F53C340d0  ONLINE       0     0     0

errors: No known data errors

  pool: purevol
 state: ONLINE
  scan: none requested
config:

        NAME                                     STATE     READ WRITE CKSUM
        purevol                                  ONLINE       0     0     0
          c0t624A9370B130E0A67E0B480800011013d0  ONLINE       0     0     0

errors: No known data errors

  pool: rpool
 state: ONLINE
  scan: resilvered 114G in 12m23s with 0 errors on Mon Oct 22 14:31:21 2018
config:

        NAME                       STATE     READ WRITE CKSUM
        rpool                      ONLINE       0     0     0
          mirror-0                 ONLINE       0     0     0
            c0t5000CCA02F540950d0  ONLINE       0     0     0
            c0t5000CCA02F613E14d0  ONLINE       0     0     0

Deleting datapool: zfs destroy cannot remove the pool itself, so the pool has to be destroyed with zpool destroy.

zpool destroy -f datapool
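zpool destroy is irreversible. If the goal is only to detach the pool from this host, for example before importing it on another server, export is the non-destructive alternative:

zpool export datapool
zpool import datapool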

 

 

 

In vi

:%s/old/new/gc
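The g flag replaces every occurrence on a line and c asks for confirmation at each match. A line range limits the substitution instead of running it over the whole file:

:10,20s/old/new/g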

 

From the command line (sed -i edits the file in place, so it takes the filename directly rather than piped input)

sed -i 's/old/new/g' test.log
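When editing in place, sed can keep a backup copy by giving -i a suffix, which is worth doing on files that are hard to regenerate (GNU sed syntax):

sed -i.bak 's/old/new/g' test.log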

 


1. Check which devices have link up

[root@rhel6-test ~]# ethtool eth0 | grep Link
        Link detected: yes
[root@rhel6-test ~]# ethtool eth1 | grep Link
        Link detected: yes
[root@rhel6-test ~]# ethtool eth2 | grep Link
        Link detected: no  

 

The check shows that link is up on eth0 and eth1, so those two devices are used for the bond.
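On a host with many NICs, checking interfaces one by one gets tedious; a small loop runs the same check in one pass (a sketch using the device names above):

[root@rhel6-test ~]# for dev in eth0 eth1 eth2; do echo -n "$dev: "; ethtool $dev | grep "Link detected"; done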

 

2. Configure the bonding devices

- bond0 (bonding master)
[root@rhel6-test ~]# vi /etc/sysconfig/network-scripts/ifcfg-bond0
DEVICE=bond0
BOOTPROTO=none
IPADDR=192.168.0.65
NETMASK=255.255.255.0
GATEWAY=192.168.0.1
ONBOOT=yes
BONDING_OPTS="mode=1 miimon=100"
TYPE=Bond
USERCTL=no


- eth0, eth1 (bonding slaves)
[root@rhel6-test ~]# vi /etc/sysconfig/network-scripts/ifcfg-eth0
DEVICE=eth0
BOOTPROTO=none
HWADDR=00:0c:29:4c:15:06
ONBOOT=yes
MASTER=bond0
SLAVE=yes
USERCTL=no
TYPE=Ethernet
[root@rhel6-test ~]# vi /etc/sysconfig/network-scripts/ifcfg-eth1
DEVICE=eth1
BOOTPROTO=none
HWADDR=00:0c:29:4c:15:10
ONBOOT=yes
MASTER=bond0
SLAVE=yes
USERCTL=no
TYPE=Ethernet
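With BONDING_OPTS in ifcfg-bond0, RHEL 6 passes the options straight to the bonding driver, so no /etc/modprobe.d entry is needed. mode=1 above is active-backup; other modes are set the same way, for example (illustrative only, and 802.3ad needs matching LACP configuration on the switch):

BONDING_OPTS="mode=4 miimon=100 lacp_rate=1"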

 

3. Stop the NetworkManager daemon
- If this daemon is running, the bonding device fails to come up (important)
[root@rhel6-test ~]# service NetworkManager stop
[root@rhel6-test ~]# chkconfig NetworkManager off

 

4. Restart the network service
[root@rhel6-test ~]# service network restart
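Before trusting the bond, confirm that the IP address actually landed on bond0 rather than on one of the slaves:

[root@rhel6-test ~]# ip addr show bond0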

 

5. Check the bonding interface
[root@rhel6-test ~]#  cat /proc/net/bonding/bond0
Ethernet Channel Bonding Driver: v3.6.0 (September 26, 2009)

Bonding Mode: fault-tolerance (active-backup)
Primary Slave: None
Currently Active Slave: eth0
MII Status: up
MII Polling Interval (ms): 100
Up Delay (ms): 0
Down Delay (ms): 0

Slave Interface: eth0
MII Status: up
Speed: 1000 Mbps
Duplex: full
Link Failure Count: 0
Permanent HW addr: 00:0c:29:4c:15:06
Slave queue ID: 0

Slave Interface: eth1
MII Status: up
Speed: 1000 Mbps
Duplex: full
Link Failure Count: 0
Permanent HW addr: 00:0c:29:4c:15:10
Slave queue ID: 0
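The same state is exposed under sysfs, which is handy for scripting a failover test (standard bonding driver paths):

[root@rhel6-test ~]# cat /sys/class/net/bond0/bonding/active_slave
eth0

Taking eth0 down with ifdown eth0 should switch the active slave to eth1, and bringing it back up restores the redundancy.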
