- 论坛徽章:
- 0
|
本帖最后由 shanren067 于 2013-10-20 13:45 编辑
大家好,我这里遇到一个问题,如标题所说,现在我需要给两台M4000(solaris10U10 ZFS)挂接同一个卷,为了方便以后能扩空间,用zfs管理,把新加的卷填到DBpool里。但是发现先创建的pool可以成功(db02),再另一台(db01)server里创建就需要加“-f”才行! 但是这就导致先创建db02 的Pool 有问题,详见LOG,是操作方法有问题还是ZFS的局限性? 请高人帮我看一下!先谢谢了!
我知道可以通过SVM也能实现磁盘动态扩展,但是想用ZFS来做,这样方便管理。
root@db01 # format
Searching for disks...done
AVAILABLE DISK SELECTIONS:
0. c0t0d0 <SUN146G cyl 14087 alt 2 hd 24 sec 848>
/pci@0,600000/pci@0/pci@8/pci@0/scsi@1/sd@0,0
1. c0t1d0 <SUN146G cyl 14087 alt 2 hd 24 sec 848> Solaris
/pci@0,600000/pci@0/pci@8/pci@0/scsi@1/sd@1,0
2. c3t600143801259DA290000F000000F0000d0 <HP-HSV360-1100-1.50TB>
/scsi_vhci/ssd@g600143801259da290000f000000f0000
Specify disk (enter its number): ^C
root@pdb01 # exit
logout
Connection to db01 closed.
root@db02 # format
Searching for disks...done
AVAILABLE DISK SELECTIONS:
0. c0t0d0 <SUN146G cyl 14087 alt 2 hd 24 sec 848>
/pci@0,600000/pci@0/pci@8/pci@0/scsi@1/sd@0,0
1. c0t1d0 <SUN146G cyl 14087 alt 2 hd 24 sec 848>
/pci@0,600000/pci@0/pci@8/pci@0/scsi@1/sd@1,0
2. c3t600143801259DA290000F000000F0000d0 <HP-HSV360-1100-1.50TB>
/scsi_vhci/ssd@g600143801259da290000f000000f0000
Specify disk (enter its number):
root@Db02 # zpool list
NAME SIZE ALLOC FREE CAP HEALTH ALTROOT
rpool 136G 39.7G 96.3G 29% ONLINE -
root@Db02 # zpool create DBpool c3t600143801259DA290000F000000F0000d0
invalid vdev specification
use '-f' to override the following errors:
/dev/dsk/c3t600143801259DA290000F000000F0000d0s0 is part of exported or potentially active ZFS pool. Please see zpool(1M).
root@Db02 # zpool create -f DBpool c3t600143801259DA290000F000000F0000d0
root@Db02 #
root@Db02 #
root@Db02 # zpool list
NAME SIZE ALLOC FREE CAP HEALTH ALTROOT
DBpool 1.49T 80K 1.49T 0% ONLINE -
rpool 136G 39.7G 96.3G 29% ONLINE -
root@Db02 # zpool status
pool: DBpool
state: ONLINE
scan: none requested
config:
NAME STATE READ WRITE CKSUM
DBpool ONLINE 0 0 0
c3t600143801259DA290000F000000F0000d0 ONLINE 0 0 0
errors: No known data errors
pool: rpool
state: ONLINE
scan: resilvered 39.7G in 0h12m with 0 errors on Wed Sep 18 14:56:41 2013
config:
NAME STATE READ WRITE CKSUM
rpool ONLINE 0 0 0
mirror-0 ONLINE 0 0 0
c0t1d0s0 ONLINE 0 0 0
c0t0d0s0 ONLINE 0 0 0
errors: No known data errors
root@Db02 # ssh db01
Last login: Sat Oct 19 19:34:31 2013 from db02
Oracle Corporation SunOS 5.10 Generic Patch January 2005
Sourcing //.profile-EIS.....
root@Db01 # zpool list
NAME SIZE ALLOC FREE CAP HEALTH ALTROOT
rpool 136G 46.2G 89.8G 33% ONLINE -
root@Db01 # format
Searching for disks...done
AVAILABLE DISK SELECTIONS:
0. c0t0d0 <SUN146G cyl 14087 alt 2 hd 24 sec 848>
/pci@0,600000/pci@0/pci@8/pci@0/scsi@1/sd@0,0
1. c0t1d0 <SUN146G cyl 14087 alt 2 hd 24 sec 848> Solaris
/pci@0,600000/pci@0/pci@8/pci@0/scsi@1/sd@1,0
2. c3t600143801259DA290000F000000F0000d0 <HP-HSV360-1100-1.50TB>
/scsi_vhci/ssd@g600143801259da290000f000000f0000
Specify disk (enter its number): zp^H^C
root@Db01 # zpool create DBpool c3t600143801259DA290000F000000F0000d0
invalid vdev specification
use '-f' to override the following errors:
/dev/dsk/c3t600143801259DA290000F000000F0000d0s0 is part of exported or potentially active ZFS pool. Please see zpool(1M).
root@Db01 # zpool create -f DBpool c3t600143801259DA290000F000000F0000d0
root@Db01 # zpool list
NAME SIZE ALLOC FREE CAP HEALTH ALTROOT
DBpool 1.49T 80K 1.49T 0% ONLINE -
rpool 136G 46.2G 89.8G 33% ONLINE -
root@Db01 # zpool status
pool: DBpool
state: ONLINE
scan: none requested
config:
NAME STATE READ WRITE CKSUM
DBpool ONLINE 0 0 0
c3t600143801259DA290000F000000F0000d0 ONLINE 0 0 0
errors: No known data errors
pool: rpool
state: ONLINE
scan: resilvered 6.30G in 0h4m with 0 errors on Wed Sep 18 13:29:06 2013
config:
NAME STATE READ WRITE CKSUM
rpool ONLINE 0 0 0
mirror-0 ONLINE 0 0 0
c0t0d0s0 ONLINE 0 0 0
c0t1d0s0 ONLINE 0 0 0
errors: No known data errors
root@Db01 # exit
logout
Connection to db01 closed.
root@Db02 # zpool status
pool: DBpool
state: ONLINE
scan: none requested
config:
NAME STATE READ WRITE CKSUM
DBpool ONLINE 0 0 0
c3t600143801259DA290000F000000F0000d0 ONLINE 0 0 0
errors: No known data errors
pool: rpool
state: ONLINE
scan: resilvered 39.7G in 0h12m with 0 errors on Wed Sep 18 14:56:41 2013
config:
NAME STATE READ WRITE CKSUM
rpool ONLINE 0 0 0
mirror-0 ONLINE 0 0 0
c0t1d0s0 ONLINE 0 0 0
c0t0d0s0 ONLINE 0 0 0
errors: No known data errors
root@Db02 #
root@Db02 # ssh db01
Last login: Sat Oct 19 19:47:43 2013 from db02
Oracle Corporation SunOS 5.10 Generic Patch January 2005
Sourcing //.profile-EIS.....
root@Db01 # zfs create DBpool/lm
root@Db01 # df -h
Filesystem size used avail capacity Mounted on
rpool/ROOT/s10s_u10wos_17b
134G 13G 87G 13% /
/devices 0K 0K 0K 0% /devices
ctfs 0K 0K 0K 0% /system/contract
proc 0K 0K 0K 0% /proc
mnttab 0K 0K 0K 0% /etc/mnttab
swap 58G 432K 58G 1% /etc/svc/volatile
objfs 0K 0K 0K 0% /system/object
sharefs 0K 0K 0K 0% /etc/dfs/sharetab
fd 0K 0K 0K 0% /dev/fd
swap 58G 32K 58G 1% /tmp
swap 58G 72K 58G 1% /var/run
rpool/export 134G 32K 87G 1% /export
rpool/export/home 134G 35K 87G 1% /export/home
rpool 134G 106K 87G 1% /rpool
DBpool 1.5T 31K 1.5T 1% /DBpool
DBpool/lm 1.5T 31K 1.5T 1% /DBpool/lm
root@Db01 # zfs set quota=500g DBpool/lm
root@Db01 # df -h
Filesystem size used avail capacity Mounted on
rpool/ROOT/s10s_u10wos_17b
134G 13G 87G 13% /
/devices 0K 0K 0K 0% /devices
ctfs 0K 0K 0K 0% /system/contract
proc 0K 0K 0K 0% /proc
mnttab 0K 0K 0K 0% /etc/mnttab
swap 58G 432K 58G 1% /etc/svc/volatile
objfs 0K 0K 0K 0% /system/object
sharefs 0K 0K 0K 0% /etc/dfs/sharetab
fd 0K 0K 0K 0% /dev/fd
swap 58G 32K 58G 1% /tmp
swap 58G 72K 58G 1% /var/run
rpool/export 134G 32K 87G 1% /export
rpool/export/home 134G 35K 87G 1% /export/home
rpool 134G 106K 87G 1% /rpool
DBpool 1.5T 32K 1.5T 1% /DBpool
DBpool/lm 500G 31K 500G 1% /DBpool/lm
root@Db01 # exit
logout
Connection to db01 closed.
root@Db02 # zfs create DBpool/data
cannot mount '/DBpool/data': failed to create mountpoint
filesystem successfully created, but not mounted
root@Db02 # df -h
Filesystem size used avail capacity Mounted on
rpool/ROOT/s10s_u10wos_17b
134G 6.2G 93G 7% /
/devices 0K 0K 0K 0% /devices
ctfs 0K 0K 0K 0% /system/contract
proc 0K 0K 0K 0% /proc
mnttab 0K 0K 0K 0% /etc/mnttab
swap 58G 432K 58G 1% /etc/svc/volatile
objfs 0K 0K 0K 0% /system/object
sharefs 0K 0K 0K 0% /etc/dfs/sharetab
fd 0K 0K 0K 0% /dev/fd
swap 58G 32K 58G 1% /tmp
swap 58G 72K 58G 1% /var/run
rpool/export 134G 32K 93G 1% /export
rpool/export/home 134G 38K 93G 1% /export/home
rpool 134G 106K 93G 1% /rpool
DBpool 1.5T 31K 1.5T 1% /DBpool
root@Db02 # cd DBpool/
root@Db02 # ls
root@Db02 #
SUNW-MSG-ID: ZFS-8000-GH, TYPE: Fault, VER: 1, SEVERITY: Major
EVENT-TIME: Sat Oct 19 19:55:09 CST 2013
PLATFORM: SUNW,SPARC-Enterprise, CSN: BEF1014C28, HOSTNAME: Db02
SOURCE: zfs-diagnosis, REV: 1.0
EVENT-ID: 69626b5a-6d3f-e193-c0ef-93c3ff91178a
DESC: The number of checksum errors associated with a ZFS device
exceeded acceptable levels. Refer to http://sun.com/msg/ZFS-8000-GH for more information.
AUTO-RESPONSE: The device has been marked as degraded. An attempt
will be made to activate a hot spare if available.
IMPACT: Fault tolerance of the pool may be compromised.
REC-ACTION: Run 'zpool status -x' and replace the bad device.
root@Db02 # zpool status -x
pool: DBpool
state: DEGRADED
status: One or more devices has experienced an error resulting in data
corruption. Applications may be affected.
action: Restore the file in question if possible. Otherwise restore the
entire pool from backup.
see: http://www.sun.com/msg/ZFS-8000-8A
scan: none requested
config:
NAME STATE READ WRITE CKSUM
DBpool DEGRADED 0 0 6
c3t600143801259DA290000F000000F0000d0 DEGRADED 0 0 24 too many errors
errors: 1 data errors, use '-v' for a list
root@Db02 # zpool status DBpool -v
cannot open '-v': name must begin with a letter
pool: DBpool
state: DEGRADED
status: One or more devices has experienced an error resulting in data
corruption. Applications may be affected.
action: Restore the file in question if possible. Otherwise restore the
entire pool from backup.
see: http://www.sun.com/msg/ZFS-8000-8A
scan: none requested
config:
NAME STATE READ WRITE CKSUM
DBpool DEGRADED 0 0 10
c3t600143801259DA290000F000000F0000d0 DEGRADED 0 0 40 too many errors
errors: 1 data errors, use '-v' for a list
root@Db02 #
|
|