- 论坛徽章:
- 0
|
RS6000 H85双机系统，安装 AIX 4.3.3 操作系统，HA 版本是 4.4.1，HA 配置成 cascading 模式。两台主机分别运行 oracle、cics 两个应用：当某台主机出现故障的时候，另一台主机会接管全部应用；当故障主机恢复后，它会重新接管其高优先级的服务。
在做接管测试的时候，有时候一切正常，但有时候运行 cics 的主机在停止 IP 地址时发生错误，随后该机器的两个地址都无法访问，而运行 oracle 的主机则开始报告找不到对端主机的错误。具体的 hacmp.out 输出如下：
/usr/sbin/cluster/appserver2/appstart.sh[16]: 36814 Abort(coredump)
Feb 20 13:25:42 EVENT START: node_down QYJF2
node_down[56] [[ high = high ]]
node_down[56] version=1.29
node_down[57] node_down[57] cl_get_path
HA_DIR=es
node_down[59] NODENAME=QYJF2
node_down[60] PARAM=
node_down[62] VSD_PROG=/usr/lpp/csd/bin/hacmp_vsd_down1
node_down[71] STATUS=0
node_down[73] [[ -z ]]
node_down[74] EMULATE=REAL
node_down[77] set -u
node_down[79] (( 1 < 1 ))
node_down[84] rm -f /tmp/.NFSSTOPPED
node_down[85] rm -f /tmp/.RPCLOCKDSTOPPED
node_down[87] [[ = forced ]]
node_down[105] UPDATESTATD=0
node_down[106] export UPDATESTATD
node_down[113] set -a
node_down[114] clsetenvgrp QYJF2 node_down
clsetenvgrp[48] [[ high = high ]]
clsetenvgrp[48] version=1.17
clsetenvgrp[50] usingVer=clSetenvgrp
clsetenvgrp[55] clSetenvgrp QYJF2 node_down
executing clSetenvgrp
clSetenvgrp: argc = 3
clSetenvgrp completed successfully
clsetenvgrp[56] exit 0
node_down[114] eval NFS_cics_res="FALSE" NFS_data_res="TRUE" FORCEDOWN_GROUPS="" RESOURCE_GROUPS="cics_res data_res" HOMELESS_GROUPS=""
node_down[114] NFS_cics_res=FALSE NFS_data_res=TRUE FORCEDOWN_GROUPS= RESOURCE_GROUPS=cics_res data_res HOMELESS_GROUPS=
node_down[115] RC=0
node_down[116] set +a
node_down[117] (( 0 != 0 ))
node_down[125] [[ REAL = EMUL ]]
node_down[132] cl_ssa_fence down QYJF2
cl_ssa_fence[70] [[ high = high ]]
cl_ssa_fence[70] version=1.9
cl_ssa_fence[71] cl_ssa_fence[71] cl_get_path
HA_DIR=es
cl_ssa_fence[74] echo PRE_EVENT_MEMBERSHIP=QYJF1 QYJF2
PRE_EVENT_MEMBERSHIP=QYJF1 QYJF2
cl_ssa_fence[75] echo POST_EVENT_MEMBERSHIP=QYJF1
POST_EVENT_MEMBERSHIP=QYJF1
cl_ssa_fence[77] EVENT=down
cl_ssa_fence[78] NODENAME=QYJF2
cl_ssa_fence[79] STATUS=0
cl_ssa_fence[82] export EVENT_ON_NODE=QYJF2
cl_ssa_fence[84] [ 2 -gt 1 ]
cl_ssa_fence[134] [ QYJF2 = QYJF2 ]
cl_ssa_fence[136] [ QYJF1 != ]
cl_ssa_fence[139] exit 0
node_down[138] [[ REAL = EMUL ]]
node_down[145] cl_9333_fence down QYJF2
cl_9333_fence[158] [[ high = high ]]
cl_9333_fence[158] version=1.9
cl_9333_fence[159] cl_9333_fence[159] cl_get_path
HA_DIR=es
cl_9333_fence[162] echo PRE_EVENT_MEMBERSHIP=QYJF1 QYJF2
PRE_EVENT_MEMBERSHIP=QYJF1 QYJF2
cl_9333_fence[163] echo POST_EVENT_MEMBERSHIP=QYJF1
POST_EVENT_MEMBERSHIP=QYJF1
cl_9333_fence[165] EVENT=down
cl_9333_fence[166] NODENAME=QYJF2
cl_9333_fence[167] PARAM=
cl_9333_fence[168] STATUS=0
cl_9333_fence[170] set -u
cl_9333_fence[172] [ 2 -gt 1 ]
cl_9333_fence[286] [ QYJF2 = QYJF2 ]
cl_9333_fence[290] [ QYJF1 != ]
cl_9333_fence[293] exit 0
node_down[155] [[ QYJF2 != QYJF2 ]]
node_down[176] set -a
node_down[177] clsetenvres cics_res node_down
node_down[177] eval NFS_HOST= DISK= CONCURRENT_VOLUME_GROUP= EXPORT_FILESYSTEM= AIX_CONNECTIONS_SERVICES= AIX_FAST_CONNECT_SERVICES= SNA_CONNECTIONS= SHARED_TAPE_RESOURCES= MOUNT_FILESYSTEM= HTY_SERVICE_LABEL= TAKEOVER_LABEL= NFSMOUNT_LABEL= MISC_DATA= NFS_NETWORK= SHARED_TAPE_RESOURCES= APPLICATIONS="cics_app" CASCADE_WO_FALLBACK="false" DISK_FENCING="false" FILESYSTEM="ALL" FSCHECK_TOOL="fsck" FS_BEFORE_IPADDR="false" INACTIVE_TAKEOVER="false" RECOVERY_METHOD="sequential" SERVICE_LABEL="QYJF2_svc" SSA_DISK_FENCING="false" VG_AUTO_IMPORT="false" VOLUME_GROUP="VGFILE"
node_down[177] NFS_HOST= DISK= CONCURRENT_VOLUME_GROUP= EXPORT_FILESYSTEM= AIX_CONNECTIONS_SERVICES= AIX_FAST_CONNECT_SERVICES= SNA_CONNECTIONS= SHARED_TAPE_RESOURCES= MOUNT_FILESYSTEM= HTY_SERVICE_LABEL= TAKEOVER_LABEL= NFSMOUNT_LABEL= MISC_DATA= NFS_NETWORK= SHARED_TAPE_RESOURCES= APPLICATIONS=cics_app CASCADE_WO_FALLBACK=false DISK_FENCING=false FILESYSTEM=ALL FSCHECK_TOOL=fsck FS_BEFORE_IPADDR=false INACTIVE_TAKEOVER=false RECOVERY_METHOD=sequential SERVICE_LABEL=QYJF2_svc SSA_DISK_FENCING=false VG_AUTO_IMPORT=false VOLUME_GROUP=VGFILE
node_down[178] set +a
node_down[179] export GROUPNAME=cics_res
node_down[179] [[ QYJF2 = QYJF2 ]]
node_down[185] clcallev node_down_local
Feb 20 13:25:43 EVENT START: node_down_local
node_down_local[159] [[ high = high ]]
node_down_local[159] version=1.2.1.35
node_down_local[160] node_down_local[160] cl_get_path
HA_DIR=es
node_down_local[162] STATUS=0
node_down_local[164] [ ! -n ]
node_down_local[166] EMULATE=REAL
node_down_local[169] [ 0 -ne 0 ]
node_down_local[175] set -u
node_down_local[183] clchdaemons -f -d clstrmgr_scripts -t resource_locator -o cics_res
UP
node_down_local[184] [ 0 -ne 0 ]
node_down_local[192] set_resource_status RELEASING
node_down_local[3] set +u
node_down_local[4] NOT_DOIT=
node_down_local[5] set -u
node_down_local[6] [ != TRUE ]
node_down_local[8] [ REAL = EMUL ]
node_down_local[13] clchdaemons -d clstrmgr_scripts -t resource_locator -n QYJF2 -o cics_res -v RELEASING
node_down_local[14] [ 0 -ne 0 ]
node_down_local[23] [ RELEASING != ERROR ]
node_down_local[25] cl_RMupdate releasing cics_res node_down_local
Reference string: Mon.Feb.20.13:25:43.beijing.2006.node_down_local.cics_res.ref
node_down_local[26] [ 0 -ne 0 ]
node_down_local[197] [ -n cics_app ]
node_down_local[200] TMPLIST=
node_down_local[201] let cnt=0
node_down_local[202] print cics_app
node_down_local[202] set -A appnames cics_app
node_down_local[204] (( cnt < 1 ))
node_down_local[205] TMPLIST=cics_app
node_down_local[206] APPLICATIONS=cics_app
node_down_local[207] let cnt=cnt+1
node_down_local[204] (( cnt < 1 ))
node_down_local[210] APPLICATIONS=cics_app
node_down_local[213] [ REAL = EMUL ]
node_down_local[218] clcallev stop_server cics_app
Feb 20 13:25:44 EVENT START: stop_server cics_app
stop_server[48] [[ high = high ]]
stop_server[48] version=1.4.1.7
stop_server[49] stop_server[49] cl_get_path
HA_DIR=es
stop_server[51] STATUS=0
stop_server[53] SS_FILE=/usr/es/sbin/cluster/server.status
stop_server[57] [ ! -n ]
stop_server[59] EMULATE=REAL
stop_server[62] set -u
stop_server[68] cl_RMupdate resource_releasing cics_app stop_server
Reference string: Mon.Feb.20.13:25:44.beijing.2006.stop_server.cics_app.ref
stop_server[75] stop_server[75] cllsserv -cn cics_app
stop_server[75] cut -d: -f3
STOP=/usr/sbin/cluster/appserver2/appstop.sh
stop_server[77] PATTERN=QYJF2 cics_app
stop_server[84] grep -x QYJF2 cics_app /usr/es/sbin/cluster/server.status
stop_server[84] 2> /dev/null
stop_server[84] [ QYJF2 cics_app != ]
stop_server[91] [ -x /usr/sbin/cluster/appserver2/appstop.sh ]
stop_server[93] [ REAL = EMUL ]
stop_server[98] /usr/sbin/cluster/appserver2/appstop.sh
stop_server[98] ODMDIR=/etc/objrepos
stop_server[100] [ 0 -ne 0 ]
stop_server[107] cat /usr/es/sbin/cluster/server.status
stop_server[107] grep -vx QYJF2 cics_app
stop_server[107] cat
stop_server[107] 1> /tmp/server.tmp
stop_server[108] mv /tmp/server.tmp /usr/es/sbin/cluster/server.status
stop_server[110] cl_RMupdate resource_down cics_app stop_server
Reference string: Mon.Feb.20.13:25:45.beijing.2006.stop_server.cics_app.ref
stop_server[122] exit 0
Feb 20 13:25:46 EVENT COMPLETED: stop_server cics_app
node_down_local[221] [ 0 -ne 0 ]
node_down_local[230] [ -n ]
node_down_local[248] [ -n ]
node_down_local[267] [[ -n ]]
node_down_local[292] [ -n ]
node_down_local[312] CROSSMOUNT=0
node_down_local[313] export CROSSMOUNT
node_down_local[315] NFSSTOPPED=0
node_down_local[316] export NFSSTOPPED
node_down_local[318] [ -n ]
node_down_local[336] [ -n ]
node_down_local[366] [ 0 = 0 ]
node_down_local[370] odmget HACMPnode
node_down_local[370] sort
node_down_local[370] uniq
node_down_local[370] grep name =
node_down_local[370] wc -l
node_down_local[370] [ 2 -eq 2 ]
node_down_local[372] node_down_local[372] odmget HACMPgroup
node_down_local[372] grep group =
node_down_local[372] awk {print $3}
node_down_local[372] sed s/"//g
RESOURCE_GROUPS=data_res
cics_res
node_down_local[376] node_down_local[376] odmget -q group=data_res AND name=EXPORT_FILESYSTEM HACMPresource
node_down_local[376] grep value
node_down_local[376] awk {print $3}
node_down_local[376] sed s/"//g
EXPORTLIST=
node_down_local[377] [ -n ]
node_down_local[376] node_down_local[376] odmget -q group=cics_res AND name=EXPORT_FILESYSTEM HACMPresource
node_down_local[376] grep value
node_down_local[376] awk {print $3}
node_down_local[376] sed s/"//g
EXPORTLIST=
node_down_local[377] [ -n ]
node_down_local[401] [[ false = true ]]
node_down_local[409] clcallev release_vg_fs ALL VGFILE
Feb 20 13:25:46 EVENT START: release_vg_fs ALL VGFILE
release_vg_fs[61] [[ high = high ]]
release_vg_fs[61] version=1.4.1.21
release_vg_fs[63] STATUS=0
release_vg_fs[65] (( 2 != 2 ))
release_vg_fs[71] FILE_SYSTEMS=ALL
release_vg_fs[72] VOLUME_GROUPS=VGFILE
release_vg_fs[73] VG_MOD=false
release_vg_fs[79] [[ ALL = ALL ]]
release_vg_fs[81] FILE_SYSTEMS=
release_vg_fs[81] [[ -z VGFILE ]]
release_vg_fs[81] [[ -n VGFILE ]]
release_vg_fs[93] release_vg_fs[93] rdsort VGFILE
release_vg_fs[3] echo VGFILE
release_vg_fs[3] sed -e s/\ /\
/g
release_vg_fs[4] sort -ru
VOLUME_GROUPS=VGFILE
release_vg_fs[96] release_vg_fs[96] awk $2 ~ /jfs2*$/ && $7 ~ /^\// {print $7}
release_vg_fs[96] lsvg -l VGFILE
FILE_SYSTEMS=/lbill
/lbill/data
release_vg_fs[96] [[ false = true ]]
release_vg_fs[111] release_vg_fs[111] cl_fs2disk -v /lbill
vg=VGFILE
release_vg_fs[111] [[ = reconfig* ]]
release_vg_fs[129] VOLUME_GROUPS=VGFILE VGFILE
release_vg_fs[111] release_vg_fs[111] cl_fs2disk -v /lbill/data
vg=VGFILE
release_vg_fs[111] [[ = reconfig* ]]
release_vg_fs[129] VOLUME_GROUPS=VGFILE VGFILE VGFILE
release_vg_fs[138] [[ -n /lbill
/lbill/data ]]
release_vg_fs[141] release_vg_fs[141] rdsort /lbill /lbill/data
release_vg_fs[3] echo /lbill /lbill/data
release_vg_fs[3] sed -e s/\ /\
/g
release_vg_fs[4] sort -ru
FILE_SYSTEMS=/lbill/data
/lbill
release_vg_fs[144] cl_RMupdate resource_releasing /lbill/data /lbill release_vg_fs
Reference string: Mon.Feb.20.13:25:50.beijing.2006..lbill..lbill.data.ref
release_vg_fs[146] cl_deactivate_fs /lbill/data /lbill
cl_deactivate_fs[153] [[ high = high ]]
cl_deactivate_fs[153] version=1.1.4.14
cl_deactivate_fs[155] STATUS=0
cl_deactivate_fs[156] SLEEP=2
cl_deactivate_fs[157] TMP_FILENAME=_deactivate_fs.tmp
cl_deactivate_fs[160] [ ! -n ]
cl_deactivate_fs[162] EMULATE=REAL
cl_deactivate_fs[165] [ 2 -eq 0 ]
cl_deactivate_fs[173] echo /lbill/data
cl_deactivate_fs[173] set -A fs_array /lbill/data
cl_deactivate_fs[174] FILE1=/lbill/data
cl_deactivate_fs[179] RES_GRP=cics_res
cl_deactivate_fs[180] TMP_FILENAME=cics_res_deactivate_fs.tmp
cl_deactivate_fs[184] [ sequential = ]
cl_deactivate_fs[190] cl_deactivate_fs[190] sed s/^ //
cl_deactivate_fs[190] print sequential
RECOVERY_METHOD=sequential
cl_deactivate_fs[191] cl_deactivate_fs[191] sed s/ $//
cl_deactivate_fs[191] print sequential
RECOVERY_METHOD=sequential
cl_deactivate_fs[192] [ sequential != sequential -a sequential != parallel ]
cl_deactivate_fs[200] set -u
cl_deactivate_fs[272] [[ -f /tmp/cics_res_deactivate_fs.tmp ]]
cl_deactivate_fs[275] cl_deactivate_fs[275] /bin/sort -r
cl_deactivate_fs[275] /bin/echo /lbill/data
cl_deactivate_fs[275] /bin/echo /lbill
FILELIST=/lbill/data
/lbill
cl_deactivate_fs[282] [ sequential = parallel ]
cl_deactivate_fs[292] [ REAL = EMUL ]
cl_deactivate_fs[297] fs_umount /lbill/data cl_deactivate_fs cics_res_deactivate_fs.tmp
cl_deactivate_fs[4] FS=/lbill/data
cl_deactivate_fs[5] PROGNAME=cl_deactivate_fs
cl_deactivate_fs[6] TMP_FILENAME=cics_res_deactivate_fs.tmp
cl_deactivate_fs[7] STATUS=0
cl_deactivate_fs[10] cl_deactivate_fs[10] cl_fs2disk -l /lbill/data
lv=lbill_lv2
cl_deactivate_fs[12] [ 0 -ne 0 ]
cl_deactivate_fs[23] mount
cl_deactivate_fs[23] awk { print $1 }
cl_deactivate_fs[23] fgrep -s -x /dev/lbill_lv2
cl_deactivate_fs[25] [ 0 -eq 0 ]
cl_deactivate_fs[27] [ ! -f /tmp/.RPCLOCKDSTOPPED ]
cl_deactivate_fs[29] grep -w /lbill/data
cl_deactivate_fs[29] echo
cl_deactivate_fs[30] [ 1 = 0 ]
cl_deactivate_fs[41] fuser -k -u -x /dev/lbill_lv2
/dev/lbill_lv2:
cl_deactivate_fs[42] COUNT=60
cl_deactivate_fs[43] true
cl_deactivate_fs[44] umount /lbill/data
cl_deactivate_fs[45] [ 0 -ne 0 ]
cl_deactivate_fs[64] break
cl_deactivate_fs[70] echo 0
cl_deactivate_fs[70] 1>> /tmp/cics_res_deactivate_fs.tmp
cl_deactivate_fs[71] return 0
cl_deactivate_fs[282] [ sequential = parallel ]
cl_deactivate_fs[292] [ REAL = EMUL ]
cl_deactivate_fs[297] fs_umount /lbill cl_deactivate_fs cics_res_deactivate_fs.tmp
cl_deactivate_fs[4] FS=/lbill
cl_deactivate_fs[5] PROGNAME=cl_deactivate_fs
cl_deactivate_fs[6] TMP_FILENAME=cics_res_deactivate_fs.tmp
cl_deactivate_fs[7] STATUS=0
cl_deactivate_fs[10] cl_deactivate_fs[10] cl_fs2disk -l /lbill
lv=lbill_lv1
cl_deactivate_fs[12] [ 0 -ne 0 ]
cl_deactivate_fs[23] awk { print $1 }
cl_deactivate_fs[23] fgrep -s -x /dev/lbill_lv1
cl_deactivate_fs[23] mount
cl_deactivate_fs[25] [ 0 -eq 0 ]
cl_deactivate_fs[27] [ ! -f /tmp/.RPCLOCKDSTOPPED ]
cl_deactivate_fs[29] grep -w /lbill
cl_deactivate_fs[29] echo
cl_deactivate_fs[30] [ 1 = 0 ]
cl_deactivate_fs[41] fuser -k -u -x /dev/lbill_lv1
/dev/lbill_lv1: 38722c(lbas)Extended read failed (sid=263856, off=804503320).
Extended read failed (sid=263856, off=80450240 .
cl_deactivate_fs[42] COUNT=60
cl_deactivate_fs[43] true
cl_deactivate_fs[44] umount /lbill
cl_deactivate_fs[45] [ 0 -ne 0 ]
cl_deactivate_fs[64] break
cl_deactivate_fs[70] echo 0
cl_deactivate_fs[70] 1>> /tmp/cics_res_deactivate_fs.tmp
cl_deactivate_fs[71] return 0
cl_deactivate_fs[303] wait
cl_deactivate_fs[306] [ -f /tmp/cics_res_deactivate_fs.tmp ]
cl_deactivate_fs[308] grep -q 1 /tmp/cics_res_deactivate_fs.tmp
cl_deactivate_fs[308] [[ 1 -eq 0 ]]
cl_deactivate_fs[311] rm -f /tmp/cics_res_deactivate_fs.tmp
cl_deactivate_fs[315] exit 0
release_vg_fs[153] cl_RMupdate resource_down /lbill/data /lbill release_vg_fs
Reference string: Mon.Feb.20.13:25:51.beijing.2006..lbill..lbill.data.ref
release_vg_fs[175] [[ -n VGFILE VGFILE VGFILE ]]
release_vg_fs[178] release_vg_fs[178] rdsort VGFILE VGFILE VGFILE
release_vg_fs[3] echo VGFILE VGFILE VGFILE
release_vg_fs[3] sed -e s/\ /\
/g
release_vg_fs[4] sort -ru
VOLUME_GROUPS=VGFILE
release_vg_fs[181] cl_RMupdate resource_releasing VGFILE release_vg_fs
Reference string: Mon.Feb.20.13:25:51.beijing.2006.release_vg_fs.VGFILE.ref
release_vg_fs[183] cl_deactivate_vgs VGFILE
cl_deactivate_vgs[162] [[ high = high ]]
cl_deactivate_vgs[162] version=1.1.1.25
cl_deactivate_vgs[164] STATUS=0
cl_deactivate_vgs[165] TMP_FILENAME=_deactivate_vgs.tmp
cl_deactivate_vgs[167] [ ! -n ]
cl_deactivate_vgs[169] EMULATE=REAL
cl_deactivate_vgs[172] EVENT_TYPE=not_set
cl_deactivate_vgs[173] EVENT_TYPE=not_set
cl_deactivate_vgs[176] set -u
cl_deactivate_vgs[179] [ 1 -eq 0 ]
cl_deactivate_vgs[187] [[ -f /tmp/_deactivate_vgs.tmp ]]
cl_deactivate_vgs[194] fgrep -s -x VGFILE
cl_deactivate_vgs[194] lsvg -o
cl_deactivate_vgs[196] [ 0 -ne 0 ]
cl_deactivate_vgs[200] [ REAL = EMUL ]
cl_deactivate_vgs[211] wait
cl_deactivate_vgs[205] vgs_varyoff VGFILE cl_deactivate_vgs _deactivate_vgs.tmp
cl_deactivate_vgs[4] VG=VGFILE
cl_deactivate_vgs[5] PROGNAME=cl_deactivate_vgs
cl_deactivate_vgs[6] TMP_FILENAME=_deactivate_vgs.tmp
cl_deactivate_vgs[7] STATUS=0
cl_deactivate_vgs[7] [[ not_set = reconfig* ]]
cl_deactivate_vgs[19] close_vg VGFILE cl_deactivate_vgs _deactivate_vgs.tmp
cl_deactivate_vgs[4] VG=VGFILE
cl_deactivate_vgs[5] PROGNAME=cl_deactivate_vgs
cl_deactivate_vgs[6] TMP_FILENAME=_deactivate_vgs.tmp
cl_deactivate_vgs[7] STATUS=0
cl_deactivate_vgs[10] cl_deactivate_vgs[10] awk {if ($2 == "jfs" && $6 ~ /open/) print $1}
cl_deactivate_vgs[10] lsvg -l VGFILE
OPEN_LVs=
cl_deactivate_vgs[13] [ -n ]
cl_deactivate_vgs[22] odmget HACMPnode
cl_deactivate_vgs[22] grep name =
cl_deactivate_vgs[22] sort
cl_deactivate_vgs[22] uniq
cl_deactivate_vgs[22] wc -l
cl_deactivate_vgs[22] [ 2 -eq 2 ]
cl_deactivate_vgs[24] [ -n ]
cl_deactivate_vgs[49] varyoffvg VGFILE
cl_deactivate_vgs[50] [ 0 -ne 0 ]
cl_deactivate_vgs[57] echo 0
cl_deactivate_vgs[57] 1>> /tmp/_deactivate_vgs.tmp
cl_deactivate_vgs[58] return 0
cl_deactivate_vgs[214] [ -f /tmp/_deactivate_vgs.tmp ]
cl_deactivate_vgs[216] grep -q 1 /tmp/_deactivate_vgs.tmp
cl_deactivate_vgs[216] [[ 1 -eq 0 ]]
cl_deactivate_vgs[219] rm -f /tmp/_deactivate_vgs.tmp
cl_deactivate_vgs[223] exit 0
release_vg_fs[190] cl_RMupdate resource_down VGFILE release_vg_fs
Reference string: Mon.Feb.20.13:25:54.beijing.2006.release_vg_fs.VGFILE.ref
release_vg_fs[194] exit 0
Feb 20 13:25:54 EVENT COMPLETED: release_vg_fs ALL VGFILE
node_down_local[410] [ 0 -ne 0 ]
node_down_local[417] [ -n ]
node_down_local[432] [[ false != true ]]
node_down_local[434] release_addr
node_down_local[9] [ -n ]
node_down_local[37] [ -n QYJF2_svc ]
node_down_local[39] [ -n ]
node_down_local[47] clcallev release_service_addr QYJF2_svc |
|