San Francisco

dave spink toolset




nas_slice -list ;to view a list of slice names used
nas_volume -list ;to view a list of volume names in use
nas_slice -name $myslice -create d8 8192 ;create slice volume slv1 8GB from existing volume d8
nas_volume -name $myvol -create -Stripe 32768 d10,d12,d13,d14 ;create stripe volume, 32K is stripe size
nas_volume -name $mymetavol -create -Meta $myvol,$myvol2,$myvol3 ;create metavolume
nas_volume -name $myvol -c -S size=10 ;celerra picks the volumes to create 10GB volume


nas_pool -list ;list NAS pool
nas_pool -size $mypl ;show NAS pool size


server_df $mydm ;to see file system usage
server_df $mydm -inode ;see inodes used
nas_fs -list ;list file systems name and id
nas_fs -info $myfs ;see file system info
nas_fs -size $myfs ;see file system size
nas_fs -name $myfs -create size=${mygb}G pool=$mypl -auto_extend no -option slice=y ;create via pool
nas_fs -xtend $myfs size=${mygb}G pool=$mypl ;extend file system
/nas/sbin/rootnas_fs -xtend root_fs_2 size=${mygb}G pool=$mypl ;extend a root file system
server_mountpoint $mydm -create /$myfs ;create mountpoint
server_mountpoint $mydm -list ;list mount points
server_mount $mydm $myfs /$myfs ;mount file system to mount point
server_mount $mydm ;list mounted file systems
server_mount $mydm | grep -i un ;list of unmounted file systems
server_export $mydm -Protocol nfs /$myfs ;make file systems available to NFS
server_export $mydm ;see list of exported filesystems
cd /nas/quota/slot_2/test ;see contents of file system


server_uptime $mydm ;see how long data mover has been up
nas_version ;see nas version of control stations
nas_server -list ;view server table
nas_server -info $mydm ;data mover health
server_cpu $mydm -reboot ;reboot data mover
ls /nas/server/server_1/mounttab ;see mount tab
server_log $mydm -a -s | more ;check logs
grep -i "has panicked" /nas/log/sys_log* ;get logs
server_param $mydm -facility file -modify prefetch -value 0 ;turn off prefetch, requires reboot.
server_param ALL -facility file -list ;see file systems threshold alerts settings
server_standby server_2 -c mover=server_3 -policy auto ;configure server 3 as standby
server_standby server_2 -activate mover ;failover, standby takes MAC, IP, Config
server_standby server_2 -restore mover ;restore data mover
/usr/sbin/showmount -a $mydm ;see what servers are accessing the NFS shares
server_sysconfig ALL -pci ;see device pci info
server_sysconfig ALL -Platform ;see device dm specs
server_file $mydm -get passwd myfile ;basically using ftp to get/put files on data mover
nas_cel -list ;see replication interconnects
cel_fs target -list ;see file systems remotely linked


server_mount $mydm -option rw,noprefetch $myfs /$myfs ;for random file system access
server_mount $mydm -option rw,uncached $myfs /$myfs ;improves DB write performance
server_nfsstat $mydm -nfs ;see percentage of NFS calls
server_sysstat $mydm ;see cpu, memory
.server_config server_4 -v "printstats scsi" ;see scsi busy
.server_config ALL -verbose 'fcp show' ;see FA adaptors
.server_config ALL -verbose 'fcp bind show' ;check bindings


Devices (physical NICs)
server_sysconfig $mydm -v ;see virtual device configs i.e. fsn settings
server_sysconfig $mydm -v -i fsn0 ;check status
server_sysconfig $mydm -pci xxx -o speed=1000 ;hardware set speed
server_sysconfig $mydm -pci xxx -o duplex=full ;hardware set duplex
server_sysconfig $mydm -pci xxx -o linkneg=disable ;hardware disable
server_sysconfig $mydm -v -n fsn0 -c fsn -o "device=cge0,cge1" ;create fsn without primary device (recommend)
Interfaces (logical)
server_ifconfig $mydm -c -D fsn0 -n fsn0 IP (ip,subnet,broadcast) ;configure interfaces
server_ifconfig $mydm -a ;see interfaces
server_netstat $mydm -s ;see resets
server_nfsstat server_4 -s ;io request sizes
server_ping $mydm $myserver ;test network by pinging address
server_ping $mydm -i $myinterface_desc $myserver ;for example -i ustpa3clr01-1-nfs-t2
server_route $mydm -list ;see routes
server_route $mydm -add default "gatewayaddr" ;set default route with destination as
server_route $mydm -add host "hostIPaddr" "gatewayaddr" "netmask" ;add host route
server_dns $mydm (dns_domain_nam) (ip_of_dns_server) ;set DNS
server_dns $mydm -o {start|stop|flush} ;dns start, stop, flush
server_param $mydm -f dns -l ;check dns info
server_param $mydm -f dns -info updateMode -verbose ;dns details
server_param $mydm -f dns -m updateMode -v 1 ;dns changes
.server_config $mydm -v "dns query PTR=" ;nslookup for dart
.server_config $mydm -v "dns query" ;nslookup for dart
.server_config $mydm -v "dns query A=uxnbpr18" ;nslookup for dart


nas_disk -list ;to view a list of unused disk space and sizes
nas_disk -i d127 ;see disk details
nas_storage -c -a ;check storage
nas_storage -i -a ;display backend
nas_storage -i -a | grep port_status ;see HBA status
nas_storage -list ;display storage name and id.
nas_storage -failback id=1 ;like PowerPath restore for CX arrays i.e. first checks path is available
server_devconfig server_2 -list -scsi -all | more ;note, displayed LUN ID is HEX
nas_disk -query:inuse==n ;query example
symdev -sid 1384 -celerra ready 19E1 -noprompt ;if devices mapped and not ready


server_cifs $mydm ;see status of CIFS data movers
server_setup $mydm -P cifs -o start ;start cifs service
server_cifs $mydm -add compname=nasdm2,,interface=cge0-1 ;setup your data mover for CIFS
server_cifs $mydm -J compname=nasdm2,,admin=administrator ;join domain, needs password
server_export $mydm -Protocol cifs -name $share_name /$path_name ;export share
server_cifs $mydm -o audit ;see live CIFS connections
server_usermapper $mydm ;service that maps Windows SIDs to UNIX UIDs/GIDs


nas_quotas -t -on -fs $myfs -path /tree1 ;turn on quota tree, the "/tree1" must be created and empty
nas_quotas -t -off -fs $myfs -path /tree1 ;turn off quota tree, directory must be empty
nas_quotas -t -list -fs $myfs ;list the tree quotas, note the TreeId
nas_quotas -t -edit -fs $myfs 1 ;edit the tree quota values per TreeId in above command
nas_quotas -t -report -fs $myfs ;report on quota trees
server_param server_2 -f quota -info useQuotasInFsStat -v ;set value=1 so df reports only quota space.


server_ifconfig $mydm -c -D cge0 -n cge0-1 IP (ip,subnet,broadcast) ;configure interfaces
nas_server -name vdm01 -type vdm -create server_2 -setstate loaded pool=symm_std ;create a vdm on server_2
server_mount server_2 | grep root_fs_vdm_vdm01 ;confirm root file system created
server_setup $mydm -P cifs -o start ;start cifs
server_cifs vdm01 -add compname=vdm01,,interface=cge0-1 ;create cifs server on your vdm
server_cifs vdm01 -J compname=vdm01,,admin=administrator ;join domain
server_cifs vdm01 ;see CIFS info
nas_fs -name $myfs -create size=1G pool=$mypl -o slice=y ;create file system
server_mountpoint vdm01 -create /$myfs ;create mount point on vdm
server_mount vdm01 $myfs /$myfs ;mount file system on vdm
server_mountpoint vdm01 -create /$myfs/dir ;create directory to hide ./etc and lost&found
server_export vdm01 -P cifs -name $share_name /$myfs/dir ;export share name
\\vdm01\$share_name ;test share
/nas/sbin/rootnas_fs -info root_fs_vdm_vdm01 ;see info on vdm root file system
nas_server -v vdm01 -move server_3 ;move vdm, update dns entries


/nas/sbin/serial ;nas serial number
/nas/sbin/getreason ;check the control station status i.e. 10 and 11 mean up
cs_standby -failover ;issue from Active CS.
cs_standby -takeover ;issue from Standby CS.
nas_checkup ;check nas.


server_date $mydm ;display time
server_date $mydm timesvc start ntp (NTP server IP) ;to start and immediately use returned time
server_date $mydm timesvc start ntp -sync_delay (NTP server IP) ;ntp start and begin slewing system time
server_date $mydm timesvc stop ;stop ntp
server_date $mydm timesvc delete ntp ;to change ntp configuration, delete then re-create
server_date $mydm timesvc ;see services configuration
server_date $mydm timesvc set ntp ;to immediately set time to system returned time
server_date $mydm timesvc update ntp ;to force data mover to start a gradual time slew
server_date $mydm timesvc stats ntp ;ntp stats, look for hits going up
server_date $mydm 0907161336 ;manually set date yymmddhhmm July 16 1:36PM
/sbin/service ntpd status ;see if ntpd is running on control station