source: src-sh/lpreserver/backend/functions.sh @ 9cd0fa0

Branches: releng/10.0.1, releng/10.0.2
Last change on this file since 9cd0fa0 was 9cd0fa0, checked in by Kris Moore <kris@…>, 5 months ago
  • Major update to life-preserver backend

Add new "automatic" mode for creating snapshots

When enabled, this will create snapshots in the following manner:

  • Snapshots will be created every 5 minutes and kept for an hour.
  • An hourly snapshot will be kept for a day.
  • A daily snapshot will be kept for a month.
  • A monthly snapshot will be kept for a year.
  • The life-preserver daemon will also keep track of the zpool disk space; if the capacity falls below 75%, the oldest snapshot will be auto-pruned.
  • Property mode set to 100755
File size: 18.1 KB
Line 
#!/bin/sh
# Functions / variables for lpreserver
######################################################################
# DO NOT EDIT

# Source external functions
. /usr/local/share/pcbsd/scripts/functions.sh

# Installation directory
PROGDIR="/usr/local/share/lpreserver"

# Location of settings
DBDIR="/var/db/lpreserver"
[ -d "$DBDIR" ] || mkdir -p "${DBDIR}"

# Log / state files used throughout the backend
CMDLOG="${DBDIR}/lp-lastcmdout"
CMDLOG2="${DBDIR}/lp-lastcmdout2"
REPCONF="${DBDIR}/replication"
LOGDIR="/var/log/lpreserver"
REPLOGSEND="${LOGDIR}/lastrep-send-log"
REPLOGRECV="${LOGDIR}/lastrep-recv-log"
MSGQUEUE="${DBDIR}/.lpreserver.msg.$$"
export DBDIR LOGDIR PROGDIR CMDLOG REPCONF REPLOGSEND REPLOGRECV MSGQUEUE

# Create the logdir
[ -d "$LOGDIR" ] || mkdir -p "${LOGDIR}"
# Load the user-configurable options from $DBDIR into the environment
setOpts() {
  # Recursive snapshots are on unless the sentinel file disables them
  if [ -e "${DBDIR}/recursive-off" ] ; then
    export RECURMODE="OFF"
  else
    export RECURMODE="ON"
  fi

  # E-mail verbosity level (validated below)
  if [ -e "${DBDIR}/emaillevel" ] ; then
    export EMAILMODE="$(cat ${DBDIR}/emaillevel)"
  fi

  # Disk-usage warning threshold (percent); defaults to 85
  if [ -e "${DBDIR}/duwarn" ] ; then
    export DUWARN="$(cat ${DBDIR}/duwarn)"
  else
    export DUWARN=85
  fi

  # Anything other than the known levels falls back to WARN
  case $EMAILMODE in
      ALL|WARN|ERROR) ;;
      *) export EMAILMODE="WARN" ;;
  esac

  # Addresses that receive notification mail
  if [ -e "${DBDIR}/emails" ] ; then
    export EMAILADDY="$(cat ${DBDIR}/emails)"
  fi
}
setOpts
57
58
# Succeed (0) when something is mounted on directory $1
isDirMounted() {
  mount | grep -q "on $1 ("
}
64
# Create a snapshot of dataset $1 named "$2<timestamp>".
# Recursive (-r) only when RECURMODE=ON; output captured in $CMDLOG.
# Returns the status of `zfs snapshot`.
mkZFSSnap() {
  if [ "$RECURMODE" = "ON" ] ; then
     flags="-r"
  else
     # Bugfix: both branches previously set "-r", making RECURMODE=OFF a no-op
     flags=""
  fi
  zdate=`date +%Y-%m-%d-%H-%M-%S`
  # Bugfix: use 2>&1 instead of opening $CMDLOG twice for stdout and stderr
  zfs snapshot $flags ${1}@$2${zdate} >${CMDLOG} 2>&1
  return $?
}
75
# Print the zfs snapshot-list header plus all snapshots of dataset $1
listZFSSnap() {
  local ds="$1"
  zfs list -t snapshot | grep -e "^NAME" -e "^${ds}@"
}
79
# Destroy snapshot $2 of dataset $1 (recursively when RECURMODE=ON).
# Aborts via exit_err when the snapshot does not exist.
rmZFSSnap() {
  # Idiom fix: drop the backticks that re-executed grep's (empty) output
  zfs list -t snapshot | grep -q "^$1@$2 " || exit_err "No such snapshot!"
  if [ "$RECURMODE" = "ON" ] ; then
     flags="-r"
  else
     # Bugfix: both branches previously set "-r"
     flags=""
  fi
  # Bugfix: honor the computed $flags (destroy used a hard-coded -r),
  # and collect stderr via 2>&1 instead of opening $CMDLOG twice
  zfs destroy $flags ${1}@${2} >${CMDLOG} 2>&1
  return $?
}
90
# Roll dataset $1 back to snapshot $2, destroying newer snapshots (-R)
revertZFSSnap() {
  # Make sure this is a valid snapshot
  # (idiom fix: removed backticks that executed grep's empty output)
  zfs list -t snapshot | grep -q "^$1@$2 " || exit_err "No such snapshot!"

  # Rollback the snapshot
  zfs rollback -R -f ${1}@$2
}
98
# (Re)install the crontab line that snapshots dataset $1.
# $2 = frequency (daily/hourly/30min/10min/5min/auto) or OFF to remove,
# $3 = extra argument handed to runsnap.sh, $4 = hour (daily mode only).
enable_cron()
{
   cronscript="${PROGDIR}/backend/runsnap.sh"

   # Drop any previous entry for this dataset before (possibly) re-adding one
   grep -v " $cronscript $1" /etc/crontab > /etc/crontab.new
   mv /etc/crontab.new /etc/crontab
   if [ "$2" = "OFF" ] ; then
      return
   fi

   # Translate the frequency keyword into cron time fields
   case $2 in
       daily) cLine="0       $4      *       *       *" ;;
      hourly) cLine="0       *       *       *       *" ;;
       30min) cLine="0,30    *       *       *       *" ;;
       10min) cLine="*/10    *       *       *       *" ;;
   5min|auto) cLine="*/5     *       *       *       *" ;;
           *) exit_err "Invalid time specified" ;;
   esac

   echo -e "$cLine\troot    ${cronscript} $1 $3" >> /etc/crontab
}
121
# Install the zpool disk-space watcher in the crontab, once only
enable_watcher()
{
   watchscript="${PROGDIR}/backend/zfsmon.sh"

   # Check if the zfs monitor is already enabled
   if grep -q " $watchscript" /etc/crontab ; then
      return
   fi

   echo -e "*/30    *       *       *       *\troot    ${watchscript}" >> /etc/crontab
}
134
# Print the snapshot names (the part after '@') of dataset $1, one per line
snaplist() {
  local ds="$1"
  zfs list -t snapshot | grep "^${ds}@" | cut -d '@' -f 2 | awk '{print $1}'
}
138
# Append a timestamped message to the main lpreserver log
echo_log() {
   echo "$(date): $@" >> "${LOGDIR}/lpreserver.log"
}
142
# E-Mail a message to the configured addresses (no-op when none are set)
# 1 = subject tag
# 2 = Message
email_msg() {
   if [ -z "$EMAILADDY" ] ; then return ; fi
   echo -e "$2" | mail -s "$1 - $(hostname)" $EMAILADDY
}
150
# Append message $1 (plus, optionally, the contents of file $2) to the queue
queue_msg() {
  echo -e "$1" >> ${MSGQUEUE}
  if [ -n "$2" ] ; then
    cat "$2" >> ${MSGQUEUE}
  fi
}
157
# Dump the queued messages to stdout and clear the queue, if any exist
echo_queue_msg() {
  if [ -e "$MSGQUEUE" ] ; then
     cat "${MSGQUEUE}"
     rm "${MSGQUEUE}"
  fi
}
163
# Register a replication task: host user port local-dataset remote-dataset time
# e.g.  add_rep_task freenas.8343 backupuser 22 tank1/usr/home/kris tankbackup/backups sync
add_rep_task() {
  HOST=$1
  USER=$2
  PORT=$3
  LDATA=$4
  RDATA=$5
  TIME=$6

  # Time must be a two-digit hour or the keyword "sync"
  case $TIME in
     [0-9][0-9]|sync)  ;;
     *) exit_err "Invalid time: $TIME"
  esac

  echo "Adding replication task for local dataset $LDATA"
  echo "----------------------------------------------------------"
  echo "   Remote Host: $HOST"
  echo "   Remote User: $USER"
  echo "   Remote Port: $PORT"
  echo "Remote Dataset: $RDATA"
  echo "          Time: $TIME"
  echo "----------------------------------------------------------"
  echo "Don't forget to ensure that this user / dataset exists on the remote host"
  echo "with the correct permissions!"

  # Replace any existing entry for this dataset, then record the new one
  rem_rep_task "$LDATA"
  echo "$LDATA:$TIME:$HOST:$USER:$PORT:$RDATA" >> ${REPCONF}

  # Non-sync tasks also get a crontab line firing at the requested hour
  if [ "$TIME" != "sync" ] ; then
    cronscript="${PROGDIR}/backend/runrep.sh"
    echo -e "0    $TIME       *       *       *\troot    ${cronscript} ${LDATA}" >> /etc/crontab
  fi
}
198
# Remove the replication config entry (and its cron line) for dataset $1
rem_rep_task() {
  [ -e "$REPCONF" ] || return 0
  grep -v "^${1}:" ${REPCONF} > ${REPCONF}.tmp
  mv ${REPCONF}.tmp ${REPCONF}

  # Make sure we remove any old replication entries for this dataset
  cronscript="${PROGDIR}/backend/runrep.sh"
  grep -v " $cronscript $1" /etc/crontab > /etc/crontab.new
  mv /etc/crontab.new /etc/crontab
}
209
# Print a human-readable listing of every configured replication task
list_rep_task() {
  [ -e "$REPCONF" ] || return 0

  echo "Scheduled replications:"
  echo "---------------------------------"

  # Config lines are colon-separated: dataset:time:host:user:port:remote
  while read line
  do
     ds=`echo $line | cut -d ':' -f 1`
     when=`echo $line | cut -d ':' -f 2`
     rhost=`echo $line | cut -d ':' -f 3`
     ruser=`echo $line | cut -d ':' -f 4`
     rport=`echo $line | cut -d ':' -f 5`
     rdata=`echo $line | cut -d ':' -f 6`

     echo "$ds -> $ruser@$rhost[$rport]:$rdata Time: $when"

  done < ${REPCONF}
}
229
# Decide whether a replication should run now for dataset $1.
# $2 = "force" (run unconditionally), "sync" (only run sync-marked tasks),
#      anything else = scheduled run.
# Exports DIDREP=1 when a replication is started, plus REPHOST/REPUSER/
# REPPORT/REPRDATA and pidFile for use by start_rep_task.
check_rep_task() {
  export DIDREP=0
  if [ ! -e "$REPCONF" ] ; then return 0; fi

  repLine=`cat ${REPCONF} | grep "^${1}:"`
  if [ -z "$repLine" ] ; then return 0; fi

  # We have a replication task for this dataset, lets check if we need to do it now
  LDATA="$1"
  REPTIME=`echo $repLine | cut -d ':' -f 2`

  # Export the replication variables we will be using
  export REPHOST=`echo $repLine | cut -d ':' -f 3`
  export REPUSER=`echo $repLine | cut -d ':' -f 4`
  export REPPORT=`echo $repLine | cut -d ':' -f 5`
  export REPRDATA=`echo $repLine | cut -d ':' -f 6`

  # PID file used both for locking and for cleanup inside start_rep_task
  export pidFile="${DBDIR}/.reptask-`echo ${LDATA} | sed 's|/|-|g'`"

  if [ "$2" = "force" ] ; then
     # Ready to do a forced replication
     # Bugfix: pidFile was previously left unset on this path, so
     # start_rep_task's "rm ${pidFile}" failed; record our PID like the
     # scheduled path does.
     echo "$$" > ${pidFile}
     export DIDREP=1
     echo_log "Starting replication MANUAL task on ${DATASET}: ${REPLOGSEND}"
     queue_msg "`date`: Starting replication MANUAL task on ${DATASET}\n"
     start_rep_task "$LDATA"
     return $?
  fi

  # If we are checking for a sync task, and the rep isn't marked as sync we can return
  if [ "$2" = "sync" -a "$REPTIME" != "sync" ] ; then return 0; fi

  # Doing a replication task, check if one is in progress
  if [ -e "${pidFile}" ] ; then
     pgrep -F ${pidFile} >/dev/null 2>/dev/null
     if [ $? -eq 0 ] ; then
        echo_log "Skipped replication on $LDATA, previous replication is still running."
        return 0
     else
        # Stale lock left by a dead process
        rm ${pidFile}
     fi
  fi

  # Save this PID
  echo "$$" > ${pidFile}

  # Is this a sync-task we do at the time of a snapshot?
  # NOTE(review): messages reference ${DATASET}, which is set by the caller,
  # not this function — presumably equal to $LDATA; confirm.
  if [ "$2" = "sync" -a "$REPTIME" = "sync" ] ; then
     repKind="SYNC"
  else
     # Ready to do a scheduled replication
     repKind="SCHEDULED"
  fi
  export DIDREP=1
  echo_log "Starting replication $repKind task on ${DATASET}: ${REPLOGSEND}"
  queue_msg "`date`: Starting replication $repKind task on ${DATASET}\n"
  start_rep_task "$LDATA"
  return $?
}
290
# Perform the actual zfs send | ssh zfs receive for dataset $1, using the
# connection variables exported by check_rep_task (REPHOST, REPUSER,
# REPPORT, REPRDATA) and the lock file $pidFile, which is removed on every
# exit path. The property backup:lpreserver=LATEST marks the last snapshot
# known to be replicated, enabling incremental (-I) sends.
start_rep_task() {
  LDATA="$1"
  hName=`hostname`

  # Check for the last snapshot marked as replicated already
  lastSEND=`zfs get -r backup:lpreserver ${LDATA} | grep LATEST | awk '{$1=$1}1' OFS=" " | tail -1 | cut -d '@' -f 2 | cut -d ' ' -f 1`

  # Lets get the last snapshot for this dataset
  lastSNAP=`zfs list -t snapshot -d 1 -H ${LDATA} | tail -1 | awk '{$1=$1}1' OFS=" " | cut -d '@' -f 2 | cut -d ' ' -f 1`

  # Nothing new since the last replication -> bail out with non-zero status
  if [ "$lastSEND" = "$lastSNAP" ] ; then
     queue_msg "`date`: Last snapshot $lastSNAP is already marked as replicated!"
     rm ${pidFile}
     return 1
  fi

  # Starting replication, first lets check if we can do an incremental send
  if [ -n "$lastSEND" ] ; then
     zFLAGS="-Rv -I $lastSEND $LDATA@$lastSNAP"
  else
     zFLAGS="-Rv $LDATA@$lastSNAP"

     # This is a first-time replication, lets create the new target dataset
     ssh -p ${REPPORT} ${REPUSER}@${REPHOST} zfs create ${REPRDATA}/${hName} >${CMDLOG} 2>${CMDLOG}
  fi

  zSEND="zfs send $zFLAGS"
  zRCV="ssh -p ${REPPORT} ${REPUSER}@${REPHOST} zfs receive -dvuF ${REPRDATA}/${hName}"

  queue_msg "Using ZFS send command:\n$zSEND | $zRCV\n\n"

  # Start up our process
  # NOTE(review): $? below is the status of the receive side only; a send
  # failure with a successful receive would go undetected — confirm intent.
  $zSEND 2>${REPLOGSEND} | $zRCV >${REPLOGRECV} 2>${REPLOGRECV}
  zStatus=$?
  queue_msg "ZFS SEND LOG:\n--------------\n" "${REPLOGSEND}"
  queue_msg "ZFS RCV LOG:\n--------------\n" "${REPLOGRECV}"

  if [ $zStatus -eq 0 ] ; then
     # SUCCESS!
     # Lets mark our new latest snapshot and unmark the last one
     if [ -n "$lastSEND" ] ; then
       zfs set backup:lpreserver=' ' ${LDATA}@$lastSEND
     fi
     zfs set backup:lpreserver=LATEST ${LDATA}@$lastSNAP
     # NOTE(review): ${DATASET} is set by the caller, presumably equal to
     # $LDATA — confirm.
     echo_log "Finished replication task on ${DATASET}"
     save_rep_props
     zStatus=$?
  else
     # FAILED :-(
     # Lets save the output for us to look at later
     FLOG=${LOGDIR}/lpreserver_failed.log
     echo "Failed with command:\n$zSEND | $zRCV\n" > ${FLOG}
     echo "\nSending log:\n" >> ${FLOG}
     cat ${REPLOGSEND} >> ${FLOG}
     echo "\nRecv log:\n" >> ${FLOG}
     cat ${REPLOGRECV} >> ${FLOG}
     echo_log "FAILED replication task on ${DATASET}: LOGFILE: $FLOG"
  fi

  rm ${pidFile}
  return $zStatus
}
353
# Push the locally-set ZFS properties of ${DATASET} to the replication
# target so they can be restored later. Only relevant when replicating a
# complete top-level dataset recursively.
save_rep_props() {
  # If we are not doing a recursive backup / complete dataset we can skip this
  [ "$RECURMODE" = "ON" ] || return 0
  if [ "$(basename $DATASET)" != "$DATASET" ] ; then return 0; fi
  hName="$(hostname)"

  echo_log "Saving dataset properties for: ${DATASET}"
  queue_msg "$(date): Saving dataset properties for: ${DATASET}\n"

  # Remote file that stores the property dump ('/' mangled to '#')
  rProp=".lp-props-$(echo ${REPRDATA}/${hName} | sed 's|/|#|g')"

  # Ship every locally-set property line to the remote host
  zfs get -r all $DATASET | grep ' local$' | awk '{$1=$1}1' OFS=" " | sed 's| local$||g' \
        | ssh -p ${REPPORT} ${REPUSER}@${REPHOST} "cat > \"$rProp\""
  if [ $? -eq 0 ] ; then
    echo_log "Successful save of dataset properties for: ${DATASET}"
    queue_msg "$(date): Successful save of dataset properties for: ${DATASET}\n"
    return 0
  else
    echo_log "Failed saving dataset properties for: ${DATASET}"
    queue_msg "$(date): Failed saving dataset properties for: ${DATASET}\n"
    return 1
  fi
}
378
# Print one summary line per cron-managed dataset: name, newest snapshot,
# and last replicated snapshot (from the backup:lpreserver property).
listStatus() {

  for ds in $(grep "${PROGDIR}/backend/runsnap.sh" /etc/crontab | awk '{print $8}')
  do
    echo -e "DATASET - SNAPSHOT - REPLICATION"
    echo "------------------------------------------"

    lastSEND=$(zfs get -r backup:lpreserver ${ds} | grep LATEST | awk '{$1=$1}1' OFS=" " | tail -1 | cut -d '@' -f 2 | cut -d ' ' -f 1)
    lastSNAP=$(zfs list -t snapshot -d 1 -H ${ds} | tail -1 | awk '{$1=$1}1' OFS=" " | cut -d '@' -f 2 | cut -d ' ' -f 1)

    [ -z "$lastSEND" ] && lastSEND="NONE"
    [ -z "$lastSNAP" ] && lastSNAP="NONE"

    echo "$ds - $lastSNAP - $lastSEND"
  done
}
395
# Attach a new disk $2 as a mirror to pool $1: wipe it, partition it to
# match the pool's first existing disk (MBR or GPT scheme), attach it, and
# stamp GRUB so the machine stays bootable. FreeBSD-only (gpart/diskinfo).
add_zpool_disk() {
   pool="$1"
   disk="$2"
   # Accept both "ada1" and "/dev/ada1"
   disk="`echo $disk | sed 's|/dev/||g'`"

   if [ -z "$pool" ] ; then
      exit_err "No pool specified"
      # NOTE(review): exit_err (from sourced functions.sh) presumably
      # terminates; this "exit 0" looks unreachable — confirm.
      exit 0
   fi

   if [ -z "$disk" ] ; then
      exit_err "No disk specified"
      exit 0
   fi

   if [ ! -e "/dev/$disk" ] ; then
      exit_err "No such device: $disk"
      exit 0
   fi

   # Refuse disks that already belong to some zpool
   zpool list -H -v | awk '{print $1}' | grep -q "^$disk"
   if [ $? -eq 0 ] ; then
      exit_err "Error: This disk is already apart of a zpool!"
   fi

   # Check if pool exists
   zpool status $pool >/dev/null 2>/dev/null
   if [ $? -ne 0 ] ; then exit_err "Invalid pool: $pool"; fi

   # Cleanup the target disk: destroy the partition table, then zero the
   # first MB and the last ~4MB (where backup GPT/metadata can live)
   echo "Deleting all partitions on: $disk"
   rc_nohalt "gpart destroy -F $disk" >/dev/null 2>/dev/null
   rc_nohalt "dd if=/dev/zero of=/dev/${disk} bs=1m count=1" >/dev/null 2>/dev/null
   rc_nohalt "dd if=/dev/zero of=/dev/${disk} bs=1m oseek=`diskinfo /dev/${disk} | awk '{print int($3 / (1024*1024)) - 4;}'`" >/dev/null 2>/dev/null

   # Grab the first disk in the pool (used as the partition-layout template)
   mDisk=`zpool list -H -v | grep -v "^$pool" | awk '{print $1}' | grep -v "^mirror" | grep -v "^raidz" | head -n 1`

   # Is this MBR or GPT? MBR slices look like "ada0s1a"
   echo $mDisk | grep -q 's[0-4][a-z]$'
   if [ $? -eq 0 ] ; then
      # MBR
      type="MBR"
      # Strip off the "a-z"
      rDiskDev=`echo $mDisk | rev | cut -c 2- | rev`
   else
      # GPT
      type="GPT"
      # Strip off the "p[1-9]"
      rDiskDev=`echo $mDisk | rev | cut -c 3- | rev`
   fi

   # Make sure this disk has a layout we can read
   gpart show $rDiskDev >/dev/null 2>/dev/null
   if [ $? -ne 0 ] ; then
      exit_err "failed to get disk device layout $rDiskDev"
   fi

   # Get the size of "freebsd-zfs & freebsd-swap" from the template disk
   sSize=`gpart show ${rDiskDev} | grep freebsd-swap | cut -d "(" -f 2 | cut -d ")" -f 1`
   zSize=`gpart show ${rDiskDev} | grep freebsd-zfs | cut -d "(" -f 2 | cut -d ")" -f 1`

   echo "Creating new partitions on $disk"
   if [ "$type" = "MBR" ] ; then
      # Create the new MBR layout mirroring the template disk
      rc_halt_s "gpart create -s MBR -f active $disk"
      rc_halt_s "gpart add -a 4k -t freebsd $disk"
      rc_halt_s "gpart set -a active -i 1 $disk"
      rc_halt_s "gpart create -s BSD ${disk}s1"
      rc_halt_s "gpart add -t freebsd-zfs -s $zSize ${disk}s1"
      if [ -n "$sSize" ] ; then
        rc_halt_s "gpart add -t freebsd-swap -s $sSize ${disk}s1"
      fi
      aDev="${disk}s1a"
   else
      # Creating a GPT disk with a bios-boot partition for GRUB
      rc_halt_s "gpart create -s GPT $disk"
      rc_halt_s "gpart add -b 34 -s 1M -t bios-boot $disk"
      rc_halt_s "gpart add -t freebsd-zfs -s $zSize ${disk}"
      if [ -n "$sSize" ] ; then
        rc_halt_s "gpart add -t freebsd-swap -s $sSize ${disk}"
      fi
      aDev="${disk}p2"
   fi

   # Now we can insert the target disk
   echo "Attaching to zpool: $aDev"
   rc_halt_s "zpool attach $pool $mDisk $aDev"

   # Lastly we need to stamp GRUB
   # NOTE(review): assumes the system boots via GRUB — confirm.
   echo "Stamping GRUB on: $disk"
   rc_halt_s "grub-install --force /dev/${disk}"

   echo "Added $disk ($aDev) to zpool $pool. Resilver will begin automatically."
   exit 0
}
492
# Show the vdev/disk layout of pool $1
list_zpool_disks() {
   pool="$1"

   if [ -z "$pool" ] ; then
      exit_err "No pool specified"
      exit 0
   fi

   # Check if pool exists
   if ! zpool status $pool >/dev/null 2>/dev/null ; then
      exit_err "Invalid pool: $pool"
   fi

   zpool list -H -v $pool
}
507
# Detach disk $2 from pool $1
rem_zpool_disk() {
   pool="$1"
   disk="$2"

   if [ -z "$pool" ] ; then
      exit_err "No pool specified"
      exit 0
   fi

   if [ -z "$disk" ] ; then
      exit_err "No disk specified"
      exit 0
   fi

   # Check if pool exists
   if ! zpool status $pool >/dev/null 2>/dev/null ; then
      exit_err "Invalid pool: $pool"
   fi

   zpool detach $pool $disk || exit_err "Failed detaching $disk"
   echo "$disk was detached successfully!"
   exit 0
}
533
# Take disk $2 in pool $1 offline; exits with zpool's status
offline_zpool_disk() {
   pool="$1"
   disk="$2"

   if [ -z "$pool" ] ; then
      exit_err "No pool specified"
      exit 0
   fi

   if [ -z "$disk" ] ; then
      exit_err "No disk specified"
      exit 0
   fi

   # Check if pool exists
   if ! zpool status $pool >/dev/null 2>/dev/null ; then
      exit_err "Invalid pool: $pool"
   fi

   zpool offline $pool $disk
   exit $?
}
555
# Bring disk $2 in pool $1 back online; exits with zpool's status
online_zpool_disk() {
   pool="$1"
   disk="$2"

   if [ -z "$pool" ] ; then
      exit_err "No pool specified"
      exit 0
   fi

   if [ -z "$disk" ] ; then
      exit_err "No disk specified"
      exit 0
   fi

   # Check if pool exists
   if ! zpool status $pool >/dev/null 2>/dev/null ; then
      exit_err "Invalid pool: $pool"
   fi

   zpool online $pool $disk
   exit $?
}
577
# Reset replication state for dataset $1: delete the remote copy (if any)
# and clear the local backup:lpreserver=LATEST marker so that the next
# replication performs a full send.
init_rep_task() {

  LDATA="$1"

  # Robustness fix: bail out quietly when no replication config exists
  # (matches the guard used by the other *_rep_task functions; previously
  # `cat` errored on a missing file)
  if [ ! -e "$REPCONF" ] ; then return 0; fi

  repLine=`cat ${REPCONF} | grep "^${LDATA}:"`
  if [ -z "$repLine" ] ; then return 0; fi

  # We have a replication task for this set, get some vars
  hName=`hostname`
  REPHOST=`echo $repLine | cut -d ':' -f 3`
  REPUSER=`echo $repLine | cut -d ':' -f 4`
  REPPORT=`echo $repLine | cut -d ':' -f 5`
  REPRDATA=`echo $repLine | cut -d ':' -f 6`

  # First check if we even have a dataset on the remote
  ssh -p ${REPPORT} ${REPUSER}@${REPHOST} zfs list ${REPRDATA}/${hName} 2>/dev/null >/dev/null
  if [ $? -eq 0 ] ; then
     # Lets cleanup the remote side
     echo "Removing remote dataset: ${REPRDATA}/${hName}"
     ssh -p ${REPPORT} ${REPUSER}@${REPHOST} zfs destroy -r ${REPRDATA}/${hName}
     if [ $? -ne 0 ] ; then
        echo "Warning: Could not delete remote dataset ${REPRDATA}/${hName}"
     fi
  fi

  # Now lets mark none of our datasets as replicated
  lastSEND=`zfs get -r backup:lpreserver ${LDATA} | grep LATEST | awk '{$1=$1}1' OFS=" " | tail -1 | cut -d '@' -f 2 | cut -d ' ' -f 1`
  if [ -n "$lastSEND" ] ; then
     zfs set backup:lpreserver=' ' ${LDATA}@$lastSEND
  fi

}
610
## Remove the oldest life-preserver "auto" snapshot on zpool $1; invoked
## by zfsmon.sh when free disk space on the pool runs low.
do_pool_cleanup()
{
  # Is this zpool managed by life-preserver?
  grep -q "${PROGDIR}/backend/runsnap.sh ${1} " /etc/crontab
  if [ $? -ne 0 ] ; then return ; fi

  # Before we start pruning, check if any replication is running
  local pidFile="${DBDIR}/.reptask-`echo ${1} | sed 's|/|-|g'`"
  if [ -e "${pidFile}" ] ; then
     if pgrep -F ${pidFile} >/dev/null 2>/dev/null ; then return; fi
  fi

  # Walk the pool's snapshots, pruning the first auto-created one we find
  for snap in $(snaplist "${1}")
  do
     # Only remove snapshots which are auto-created by life-preserver
     prefix="`echo $snap | cut -d '-' -f 1`"
     [ "$prefix" = "auto" ] || continue

     echo_log "Pruning old snapshot: $snap"
     if rmZFSSnap "${1}" "$snap" ; then
       queue_msg "(Low Disk Space) Auto-pruned snapshot: $snap on ${1} @ `date`\n\r`cat $CMDLOG`"
       haveMsg=1
     else
       haveMsg=1
       echo_log "ERROR: (Low Disk Space) Failed pruning snapshot $snap on ${1}"
       queue_msg "ERROR: (Low Disk Space) Failed pruning snapshot $snap on ${1} @ `date` \n\r`cat $CMDLOG`"
     fi

     # We only prune a single snapshot at this time, so lets end
     break
  done

  return 0
}
Note: See TracBrowser for help on using the repository browser.