File: //bin/duplicity
#! /usr/bin/python2
# -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# duplicity -- Encrypted bandwidth efficient backup
# Version 0.6.18 released February 29, 2012
#
# Copyright 2002 Ben Escoto <ben@emerose.org>
# Copyright 2007 Kenneth Loafman <kenneth@loafman.com>
#
# This file is part of duplicity.
#
# Duplicity is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 2 of the License, or (at your
# option) any later version.
#
# Duplicity is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with duplicity; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# See http://www.nongnu.org/duplicity for more information.
# Please send mail to me or the mailing list if you find bugs or have
# any suggestions.
import getpass, os, sys, time
import traceback, platform, statvfs, resource
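# statvfs and resource are POSIX-only stdlib modules; duplicity assumes a
# Unix-like environment (see check_resources below).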
pwd = os.path.abspath(os.path.dirname(sys.argv[0]))
if os.path.exists(os.path.join(pwd, "../duplicity")):
sys.path.insert(0, os.path.abspath(os.path.join(pwd, "../.")))
def _find_duplicity_pylib(path):
if not os.path.isdir(path):
return None
for fpath, dnames, fnames in os.walk(path):
if 'duplicity' in dnames:
return fpath
return None
install_root = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), "../"))
_duplicity_pylib = _find_duplicity_pylib(install_root)
if _duplicity_pylib:
    sys.path.insert(0, _duplicity_pylib)
import gettext
gettext.install('duplicity')
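# gettext.install() puts _() into the builtins namespace, which is why the
# translatable strings throughout this script can call _() without an import.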
from duplicity import log
log.setup()
import duplicity.errors
from duplicity import collections
from duplicity import commandline
from duplicity import diffdir
from duplicity import dup_temp
from duplicity import dup_time
from duplicity import file_naming
from duplicity import globals
from duplicity import gpg
from duplicity import manifest
from duplicity import patchdir
from duplicity import path
from duplicity import robust
from duplicity import tempdir
from duplicity import asyncscheduler
from duplicity import util
# If exit_val is not None, exit with given value at end.
exit_val = None
def get_passphrase(n, action, for_signing = False):
"""
    Check to make sure a passphrase is actually needed, then get it
    from the environment, from gpg-agent, or from the user.

    If n=3, a passphrase is requested and verified. If n=2, the current
    passphrase is verified. If n=1, a passphrase is requested without
    verification for the time being.
@type n: int
@param n: verification level for a passphrase being requested
@type action: string
@param action: action to perform
@type for_signing: boolean
@param for_signing: true if the passphrase is for a signing key, false if not
@rtype: string
@return: passphrase
"""
# First try the environment
try:
if for_signing:
return os.environ['SIGN_PASSPHRASE']
else:
return os.environ['PASSPHRASE']
except KeyError:
pass
# check if we can reuse an already set (signing_)passphrase
## if signing key is also an encryption key assume that the passphrase is identical
if ( for_signing
and globals.gpg_profile.sign_key in globals.gpg_profile.recipients
and 'PASSPHRASE' in os.environ ):
log.Notice(_("Reuse configured PASSPHRASE as SIGN_PASSPHRASE"))
return os.environ['PASSPHRASE']
## if one encryption key is also the signing key assume that the passphrase is identical
if ( not for_signing
and globals.gpg_profile.sign_key in globals.gpg_profile.recipients
and 'SIGN_PASSPHRASE' in os.environ ):
log.Notice(_("Reuse configured SIGN_PASSPHRASE as PASSPHRASE"))
return os.environ['SIGN_PASSPHRASE']
# Next, verify we need to ask the user
# Assumptions:
# - encrypt-key has no passphrase
# - sign-key requires passphrase
# - gpg-agent supplies all, no user interaction
# no passphrase if --no-encryption or --use-agent
if not globals.encryption or globals.use_agent:
return ""
# these commands don't need a password
elif action in ["collection-status",
"list-current",
"remove-all-but-n-full",
"remove-all-inc-of-but-n-full",
"remove-old",
]:
return ""
    # for a full or incremental backup, we don't need a password if
    # there is no sign_key and there are recipients
    elif (action in ["full", "inc"]
          and globals.gpg_profile.recipients
          and not globals.gpg_profile.sign_key):
        return ""
# Finally, ask the user for the passphrase
else:
log.Info(_("PASSPHRASE variable not set, asking user."))
use_cache = True
while 1:
# ask the user to enter a new passphrase to avoid an infinite loop
# if the user made a typo in the first passphrase
if use_cache and n == 2:
if for_signing:
pass1 = globals.gpg_profile.signing_passphrase
else:
pass1 = globals.gpg_profile.passphrase
else:
if for_signing:
if use_cache and globals.gpg_profile.signing_passphrase:
pass1 = globals.gpg_profile.signing_passphrase
else:
pass1 = getpass.getpass(_("GnuPG passphrase for signing key:")+" ")
else:
if use_cache and globals.gpg_profile.passphrase:
pass1 = globals.gpg_profile.passphrase
else:
pass1 = getpass.getpass(_("GnuPG passphrase:")+" ")
if n == 1:
pass2 = pass1
elif for_signing:
pass2 = getpass.getpass(_("Retype passphrase for signing key to confirm: "))
else:
pass2 = getpass.getpass(_("Retype passphrase to confirm: "))
            if pass1 != pass2:
print _("First and second passphrases do not match! Please try again.")
use_cache = False
continue
if not pass1 and not globals.gpg_profile.recipients and not for_signing:
print _("Cannot use empty passphrase with symmetric encryption! Please try again.")
use_cache = False
continue
return pass1
def dummy_backup(tarblock_iter):
"""
Fake writing to backend, but do go through all the source paths.
@type tarblock_iter: tarblock_iter
@param tarblock_iter: iterator for current tar block
@rtype: int
@return: constant 0 (zero)
"""
try:
# Just spin our wheels
while tarblock_iter.next():
pass
except StopIteration:
pass
log.Progress(None, diffdir.stats.SourceFileSize)
return 0
def restart_position_iterator(tarblock_iter):
"""
    Fake writing to backend, advancing through the source paths until
    we have processed the last file and block recorded from the
    interrupted backup. The normal backup then proceeds at the start
    of the next volume in the set.
@type tarblock_iter: tarblock_iter
@param tarblock_iter: iterator for current tar block
@rtype: int
@return: constant 0 (zero)
"""
last_index = globals.restart.last_index
last_block = globals.restart.last_block
try:
# Just spin our wheels
while tarblock_iter.next():
if (tarblock_iter.previous_index == last_index):
if (tarblock_iter.previous_block > last_block):
break
if tarblock_iter.previous_index > last_index:
log.Warn(_("File %s complete in backup set.\n"
"Continuing restart on file %s.") %
("/".join(last_index), "/".join(tarblock_iter.previous_index)),
log.ErrorCode.restart_file_not_found)
break
except StopIteration:
log.Warn(_("File %s missing in backup set.\n"
"Continuing restart on file %s.") %
("/".join(last_index), "/".join(tarblock_iter.previous_index)),
log.ErrorCode.restart_file_not_found)
return 0
def write_multivol(backup_type, tarblock_iter, man_outfp, sig_outfp, backend):
"""
Encrypt volumes of tarblock_iter and write to backend
    backup_type should be "inc" or "full" and only matters here when
    picking the filenames; it determines the names of the files
    written to the backend. Also writes the manifest file.
Returns number of bytes written.
@type backup_type: string
@param backup_type: type of backup to perform, either 'inc' or 'full'
@type tarblock_iter: tarblock_iter
@param tarblock_iter: iterator for current tar block
@type backend: callable backend object
@param backend: I/O backend for selected protocol
@rtype: int
@return: bytes written
"""
    def get_indices(tarblock_iter):
        """Return start_index, start_block, end_index, end_block of the previous volume"""
start_index, start_block = tarblock_iter.recall_index()
if start_index is None:
start_index = ()
start_block = None
if start_block:
start_block -= 1
end_index, end_block = tarblock_iter.get_previous_index()
if end_index is None:
end_index = start_index
end_block = start_block
if end_block:
end_block -= 1
return start_index, start_block, end_index, end_block
def validate_block(orig_size, dest_filename):
info = backend.query_info([dest_filename])[dest_filename]
if 'size' not in info:
return # backend didn't know how to query size
size = info['size']
if size is None:
return # error querying file
if size != orig_size:
code_extra = "%s %d %d" % (util.escape(dest_filename), orig_size, size)
log.FatalError(_("File %s was corrupted during upload.") % dest_filename,
log.ErrorCode.volume_wrong_size, code_extra)
def put(tdp, dest_filename, vol_num):
"""
Retrieve file size *before* calling backend.put(), which may (at least
in case of the localbackend) rename the temporary file to the target
instead of copying.
"""
putsize = tdp.getsize()
if globals.skip_volume != vol_num: # for testing purposes only
backend.put(tdp, dest_filename)
validate_block(putsize, dest_filename)
if tdp.stat:
tdp.delete()
return putsize
def validate_encryption_settings(backup_set, manifest):
"""
When restarting a backup, we have no way to verify that the current
passphrase is the same as the one used for the beginning of the backup.
This is because the local copy of the manifest is unencrypted and we
don't need to decrypt the existing volumes on the backend. To ensure
that we are using the same passphrase, we manually download volume 1
and decrypt it with the current passphrase. We also want to confirm
that we're using the same encryption settings (i.e. we don't switch
from encrypted to non in the middle of a backup chain), so we check
that the vol1 filename on the server matches the settings of this run.
"""
vol1_filename = file_naming.get(backup_type, 1,
encrypted=globals.encryption,
gzipped=globals.compression)
if vol1_filename != backup_set.volume_name_dict[1]:
log.FatalError(_("Restarting backup, but current encryption "
"settings do not match original settings"),
log.ErrorCode.enryption_mismatch)
# Settings are same, let's check passphrase itself if we are encrypted
if globals.encryption:
fileobj = restore_get_enc_fileobj(globals.backend, vol1_filename,
manifest.volume_info_dict[1])
fileobj.close()
if not globals.restart:
# normal backup start
vol_num = 0
mf = manifest.Manifest(fh=man_outfp)
mf.set_dirinfo()
else:
# restart from last known position
mf = globals.restart.last_backup.get_local_manifest()
globals.restart.checkManifest(mf)
globals.restart.setLastSaved(mf)
validate_encryption_settings(globals.restart.last_backup, mf)
mf.fh = man_outfp
last_block = globals.restart.last_block
log.Notice("Restarting after volume %s, file %s, block %s" %
(globals.restart.start_vol,
"/".join(globals.restart.last_index),
globals.restart.last_block))
vol_num = globals.restart.start_vol
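        # fast-forward the iterator past all data that already made it into
        # uploaded volumes, so writing resumes at the next volume boundary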
restart_position_iterator(tarblock_iter)
at_end = 0
bytes_written = 0
# This assertion must be kept until we have solved the problem
# of concurrency at the backend level. Concurrency 1 is fine
# because the actual I/O concurrency on backends is limited to
# 1 as usual, but we are allowed to perform local CPU
# intensive tasks while that single upload is happening. This
# is an assert put in place to avoid someone accidentally
# enabling concurrency above 1, before adequate work has been
# done on the backends to make them support concurrency.
assert globals.async_concurrency <= 1
io_scheduler = asyncscheduler.AsyncScheduler(globals.async_concurrency)
async_waiters = []
while not at_end:
# set up iterator
tarblock_iter.remember_next_index() # keep track of start index
# Create volume
vol_num += 1
dest_filename = file_naming.get(backup_type, vol_num,
encrypted=globals.encryption,
gzipped=globals.compression)
tdp = dup_temp.new_tempduppath(file_naming.parse(dest_filename))
# write volume
if globals.encryption:
at_end = gpg.GPGWriteFile(tarblock_iter, tdp.name,
globals.gpg_profile, globals.volsize)
else:
at_end = gpg.GzipWriteFile(tarblock_iter, tdp.name, globals.volsize)
tdp.setdata()
# Add volume information to manifest
vi = manifest.VolumeInfo()
        vi.set_info(vol_num, *get_indices(tarblock_iter))
vi.set_hash("SHA1", gpg.get_hash("SHA1", tdp))
mf.add_volume_info(vi)
# Checkpoint after each volume so restart has a place to restart.
# Note that until after the first volume, all files are temporary.
if vol_num == 1:
sig_outfp.to_partial()
man_outfp.to_partial()
else:
sig_outfp.flush()
man_outfp.flush()
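        # hand the finished volume to the scheduler for upload; with
        # async_concurrency <= 1 (see assert above) the actual I/O stays serial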
        async_waiters.append(io_scheduler.schedule_task(put,
                                                        (tdp, dest_filename, vol_num)))
# Log human-readable version as well as raw numbers for machine consumers
log.Progress('Processed volume %d' % vol_num, diffdir.stats.SourceFileSize)
# for testing purposes only - assert on inc or full
assert globals.fail_on_volume != vol_num, "Forced assertion for testing at volume %d" % vol_num
# Collect byte count from all asynchronous jobs; also implicitly waits
# for them all to complete.
for waiter in async_waiters:
bytes_written += waiter()
# Upload the collection summary.
#bytes_written += write_manifest(mf, backup_type, backend)
return bytes_written
def get_man_fileobj(backup_type):
"""
Return a fileobj opened for writing, save results as manifest
Save manifest in globals.archive_dir gzipped.
Save them on the backend encrypted as needed.
    @type backup_type: string
    @param backup_type: either "full" or "inc"
@rtype: fileobj
@return: fileobj opened for writing
"""
assert backup_type == "full" or backup_type == "inc"
part_man_filename = file_naming.get(backup_type,
manifest=True,
partial=True)
perm_man_filename = file_naming.get(backup_type,
manifest=True)
remote_man_filename = file_naming.get(backup_type,
manifest=True,
encrypted=globals.encryption)
fh = dup_temp.get_fileobj_duppath(globals.archive_dir,
part_man_filename,
perm_man_filename,
remote_man_filename)
return fh
def get_sig_fileobj(sig_type):
"""
Return a fileobj opened for writing, save results as signature
Save signatures in globals.archive_dir gzipped.
Save them on the backend encrypted as needed.
@type sig_type: string
@param sig_type: either "full-sig" or "new-sig"
@rtype: fileobj
@return: fileobj opened for writing
"""
assert sig_type in ["full-sig", "new-sig"]
part_sig_filename = file_naming.get(sig_type,
gzipped=False,
partial=True)
perm_sig_filename = file_naming.get(sig_type,
gzipped=True)
remote_sig_filename = file_naming.get(sig_type, encrypted=globals.encryption,
gzipped=globals.compression)
fh = dup_temp.get_fileobj_duppath(globals.archive_dir,
part_sig_filename,
perm_sig_filename,
remote_sig_filename)
return fh
def full_backup(col_stats):
"""
Do full backup of directory to backend, using archive_dir
@type col_stats: CollectionStatus object
@param col_stats: collection status
@rtype: void
@return: void
"""
if globals.dry_run:
tarblock_iter = diffdir.DirFull(globals.select)
bytes_written = dummy_backup(tarblock_iter)
col_stats.set_values(sig_chain_warning=None)
else:
sig_outfp = get_sig_fileobj("full-sig")
man_outfp = get_man_fileobj("full")
tarblock_iter = diffdir.DirFull_WriteSig(globals.select,
sig_outfp)
bytes_written = write_multivol("full", tarblock_iter,
man_outfp, sig_outfp,
globals.backend)
# close sig file, send to remote, and rename to final
sig_outfp.close()
sig_outfp.to_remote()
sig_outfp.to_final()
# close manifest, send to remote, and rename to final
man_outfp.close()
man_outfp.to_remote()
man_outfp.to_final()
col_stats.set_values(sig_chain_warning=None)
print_statistics(diffdir.stats, bytes_written)
def check_sig_chain(col_stats):
"""
Get last signature chain for inc backup, or None if none available
@type col_stats: CollectionStatus object
@param col_stats: collection status
"""
if not col_stats.matched_chain_pair:
if globals.incremental:
log.FatalError(_("Fatal Error: Unable to start incremental backup. "
"Old signatures not found and incremental specified"),
log.ErrorCode.inc_without_sigs)
else:
log.Warn(_("No signatures found, switching to full backup."))
return None
return col_stats.matched_chain_pair[0]
def print_statistics(stats, bytes_written):
"""
If globals.print_statistics, print stats after adding bytes_written
@rtype: void
@return: void
"""
if globals.print_statistics:
        stats.TotalDestinationSizeChange = bytes_written
        print
        print stats.get_stats_logstring(_("Backup Statistics"))
def incremental_backup(sig_chain):
"""
Do incremental backup of directory to backend, using archive_dir
@rtype: void
@return: void
"""
if not globals.restart:
dup_time.setprevtime(sig_chain.end_time)
if dup_time.curtime == dup_time.prevtime:
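            # sleep so this backup set's timestamp is strictly later than
            # the previous set's; sets are distinguished by their times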
time.sleep(2)
dup_time.setcurtime()
assert dup_time.curtime != dup_time.prevtime, "time not moving forward at appropriate pace - system clock issues?"
if globals.dry_run:
tarblock_iter = diffdir.DirDelta(globals.select,
sig_chain.get_fileobjs())
bytes_written = dummy_backup(tarblock_iter)
else:
new_sig_outfp = get_sig_fileobj("new-sig")
new_man_outfp = get_man_fileobj("inc")
tarblock_iter = diffdir.DirDelta_WriteSig(globals.select,
sig_chain.get_fileobjs(),
new_sig_outfp)
bytes_written = write_multivol("inc", tarblock_iter,
new_man_outfp, new_sig_outfp,
globals.backend)
# close sig file and rename to final
new_sig_outfp.close()
new_sig_outfp.to_remote()
new_sig_outfp.to_final()
# close manifest and rename to final
new_man_outfp.close()
new_man_outfp.to_remote()
new_man_outfp.to_final()
print_statistics(diffdir.stats, bytes_written)
def list_current(col_stats):
"""
    List the files currently in the archive (examining signatures only)
@type col_stats: CollectionStatus object
@param col_stats: collection status
@rtype: void
@return: void
"""
    sig_time = globals.restore_time or dup_time.curtime
    sig_chain = col_stats.get_signature_chain_at_time(sig_time)
    path_iter = diffdir.get_combined_path_iter(sig_chain.get_fileobjs(sig_time))
    # 'ropath' avoids shadowing the path module imported above
    for ropath in path_iter:
        if ropath.difftype != "deleted":
            user_info = "%s %s" % (dup_time.timetopretty(ropath.getmtime()),
                                   ropath.get_relative_path())
            log_info = "%s %s" % (dup_time.timetostring(ropath.getmtime()),
                                  util.escape(ropath.get_relative_path()))
log.Log(user_info, log.INFO, log.InfoCode.file_list,
log_info, True)
def restore(col_stats):
"""
Restore archive in globals.backend to globals.local_path
@type col_stats: CollectionStatus object
@param col_stats: collection status
@rtype: void
@return: void
"""
if globals.dry_run:
return
if not patchdir.Write_ROPaths(globals.local_path,
restore_get_patched_rop_iter(col_stats)):
if globals.restore_dir:
log.FatalError(_("%s not found in archive, no files restored.")
% (globals.restore_dir,),
log.ErrorCode.restore_dir_not_found)
else:
log.FatalError(_("No files found in archive - nothing restored."),
log.ErrorCode.no_restore_files)
def restore_get_patched_rop_iter(col_stats):
"""
Return iterator of patched ROPaths of desired restore data
@type col_stats: CollectionStatus object
@param col_stats: collection status
"""
if globals.restore_dir:
index = tuple(globals.restore_dir.split("/"))
else:
index = ()
    restore_time = globals.restore_time or dup_time.curtime
    backup_chain = col_stats.get_backup_chain_at_time(restore_time)
    assert backup_chain, col_stats.all_backup_chains
    backup_setlist = backup_chain.get_sets_at_time(restore_time)
num_vols = 0
for s in backup_setlist:
num_vols += len(s)
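    # a one-element list lets the nested generator below update the
    # counter in place; Python 2 has no 'nonlocal'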
cur_vol = [0]
def get_fileobj_iter(backup_set):
"""Get file object iterator from backup_set contain given index"""
manifest = backup_set.get_manifest()
volumes = manifest.get_containing_volumes(index)
for vol_num in volumes:
yield restore_get_enc_fileobj(backup_set.backend,
backup_set.volume_name_dict[vol_num],
manifest.volume_info_dict[vol_num])
cur_vol[0] += 1
log.Progress(_('Processed volume %d of %d') % (cur_vol[0], num_vols),
cur_vol[0], num_vols)
fileobj_iters = map(get_fileobj_iter, backup_setlist)
tarfiles = map(patchdir.TarFile_FromFileobjs, fileobj_iters)
return patchdir.tarfiles2rop_iter(tarfiles, index)
def restore_get_enc_fileobj(backend, filename, volume_info):
"""
Return plaintext fileobj from encrypted filename on backend
If volume_info is set, the hash of the file will be checked,
assuming some hash is available. Also, if globals.sign_key is
set, a fatal error will be raised if file not signed by sign_key.
"""
parseresults = file_naming.parse(filename)
tdp = dup_temp.new_tempduppath(parseresults)
backend.get(filename, tdp)
""" verify hash of the remote file """
verified, hash_pair, calculated_hash = restore_check_hash(volume_info, tdp)
if not verified:
log.FatalError("%s\n %s\n %s\n %s\n" %
(_("Invalid data - %s hash mismatch for file:") % hash_pair[0],
filename,
_("Calculated hash: %s") % calculated_hash,
_("Manifest hash: %s") % hash_pair[1]),
log.ErrorCode.mismatched_hash)
fileobj = tdp.filtered_open_with_delete("rb")
if parseresults.encrypted and globals.gpg_profile.sign_key:
restore_add_sig_check(fileobj)
return fileobj
def restore_check_hash(volume_info, vol_path):
"""
Check the hash of vol_path path against data in volume_info
    @rtype: tuple
    @return: (verified, hash_pair, calculated_hash)
    """
    # default to None so the final return is safe even when the volume
    # has no recorded hash to check against
    calculated_hash = None
    hash_pair = volume_info.get_best_hash()
    if hash_pair:
        calculated_hash = gpg.get_hash(hash_pair[0], vol_path)
        if calculated_hash != hash_pair[1]:
            return False, hash_pair, calculated_hash
    # reached here, verification passed
    return True, hash_pair, calculated_hash
def restore_add_sig_check(fileobj):
"""
    Add a hook to fileobj that checks, when the file is closed, that the
    volume was signed by gpg_profile.sign_key
@rtype: void
@return: void
"""
assert (isinstance(fileobj, dup_temp.FileobjHooked) and
isinstance(fileobj.fileobj, gpg.GPGFile)), fileobj
def check_signature():
"""Thunk run when closing volume file"""
actual_sig = fileobj.fileobj.get_signature()
if actual_sig != globals.gpg_profile.sign_key:
log.FatalError(_("Volume was signed by key %s, not %s") %
(actual_sig, globals.gpg_profile.sign_key),
log.ErrorCode.unsigned_volume)
fileobj.addhook(check_signature)
def verify(col_stats):
"""
Verify files, logging differences
@type col_stats: CollectionStatus object
@param col_stats: collection status
@rtype: void
@return: void
"""
global exit_val
collated = diffdir.collate2iters(restore_get_patched_rop_iter(col_stats),
globals.select)
    diff_count = 0
    total_count = 0
for backup_ropath, current_path in collated:
if not backup_ropath:
backup_ropath = path.ROPath(current_path.index)
if not current_path:
current_path = path.ROPath(backup_ropath.index)
if not backup_ropath.compare_verbose(current_path):
diff_count += 1
total_count += 1
# Unfortunately, ngettext doesn't handle multiple number variables, so we
# split up the string.
log.Notice(_("Verify complete: %s, %s.") %
(gettext.ngettext("%d file compared",
"%d files compared", total_count) % total_count,
gettext.ngettext("%d difference found",
"%d differences found", diff_count) % diff_count))
if diff_count >= 1:
exit_val = 1
def cleanup(col_stats):
"""
Delete the extraneous files in the current backend
@type col_stats: CollectionStatus object
@param col_stats: collection status
@rtype: void
@return: void
"""
ext_local, ext_remote = col_stats.get_extraneous(globals.extra_clean)
extraneous = ext_local + ext_remote
if not extraneous:
log.Warn(_("No extraneous files found, nothing deleted in cleanup."))
return
filestr = "\n".join(extraneous)
if globals.force:
log.Notice(gettext.ngettext("Deleting this file from backend:",
"Deleting these files from backend:",
len(extraneous))
+ "\n" + filestr)
if not globals.dry_run:
col_stats.backend.delete(ext_remote)
for fn in ext_local:
try:
globals.archive_dir.append(fn).delete()
except Exception:
pass
else:
log.Notice(gettext.ngettext("Found the following file to delete:",
"Found the following files to delete:",
len(extraneous))
+ "\n" + filestr + "\n"
+ _("Run duplicity again with the --force option to actually delete."))
def remove_all_but_n_full(col_stats):
"""
Remove backup files older than the last n full backups.
@type col_stats: CollectionStatus object
@param col_stats: collection status
@rtype: void
@return: void
"""
assert globals.keep_chains is not None
globals.remove_time = col_stats.get_nth_last_full_backup_time(globals.keep_chains)
remove_old(col_stats)
def remove_old(col_stats):
"""
Remove backup files older than globals.remove_time from backend
@type col_stats: CollectionStatus object
@param col_stats: collection status
@rtype: void
@return: void
"""
assert globals.remove_time is not None
def set_times_str(setlist):
"""Return string listing times of sets in setlist"""
return "\n".join(map(lambda s: dup_time.timetopretty(s.get_time()),
setlist))
req_list = col_stats.get_older_than_required(globals.remove_time)
if req_list:
log.Warn("%s\n%s\n%s" %
(_("There are backup set(s) at time(s):"),
set_times_str(req_list),
_("Which can't be deleted because newer sets depend on them.")))
if (col_stats.matched_chain_pair and
col_stats.matched_chain_pair[1].end_time < globals.remove_time):
log.Warn(_("Current active backup chain is older than specified time. "
"However, it will not be deleted. To remove all your backups, "
"manually purge the repository."))
setlist = col_stats.get_older_than(globals.remove_time)
if not setlist:
log.Notice(_("No old backup sets found, nothing deleted."))
return
if globals.force:
log.Notice(gettext.ngettext("Deleting backup set at time:",
"Deleting backup sets at times:",
len(setlist)) +
"\n" + set_times_str(setlist))
setlist.reverse() # save oldest for last
        for bset in setlist:  # 'bset' avoids shadowing the builtin 'set'
            # in remove-all-inc-of-but-n-full mode, remove only the
            # incremental sets, never the fulls they depend on
            if globals.dry_run:
                log.Notice("(dry-run) Not deleting set " + bset.type + " "
                           + dup_time.timetopretty(bset.get_time()))
            else:
                if globals.remove_all_inc_of_but_n_full_mode and bset.type != "inc":
                    log.Notice("Not deleting set " + bset.type + " "
                               + dup_time.timetopretty(bset.get_time()))
                else:
                    log.Notice("Deleting set " + bset.type + " "
                               + dup_time.timetopretty(bset.get_time()))
                    bset.delete()
col_stats.set_values(sig_chain_warning=None)
else:
log.Notice(gettext.ngettext("Found old backup set at the following time:",
"Found old backup sets at the following times:",
len(setlist)) +
"\n" + set_times_str(setlist) + "\n" +
_("Rerun command with --force option to actually delete."))
def sync_archive(decrypt):
"""
Synchronize local archive manifest file and sig chains to remote archives.
Copy missing files from remote to local as needed to make sure the local
archive is synchronized to remote storage.
@rtype: void
@return: void
"""
suffixes = [".g", ".gpg", ".z", ".gz", ".part"]
def get_metafiles(filelist):
"""
Return metafiles of interest from the file list.
Files of interest are:
sigtar - signature files
        manifest - manifest files
duplicity partial versions of the above
Files excluded are:
non-duplicity files
@rtype: list
@return: list of duplicity metadata files
"""
metafiles = {}
partials = {}
need_passphrase = False
for fn in filelist:
pr = file_naming.parse(fn)
if not pr:
continue
if pr.encrypted:
need_passphrase = True
if pr.type in ["full-sig", "new-sig"] or pr.manifest:
base, ext = os.path.splitext(fn)
if ext not in suffixes:
base = fn
if pr.partial:
partials[base] = fn
else:
metafiles[base] = fn
return metafiles, partials, need_passphrase
    def copy_raw(src_iter, filename):
        """
        Copy data from src_iter to the file at filename
        """
        block_size = 128 * 1024
        outfp = open(filename, "wb")  # avoid shadowing the builtin 'file'
        while True:
            try:
                data = src_iter.next(block_size).data
            except StopIteration:
                break
            outfp.write(data)
        outfp.close()
def resolve_basename(fn):
"""
@return: (parsedresult, local_name, remote_name)
"""
pr = file_naming.parse(fn)
base, ext = os.path.splitext(fn)
if ext not in suffixes:
base = fn
suffix = file_naming.get_suffix(False, not pr.manifest)
loc_name = base + suffix
return (pr, loc_name, fn)
def remove_local(fn):
del_name = globals.archive_dir.append(fn).name
log.Notice(_("Deleting local %s (not authoritative at backend).") % del_name)
try:
util.ignore_missing(os.unlink, del_name)
except Exception, e:
log.Warn(_("Unable to delete %s: %s") % (del_name, str(e)))
def copy_to_local(fn):
"""
Copy remote file fn to local cache.
"""
class Block:
"""
Data block to return from SrcIter
"""
def __init__(self, data):
self.data = data
class SrcIter:
"""
Iterate over source and return Block of data.
"""
def __init__(self, fileobj):
self.fileobj = fileobj
def next(self, size):
try:
res = Block(self.fileobj.read(size))
except Exception:
if hasattr(self.fileobj, 'name'):
name = self.fileobj.name
else:
name = None
log.FatalError(_("Failed to read %s: %s") %
(name, sys.exc_info()),
log.ErrorCode.generic)
if not res.data:
self.fileobj.close()
raise StopIteration
return res
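            # gpg.GzipWriteFile asks the block iterator for a footer at end
            # of stream; a raw metadata copy contributes none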
def get_footer(self):
return ""
log.Notice(_("Copying %s to local cache.") % fn)
pr, loc_name, rem_name = resolve_basename(fn)
fileobj = globals.backend.get_fileobj_read(fn)
src_iter = SrcIter(fileobj)
tdp = dup_temp.new_tempduppath(file_naming.parse(loc_name))
if pr.manifest:
copy_raw(src_iter, tdp.name)
else:
gpg.GzipWriteFile(src_iter, tdp.name, size=sys.maxint)
tdp.setdata()
tdp.move(globals.archive_dir.append(loc_name))
# get remote metafile list
remlist = globals.backend.list()
remote_metafiles, ignored, rem_needpass = get_metafiles(remlist)
# get local metafile list
loclist = globals.archive_dir.listdir()
local_metafiles, local_partials, loc_needpass = get_metafiles(loclist)
# we have the list of metafiles on both sides. remote is always
# authoritative. figure out which are local spurious (should not
# be there) and missing (should be there but are not).
local_keys = local_metafiles.keys()
remote_keys = remote_metafiles.keys()
local_missing = []
local_spurious = []
for key in remote_keys:
# If we lost our cache, re-get the remote file. But don't do it if we
# already have a local partial. The local partial will already be
# complete in this case (seems we got interrupted before we could move
# it to its final location).
if key not in local_keys and key not in local_partials:
local_missing.append(remote_metafiles[key])
for key in local_keys:
# If we have a file locally that is unnecessary, delete it. Also
# delete final versions of partial files because if we have both, it
# means the write of the final version got interrupted.
if key not in remote_keys or key in local_partials:
local_spurious.append(local_metafiles[key])
# finally finish the process
if not local_missing and not local_spurious:
log.Notice(_("Local and Remote metadata are synchronized, no sync needed."))
else:
local_missing.sort()
local_spurious.sort()
if not globals.dry_run:
log.Notice(_("Synchronizing remote metadata to local cache..."))
if local_missing and (rem_needpass or loc_needpass):
if decrypt:
# password for the --encrypt-key
globals.gpg_profile.passphrase = get_passphrase(1, "sync")
else:
local_missing = [] # don't download if we can't decrypt
for fn in local_spurious:
remove_local(fn)
for fn in local_missing:
copy_to_local(fn)
else:
if local_missing:
log.Notice(_("Sync would copy the following from remote to local:")
+ "\n" + "\n".join(local_missing))
if local_spurious:
log.Notice(_("Sync would remove the following spurious local files:")
+ "\n" + "\n".join(local_spurious))
def check_last_manifest(col_stats):
"""
Check consistency and hostname/directory of last manifest
@type col_stats: CollectionStatus object
@param col_stats: collection status
@rtype: void
@return: void
"""
if not col_stats.all_backup_chains:
return
last_backup_set = col_stats.all_backup_chains[-1].get_last()
last_backup_set.check_manifests()
def check_resources(action):
"""
Check for sufficient resources:
- temp space for volume build
- enough max open files
Put out fatal error if not sufficient to run
@type action: string
@param action: action in progress
@rtype: void
@return: void
"""
if action in ["full", "inc", "restore"]:
        # Make sure we have enough resources to run
# First check disk space in temp area.
tempfile, tempname = tempdir.default().mkstemp()
os.close(tempfile)
# strip off the temp dir and file
tempfs = os.path.sep.join(tempname.split(os.path.sep)[:-2])
try:
stats = os.statvfs(tempfs)
except Exception:
log.FatalError(_("Unable to get free space on temp."),
log.ErrorCode.get_freespace_failed)
# Calculate space we need for at least 2 volumes of full or inc
# plus about 30% of one volume for the signature files.
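        # f_frsize * f_bavail = bytes available to unprivileged processes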
freespace = stats[statvfs.F_FRSIZE] * stats[statvfs.F_BAVAIL]
needspace = (((globals.async_concurrency + 1) * globals.volsize)
+ int(0.30 * globals.volsize))
if freespace < needspace:
log.FatalError(_("Temp space has %d available, backup needs approx %d.") %
(freespace, needspace), log.ErrorCode.not_enough_freespace)
else:
log.Debug(_("Temp has %d available, backup will use approx %d.") %
(freespace, needspace))
# Some environments like Cygwin run with an artificially
# low value for max open files. Check for safe number.
try:
soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
except resource.error:
log.FatalError(_("Unable to get max open files."),
log.ErrorCode.get_ulimit_failed)
        # getrlimit reports RLIM_INFINITY as -1; ignore unlimited values
        finite_limits = [l for l in (soft, hard) if l > -1]
        maxopen = min(finite_limits) if finite_limits else None
        if maxopen is not None and maxopen < 1024:
log.FatalError(_("Max open files of %s is too low, should be >= 1024.\n"
"Use 'ulimit -n 1024' or higher to correct.\n") % (maxopen,),
log.ErrorCode.maxopen_too_low)
def log_startup_parms(verbosity=log.DEBUG):
"""
log Python, duplicity, and system versions
"""
log.Log('=' * 80, verbosity)
log.Log("duplicity 0.6.18 (February 29, 2012)", verbosity)
log.Log("Args: %s" % (' '.join(sys.argv),), verbosity)
log.Log(' '.join(platform.uname()), verbosity)
log.Log("%s %s" % (sys.executable or sys.platform, sys.version), verbosity)
log.Log('=' * 80, verbosity)
class Restart:
"""
Class to aid in restart of inc or full backup.
Instance in globals.restart if restart in progress.
"""
def __init__(self, last_backup):
self.type = None
self.start_time = None
self.end_time = None
self.start_vol = None
self.last_index = None
self.last_block = None
self.last_backup = last_backup
self.setParms(last_backup)
def setParms(self, last_backup):
if last_backup.time:
self.type = "full"
self.time = last_backup.time
else:
self.type = "inc"
self.end_time = last_backup.end_time
self.start_time = last_backup.start_time
# We start one volume back in case we weren't able to finish writing
# the most recent block. Actually checking if we did (via hash) would
# involve downloading the block. Easier to just redo one block.
self.start_vol = max(len(last_backup) - 1, 0)
def checkManifest(self, mf):
mf_len = len(mf.volume_info_dict)
if (mf_len != self.start_vol) or not (mf_len and self.start_vol):
if self.start_vol == 0:
# upload of 1st vol failed, clean and restart
log.Notice(_("RESTART: The first volume failed to upload before termination.\n"
" Restart is impossible...starting backup from beginning."))
self.last_backup.delete()
os.execve(sys.argv[0], sys.argv, os.environ)
elif mf_len - self.start_vol > 0:
# upload of N vols failed, fix manifest and restart
log.Notice(_("RESTART: Volumes %d to %d failed to upload before termination.\n"
" Restarting backup at volume %d.") %
(self.start_vol + 1, mf_len, self.start_vol + 1))
for vol in range(self.start_vol + 1, mf_len + 1):
mf.del_volume_info(vol)
else:
# this is an 'impossible' state, remove last partial and restart
log.Notice(_("RESTART: Impossible backup state: manifest has %d vols, remote has %d vols.\n"
" Restart is impossible ... duplicity will clean off the last partial\n"
" backup then restart the backup from the beginning.") %
(mf_len, self.start_vol))
self.last_backup.delete()
            os.execve(sys.argv[0], sys.argv, os.environ)
def setLastSaved(self, mf):
vi = mf.volume_info_dict[self.start_vol]
self.last_index = vi.end_index
self.last_block = vi.end_block or 0
def main():
"""
Start/end here
"""
# per bug https://bugs.launchpad.net/duplicity/+bug/931175
# duplicity crashes when PYTHONOPTIMIZE is set, so check
# and refuse to run if it is set.
if 'PYTHONOPTIMIZE' in os.environ:
log.FatalError(
"""
PYTHONOPTIMIZE in the environment causes duplicity to fail to
recognize its own backups. Please remove PYTHONOPTIMIZE from
the environment and rerun the backup.
See https://bugs.launchpad.net/duplicity/+bug/931175
""", log.ErrorCode.pythonoptimize_set)
# if python is run setuid, it's only partway set,
# so make sure to run with euid/egid of root
if os.geteuid() == 0:
# make sure uid/gid match euid/egid
os.setuid(os.geteuid())
os.setgid(os.getegid())
# set the current time strings (make it available for command line processing)
dup_time.setcurtime()
# determine what action we're performing and process command line
action = commandline.ProcessCommandLine(sys.argv[1:])
# The following is for starting remote debugging in Eclipse with Pydev.
# Adjust the path to your location and version of Eclipse and Pydev.
if globals.pydevd:
pysrc = "/opt/Aptana Studio 3/plugins/org.python.pydev.debug_2.2.4.2011111522/pysrc"
sys.path.append(pysrc)
import pydevd #@UnresolvedImport
pydevd.settrace()
# end remote debugger startup
# set the current time strings again now that we have time separator
if globals.current_time:
dup_time.setcurtime(globals.current_time)
else:
dup_time.setcurtime()
# log some debugging status info
log_startup_parms(log.DEBUG)
# check for disk space and available file handles
check_resources(action)
# check archive synch with remote, fix if needed
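    # collection-status can be answered from the unencrypted local metadata,
    # so skip decryption rather than prompting for a passphrase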
decrypt = action not in ["collection-status"]
sync_archive(decrypt)
# get current collection status
col_stats = collections.CollectionsStatus(globals.backend,
globals.archive_dir).set_values()
while True:
# if we have to clean up the last partial, then col_stats are invalidated
# and we have to start the process all over again until clean.
if action in ["full", "inc", "cleanup"]:
last_full_chain = col_stats.get_last_backup_chain()
if not last_full_chain:
break
last_backup = last_full_chain.get_last()
if last_backup.partial:
if action in ["full", "inc"]:
# set restart parms from last_backup info
globals.restart = Restart(last_backup)
# (possibly) reset action
action = globals.restart.type
# reset the time strings
if action == "full":
dup_time.setcurtime(globals.restart.time)
else:
dup_time.setcurtime(globals.restart.end_time)
dup_time.setprevtime(globals.restart.start_time)
# log it -- main restart heavy lifting is done in write_multivol
log.Notice(_("Last %s backup left a partial set, restarting." % action))
break
else:
# remove last partial backup and get new collection status
log.Notice(_("Cleaning up previous partial %s backup set, restarting." % action))
last_backup.delete()
col_stats = collections.CollectionsStatus(globals.backend,
globals.archive_dir).set_values()
continue
break
break
# OK, now we have a stable collection
last_full_time = col_stats.get_last_full_backup_time()
if last_full_time > 0:
log.Notice(_("Last full backup date:") + " " + dup_time.timetopretty(last_full_time))
else:
log.Notice(_("Last full backup date: none"))
if not globals.restart and action == "inc" and last_full_time < globals.full_force_time:
log.Notice(_("Last full backup is too old, forcing full backup"))
action = "full"
log.PrintCollectionStatus(col_stats)
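    # umask 077: newly created files are readable/writable by the owner only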
os.umask(077)
# full/inc only needs a passphrase for symmetric keys
    if action not in ["full", "inc"] or not globals.gpg_profile.recipients:
# get the passphrase if we need to based on action/options
globals.gpg_profile.passphrase = get_passphrase(1, action)
if action == "restore":
restore(col_stats)
elif action == "verify":
verify(col_stats)
elif action == "list-current":
list_current(col_stats)
elif action == "collection-status":
log.PrintCollectionStatus(col_stats, True)
elif action == "cleanup":
cleanup(col_stats)
elif action == "remove-old":
remove_old(col_stats)
elif action == "remove-all-but-n-full" or action == "remove-all-inc-of-but-n-full":
remove_all_but_n_full(col_stats)
elif action == "sync":
sync_archive(True)
else:
assert action == "inc" or action == "full", action
# the passphrase for full and inc is used by --sign-key
# the sign key can have a different passphrase than the encrypt
# key, therefore request a passphrase
if globals.gpg_profile.sign_key:
globals.gpg_profile.signing_passphrase = get_passphrase(1, action, True)
# if there are no recipients (no --encrypt-key), it must be a
# symmetric key. Therefore, confirm the passphrase
if not globals.gpg_profile.recipients:
globals.gpg_profile.passphrase = get_passphrase(2, action)
        # a limitation in the GPG implementation prevents supplying two
        # different passphrases, which affects symmetric encryption + signing.
        # Allow an empty signing passphrase, though, so that a non-empty
        # symmetric key still works.
if (globals.gpg_profile.signing_passphrase and
globals.gpg_profile.passphrase != globals.gpg_profile.signing_passphrase):
log.FatalError("When using symmetric encryption, the signing passphrase must equal the encryption passphrase.", log.ErrorCode.user_error)
if action == "full":
full_backup(col_stats)
else: # attempt incremental
sig_chain = check_sig_chain(col_stats)
# action == "inc" was requested, but no full backup is available
if not sig_chain:
full_backup(col_stats)
else:
if not globals.restart:
# only ask for a passphrase if there was a previous backup
if col_stats.all_backup_chains:
globals.gpg_profile.passphrase = get_passphrase(1, action)
check_last_manifest(col_stats) # not needed for full backup
incremental_backup(sig_chain)
globals.backend.close()
log.shutdown()
if exit_val is not None:
sys.exit(exit_val)
def with_tempdir(fn):
"""
Execute function and guarantee cleanup of tempdir is called
@type fn: callable function
@param fn: function to execute
@return: void
@rtype: void
"""
try:
fn()
finally:
tempdir.default().cleanup()
if __name__ == "__main__":
try:
with_tempdir(main)
# Don't move this lower. In order to get an exit
# status out of the system, you have to call the
# sys.exit() function. Python handles this by
# raising the SystemExit exception. Cleanup code
# goes here, if needed.
except SystemExit, e:
# No traceback, just get out
        sys.exit(e.code)
except KeyboardInterrupt, e:
# No traceback, just get out
log.Info(_("INT intercepted...exiting."))
sys.exit(4)
except gpg.GPGError, e:
# For gpg errors, don't show an ugly stack trace by
# default. But do with sufficient verbosity.
log.Info(_("GPG error detail: %s")
% (''.join(traceback.format_exception(*sys.exc_info()))))
log.FatalError("%s: %s" % (e.__class__.__name__, e.args[0]),
log.ErrorCode.gpg_failed,
e.__class__.__name__)
except duplicity.errors.UserError, e:
# For user errors, don't show an ugly stack trace by
# default. But do with sufficient verbosity.
log.Info(_("User error detail: %s")
% (''.join(traceback.format_exception(*sys.exc_info()))))
log.FatalError("%s: %s" % (e.__class__.__name__, str(e)),
log.ErrorCode.user_error,
e.__class__.__name__)
except duplicity.errors.BackendException, e:
# For backend errors, don't show an ugly stack trace by
# default. But do with sufficient verbosity.
log.Info(_("Backend error detail: %s")
% (''.join(traceback.format_exception(*sys.exc_info()))))
log.FatalError("%s: %s" % (e.__class__.__name__, str(e)),
log.ErrorCode.user_error,
e.__class__.__name__)
except Exception, e:
if "Forced assertion for testing" in str(e):
log.FatalError("%s: %s" % (e.__class__.__name__, str(e)),
log.ErrorCode.exception,
e.__class__.__name__)
else:
# Traceback and that mess
log.FatalError("%s" % (''.join(traceback.format_exception(*sys.exc_info()))),
log.ErrorCode.exception,
e.__class__.__name__)