--- duplicity-0.6.15.orig/debian/compat
+++ duplicity-0.6.15/debian/compat
@@ -0,0 +1 @@
+5
--- duplicity-0.6.15.orig/debian/docs
+++ duplicity-0.6.15/debian/docs
@@ -0,0 +1,2 @@
+README
+debian/NEWS.Debian
--- duplicity-0.6.15.orig/debian/copyright
+++ duplicity-0.6.15/debian/copyright
@@ -0,0 +1,83 @@
+This package was originally debianized by Martin Wuertele
+in 2003. Since April 2007 it is maintained
+by Alexander Zangerl.
+
+It was downloaded from http://www.nongnu.org/duplicity/
+
+Upstream Authors:
+ Ben Escoto (duplicity author)
+ Jiri Tyr (sftp)
+ intrigeri (sftp-command)
+ Mathias Wagner (compress)
+ Joey Hess (bashism)
+
+
+Copyright:
+
+duplicity:
+
+Copyright 2002, 2003, 2004, 2005, 2006 Ben Escoto
+ 2005 Jiri Tyr
+ 2006 intrigeri
+ 2006 Mathias Wagner
+ 2006 Joey Hess
+
+This program is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free Software
+Foundation; either version 2, or (at your option) any later version.
+
+On Debian GNU/Linux systems, the complete text of the GNU General Public
+License can be found in `/usr/share/common-licenses/GPL'.
+
+
+tarfile:
+
+Copyright (C) 2002 Lars Gustäbel
+All rights reserved.
+
+Permission is hereby granted, free of charge, to any person
+obtaining a copy of this software and associated documentation
+files (the "Software"), to deal in the Software without
+restriction, including without limitation the rights to use,
+copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the
+Software is furnished to do so, subject to the following
+conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+OTHER DEALINGS IN THE SOFTWARE.
+
+gnupginterface:
+
+COPYRIGHT:
+
+Copyright (C) 2001 Frank J. Tobin, ftobin@neverending.org
+
+LICENSE:
+
+This library is free software; you can redistribute it and/or
+modify it under the terms of the GNU Lesser General Public
+License as published by the Free Software Foundation; either
+version 2.1 of the License, or (at your option) any later version.
+
+This library is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+Lesser General Public License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with this library; if not, write to the Free Software
+Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
+or see http://www.gnu.org/copyleft/lesser.html
+
+
--- duplicity-0.6.15.orig/debian/NEWS.Debian
+++ duplicity-0.6.15/debian/NEWS.Debian
@@ -0,0 +1,64 @@
+duplicity (0.6.08b-1) unstable; urgency=low
+
+  With 0.6.06 duplicity stopped removing old data properly,
+  EXCEPT when one ran a cleanup operation with --extra-clean enabled.
+  Note that normal remove* ops are not sufficient for a proper clean.
+
+  (the cause is changeset 616, lp:~mterry/duplicity/list-old-chains)
+
+  This has led to numerous problems wrt. the archive dir cache growing
+  without bounds as well as some cache desynchronization issues.
+  It's also extremely counter-intuitive: despite requesting removals,
+  not enough data is removed.
+
+  Until upstream resolves this problem properly, the Debian version
+  of duplicity now automatically and unconditionally runs a
+  cleanup operation after a successful remove-older-than or
+  remove-all-but-n-full operation.
+
+  The definition of "successful" in this context: --force was enabled,
+  and the remove op found something to remove.
+
+  This forced cleanup is run with --extra-clean active.
+
+duplicity (0.6.04-1) unstable; urgency=low
+
+  The --archive-dir handling has changed substantially in 0.6,
+  in ways that affect existing backups.
+
+  Duplicity now requires an archive dir, and if you don't give it one
+  explicitly it will use ~/.cache/duplicity/.
+
+  To distinguish between multiple backups, a per-backup subdirectory
+  of the archive dir is used. This suffix is a hash of the target URL,
+  or it can be set with --name.
+
+  The suffix is ALWAYS ADDED; the archive dir itself is no longer used.
+
+  Consequences:
+
+  * If you have existing backups with an archive dir (where you had
+    to specify unique archive dirs), you must add an
+    appropriate --name to have duplicity use the right archive directory.
+
+    Using your existing, specific --archive-dir and --name '' works.
+
+  * If you do not do that or if you have no existing archive dir,
+    then duplicity will create a new archive dir and
+    synchronize/recreate the archive dir content from the remote repository.
+
+    If you use encryption then the first duplicity run (attempting this
+    resynchronization) will fail unless you give it the encryption passphrase
+    (or access to and passphrase of the relevant gnupg key) - local
+    archive dir contents are not encrypted but remote repositories are.
+
+  For existing backups I'd highly recommend that you run a
+  collection-status first, with the appropriate --archive-dir and --name.
+  It may pay off to ls the archive dir afterwards, confirming that no
+  unintended --name subdirs have been created.
+
+  After that step any required resynchronizations should be complete and
+  duplicity should again work fine for unattended backups with or without
+  encryption.
+
+ -- Alexander Zangerl Fri, 31 Jul 2009 10:50:30 +1000
\ No newline at end of file
--- duplicity-0.6.15.orig/debian/rules
+++ duplicity-0.6.15/debian/rules
@@ -0,0 +1,94 @@
+#!/usr/bin/make -f
+# Sample debian/rules that uses debhelper.
+# GNU copyright 1997 to 1999 by Joey Hess.
+
+# Uncomment this to turn on verbose mode.
+#export DH_VERBOSE=1
+
+PREFIX := debian/duplicity/usr
+
+
+ifneq (,$(findstring debug,$(DEB_BUILD_OPTIONS)))
+	CFLAGS += -g
+endif
+ifeq (,$(findstring nostrip,$(DEB_BUILD_OPTIONS)))
+	INSTALL_PROGRAM += -s
+endif
+
+
+# we use dpatch
+include /usr/share/dpatch/dpatch.make
+
+
+configure: configure-stamp
+configure-stamp:
+	dh_testdir
+# Add here commands to configure the package.
+	touch configure-stamp
+
+
+build: build-stamp
+
+build-stamp: patch configure-stamp
+	dh_testdir
+
+# Add here commands to compile the package.
+	python setup.py build
+	touch build-stamp
+
+clean: clean1 unpatch
+clean1:
+	dh_testdir
+	dh_testroot
+	rm -f build-stamp configure-stamp
+
+# Add here commands to clean up after the build process.
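+# remove byte-compiled python files and the setup.py build tree,
+# then let dh_clean take care of the usual debhelper droppings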
+	rm -rf *.pyc
+	rm -rf build
+	dh_clean
+
+install: build
+	dh_testdir
+	dh_testroot
+	dh_clean -k
+	dh_installdirs
+
+# Add here commands to install the package into debian/duplicity.
+	python setup.py install --prefix=$(PREFIX) --no-compile --install-layout=deb
+
+# remove local GnuPGInterface.py as we use the packaged version
+	rm -f $(PREFIX)/lib/python*/*-packages/duplicity/GnuPGInterface.py
+# no more local pexpect
+	rm -f $(PREFIX)/lib/python*/*-packages/duplicity/pexpect.py
+
+# remove docs as we create them with debhelper
+	rm -rf $(PREFIX)/share/doc/duplicity-*
+
+
+# Build architecture-independent files here.
+binary-indep: build install
+
+# Build architecture-dependent files here.
+binary-arch: build install
+	# for some odd reason dh_python2 always dies with "must build for 2.5"
+	# unless the version guessing is switched off...
+	dh_python2 --no-guessing-versions
+	dh_testdir
+	dh_testroot
+	dh_installdocs
+	dh_installexamples
+	dh_installman
+	dh_installchangelogs CHANGELOG
+	dh_link
+	dh_strip
+	dh_compress
+	dh_fixperms
+	dh_makeshlibs
+	dh_installdeb
+	dh_shlibdeps
+	dh_gencontrol
+	dh_md5sums
+	dh_builddeb
+
+binary: binary-indep binary-arch
+.PHONY: build clean binary-indep binary-arch binary install configure
--- duplicity-0.6.15.orig/debian/changelog
+++ duplicity-0.6.15/debian/changelog
@@ -0,0 +1,594 @@
+duplicity (0.6.15-0ubuntu2) oneiric; urgency=low
+
+  * Backport some upstream fixes
+  * debian/patches/06_use_passphrase.dpatch:
+    - Don't prompt for passphrase if PASSPHRASE is set (LP: #836467)
+  * debian/patches/07_large_rackspace_list.dpatch:
+    - Support listing more than 10,000 files from Rackspace servers
+      (LP: #832149)
+  * debian/patches/08_check_volumes.dpatch:
+    - Attempt to prevent a data corruption bug that currently can't be
+      reproduced, by detecting it up front and stopping the backup.
+
+ -- Michael Terry Thu, 08 Sep 2011 09:10:57 -0400
+
+duplicity (0.6.15-0ubuntu1) oneiric; urgency=low
+
+  * New upstream bug-fix release
+  * debian/watch:
+    - Update to look at launchpad.net
+  * debian/patches/06u1ignores404.dpatch,
+    debian/patches/07ignoreENOTCONN.dpatch:
+    - Dropped, applied upstream
+
+ -- Michael Terry Fri, 19 Aug 2011 15:30:23 -0400
+
+duplicity (0.6.14-0ubuntu3) oneiric; urgency=low
+
+  * debian/patches/07ignoreENOTCONN.dpatch:
+    - Patch to ignore ENOTCONN errors which can happen when gvfs-fuse
+      closes without cleaning up. LP: #794576
+  * debian/patches/00list:
+    - Actually apply 06u1ignores404.dpatch from last upload as well
+      as the above 07 patch
+
+ -- Michael Terry Thu, 07 Jul 2011 16:09:51 -0400
+
+duplicity (0.6.14-0ubuntu2) oneiric; urgency=low
+
+  * debian/patches/06u1ignores404.dpatch:
+    - Ignore 404 "file not found" errors when trying to delete files
+      on Ubuntu One. Backported from upstream trunk.
+
+ -- Michael Terry Wed, 22 Jun 2011 15:17:39 -0400
+
+duplicity (0.6.14-0ubuntu1) oneiric; urgency=low
+
+  * New upstream release
+  * debian/patches/01collstatus.dpatch,
+    debian/patches/06moregrace.dpatch,
+    debian/patches/07ubuntuone.dpatch,
+    debian/patches/08levelname.dpatch:
+    - Dropped, included upstream
+
+ -- Michael Terry Sat, 18 Jun 2011 21:36:07 -0400
+
+duplicity (0.6.13-2ubuntu1) oneiric; urgency=low
+
+  * debian/patches/07ubuntuone.dpatch:
+    - Backported from trunk; adds Ubuntu One backend
+  * debian/patches/08levelname.dpatch:
+    - Fixes logging issues introduced by above patch
+
+ -- Michael Terry Mon, 13 Jun 2011 11:53:19 -0400
+
+duplicity (0.6.13-2) unstable; urgency=low
+
+  * applied fix to allow collection-status op without access to key
+    (closes: #625645)
+
+ -- Alexander Zangerl Sat, 21 May 2011 17:46:28 +1000
+
+duplicity (0.6.13-1) unstable; urgency=low
+
+  * New upstream release
+  * band-aid for #601584: now the error report is at least more verbose
+
+ -- Alexander Zangerl Thu, 21 Apr 2011 18:28:56 +1000
+
+duplicity (0.6.12-1) unstable; urgency=low
+
+  * New upstream release (closes: #615668, #579966)
+  * switched to dh_python2 (closes: #616797)
+  * now uses mainstream GnuPGInterface module again
+
+ -- Alexander Zangerl Sun, 13 Mar 2011 00:03:18 +1000
+
+duplicity (0.6.09-5) unstable; urgency=low
+
+  * changed homepage field (closes: #599060)
+  * added patch for problems with rsync 3.0.7++ (closes: #595562)
+
+ -- Alexander Zangerl Sat, 20 Nov 2010 14:37:54 +1000
+
+duplicity (0.6.09-4) unstable; urgency=low
+
+  * and again I uploaded the Lenny-built package... sigh. (closes: #594562)
+  * lifted standards version, updated suggests a bit
+
+ -- Alexander Zangerl Tue, 07 Sep 2010 16:45:19 +1000
+
+duplicity (0.6.09-3) unstable; urgency=low
+
+  * add patch to prime option parser with proper arguments
+    (closes: #595567)
+
+ -- Alexander Zangerl Mon, 06 Sep 2010 12:57:46 +1000
+
+duplicity (0.6.09-2) unstable; urgency=high
+
+  * added copyright text for the local, modified, version
+    of GnuPGInterface (closes: #594532)
+  * upload version built against python 2.6, making the package
+    installable in Squeeze (closes: #594562)
+
+ -- Alexander Zangerl Sat, 28 Aug 2010 14:37:44 +1000
+
+duplicity (0.6.09-1) unstable; urgency=low
+
+  * New upstream release (closes: #581260, #572102, #531786)
+
+ -- Alexander Zangerl Wed, 25 Aug 2010 23:32:30 +1000
+
+duplicity (0.6.08b-1) unstable; urgency=low
+
+  * New upstream release
+  * backed out patch for upstream bug#497243 from 06-3 and -4, as
+    that fix was only cosmetic.
+    This version now enforces extra-clean cleanups on any remove operation,
+    which fixes both the cache desync issue and the accumulation of
+    old cruft in the remote archive and the local cache.
+    NEWS.Debian and manpage have been updated to
+    mention that behaviour. (closes: #572792)
+
+ -- Alexander Zangerl Mon, 15 Mar 2010 20:52:56 +1000
+
+duplicity (0.6.06-5) unstable; urgency=low
+
+  * updated watch file (closes: #573890)
+
+ -- Alexander Zangerl Mon, 15 Mar 2010 19:48:37 +1000
+
+duplicity (0.6.06-4) unstable; urgency=low
+
+  * updated fix for upstream bug#497243 to fix one remaining
+    case where cache desynchronization occurs.
+
+ -- Alexander Zangerl Sun, 28 Feb 2010 11:10:07 +1000
+
+duplicity (0.6.06-3) unstable; urgency=low
+
+  * applied fix for upstream bug#497243: cache desynchronization.
+
+ -- Alexander Zangerl Tue, 26 Jan 2010 17:12:18 +1000
+
+duplicity (0.6.06-2) unstable; urgency=high
+
+  * fixed ssh backend failure (tried to import local pexpect module)
+    (closes: #556095)
+
+ -- Alexander Zangerl Mon, 16 Nov 2009 04:48:45 +1000
+
+duplicity (0.6.06-1) unstable; urgency=low
+
+  * New upstream release (closes: #539903, #420858)
+  * no longer depends on python-gnupginterface: upstream
+    provides a modified version which is claimed to be incompatible
+  * no longer installs a local version of python-pexpect
+    (closes: #555359)
+
+ -- Alexander Zangerl Fri, 13 Nov 2009 07:30:49 +1000
+
+duplicity (0.6.05-2) unstable; urgency=low
+
+  * adjusted rules to cater for future python2.6 install
+    setup (closes: #547825)
+
+ -- Alexander Zangerl Fri, 25 Sep 2009 11:05:38 +1000
+
+duplicity (0.6.05-1) unstable; urgency=low
+
+  * New upstream release
+  * lifted standards version
+
+ -- Alexander Zangerl Sun, 20 Sep 2009 10:46:40 +1000
+
+duplicity (0.6.04-1) unstable; urgency=low
+
+  * New upstream release (closes: #536361, #537260, #42858,
+    #399371, #388180, #386749)
+  * new project homepage
+  * added notes regarding changed archive-dir behaviour
+
+ -- Alexander Zangerl Wed, 12 Aug 2009 12:34:01 +1000
+
+duplicity (0.5.16-1) unstable; urgency=low
+
+  * New upstream release (closes: #524786)
+  * removed last remaining debian-local patch
+
+ -- Alexander Zangerl Thu, 23 Apr 2009 14:51:28 +1000
+
+duplicity (0.5.11-2) unstable; urgency=low
+
+  * changed the setup to temporarily include upstream's repaired copy of
+    GnuPGInterface.py: because of #509415 in python-gnupginterface
+    duplicity currently does not work with public-key encryption,
+    no signing, and archive dirs.
+
+ -- Alexander Zangerl Mon, 16 Mar 2009 15:57:01 +1000
+
+duplicity (0.5.11-1) unstable; urgency=low
+
+  * New upstream release (closes: #519576)
+
+ -- Alexander Zangerl Sat, 14 Mar 2009 09:14:57 +1000
+
+duplicity (0.5.06-2) unstable; urgency=low
+
+  * applied most recent upstream fixes
+
+ -- Alexander Zangerl Sat, 31 Jan 2009 14:31:17 +1000
+
+duplicity (0.5.06-1) unstable; urgency=low
+
+  * New upstream release
+  * built against sid, not etch (closes: #513446)
+
+ -- Alexander Zangerl Fri, 30 Jan 2009 11:32:32 +1000
+
+duplicity (0.5.02-2) unstable; urgency=low
+
+  * lifted standards version
+  * added homepage to control (closes: #512798)
+
+ -- Alexander Zangerl Wed, 28 Jan 2009 11:59:57 +1000
+
+duplicity (0.5.02-1) unstable; urgency=low
+
+  * New upstream release (closes: #502207)
+
+ -- Alexander Zangerl Wed, 15 Oct 2008 08:38:15 +1000
+
+duplicity (0.4.12-2) unstable; urgency=low
+
+  * applied upstream patch to repair --no-encryption option
+    (which wrongly requested a passphrase) (closes: #497071)
+
+ -- Alexander Zangerl Sun, 31 Aug 2008 12:24:40 +1000
+
+duplicity (0.4.12-1) unstable; urgency=low
+
+  * New upstream release
+
+ -- Alexander Zangerl Thu, 21 Aug 2008 10:49:04 +1000
+
+duplicity (0.4.11-2) unstable; urgency=high
+
+  * rebuilt for testing's 2.5 python (closes: #480568)
+
+ -- Alexander Zangerl Sun, 11 May 2008 11:10:01 +1000
+
+duplicity (0.4.11-1) unstable; urgency=low
+
+  * New upstream release
+  * make duplicity accept s3 access credentials from boto config files
+    and not just the environment (closes: #480417)
+
+ -- Alexander Zangerl Sat, 10 May 2008 11:17:39 +1000
+
+duplicity (0.4.10-2) unstable; urgency=low
+
+  * applied patch to work around newer python-boto behaviour
+    which can make existing S3 backups inaccessible.
+    (closes: #475890)
+
+ -- Alexander Zangerl Tue, 15 Apr 2008 12:46:32 +1000
+
+duplicity (0.4.10-1) unstable; urgency=low
+
+  * New upstream release
+
+ -- Alexander Zangerl Mon, 31 Mar 2008 22:19:16 +1000
+
+duplicity (0.4.8-1) unstable; urgency=high
+
+  * New upstream release
+  * fixed backup data corruption for rsync backend
+
+ -- Alexander Zangerl Fri, 21 Dec 2007 17:16:42 +1000
+
+duplicity (0.4.7-1) unstable; urgency=low
+
+  * New upstream release (closes: #452700)
+
+ -- Alexander Zangerl Sat, 8 Dec 2007 10:29:26 +1000
+
+duplicity (0.4.3-6) unstable; urgency=low
+
+  * fixed some manpage typos (closes: #450881)
+
+ -- Alexander Zangerl Thu, 15 Nov 2007 12:21:48 +1000
+
+duplicity (0.4.3-5) unstable; urgency=low
+
+  * minor manpage improvements in response to #447538
+
+ -- Alexander Zangerl Wed, 24 Oct 2007 12:22:14 +1000
+
+duplicity (0.4.3-4) unstable; urgency=low
+
+  * applied Christoph Martin's patch to the ftp backend
+    to make duplicity cooperate with etch's ncftp (closes: #444972)
+
+ -- Alexander Zangerl Sat, 6 Oct 2007 20:02:43 +1000
+
+duplicity (0.4.3-3) unstable; urgency=medium
+
+  * reworked the "no passphrase" patch to properly cover
+    symmetric encryption, where a passphrase is always needed
+    (closes: #443803)
+
+ -- Alexander Zangerl Tue, 25 Sep 2007 12:14:26 +1000
+
+duplicity (0.4.3-2) unstable; urgency=low
+
+  * now suggests ncftp (closes: #442834) and mentions that in NEWS.Debian;
+    I have decided that Recommends: is too strong here, as ftp is a lousy
+    protocol which should be avoided as much as possible.
+  * applied upstream fix for leaking ftp passphrases via the commandline
+    (closes: #442840). The fix works only with ncftp version 3.2.1
+    and newer, which means etch is out.
+  * applied upstream patch for upstream-#21123, which fixes another
+    ftp backend problem.
+  * finally fixed the superfluous passphrase dialogs
+  * tidied build process for easier integration into ubuntu, removing
+    some unnecessary python version dependencies
+  * applied upstream patch for upstream-#6211, restoring strict host key
+    checks for the ssh backend.
+
+ -- Alexander Zangerl Wed, 19 Sep 2007 22:36:04 +1000
+
+duplicity (0.4.3-1) unstable; urgency=low
+
+  * New upstream release (closes: #439057)
+    this release closes a whole bunch of old and recent debian bugs:
+    bzip2 is now optional (closes: #437694)
+    the manpage is mostly ok now (closes: #345172)
+    passphrase handling was overhauled (closes: #370198)
+    sockets are now cleanly ignored (closes: #246984)
+    commands are retried for temporary problems (closes: #346306)
+  * new S3 backend (closes: #384490)
+    this requires python-boto, which is now listed as suggested
+  * updated dependencies with python-pexpect
+  * unattended encrypted backups with archive dir work (closes: #369971, #404345)
+  * patch set reworked
+  * added local fix for offending/garbage files prohibiting
+    further actions (closes: #228388)
+  * added local fix for better tempfile naming
+
+ -- Alexander Zangerl Sat, 8 Sep 2007 20:09:26 +1000
+
+duplicity (0.4.2-16) unstable; urgency=low
+
+  * added example backup script (closes: #408749)
+  * re-added ftp-timeout-patch, which was lost somewhere around 0.4.2-6,
+    and added pending ftp-mkdir-patch (closes: #413335)
+
+ -- Alexander Zangerl Tue, 19 Jun 2007 12:38:43 +1000
+
+duplicity (0.4.2-15) unstable; urgency=low
+
+  * added --help option and usage message (closes: #345165)
+
+ -- Alexander Zangerl Tue, 19 Jun 2007 12:09:21 +1000
+
+duplicity (0.4.2-14) unstable; urgency=high
+
+  * fixed bad patch sequence that broke sftp support (closes: #426819)
+
+ -- Alexander Zangerl Fri, 1 Jun 2007 00:19:32 +1000
+
+duplicity (0.4.2-13) unstable; urgency=low
+
+  * added a --volsize option to allow user-specified volume chunks
+    instead of always splitting at 5MB.
+
+ -- Alexander Zangerl Thu, 24 May 2007 22:48:52 +1000
+
+duplicity (0.4.2-12) unstable; urgency=low
+
+  * reworked the patch set
+  * added patch for archive-dir and incrementals (closes: #370206)
+  * added patch for encrypted unattended backups
+    with archive-dir (closes: #369971)
+
+ -- Alexander Zangerl Tue, 10 Apr 2007 14:28:13 +1000
+
+duplicity (0.4.2-11) unstable; urgency=low
+
+  * I'm adopting duplicity. Thanks to Martin Wuertele
+    for his past work on duplicity! (closes: #418159)
+  * finetuned debhelper dependency
+
+ -- Alexander Zangerl Sun, 8 Apr 2007 17:40:30 +1000
+
+duplicity (0.4.2-10.1) unstable; urgency=medium
+
+  * Switch back to python 2.4, as python-central can apparently no longer cope
+    with 2.3, and 2.4 seems to work ok now; patch from Joey Hess.
+    (Closes: #396158)
+
+ -- Steinar H. Gunderson Sat, 11 Nov 2006 13:32:07 +0100
+
+duplicity (0.4.2-10) unstable; urgency=low
+
+  * fix build target (Closes: #386933)
+
+ -- Martin Wuertele Sat, 16 Sep 2006 10:22:28 +0200
+
+duplicity (0.4.2-9) unstable; urgency=low
+
+  * switched to python-central
+  * removed modules patch (no longer needed)
+
+ -- Martin Wuertele Sun, 10 Sep 2006 14:29:07 +0200
+
+duplicity (0.4.2-8) unstable; urgency=high
+
+  * depend on python2.3 fixing restore (Closes: #386607)
+
+ -- Martin Wuertele Sat, 9 Sep 2006 11:10:48 +0200
+
+duplicity (0.4.2-7.1) unstable; urgency=high
+
+  * NMU
+  * Don't call dh_pysupport with -n; we need those generated maintainer
+    scripts to, well, work.
+    Closes: #384489, #384826
+
+ -- Joey Hess Fri, 8 Sep 2006 01:41:52 -0400
+
+duplicity (0.4.2-7) unstable; urgency=low
+
+  * Fix arch so _librsync.so gets compiled (Closes: #385989)
+
+ -- Martin Wuertele Mon, 4 Sep 2006 22:25:09 +0200
+
+duplicity (0.4.2-6) unstable; urgency=low
+
+  * switch to dpatch for patch management
+  * fix private module search path and make sure postinst/postrm work
+    (Closes: #384489)
+  * updated copyright
+
+ -- Martin Wuertele Sat, 26 Aug 2006 23:25:57 +0200
+
+duplicity (0.4.2-5) unstable; urgency=low
+
+  * removed patches from debian-revision
+  * added README.Debian describing applied patches
+  * fix targets (Closes: #384570)
+
+ -- Martin Wuertele Fri, 25 Aug 2006 17:39:09 +0200
+
+duplicity (0.4.2-4+sftp+compression) unstable; urgency=low
+
+  * temporarily disable amazons3 patch
+  * don't pass /usr/share/python-support to dh_pysupport to fix
+    searchpath (Closes: #384489)
+
+ -- Martin Wuertele Thu, 24 Aug 2006 19:55:40 +0200
+
+duplicity (0.4.2-3+sftp+amazons3+compression) unstable; urgency=low
+
+  * remove old byte-compiled stuff in preinst (Closes: #384142)
+
+ -- Martin Wuertele Tue, 22 Aug 2006 22:26:46 +0200
+
+duplicity (0.4.2-2+sftp+amazons3+compression.2) unstable; urgency=low
+
+  * Non-maintainer upload.
+  * Update package to the latest python policy (Closes: #380784).
+
+ -- Pierre Habouzit Sat, 12 Aug 2006 23:20:21 +0200
+
+duplicity (0.4.2-2+sftp+amazons3+compression.1) unstable; urgency=low
+
+  * NMU
+  * Fix echo -e bashism. Closes: #375543
+
+ -- Joey Hess Wed, 5 Jul 2006 16:09:56 -0400
+
+duplicity (0.4.2-2+sftp+amazons3+compression) unstable; urgency=low
+
+  * changed build-depends from python2.3-dev to python-dev >= 2.3
+    (Closes: #367484)
+
+ -- Martin Wuertele Thu, 18 May 2006 13:35:15 -0500
+
+duplicity (0.4.2-1+sftp+amazons3+compression) unstable; urgency=low
+
+  * new upstream release (Closes: #358519)
+  * fixes some scp/sftp problems
+  * understands ftp 450 (Closes: #238677)
+  * --remove-older-than makes sure duplicity deletes older signatures
+  * --remove-older-than now cannot delete the active backup chain
+    (Closes: #228386)
+  * added sftp patch by intrigeri
+  * added amazon s3 patch by Brian Sutherland
+  * added compression patch by Mathias Wagner
+
+ -- Martin Wuertele Mon, 15 May 2006 13:44:05 -0500
+
+duplicity (0.4.1-8) unstable; urgency=high
+
+  * added patch to fix ftp timeout exception when backing up huge files with
+    small changes (patch by Stefan Schimanski)
+
+ -- Martin Wuertele Mon, 6 Sep 2004 18:57:42 +0200
+
+duplicity (0.4.1-7) unstable; urgency=low
+
+  * fixed linebreak in duplicity.1 (Thanks to Uli Martens)
+
+ -- Martin Wuertele Fri, 3 Sep 2004 16:36:45 +0200
+
+duplicity (0.4.1-6) unstable; urgency=low
+
+  * fixed permissions for tarfile.py
+  * converted changelog to UTF-8
+  * fixed python dependency to 2.3 in tarfile.py
+
+ -- Martin Wuertele Sat, 1 May 2004 22:27:22 +0200
+
+duplicity (0.4.1-5) unstable; urgency=low
+
+  * Depend on python-gnupginterface instead of providing GnuPGInterface.py
+    (Closes: #230048)
+
+ -- Martin Wuertele Fri, 30 Jan 2004 18:13:05 +0100
+
+duplicity (0.4.1-4) unstable; urgency=low
+
+  * removed byte-compiled code and added postinst to do so
+    (Closes: #221399)
+
+ -- Martin Wuertele Thu, 20 Nov 2003 19:49:57 +0100
+
+duplicity (0.4.1-3) unstable; urgency=low
+
+  * removed CHANGELOG.gz from package
+    (Closes: #219784)
+
+ -- Martin Wuertele Sun, 9 Nov 2003 19:51:53 +0100
+
+duplicity (0.4.1-2) unstable; urgency=low
+
+  * use librsync.h and depend on librsync-dev >= 0.9.6, since prior
versions + provide rsync.h + + -- Martin Wuertele Sun, 31 Aug 2003 17:19:58 +0200 + +duplicity (0.4.1-1) unstable; urgency=low + + * new upstream release + + -- Martin Würtele Mon, 11 Aug 2003 21:09:56 +0200 + +duplicity (0.4.0-4) unstable; urgency=low + + * fixed auto build problem + (Closes: #204720) + + -- Martin Würtele Sun, 10 Aug 2003 14:03:20 +0200 + +duplicity (0.4.0-3) unstable; urgency=low + + * recompiled witch python 2.3 + + -- Martin Würtele Sat, 9 Aug 2003 09:17:33 +0200 + +duplicity (0.4.0-2) unstable; urgency=low + + * applied LongLink patch from cvs + * added tarfile license to copyright + + -- Martin Würtele Fri, 8 Aug 2003 16:06:27 +0200 + +duplicity (0.4.0-1) unstable; urgency=low + + * Initial Release. + (Closes: #188713) + + -- Martin Wuertele Sat, 12 Apr 2003 17:06:27 +0200 + --- duplicity-0.6.15.orig/debian/duplicity.examples +++ duplicity-0.6.15/debian/duplicity.examples @@ -0,0 +1 @@ +debian/examples/system-backup --- duplicity-0.6.15.orig/debian/preinst +++ duplicity-0.6.15/debian/preinst @@ -0,0 +1,8 @@ +#!/bin/sh -e + +# fix my previous errors + +rm -rf /usr/lib/python2.2/site-packages/duplicity/ +rm -rf /usr/lib/python2.3/site-packages/duplicity/ + +#DEBHELPER# --- duplicity-0.6.15.orig/debian/dirs +++ duplicity-0.6.15/debian/dirs @@ -0,0 +1 @@ +usr/bin --- duplicity-0.6.15.orig/debian/duplicity.docs +++ duplicity-0.6.15/debian/duplicity.docs @@ -0,0 +1,2 @@ +debian/README.source +LOG-README --- duplicity-0.6.15.orig/debian/README.source +++ duplicity-0.6.15/debian/README.source @@ -0,0 +1,38 @@ +This package uses dpatch to manage all modifications to the upstream +source. Changes are stored in the source package as diffs in +debian/patches and applied during the build. + +To get the fully patched source after unpacking the source package, cd +to the root level of the source package and run: + + debian/rules patch + +Removing a patch is as simple as removing its entry from the +debian/patches/00list file, and please also remove the patch file +itself. + +Creating a new patch is done with "dpatch-edit-patch patch XX_patchname" +where you should replace XX with a new number and patchname with a +descriptive shortname of the patch. You can then simply edit all the +files your patch wants to edit, and then simply "exit 0" from the shell +to actually create the patch file. + +To tweak an already existing patch, call "dpatch-edit-patch XX_patchname" +and replace XX_patchname with the actual filename from debian/patches +you want to use. + +To clean up afterwards again, "debian/rules unpatch" will do the +work for you - or you can of course choose to call +"fakeroot debian/rules clean" all together. + + +--- + +this documentation is part of dpatch package, and may be used by +packages using dpatch to comply with policy on README.source. This +documentation is meant to be useful to users who are not proficient in +dpatch in doing work with dpatch-based packages. Please send any +improvements to the BTS of dpatch package. + +original text by Gerfried Fuchs, edited by Junichi Uekawa +10 Aug 2008. 
--- duplicity-0.6.15.orig/debian/control
+++ duplicity-0.6.15/debian/control
@@ -0,0 +1,22 @@
+Source: duplicity
+Section: utils
+Priority: optional
+Maintainer: Ubuntu Developers
+XSBC-Original-Maintainer: Alexander Zangerl
+Build-Depends: debhelper (>= 5.0.37.2), librsync-dev (>= 0.9.6), python-dev (>= 2.6.6-3), dpatch
+Standards-Version: 3.9.1
+X-Python-Version: >= 2.5
+
+Package: duplicity
+Architecture: any
+Homepage: http://duplicity.nongnu.org/
+Depends: ${shlibs:Depends}, ${python:Depends}, ${misc:Depends}, python-pexpect (>= 2.3-1), python-gnupginterface (>= 0.3.2-9.1)
+Suggests: python-boto, ncftp, rsync, ssh
+Description: encrypted bandwidth-efficient backup
+ Duplicity backs up directories by producing encrypted tar-format volumes
+ and uploading them to a remote or local file server. Because duplicity
+ uses librsync, the incremental archives are space efficient and only
+ record the parts of files that have changed since the last backup.
+ Because duplicity uses GnuPG to encrypt and/or sign these archives, they
+ will be safe from spying and/or modification by the server.
+
--- duplicity-0.6.15.orig/debian/watch
+++ duplicity-0.6.15/debian/watch
@@ -0,0 +1,2 @@
+version=3
+http://launchpad.net/duplicity/+download .*/duplicity-(.+)\.tar\.gz
--- duplicity-0.6.15.orig/debian/patches/00list
+++ duplicity-0.6.15/debian/patches/00list
@@ -0,0 +1,7 @@
+01pexpect
+02cachedesync
+03forcecleanup
+05upstreamgpgintf
+06_use_passphrase
+07_large_rackspace_list
+08_check_volumes
--- duplicity-0.6.15.orig/debian/patches/07_large_rackspace_list.dpatch
+++ duplicity-0.6.15/debian/patches/07_large_rackspace_list.dpatch
@@ -0,0 +1,25 @@
+#! /bin/sh /usr/share/dpatch/dpatch-run
+## 07_large_rackspace_list.dpatch by Michael Terry
+##
+## All lines beginning with `## DP:' are a description of the patch.
+## DP: Support listing more than 10,000 files from Rackspace servers (LP: #832149).
+
+@DPATCH@
+diff -urNad '--exclude=CVS' '--exclude=.svn' '--exclude=.git' '--exclude=.arch' '--exclude=.hg' '--exclude=_darcs' '--exclude=.bzr' duplicity-0.6.15~/src/backends/cloudfilesbackend.py duplicity-0.6.15/src/backends/cloudfilesbackend.py
+--- duplicity-0.6.15~/src/backends/cloudfilesbackend.py	2011-08-19 14:26:58.000000000 -0400
++++ duplicity-0.6.15/src/backends/cloudfilesbackend.py	2011-09-08 09:06:50.141181113 -0400
+@@ -119,7 +119,13 @@
+         for n in range(1, globals.num_retries+1):
+             log.Info("Listing '%s'" % (self.container))
+             try:
+-                keys = self.container.list_objects()
++                # Cloud Files will return a max of 10,000 objects. We have
++                # to make multiple requests to get them all.
++                objs = self.container.list_objects()
++                keys = objs
++                while len(objs) == 10000:
++                    objs = self.container.list_objects(marker=keys[-1])
++                    keys += objs
+                 return keys
+             except self.resp_exc, resperr:
+                 log.Warn("Listing of '%s' failed (attempt %s): CloudFiles returned: %s %s"
--- duplicity-0.6.15.orig/debian/patches/06_use_passphrase.dpatch
+++ duplicity-0.6.15/debian/patches/06_use_passphrase.dpatch
@@ -0,0 +1,1442 @@
+#! /bin/sh /usr/share/dpatch/dpatch-run
+## 06_use_passphrase.dpatch by Michael Terry
+##
+## All lines beginning with `## DP:' are a description of the patch.
+## DP: Don't prompt for a passphrase if PASSPHRASE is set (LP: #836467).
+ +@DPATCH@ +diff -urNad '--exclude=CVS' '--exclude=.svn' '--exclude=.git' '--exclude=.arch' '--exclude=.hg' '--exclude=_darcs' '--exclude=.bzr' duplicity-0.6.15~/duplicity duplicity-0.6.15/duplicity +--- duplicity-0.6.15~/duplicity 2011-09-08 09:02:47.045178540 -0400 ++++ duplicity-0.6.15/duplicity 2011-09-08 09:04:01.657179330 -0400 +@@ -90,15 +90,15 @@ + ## if signing key is also an encryption key assume that the passphrase is identical + if ( for_signing + and globals.gpg_profile.sign_key in globals.gpg_profile.recipients +- and globals.gpg_profile.passphrase is not None ): +- log.Notice(_("Reuse already set PASSPHRASE as SIGNING_PASSPHRASE")) +- return globals.gpg_profile.passphrase ++ and 'PASSPHRASE' in os.environ ): ++ log.Notice(_("Reuse configured PASSPHRASE as SIGN_PASSPHRASE")) ++ return os.environ['PASSPHRASE'] + ## if one encryption key is also the signing key assume that the passphrase is identical + if ( not for_signing + and globals.gpg_profile.sign_key in globals.gpg_profile.recipients +- and globals.gpg_profile.signing_passphrase is not None ): +- log.Notice(_("Reuse already set SIGNING_PASSPHRASE as PASSPHRASE")) +- return globals.gpg_profile.signing_passphrase ++ and 'SIGN_PASSPHRASE' in os.environ ): ++ log.Notice(_("Reuse configured SIGN_PASSPHRASE as PASSPHRASE")) ++ return os.environ['SIGN_PASSPHRASE'] + + # Next, verify we need to ask the user + +@@ -1280,7 +1280,7 @@ + # the sign key can have a different passphrase than the encrypt + # key, therefore request a passphrase + if globals.gpg_profile.sign_key: +- globals.gpg_profile.signing_passphrase = get_passphrase(3, action, True) ++ globals.gpg_profile.signing_passphrase = get_passphrase(1, action, True) + + # if there are no recipients (no --encrypt-key), it must be a + # symmetric key. Therefore, confirm the passphrase +diff -urNad '--exclude=CVS' '--exclude=.svn' '--exclude=.git' '--exclude=.arch' '--exclude=.hg' '--exclude=_darcs' '--exclude=.bzr' duplicity-0.6.15~/duplicity.orig duplicity-0.6.15/duplicity.orig +--- duplicity-0.6.15~/duplicity.orig 1969-12-31 19:00:00.000000000 -0500 ++++ duplicity-0.6.15/duplicity.orig 2011-09-08 09:02:47.000000000 -0400 +@@ -0,0 +1,1397 @@ ++#!/usr/bin/env python ++# -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*- ++# ++# duplicity -- Encrypted bandwidth efficient backup ++# Version 0.6.15 released August 19, 2011 ++# ++# Copyright 2002 Ben Escoto ++# Copyright 2007 Kenneth Loafman ++# ++# This file is part of duplicity. ++# ++# Duplicity is free software; you can redistribute it and/or modify it ++# under the terms of the GNU General Public License as published by the ++# Free Software Foundation; either version 2 of the License, or (at your ++# option) any later version. ++# ++# Duplicity is distributed in the hope that it will be useful, but ++# WITHOUT ANY WARRANTY; without even the implied warranty of ++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++# General Public License for more details. ++# ++# You should have received a copy of the GNU General Public License ++# along with duplicity; if not, write to the Free Software Foundation, ++# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ++# ++# See http://www.nongnu.org/duplicity for more information. ++# Please send mail to me or the mailing list if you find bugs or have ++# any suggestions. 
++ ++import getpass, gzip, os, sys, time, types ++import traceback, platform, statvfs, resource, re ++ ++import gettext ++gettext.install('duplicity') ++ ++from duplicity import log ++log.setup() ++ ++import duplicity.errors ++ ++from duplicity import collections ++from duplicity import commandline ++from duplicity import diffdir ++from duplicity import dup_temp ++from duplicity import dup_time ++from duplicity import file_naming ++from duplicity import globals ++from duplicity import gpg ++from duplicity import manifest ++from duplicity import patchdir ++from duplicity import path ++from duplicity import robust ++from duplicity import tempdir ++from duplicity import asyncscheduler ++from duplicity import util ++ ++# If exit_val is not None, exit with given value at end. ++exit_val = None ++ ++ ++def get_passphrase(n, action, for_signing = False): ++ """ ++ Check to make sure passphrase is indeed needed, then get ++ the passphrase from environment, from gpg-agent, or user ++ ++ If n=3, a password is requested and verified. If n=2, the current ++ password is verified. If n=1, a password is requested without ++ verification for the time being. ++ ++ @type n: int ++ @param n: verification level for a passphrase being requested ++ @type action: string ++ @param action: action to perform ++ @type for_signing: boolean ++ @param for_signing: true if the passphrase is for a signing key, false if not ++ @rtype: string ++ @return: passphrase ++ """ ++ ++ # First try the environment ++ try: ++ if for_signing: ++ return os.environ['SIGN_PASSPHRASE'] ++ else: ++ return os.environ['PASSPHRASE'] ++ except KeyError: ++ pass ++ ++ # check if we can reuse an already set (signing_)passphrase ++ ## if signing key is also an encryption key assume that the passphrase is identical ++ if ( for_signing ++ and globals.gpg_profile.sign_key in globals.gpg_profile.recipients ++ and globals.gpg_profile.passphrase is not None ): ++ log.Notice(_("Reuse already set PASSPHRASE as SIGNING_PASSPHRASE")) ++ return globals.gpg_profile.passphrase ++ ## if one encryption key is also the signing key assume that the passphrase is identical ++ if ( not for_signing ++ and globals.gpg_profile.sign_key in globals.gpg_profile.recipients ++ and globals.gpg_profile.signing_passphrase is not None ): ++ log.Notice(_("Reuse already set SIGNING_PASSPHRASE as PASSPHRASE")) ++ return globals.gpg_profile.signing_passphrase ++ ++ # Next, verify we need to ask the user ++ ++ # Assumptions: ++ # - encrypt-key has no passphrase ++ # - sign-key requires passphrase ++ # - gpg-agent supplies all, no user interaction ++ ++ # no passphrase if --no-encryption or --use-agent ++ if not globals.encryption or globals.use_agent: ++ return "" ++ ++ # these commands don't need a password ++ elif action in ["collection-status", ++ "list-current", ++ "remove-all-but-n-full", ++ "remove-all-inc-of-but-n-full", ++ "remove-old", ++ ]: ++ return "" ++ ++ # for a full backup, we don't need a password if ++ # there is no sign_key and there are recipients ++ elif (action == "full" ++ and globals.gpg_profile.recipients ++ and not globals.gpg_profile.sign_key): ++ return "" ++ ++ # for an inc backup, we don't need a password if ++ # there is no sign_key and there are recipients ++ elif (action == "inc" ++ and globals.gpg_profile.recipients ++ and not globals.gpg_profile.sign_key): ++ return "" ++ ++ # Finally, ask the user for the passphrase ++ else: ++ log.Info(_("PASSPHRASE variable not set, asking user.")) ++ use_cache = True ++ while 1: ++ # ask the user to enter a 
new passphrase to avoid an infinite loop ++ # if the user made a typo in the first passphrase ++ if use_cache and n == 2: ++ if for_signing: ++ pass1 = globals.gpg_profile.signing_passphrase ++ else: ++ pass1 = globals.gpg_profile.passphrase ++ else: ++ if for_signing: ++ if use_cache and globals.gpg_profile.signing_passphrase: ++ pass1 = globals.gpg_profile.signing_passphrase ++ else: ++ pass1 = getpass.getpass(_("GnuPG passphrase for signing key:")+" ") ++ else: ++ if use_cache and globals.gpg_profile.passphrase: ++ pass1 = globals.gpg_profile.passphrase ++ else: ++ pass1 = getpass.getpass(_("GnuPG passphrase:")+" ") ++ ++ if n == 1: ++ pass2 = pass1 ++ elif for_signing: ++ pass2 = getpass.getpass(_("Retype passphrase for signing key to confirm: ")) ++ else: ++ pass2 = getpass.getpass(_("Retype passphrase to confirm: ")) ++ ++ if not pass1 == pass2: ++ print _("First and second passphrases do not match! Please try again.") ++ use_cache = False ++ continue ++ ++ if not pass1 and not globals.gpg_profile.recipients and not for_signing: ++ print _("Cannot use empty passphrase with symmetric encryption! Please try again.") ++ use_cache = False ++ continue ++ ++ return pass1 ++ ++ ++def dummy_backup(tarblock_iter): ++ """ ++ Fake writing to backend, but do go through all the source paths. ++ ++ @type tarblock_iter: tarblock_iter ++ @param tarblock_iter: iterator for current tar block ++ ++ @rtype: int ++ @return: constant 0 (zero) ++ """ ++ try: ++ # Just spin our wheels ++ while tarblock_iter.next(): ++ pass ++ except StopIteration: ++ pass ++ log.Progress(None, diffdir.stats.SourceFileSize) ++ return 0 ++ ++ ++def restart_position_iterator(tarblock_iter): ++ """ ++ Fake writing to backend, but do go through all the source paths. ++ Stop when we have processed the last file and block from the ++ last backup. Normal backup will proceed at the start of the ++ next volume in the set. ++ ++ @type tarblock_iter: tarblock_iter ++ @param tarblock_iter: iterator for current tar block ++ ++ @rtype: int ++ @return: constant 0 (zero) ++ """ ++ last_index = globals.restart.last_index ++ last_block = globals.restart.last_block ++ try: ++ # Just spin our wheels ++ while tarblock_iter.next(): ++ if (tarblock_iter.previous_index == last_index): ++ if (tarblock_iter.previous_block > last_block): ++ break ++ if tarblock_iter.previous_index > last_index: ++ log.Warn(_("File %s complete in backup set.\n" ++ "Continuing restart on file %s.") % ++ ("/".join(last_index), "/".join(tarblock_iter.previous_index)), ++ log.ErrorCode.restart_file_not_found) ++ break ++ except StopIteration: ++ log.Warn(_("File %s missing in backup set.\n" ++ "Continuing restart on file %s.") % ++ ("/".join(last_index), "/".join(tarblock_iter.previous_index)), ++ log.ErrorCode.restart_file_not_found) ++ return 0 ++ ++ ++def write_multivol(backup_type, tarblock_iter, man_outfp, sig_outfp, backend): ++ """ ++ Encrypt volumes of tarblock_iter and write to backend ++ ++ backup_type should be "inc" or "full" and only matters here when ++ picking the filenames. The path_prefix will determine the names ++ of the files written to backend. Also writes manifest file. ++ Returns number of bytes written. 
++ ++ @type backup_type: string ++ @param backup_type: type of backup to perform, either 'inc' or 'full' ++ @type tarblock_iter: tarblock_iter ++ @param tarblock_iter: iterator for current tar block ++ @type backend: callable backend object ++ @param backend: I/O backend for selected protocol ++ ++ @rtype: int ++ @return: bytes written ++ """ ++ ++ def get_indicies(tarblock_iter): ++ """Return start_index and end_index of previous volume""" ++ start_index, start_block = tarblock_iter.recall_index() ++ if start_index is None: ++ start_index = () ++ start_block = None ++ if start_block: ++ start_block -= 1 ++ end_index, end_block = tarblock_iter.get_previous_index() ++ if end_index is None: ++ end_index = start_index ++ end_block = start_block ++ if end_block: ++ end_block -= 1 ++ return start_index, start_block, end_index, end_block ++ ++ def put(tdp, dest_filename): ++ """ ++ Retrieve file size *before* calling backend.put(), which may (at least ++ in case of the localbackend) rename the temporary file to the target ++ instead of copying. ++ """ ++ putsize = tdp.getsize() ++ backend.put(tdp, dest_filename) ++ if tdp.stat: ++ tdp.delete() ++ return putsize ++ ++ if not globals.restart: ++ # normal backup start ++ vol_num = 0 ++ mf = manifest.Manifest(fh=man_outfp) ++ mf.set_dirinfo() ++ else: ++ # restart from last known position ++ mf = globals.restart.last_backup.get_local_manifest() ++ globals.restart.checkManifest(mf) ++ globals.restart.setLastSaved(mf) ++ mf.fh = man_outfp ++ last_block = globals.restart.last_block ++ log.Notice("Restarting after volume %s, file %s, block %s" % ++ (globals.restart.start_vol, ++ "/".join(globals.restart.last_index), ++ globals.restart.last_block)) ++ vol_num = globals.restart.start_vol ++ restart_position_iterator(tarblock_iter) ++ ++ at_end = 0 ++ bytes_written = 0 ++ ++ # This assertion must be kept until we have solved the problem ++ # of concurrency at the backend level. Concurrency 1 is fine ++ # because the actual I/O concurrency on backends is limited to ++ # 1 as usual, but we are allowed to perform local CPU ++ # intensive tasks while that single upload is happening. This ++ # is an assert put in place to avoid someone accidentally ++ # enabling concurrency above 1, before adequate work has been ++ # done on the backends to make them support concurrency. ++ assert globals.async_concurrency <= 1 ++ ++ io_scheduler = asyncscheduler.AsyncScheduler(globals.async_concurrency) ++ async_waiters = [] ++ ++ while not at_end: ++ # set up iterator ++ tarblock_iter.remember_next_index() # keep track of start index ++ ++ # Create volume ++ vol_num += 1 ++ dest_filename = file_naming.get(backup_type, vol_num, ++ encrypted=globals.encryption, ++ gzipped=not globals.encryption) ++ tdp = dup_temp.new_tempduppath(file_naming.parse(dest_filename)) ++ ++ # write volume ++ if globals.encryption: ++ at_end = gpg.GPGWriteFile(tarblock_iter, tdp.name, ++ globals.gpg_profile, globals.volsize) ++ else: ++ at_end = gpg.GzipWriteFile(tarblock_iter, tdp.name, globals.volsize) ++ tdp.setdata() ++ ++ # Add volume information to manifest ++ vi = manifest.VolumeInfo() ++ vi.set_info(vol_num, *get_indicies(tarblock_iter)) ++ vi.set_hash("SHA1", gpg.get_hash("SHA1", tdp)) ++ mf.add_volume_info(vi) ++ ++ # Checkpoint after each volume so restart has a place to restart. ++ # Note that until after the first volume, all files are temporary. 
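++ # to_partial() records the first checkpoint as partial files in the
++ # archive dir; subsequent volumes merely flush() the appended data.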
++ if vol_num == 1: ++ sig_outfp.to_partial() ++ man_outfp.to_partial() ++ else: ++ sig_outfp.flush() ++ man_outfp.flush() ++ ++ async_waiters.append(io_scheduler.schedule_task(lambda tdp, dest_filename: put(tdp, dest_filename), ++ (tdp, dest_filename))) ++ ++ # Log human-readable version as well as raw numbers for machine consumers ++ log.Progress('Processed volume %d' % vol_num, diffdir.stats.SourceFileSize) ++ ++ # for testing purposes only - assert on inc or full ++ assert globals.fail_on_volume != vol_num, "Forced assertion for testing at volume %d" % vol_num ++ ++ # Collect byte count from all asynchronous jobs; also implicitly waits ++ # for them all to complete. ++ for waiter in async_waiters: ++ bytes_written += waiter() ++ ++ # Upload the collection summary. ++ #bytes_written += write_manifest(mf, backup_type, backend) ++ ++ return bytes_written ++ ++ ++def get_man_fileobj(backup_type): ++ """ ++ Return a fileobj opened for writing, save results as manifest ++ ++ Save manifest in globals.archive_dir gzipped. ++ Save them on the backend encrypted as needed. ++ ++ @type man_type: string ++ @param man_type: either "full" or "new" ++ ++ @rtype: fileobj ++ @return: fileobj opened for writing ++ """ ++ assert backup_type == "full" or backup_type == "inc" ++ ++ part_man_filename = file_naming.get(backup_type, ++ manifest=True, ++ partial=True) ++ perm_man_filename = file_naming.get(backup_type, ++ manifest=True) ++ remote_man_filename = file_naming.get(backup_type, ++ manifest=True, ++ encrypted=globals.encryption) ++ ++ fh = dup_temp.get_fileobj_duppath(globals.archive_dir, ++ part_man_filename, ++ perm_man_filename, ++ remote_man_filename) ++ return fh ++ ++ ++def get_sig_fileobj(sig_type): ++ """ ++ Return a fileobj opened for writing, save results as signature ++ ++ Save signatures in globals.archive_dir gzipped. ++ Save them on the backend encrypted as needed. 
++ ++ @type sig_type: string ++ @param sig_type: either "full-sig" or "new-sig" ++ ++ @rtype: fileobj ++ @return: fileobj opened for writing ++ """ ++ assert sig_type in ["full-sig", "new-sig"] ++ ++ part_sig_filename = file_naming.get(sig_type, ++ gzipped=False, ++ partial=True) ++ perm_sig_filename = file_naming.get(sig_type, ++ gzipped=True) ++ remote_sig_filename = file_naming.get(sig_type, encrypted=globals.encryption, ++ gzipped=not globals.encryption) ++ ++ fh = dup_temp.get_fileobj_duppath(globals.archive_dir, ++ part_sig_filename, ++ perm_sig_filename, ++ remote_sig_filename) ++ return fh ++ ++ ++def full_backup(col_stats): ++ """ ++ Do full backup of directory to backend, using archive_dir ++ ++ @type col_stats: CollectionStatus object ++ @param col_stats: collection status ++ ++ @rtype: void ++ @return: void ++ """ ++ if globals.dry_run: ++ tarblock_iter = diffdir.DirFull(globals.select) ++ bytes_written = dummy_backup(tarblock_iter) ++ col_stats.set_values(sig_chain_warning=None) ++ else: ++ sig_outfp = get_sig_fileobj("full-sig") ++ man_outfp = get_man_fileobj("full") ++ tarblock_iter = diffdir.DirFull_WriteSig(globals.select, ++ sig_outfp) ++ bytes_written = write_multivol("full", tarblock_iter, ++ man_outfp, sig_outfp, ++ globals.backend) ++ ++ # close sig file, send to remote, and rename to final ++ sig_outfp.close() ++ sig_outfp.to_remote() ++ sig_outfp.to_final() ++ ++ # close manifest, send to remote, and rename to final ++ man_outfp.close() ++ man_outfp.to_remote() ++ man_outfp.to_final() ++ ++ col_stats.set_values(sig_chain_warning=None) ++ ++ print_statistics(diffdir.stats, bytes_written) ++ ++ ++def check_sig_chain(col_stats): ++ """ ++ Get last signature chain for inc backup, or None if none available ++ ++ @type col_stats: CollectionStatus object ++ @param col_stats: collection status ++ """ ++ if not col_stats.matched_chain_pair: ++ if globals.incremental: ++ log.FatalError(_("Fatal Error: Unable to start incremental backup. " ++ "Old signatures not found and incremental specified"), ++ log.ErrorCode.inc_without_sigs) ++ else: ++ log.Warn(_("No signatures found, switching to full backup.")) ++ return None ++ return col_stats.matched_chain_pair[0] ++ ++ ++def print_statistics(stats, bytes_written): ++ """ ++ If globals.print_statistics, print stats after adding bytes_written ++ ++ @rtype: void ++ @return: void ++ """ ++ if globals.print_statistics: ++ diffdir.stats.TotalDestinationSizeChange = bytes_written ++ print diffdir.stats.get_stats_logstring(_("Backup Statistics")) ++ ++ ++def incremental_backup(sig_chain): ++ """ ++ Do incremental backup of directory to backend, using archive_dir ++ ++ @rtype: void ++ @return: void ++ """ ++ dup_time.setprevtime(sig_chain.end_time) ++ if dup_time.curtime == dup_time.prevtime: ++ time.sleep(2) ++ dup_time.setcurtime() ++ assert dup_time.curtime != dup_time.prevtime, "time not moving forward at appropriate pace - system clock issues?" 
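++ # A dry run below only walks the source tree via dummy_backup(); a real
++ # run writes a new signature/manifest pair, uploads the volumes through
++ # write_multivol(), and then promotes both files to their final names.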
++ if globals.dry_run: ++ tarblock_iter = diffdir.DirDelta(globals.select, ++ sig_chain.get_fileobjs()) ++ bytes_written = dummy_backup(tarblock_iter) ++ else: ++ new_sig_outfp = get_sig_fileobj("new-sig") ++ new_man_outfp = get_man_fileobj("inc") ++ tarblock_iter = diffdir.DirDelta_WriteSig(globals.select, ++ sig_chain.get_fileobjs(), ++ new_sig_outfp) ++ bytes_written = write_multivol("inc", tarblock_iter, ++ new_man_outfp, new_sig_outfp, ++ globals.backend) ++ ++ # close sig file and rename to final ++ new_sig_outfp.close() ++ new_sig_outfp.to_remote() ++ new_sig_outfp.to_final() ++ ++ # close manifest and rename to final ++ new_man_outfp.close() ++ new_man_outfp.to_remote() ++ new_man_outfp.to_final() ++ ++ print_statistics(diffdir.stats, bytes_written) ++ ++ ++def list_current(col_stats): ++ """ ++ List the files current in the archive (examining signature only) ++ ++ @type col_stats: CollectionStatus object ++ @param col_stats: collection status ++ ++ @rtype: void ++ @return: void ++ """ ++ time = globals.restore_time or dup_time.curtime ++ sig_chain = col_stats.get_signature_chain_at_time(time) ++ path_iter = diffdir.get_combined_path_iter(sig_chain.get_fileobjs(time)) ++ for path in path_iter: ++ if path.difftype != "deleted": ++ user_info = "%s %s" % (dup_time.timetopretty(path.getmtime()), ++ path.get_relative_path()) ++ log_info = "%s %s" % (dup_time.timetostring(path.getmtime()), ++ util.escape(path.get_relative_path())) ++ log.Log(user_info, log.INFO, log.InfoCode.file_list, ++ log_info, True) ++ ++ ++def restore(col_stats): ++ """ ++ Restore archive in globals.backend to globals.local_path ++ ++ @type col_stats: CollectionStatus object ++ @param col_stats: collection status ++ ++ @rtype: void ++ @return: void ++ """ ++ if globals.dry_run: ++ return ++ if not patchdir.Write_ROPaths(globals.local_path, ++ restore_get_patched_rop_iter(col_stats)): ++ if globals.restore_dir: ++ log.FatalError(_("%s not found in archive, no files restored.") ++ % (globals.restore_dir,), ++ log.ErrorCode.restore_dir_not_found) ++ else: ++ log.FatalError(_("No files found in archive - nothing restored."), ++ log.ErrorCode.no_restore_files) ++ ++ ++def restore_get_patched_rop_iter(col_stats): ++ """ ++ Return iterator of patched ROPaths of desired restore data ++ ++ @type col_stats: CollectionStatus object ++ @param col_stats: collection status ++ """ ++ if globals.restore_dir: ++ index = tuple(globals.restore_dir.split("/")) ++ else: ++ index = () ++ time = globals.restore_time or dup_time.curtime ++ backup_chain = col_stats.get_backup_chain_at_time(time) ++ assert backup_chain, col_stats.all_backup_chains ++ backup_setlist = backup_chain.get_sets_at_time(time) ++ num_vols = 0 ++ for s in backup_setlist: ++ num_vols += len(s) ++ cur_vol = [0] ++ ++ def get_fileobj_iter(backup_set): ++ """Get file object iterator from backup_set contain given index""" ++ manifest = backup_set.get_manifest() ++ volumes = manifest.get_containing_volumes(index) ++ for vol_num in volumes: ++ yield restore_get_enc_fileobj(backup_set.backend, ++ backup_set.volume_name_dict[vol_num], ++ manifest.volume_info_dict[vol_num]) ++ cur_vol[0] += 1 ++ log.Progress(_('Processed volume %d of %d') % (cur_vol[0], num_vols), ++ cur_vol[0], num_vols) ++ ++ fileobj_iters = map(get_fileobj_iter, backup_setlist) ++ tarfiles = map(patchdir.TarFile_FromFileobjs, fileobj_iters) ++ return patchdir.tarfiles2rop_iter(tarfiles, index) ++ ++ ++def restore_get_enc_fileobj(backend, filename, volume_info): ++ """ ++ Return plaintext fileobj from 
encrypted filename on backend ++ ++ If volume_info is set, the hash of the file will be checked, ++ assuming some hash is available. Also, if globals.sign_key is ++ set, a fatal error will be raised if file not signed by sign_key. ++ ++ """ ++ parseresults = file_naming.parse(filename) ++ tdp = dup_temp.new_tempduppath(parseresults) ++ backend.get(filename, tdp) ++ ++ """ verify hash of the remote file """ ++ verified, hash_pair, calculated_hash = restore_check_hash(volume_info, tdp) ++ if not verified: ++ log.FatalError("%s\n %s\n %s\n %s\n" % ++ (_("Invalid data - %s hash mismatch for file:") % hash_pair[0], ++ filename, ++ _("Calculated hash: %s") % calculated_hash, ++ _("Manifest hash: %s") % hash_pair[1]), ++ log.ErrorCode.mismatched_hash) ++ ++ fileobj = tdp.filtered_open_with_delete("rb") ++ if parseresults.encrypted and globals.gpg_profile.sign_key: ++ restore_add_sig_check(fileobj) ++ return fileobj ++ ++ ++def restore_check_hash(volume_info, vol_path): ++ """ ++ Check the hash of vol_path path against data in volume_info ++ ++ @rtype: boolean ++ @return: true (verified) / false (failed) ++ """ ++ hash_pair = volume_info.get_best_hash() ++ if hash_pair: ++ calculated_hash = gpg.get_hash(hash_pair[0], vol_path) ++ if calculated_hash != hash_pair[1]: ++ return False, hash_pair, calculated_hash ++ """ reached here, verification passed """ ++ return True, hash_pair, calculated_hash ++ ++ ++def restore_add_sig_check(fileobj): ++ """ ++ Require signature when closing fileobj matches sig in gpg_profile ++ ++ @rtype: void ++ @return: void ++ """ ++ assert (isinstance(fileobj, dup_temp.FileobjHooked) and ++ isinstance(fileobj.fileobj, gpg.GPGFile)), fileobj ++ def check_signature(): ++ """Thunk run when closing volume file""" ++ actual_sig = fileobj.fileobj.get_signature() ++ if actual_sig != globals.gpg_profile.sign_key: ++ log.FatalError(_("Volume was signed by key %s, not %s") % ++ (actual_sig, globals.gpg_profile.sign_key), ++ log.ErrorCode.unsigned_volume) ++ fileobj.addhook(check_signature) ++ ++ ++def verify(col_stats): ++ """ ++ Verify files, logging differences ++ ++ @type col_stats: CollectionStatus object ++ @param col_stats: collection status ++ ++ @rtype: void ++ @return: void ++ """ ++ global exit_val ++ collated = diffdir.collate2iters(restore_get_patched_rop_iter(col_stats), ++ globals.select) ++ diff_count = 0; total_count = 0 ++ for backup_ropath, current_path in collated: ++ if not backup_ropath: ++ backup_ropath = path.ROPath(current_path.index) ++ if not current_path: ++ current_path = path.ROPath(backup_ropath.index) ++ if not backup_ropath.compare_verbose(current_path): ++ diff_count += 1 ++ total_count += 1 ++ # Unfortunately, ngettext doesn't handle multiple number variables, so we ++ # split up the string. 
++ log.Notice(_("Verify complete: %s, %s.") % ++ (gettext.ngettext("%d file compared", ++ "%d files compared", total_count) % total_count, ++ gettext.ngettext("%d difference found", ++ "%d differences found", diff_count) % diff_count)) ++ if diff_count >= 1: ++ exit_val = 1 ++ ++ ++def cleanup(col_stats): ++ """ ++ Delete the extraneous files in the current backend ++ ++ @type col_stats: CollectionStatus object ++ @param col_stats: collection status ++ ++ @rtype: void ++ @return: void ++ """ ++ ext_local, ext_remote = col_stats.get_extraneous(globals.extra_clean) ++ extraneous = ext_local + ext_remote ++ if not extraneous: ++ log.Warn(_("No extraneous files found, nothing deleted in cleanup.")) ++ return ++ ++ filestr = "\n".join(extraneous) ++ if globals.force: ++ log.Notice(gettext.ngettext("Deleting this file from backend:", ++ "Deleting these files from backend:", ++ len(extraneous)) ++ + "\n" + filestr) ++ if not globals.dry_run: ++ col_stats.backend.delete(ext_remote) ++ for fn in ext_local: ++ try: ++ globals.archive_dir.append(fn).delete() ++ except Exception: ++ pass ++ else: ++ log.Notice(gettext.ngettext("Found the following file to delete:", ++ "Found the following files to delete:", ++ len(extraneous)) ++ + "\n" + filestr + "\n" ++ + _("Run duplicity again with the --force option to actually delete.")) ++ ++ ++def remove_all_but_n_full(col_stats): ++ """ ++ Remove backup files older than the last n full backups. ++ ++ @type col_stats: CollectionStatus object ++ @param col_stats: collection status ++ ++ @rtype: void ++ @return: void ++ """ ++ assert globals.keep_chains is not None ++ ++ globals.remove_time = col_stats.get_nth_last_full_backup_time(globals.keep_chains) ++ ++ remove_old(col_stats) ++ ++ ++def remove_old(col_stats): ++ """ ++ Remove backup files older than globals.remove_time from backend ++ ++ @type col_stats: CollectionStatus object ++ @param col_stats: collection status ++ ++ @rtype: void ++ @return: void ++ """ ++ assert globals.remove_time is not None ++ def set_times_str(setlist): ++ """Return string listing times of sets in setlist""" ++ return "\n".join(map(lambda s: dup_time.timetopretty(s.get_time()), ++ setlist)) ++ ++ req_list = col_stats.get_older_than_required(globals.remove_time) ++ if req_list: ++ log.Warn("%s\n%s\n%s" % ++ (_("There are backup set(s) at time(s):"), ++ set_times_str(req_list), ++ _("Which can't be deleted because newer sets depend on them."))) ++ ++ if (col_stats.matched_chain_pair and ++ col_stats.matched_chain_pair[1].end_time < globals.remove_time): ++ log.Warn(_("Current active backup chain is older than specified time. " ++ "However, it will not be deleted. 
++                   "manually purge the repository."))
++
++    setlist = col_stats.get_older_than(globals.remove_time)
++    if not setlist:
++        log.Notice(_("No old backup sets found, nothing deleted."))
++        return
++    if globals.force:
++        log.Notice(gettext.ngettext("Deleting backup set at time:",
++                                    "Deleting backup sets at times:",
++                                    len(setlist)) +
++                   "\n" + set_times_str(setlist))
++        setlist.reverse() # save oldest for last
++        for set in setlist:
++            # in remove_all_inc_of_but_n_full_mode, remove only incrementals, not fulls
++            if globals.dry_run:
++                log.Notice("(dry-run) Would delete set " + set.type + " " + dup_time.timetopretty(set.get_time()))
++            else:
++                if globals.remove_all_inc_of_but_n_full_mode and set.type != "inc":
++                    log.Notice("Not deleting set " + set.type + " " + dup_time.timetopretty(set.get_time()))
++                else:
++                    log.Notice("Deleting set " + set.type + " " + dup_time.timetopretty(set.get_time()))
++                    set.delete()
++        col_stats.set_values(sig_chain_warning=None)
++
++        # force a cleanup operation to get rid of unnecessary old cruft
++        # we said we want to remove them! didn't we, huh?
++        # bad duplicity, bad doggy!
++        # note: in the long run backing out changeset 616 might be
++        # better, but for now this will ease the pain.
++        globals.extra_clean = True
++        cleanup(col_stats)
++    else:
++        log.Notice(gettext.ngettext("Found old backup set at the following time:",
++                                    "Found old backup sets at the following times:",
++                                    len(setlist)) +
++                   "\n" + set_times_str(setlist) + "\n" +
++                   _("Rerun command with --force option to actually delete."))
++        # see above for rationale.
++        # this here is to print a list of to-be-removed files (--force is off)
++        globals.extra_clean = True
++        cleanup(col_stats)
++
++
++def sync_archive(decrypt):
++    """
++    Synchronize local archive manifest file and sig chains to remote archives.
++    Copy missing files from remote to local as needed to make sure the local
++    archive is synchronized to remote storage.
++
++    @type decrypt: boolean
++    @param decrypt: if true, the passphrase may be requested in order to
++                    decrypt encrypted remote metadata
++
++    @rtype: void
++    @return: void
++    """
++    suffixes = [".g", ".gpg", ".z", ".gz", ".part"]
++
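++    # (Illustrative note: the suffixes above are the encrypted (".gpg"/".g"),
++    # gzipped (".gz"/".z") and partially transferred (".part") forms of
++    # duplicity metadata files.  os.path.splitext() strips only the last
++    # extension, e.g. "x.manifest.gpg" -> ("x.manifest", ".gpg"), so one
++    # known suffix is peeled off to obtain the base name used for matching
++    # local and remote copies below.)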
++    def get_metafiles(filelist):
++        """
++        Return metafiles of interest from the file list.
++        Files of interest are:
++            sigtar - signature files
++            manifest - manifest files
++            duplicity partial versions of the above
++        Files excluded are:
++            non-duplicity files
++
++        @rtype: tuple
++        @return: (metafiles dict, partials dict, need_passphrase flag)
++        """
++        metafiles = {}
++        partials = {}
++        need_passphrase = False
++        for fn in filelist:
++            pr = file_naming.parse(fn)
++            if not pr:
++                continue
++            if pr.encrypted:
++                need_passphrase = True
++            if pr.type in ["full-sig", "new-sig"] or pr.manifest:
++                base, ext = os.path.splitext(fn)
++                if ext not in suffixes:
++                    base = fn
++                if pr.partial:
++                    partials[base] = fn
++                else:
++                    metafiles[base] = fn
++        return metafiles, partials, need_passphrase
++
++    def copy_raw(src_iter, filename):
++        """
++        Copy data from src_iter to the file at filename
++        """
++        block_size = 128 * 1024
++        file = open(filename, "wb")
++        while True:
++            try:
++                data = src_iter.next(block_size).data
++            except StopIteration:
++                break
++            file.write(data)
++        file.close()
++
++    def resolve_basename(fn):
++        """
++        @return: (parsedresult, local_name, remote_name)
++        """
++        pr = file_naming.parse(fn)
++
++        base, ext = os.path.splitext(fn)
++        if ext not in suffixes:
++            base = fn
++
++        suffix = file_naming.get_suffix(False, not pr.manifest)
++        loc_name = base + suffix
++
++        return (pr, loc_name, fn)
++
++    def remove_local(fn):
++        del_name = globals.archive_dir.append(fn).name
++
++        log.Notice(_("Deleting local %s (not authoritative at backend).") % del_name)
++        try:
++            util.ignore_missing(os.unlink, del_name)
++        except Exception, e:
++            log.Warn(_("Unable to delete %s: %s") % (del_name, str(e)))
++
++    def copy_to_local(fn):
++        """
++        Copy remote file fn to local cache.
++        """
++        class Block:
++            """
++            Data block to return from SrcIter
++            """
++            def __init__(self, data):
++                self.data = data
++
++        class SrcIter:
++            """
++            Iterate over source and return Block of data.
++            """
++            def __init__(self, fileobj):
++                self.fileobj = fileobj
++
++            def next(self, size):
++                try:
++                    res = Block(self.fileobj.read(size))
++                except Exception:
++                    if hasattr(self.fileobj, 'name'):
++                        name = self.fileobj.name
++                    else:
++                        name = None
++                    log.FatalError(_("Failed to read %s: %s") %
++                                   (name, sys.exc_info()),
++                                   log.ErrorCode.generic)
++                if not res.data:
++                    self.fileobj.close()
++                    raise StopIteration
++                return res
++
++            def get_footer(self):
++                return ""
++
++        log.Notice(_("Copying %s to local cache.") % fn)
++
++        pr, loc_name, rem_name = resolve_basename(fn)
++
++        fileobj = globals.backend.get_fileobj_read(fn)
++        src_iter = SrcIter(fileobj)
++        tdp = dup_temp.new_tempduppath(file_naming.parse(loc_name))
++        if pr.manifest:
++            copy_raw(src_iter, tdp.name)
++        else:
++            gpg.GzipWriteFile(src_iter, tdp.name, size=sys.maxint)
++        tdp.setdata()
++        tdp.move(globals.archive_dir.append(loc_name))
++
++    # get remote metafile list
++    remlist = globals.backend.list()
++    remote_metafiles, ignored, rem_needpass = get_metafiles(remlist)
++
++    # get local metafile list
++    loclist = globals.archive_dir.listdir()
++    local_metafiles, local_partials, loc_needpass = get_metafiles(loclist)
++
++    # we have the list of metafiles on both sides. remote is always
++    # authoritative. figure out which are local spurious (should not
++    # be there) and missing (should be there but are not).
++    local_keys = local_metafiles.keys()
++    remote_keys = remote_metafiles.keys()
++
++    local_missing = []
++    local_spurious = []
++
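++    # (Illustrative note: with remote as the authority, the two loops below
++    # compute a set difference in each direction -- keys only present
++    # remotely become local_missing, keys only present locally become
++    # local_spurious.)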
++    for key in remote_keys:
++        # If we lost our cache, re-get the remote file.  But don't do it if we
++        # already have a local partial.  The local partial will already be
++        # complete in this case (seems we got interrupted before we could move
++        # it to its final location).
++        if key not in local_keys and key not in local_partials:
++            local_missing.append(remote_metafiles[key])
++
++    for key in local_keys:
++        # If we have a file locally that is unnecessary, delete it.  Also
++        # delete final versions of partial files because if we have both, it
++        # means the write of the final version got interrupted.
++        if key not in remote_keys or key in local_partials:
++            local_spurious.append(local_metafiles[key])
++
++    # finally finish the process
++    if not local_missing and not local_spurious:
++        log.Notice(_("Local and Remote metadata are synchronized, no sync needed."))
++    else:
++        local_missing.sort()
++        local_spurious.sort()
++        if not globals.dry_run:
++            log.Notice(_("Synchronizing remote metadata to local cache..."))
++            if local_missing and (rem_needpass or loc_needpass):
++                if decrypt:
++                    # password for the --encrypt-key
++                    globals.gpg_profile.passphrase = get_passphrase(1, "sync")
++                else:
++                    local_missing = [] # don't download if we can't decrypt
++            for fn in local_spurious:
++                remove_local(fn)
++            for fn in local_missing:
++                copy_to_local(fn)
++        else:
++            if local_missing:
++                log.Notice(_("Sync would copy the following from remote to local:")
++                           + "\n" + "\n".join(local_missing))
++            if local_spurious:
++                log.Notice(_("Sync would remove the following spurious local files:")
++                           + "\n" + "\n".join(local_spurious))
++
++
++def check_last_manifest(col_stats):
++    """
++    Check consistency and hostname/directory of last manifest
++
++    @type col_stats: CollectionStatus object
++    @param col_stats: collection status
++
++    @rtype: void
++    @return: void
++    """
++    if not col_stats.all_backup_chains:
++        return
++    last_backup_set = col_stats.all_backup_chains[-1].get_last()
++    last_backup_set.check_manifests()
++
++
++def check_resources(action):
++    """
++    Check for sufficient resources:
++      - temp space for volume build
++      - enough max open files
++    Emits a fatal error if resources are insufficient to run.
++
++    @type action: string
++    @param action: action in progress
++
++    @rtype: void
++    @return: void
++    """
++    if action in ["full", "inc", "restore"]:
++        # Make sure we have enough resources to run
++        # First check disk space in temp area.
++        tempfile, tempname = tempdir.default().mkstemp()
++        os.close(tempfile)
++        # strip off the temp dir and file
++        tempfs = os.path.sep.join(tempname.split(os.path.sep)[:-2])
++        try:
++            stats = os.statvfs(tempfs)
++        except Exception:
++            log.FatalError(_("Unable to get free space on temp."),
++                           log.ErrorCode.get_freespace_failed)
++        # Calculate space we need for at least 2 volumes of full or inc
++        # plus about 30% of one volume for the signature files.
++        freespace = stats[statvfs.F_FRSIZE] * stats[statvfs.F_BAVAIL]
++        needspace = (((globals.async_concurrency + 1) * globals.volsize)
++                     + int(0.30 * globals.volsize))
++        if freespace < needspace:
++            log.FatalError(_("Temp space has %d available, backup needs approx %d.") %
++                           (freespace, needspace), log.ErrorCode.not_enough_freespace)
++        else:
++            log.Info(_("Temp has %d available, backup will use approx %d.") %
++                     (freespace, needspace))
++
++        # Some environments like Cygwin run with an artificially
++        # low value for max open files.  Check for safe number.
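++        # (Illustrative note: resource.getrlimit(resource.RLIMIT_NOFILE)
++        # returns a (soft, hard) pair; unlimited values are reported as
++        # RLIM_INFINITY, which is -1 on most platforms, hence the
++        # "l > -1" filter below.)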
++        try:
++            soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
++        except resource.error:
++            log.FatalError(_("Unable to get max open files."),
++                           log.ErrorCode.get_ulimit_failed)
++        maxopen = min([l for l in (soft, hard) if l > -1])
++        if maxopen < 1024:
++            log.FatalError(_("Max open files of %s is too low, should be >= 1024.\n"
++                             "Use 'ulimit -n 1024' or higher to correct.\n") % (maxopen,),
++                           log.ErrorCode.maxopen_too_low)
++
++def log_startup_parms(verbosity=log.INFO):
++    """
++    log Python, duplicity, and system versions
++    """
++    log.Log('=' * 80, verbosity)
++    log.Log("duplicity 0.6.15 (August 19, 2011)", verbosity)
++    log.Log("Args: %s" % (' '.join(sys.argv),), verbosity)
++    log.Log(' '.join(platform.uname()), verbosity)
++    log.Log("%s %s" % (sys.executable or sys.platform, sys.version), verbosity)
++    log.Log('=' * 80, verbosity)
++
++
++class Restart:
++    """
++    Class to aid in restart of inc or full backup.
++    Instance in globals.restart if restart in progress.
++    """
++    def __init__(self, last_backup):
++        self.type = None
++        self.time = None # set by setParms() for full backups
++        self.start_time = None
++        self.end_time = None
++        self.start_vol = None
++        self.last_index = None
++        self.last_block = None
++        self.last_backup = last_backup
++        self.setParms(last_backup)
++
++    def setParms(self, last_backup):
++        if last_backup.time:
++            self.type = "full"
++            self.time = last_backup.time
++        else:
++            self.type = "inc"
++            self.end_time = last_backup.end_time
++            self.start_time = last_backup.start_time
++        # We start one volume back in case we weren't able to finish writing
++        # the most recent block.  Actually checking if we did (via hash) would
++        # involve downloading the block.  Easier to just redo one block.
++        self.start_vol = max(len(last_backup) - 1, 0)
++
++    def checkManifest(self, mf):
++        mf_len = len(mf.volume_info_dict)
++        if (mf_len != self.start_vol) or not (mf_len and self.start_vol):
++            if self.start_vol == 0:
++                # upload of 1st vol failed, clean and restart
++                log.Notice(_("RESTART: The first volume failed to upload before termination.\n"
++                             "         Restart is impossible...starting backup from beginning."))
++                self.last_backup.delete()
++                os.execve(sys.argv[0], sys.argv, os.environ)
++            elif mf_len - self.start_vol > 0:
++                # upload of N vols failed, fix manifest and restart
++                log.Notice(_("RESTART: Volumes %d to %d failed to upload before termination.\n"
++                             "         Restarting backup at volume %d.") %
++                           (self.start_vol + 1, mf_len, self.start_vol + 1))
++                for vol in range(self.start_vol + 1, mf_len + 1):
++                    mf.del_volume_info(vol)
++            else:
++                # this is an 'impossible' state, remove last partial and restart
++                log.Notice(_("RESTART: Impossible backup state: manifest has %d vols, remote has %d vols.\n"
++                             "         Restart is impossible ... duplicity will clean off the last partial\n"
++                             "         backup then restart the backup from the beginning.") %
++                           (mf_len, self.start_vol))
++                self.last_backup.delete()
++                # argv passed to execve must include argv[0], as it does above
++                os.execve(sys.argv[0], sys.argv, os.environ)
++
++    def setLastSaved(self, mf):
++        vi = mf.volume_info_dict[self.start_vol]
++        self.last_index = vi.end_index
++        self.last_block = vi.end_block or 0
++
++
++def main():
++    """
++    Start/end here
++    """
++    # if python is run setuid, only the effective uid/gid are changed,
++    # so make the real uid/gid match the effective ones
++    if os.geteuid() == 0:
++        # make sure uid/gid match euid/egid
++        os.setuid(os.geteuid())
++        os.setgid(os.getegid())
++
++    # set the current time strings (make it available for command line processing)
++    dup_time.setcurtime()
++
++    # determine what action we're performing and process command line
++    action = commandline.ProcessCommandLine(sys.argv[1:])
++
++    # set the current time strings again now that we have time separator
++    if globals.current_time:
++        dup_time.setcurtime(globals.current_time)
++    else:
++        dup_time.setcurtime()
++
++    # log some debugging status info
++    log_startup_parms(log.INFO)
++
++    # check for disk space and available file handles
++    check_resources(action)
++
++    # check archive sync with remote, fix if needed
++    decrypt = action not in ["collection-status"]
++    sync_archive(decrypt)
++
++    # get current collection status
++    col_stats = collections.CollectionsStatus(globals.backend,
++                                              globals.archive_dir).set_values()
++
++    while True:
++        # if we have to clean up the last partial, then col_stats are invalidated
++        # and we have to start the process all over again until clean.
++        if action in ["full", "inc", "cleanup"]:
++            last_full_chain = col_stats.get_last_backup_chain()
++            if not last_full_chain:
++                break
++            last_backup = last_full_chain.get_last()
++            if last_backup.partial:
++                if action in ["full", "inc"]:
++                    # set restart parms from last_backup info
++                    globals.restart = Restart(last_backup)
++                    # (possibly) reset action
++                    action = globals.restart.type
++                    # reset the time strings
++                    if action == "full":
++                        dup_time.setcurtime(globals.restart.time)
++                    else:
++                        dup_time.setcurtime(globals.restart.end_time)
++                        dup_time.setprevtime(globals.restart.start_time)
++                    # log it -- main restart heavy lifting is done in write_multivol
++                    log.Notice(_("Last %s backup left a partial set, restarting.") % action)
++                    break
++                else:
++                    # remove last partial backup and get new collection status
++                    log.Notice(_("Cleaning up previous partial %s backup set, restarting.") % action)
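++                    # (Illustrative note: deleting the partial set below
++                    # invalidates col_stats, so it is rebuilt and the
++                    # surrounding while-loop runs once more, until no
++                    # partial set remains.)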
++                    last_backup.delete()
++                    col_stats = collections.CollectionsStatus(globals.backend,
++                                                              globals.archive_dir).set_values()
++                    continue
++            break
++        break
++
++    # OK, now we have a stable collection
++    last_full_time = col_stats.get_last_full_backup_time()
++    if last_full_time > 0:
++        log.Notice(_("Last full backup date:") + " " + dup_time.timetopretty(last_full_time))
++    else:
++        log.Notice(_("Last full backup date: none"))
++    if not globals.restart and action == "inc" and last_full_time < globals.full_force_time:
++        log.Notice(_("Last full backup is too old, forcing full backup"))
++        action = "full"
++    log.PrintCollectionStatus(col_stats)
++
++    os.umask(077)
++
++    # full/inc only needs a passphrase for symmetric keys
++    if action not in ["full", "inc"] or not globals.gpg_profile.recipients:
++        # get the passphrase if we need to based on action/options
++        globals.gpg_profile.passphrase = get_passphrase(1, action)
++
++    if action == "restore":
++        restore(col_stats)
++    elif action == "verify":
++        verify(col_stats)
++    elif action == "list-current":
++        list_current(col_stats)
++    elif action == "collection-status":
++        log.PrintCollectionStatus(col_stats, True)
++    elif action == "cleanup":
++        cleanup(col_stats)
++    elif action == "remove-old":
++        remove_old(col_stats)
++    elif action in ["remove-all-but-n-full", "remove-all-inc-of-but-n-full"]:
++        remove_all_but_n_full(col_stats)
++    elif action == "sync":
++        sync_archive(True)
++    else:
++        assert action == "inc" or action == "full", action
++        # the passphrase for full and inc is used by --sign-key
++        # the sign key can have a different passphrase than the encrypt
++        # key, therefore request a passphrase
++        if globals.gpg_profile.sign_key:
++            globals.gpg_profile.signing_passphrase = get_passphrase(3, action, True)
++
++        # if there are no recipients (no --encrypt-key), it must be a
++        # symmetric key.  Therefore, confirm the passphrase
++        if not globals.gpg_profile.recipients:
++            globals.gpg_profile.passphrase = get_passphrase(2, action)
++            # a limitation in the GPG implementation means it cannot read
++            # two different passphrases in one run, which affects symmetric
++            # encryption combined with signing.  The sign key may still have
++            # an empty passphrase alongside a non-empty symmetric passphrase.
++            if (globals.gpg_profile.signing_passphrase and
++                globals.gpg_profile.passphrase != globals.gpg_profile.signing_passphrase):
++                log.FatalError("When using symmetric encryption, the signing "
++                               "passphrase must equal the encryption passphrase.",
++                               log.ErrorCode.user_error)
++
++        if action == "full":
++            full_backup(col_stats)
++        else: # attempt incremental
++            sig_chain = check_sig_chain(col_stats)
++            # action == "inc" was requested, but no full backup is available
++            if not sig_chain:
++                full_backup(col_stats)
++            else:
++                if not globals.restart:
++                    # only ask for a passphrase if there was a previous backup
++                    if col_stats.all_backup_chains:
++                        globals.gpg_profile.passphrase = get_passphrase(1, action)
++                        check_last_manifest(col_stats) # not needed for full backup
++                incremental_backup(sig_chain)
++    globals.backend.close()
++    log.shutdown()
++    if exit_val is not None:
++        sys.exit(exit_val)
++
++
++def with_tempdir(fn):
++    """
++    Execute function and guarantee cleanup of tempdir is called
++
++    @type fn: callable function
++    @param fn: function to execute
++
++    @return: void
++    @rtype: void
++    """
++    try:
++        fn()
++    finally:
++        tempdir.default().cleanup()
++
++
++if __name__ == "__main__":
++    # The following is for starting remote debugging in Eclipse with Pydev.
++    # Adjust the path to your location and version of Eclipse and Pydev.  Comment out
++    # to run normally, or this process will hang at pydevd.settrace() waiting for the
++    # remote debugger to start.
++#    pysrc = "/opt/Aptana Studio 2/plugins/org.python.pydev.debug_2.1.0.2011052613/pysrc/"
++#    sys.path.append(pysrc)
++#    import pydevd #@UnresolvedImport
++#    pydevd.settrace()
++    # end remote debugger startup
++
++    try:
++        with_tempdir(main)
++
++    # Don't move this lower.  In order to get an exit
++    # status out of the system, you have to call the
++    # sys.exit() function.  Python handles this by
++    # raising the SystemExit exception.  Cleanup code
++    # goes here, if needed.
++    except SystemExit, e:
++        # No traceback, just get out
++        sys.exit(e)
++
++    except KeyboardInterrupt, e:
++        # No traceback, just get out
++        log.Info(_("INT intercepted...exiting."))
++        sys.exit(4)
++
++    except gpg.GPGError, e:
++        # For gpg errors, don't show an ugly stack trace by
++        # default.  But do with sufficient verbosity.
++        log.Info(_("GPG error detail: %s")
++                 % (''.join(traceback.format_exception(*sys.exc_info()))))
++        log.FatalError("%s: %s" % (e.__class__.__name__, e.args[0]),
++                       log.ErrorCode.gpg_failed,
++                       e.__class__.__name__)
++
++    except duplicity.errors.UserError, e:
++        # For user errors, don't show an ugly stack trace by
++        # default.  But do with sufficient verbosity.
++        log.Info(_("User error detail: %s")
++                 % (''.join(traceback.format_exception(*sys.exc_info()))))
++        log.FatalError("%s: %s" % (e.__class__.__name__, str(e)),
++                       log.ErrorCode.user_error,
++                       e.__class__.__name__)
++
++    except duplicity.errors.BackendException, e:
++        # For backend errors, don't show an ugly stack trace by
++        # default.  But do with sufficient verbosity.
++        log.Info(_("Backend error detail: %s")
++                 % (''.join(traceback.format_exception(*sys.exc_info()))))
++        log.FatalError("%s: %s" % (e.__class__.__name__, str(e)),
++                       log.ErrorCode.user_error,
++                       e.__class__.__name__)
++
++    except Exception, e:
++        if "Forced assertion for testing" in str(e):
++            log.FatalError("%s: %s" % (e.__class__.__name__, str(e)),
++                           log.ErrorCode.exception,
++                           e.__class__.__name__)
++        else:
++            # Traceback and that mess
++            log.FatalError("%s" % (''.join(traceback.format_exception(*sys.exc_info()))),
++                           log.ErrorCode.exception,
++                           e.__class__.__name__)
--- duplicity-0.6.15.orig/debian/patches/05upstreamgpgintf.dpatch
+++ duplicity-0.6.15/debian/patches/05upstreamgpgintf.dpatch
@@ -0,0 +1,19 @@
+#! /bin/sh /usr/share/dpatch/dpatch-run
+## 05upstreamgpgintf.dpatch by
+##
+## All lines beginning with `## DP:' are a description of the patch.
+## DP: ensure that the public gnupginterface module is imported
+
+@DPATCH@
+diff -urNad '--exclude=CVS' '--exclude=.svn' '--exclude=.git' '--exclude=.arch' '--exclude=.hg' '--exclude=_darcs' '--exclude=.bzr' duplicity-0.6.12~/src/gpg.py duplicity-0.6.12/src/gpg.py
+--- duplicity-0.6.12~/src/gpg.py	2011-03-09 05:54:31.000000000 +1000
++++ duplicity-0.6.12/src/gpg.py	2011-03-13 00:31:52.648673991 +1000
+@@ -27,7 +27,7 @@
+ 
+ from duplicity import misc
+ from duplicity import globals
+-from duplicity import GnuPGInterface
++import GnuPGInterface
+ 
+ try:
+     from hashlib import sha1
--- duplicity-0.6.15.orig/debian/patches/03forcecleanup.dpatch
+++ duplicity-0.6.15/debian/patches/03forcecleanup.dpatch
@@ -0,0 +1,75 @@
+#! /bin/sh /usr/share/dpatch/dpatch-run
+## 03forcecleanup.dpatch by
+##
+## All lines beginning with `## DP:' are a description of the patch.
+## DP: fix for #572792: we force an extra-clean cleanup for all remove* ops
+
+@DPATCH@
+diff -urNad duplicity-0.6.12~/duplicity duplicity-0.6.12/duplicity
+--- duplicity-0.6.12~/duplicity	2011-03-12 19:45:46.000000000 +1000
++++ duplicity-0.6.12/duplicity	2011-03-12 19:50:23.011966125 +1000
+@@ -774,12 +774,24 @@
+             log.Notice("Deleting set " + set.type + " " + dup_time.timetopretty(set.get_time()))
+             set.delete()
+         col_stats.set_values(sig_chain_warning=None)
++
++        # force a cleanup operation to get rid of unnecessary old cruft
++        # we said we want to remove them! didn't we, huh?
++        # bad duplicity, bad doggy!
++        # note: in the long run backing out changeset 616 might be
++        # better, but for now this will ease the pain.
++        globals.extra_clean = True
++        cleanup(col_stats)
+     else:
+         log.Notice(gettext.ngettext("Found old backup set at the following time:",
+                                     "Found old backup sets at the following times:",
+                                     len(setlist)) +
+                    "\n" + set_times_str(setlist) + "\n" +
+                    _("Rerun command with --force option to actually delete."))
++        # see above for rationale.
++        # this here is to print a list of to-be-removed files (--force is off)
++        globals.extra_clean = True
++        cleanup(col_stats)
+ 
+ 
+ def sync_archive():
+diff -urNad duplicity-0.6.12~/duplicity.1 duplicity-0.6.12/duplicity.1
+--- duplicity-0.6.12~/duplicity.1	2011-03-12 19:45:46.000000000 +1000
++++ duplicity-0.6.12/duplicity.1	2011-03-12 19:45:52.722966225 +1000
+@@ -178,6 +178,14 @@
+ the other hand if the archive has been deleted or corrupted, this
+ command may not detect it.
+ 
++.B Note:
++the Debian version of duplicity automatically runs a
++cleanup --extra-clean whenever old backup sets are removed (i.e. if one
++of the remove commands is run with the --force option present and
++if something removable is found). This is done to
++limit the amount of outdated material that otherwise accumulates
++in the archive dir.
++
+ .TP
+ .BI "remove-older-than " time
+ Delete all backup sets older than the given time.  Old backup sets
+@@ -223,6 +231,9 @@
+ .I --force
+ will be needed to delete the files rather than just list them.
+ 
++The note regarding automatic cleanups above
++also applies to remove-all-but-n-full.
++
+ .TP
+ .B verify
+ Enter verify mode instead of restore.  If the --file-to-restore option
+diff -urNad duplicity-0.6.12~/src/collections.py duplicity-0.6.12/src/collections.py
+--- duplicity-0.6.12~/src/collections.py	2011-03-12 19:45:52.673966360 +1000
++++ duplicity-0.6.12/src/collections.py	2011-03-12 19:45:52.722966225 +1000
+@@ -991,8 +991,6 @@
+         if self.matched_chain_pair:
+             matched_sig_chain = self.matched_chain_pair[0]
+             for sig_chain in self.all_sig_chains:
+-                print sig_chain.start_time, matched_sig_chain.start_time,
+-                print sig_chain.end_time, matched_sig_chain.end_time
+                 if (sig_chain.start_time == matched_sig_chain.start_time and
+                     sig_chain.end_time == matched_sig_chain.end_time):
+                     old_sig_chains.remove(sig_chain)
--- duplicity-0.6.15.orig/debian/patches/01pexpect.dpatch
+++ duplicity-0.6.15/debian/patches/01pexpect.dpatch
@@ -0,0 +1,19 @@
+#! /bin/sh /usr/share/dpatch/dpatch-run
+## 01pexpect.dpatch by
+##
+## All lines beginning with `## DP:' are a description of the patch.
+## DP: import global pexpect module, not a local one (#556095)
+
+@DPATCH@
+diff -urNad duplicity-0.6.08b~/src/backends/sshbackend.py duplicity-0.6.08b/src/backends/sshbackend.py
+--- duplicity-0.6.08b~/src/backends/sshbackend.py	2010-03-12 11:39:06.000000000 +1000
++++ duplicity-0.6.08b/src/backends/sshbackend.py	2010-03-15 20:39:42.163263716 +1000
+@@ -31,7 +31,7 @@
+ import duplicity.backend
+ from duplicity import globals
+ from duplicity import log
+-from duplicity import pexpect
++import pexpect
+ from duplicity.errors import * #@UnusedWildImport
+ 
+ class SSHBackend(duplicity.backend.Backend):
--- duplicity-0.6.15.orig/debian/patches/02cachedesync.dpatch
+++ duplicity-0.6.15/debian/patches/02cachedesync.dpatch
@@ -0,0 +1,26 @@
+#! /bin/sh /usr/share/dpatch/dpatch-run
+## 02cachedesync.dpatch by
+##
+## All lines beginning with `## DP:' are a description of the patch.
+## DP: fix for (upstream)#497243: remove causes cache desynchronization
+## DP: backed out the attempted fix, as it only dealt with the symptoms
+## DP: and not the root cause (which is the ill-advised changeset 616)
+
+@DPATCH@
+diff -urNad duplicity-0.6.08b~/src/collections.py duplicity-0.6.08b/src/collections.py
+--- duplicity-0.6.08b~/src/collections.py	2010-03-12 11:39:06.000000000 +1000
++++ duplicity-0.6.08b/src/collections.py	2010-03-15 20:41:30.904266406 +1000
+@@ -145,12 +145,7 @@
+             if (pr
+                 and pr.time == self.time
+                 and pr.start_time == self.start_time
+-                and pr.end_time == self.end_time
+-                and pr.type != "new-sig" ):
+-                # do not remove new sigs from the cache:
+-                # they aren't removed from the remote archive,
+-                # and subsequent backups will have to resync
+-                # which is bad if running non-interactive with encrypt-key
++                and pr.end_time == self.end_time):
+                 try:
+                     globals.archive_dir.append(lfn).delete()
+                 except Exception:
--- duplicity-0.6.15.orig/debian/patches/08_check_volumes.dpatch
+++ duplicity-0.6.15/debian/patches/08_check_volumes.dpatch
@@ -0,0 +1,324 @@
+#! /bin/sh /usr/share/dpatch/dpatch-run
+## 08_check_volumes.dpatch by Michael Terry
+##
+## All lines beginning with `## DP:' are a description of the patch.
+## DP: verify the size of uploaded volumes to detect corrupted uploads
+
+@DPATCH@
+diff -urNad '--exclude=CVS' '--exclude=.svn' '--exclude=.git' '--exclude=.arch' '--exclude=.hg' '--exclude=_darcs' '--exclude=.bzr' duplicity-0.6.15~/duplicity duplicity-0.6.15/duplicity
+--- duplicity-0.6.15~/duplicity	2011-09-08 09:07:21.981181450 -0400
++++ duplicity-0.6.15/duplicity	2011-09-08 09:10:06.997183197 -0400
+@@ -269,14 +269,28 @@
+         end_block -= 1
+     return start_index, start_block, end_index, end_block
+ 
+-    def put(tdp, dest_filename):
++    def validate_block(orig_size, dest_filename):
++        info = backend.query_info([dest_filename])[dest_filename]
++        if 'size' not in info:
++            return # backend didn't know how to query size
++        size = info['size']
++        if size is None:
++            return # error querying file
++        if size != orig_size:
++            code_extra = "%s %d %d" % (util.escape(dest_filename), orig_size, size)
++            log.FatalError(_("File %s was corrupted during upload.") % dest_filename,
++                           log.ErrorCode.volume_wrong_size, code_extra)
++
++    def put(tdp, dest_filename, vol_num):
+         """
+         Retrieve file size *before* calling backend.put(), which may (at least
+         in case of the localbackend) rename the temporary file to the target
+         instead of copying.
+         """
+         putsize = tdp.getsize()
+-        backend.put(tdp, dest_filename)
++        if globals.skip_volume != vol_num: # for testing purposes only
++            backend.put(tdp, dest_filename)
++        validate_block(putsize, dest_filename)
+         if tdp.stat:
+             tdp.delete()
+         return putsize
+@@ -350,8 +364,8 @@
+             sig_outfp.flush()
+             man_outfp.flush()
+ 
+-            async_waiters.append(io_scheduler.schedule_task(lambda tdp, dest_filename: put(tdp, dest_filename),
+-                                                            (tdp, dest_filename)))
++            async_waiters.append(io_scheduler.schedule_task(lambda tdp, dest_filename, vol_num: put(tdp, dest_filename, vol_num),
++                                                            (tdp, dest_filename, vol_num)))
+ 
+             # Log human-readable version as well as raw numbers for machine consumers
+             log.Progress('Processed volume %d' % vol_num, diffdir.stats.SourceFileSize)
+diff -urNad '--exclude=CVS' '--exclude=.svn' '--exclude=.git' '--exclude=.arch' '--exclude=.hg' '--exclude=_darcs' '--exclude=.bzr' duplicity-0.6.15~/src/backend.py duplicity-0.6.15/src/backend.py
+--- duplicity-0.6.15~/src/backend.py	2011-08-19 14:26:58.000000000 -0400
++++ duplicity-0.6.15/src/backend.py	2011-09-08 09:10:06.997183197 -0400
+@@ -361,6 +361,35 @@
+         """
+         raise NotImplementedError()
+ 
++    # Should never cause FatalError.
++    # Returns a dictionary of dictionaries.  The outer dictionary maps
++    # filenames to metadata dictionaries.  Supported metadata are:
++    #
++    # 'size': if >= 0, size of file
++    #         if -1, file is not found
++    #         if None, error querying file
++    #
++    # Returned dictionary is guaranteed to contain a metadata dictionary for
++    # each filename, but not all metadata are guaranteed to be present.
++    def query_info(self, filename_list, raise_errors=True):
++        """
++        Return metadata about each filename in filename_list
++        """
++        info = {}
++        if hasattr(self, '_query_list_info'):
++            info = self._query_list_info(filename_list)
++        elif hasattr(self, '_query_file_info'):
++            for filename in filename_list:
++                info[filename] = self._query_file_info(filename)
++
++        # Fill out any missing entries (may happen if backend has no support
++        # or its query_list support is lazy)
++        for filename in filename_list:
++            if filename not in info:
++                info[filename] = {}
++
++        return info
++
+     """ use getpass by default, inherited backends may overwrite this behaviour """
+     use_getpass = True
+ 
+diff -urNad '--exclude=CVS' '--exclude=.svn' '--exclude=.git' '--exclude=.arch' '--exclude=.hg' '--exclude=_darcs' '--exclude=.bzr' duplicity-0.6.15~/src/backends/botobackend.py duplicity-0.6.15/src/backends/botobackend.py
+--- duplicity-0.6.15~/src/backends/botobackend.py	2011-08-19 14:26:58.000000000 -0400
++++ duplicity-0.6.15/src/backends/botobackend.py	2011-09-08 09:10:06.997183197 -0400
+@@ -26,6 +26,7 @@
+ from duplicity import log
+ from duplicity.errors import * #@UnusedWildImport
+ from duplicity.util import exception_traceback
++from duplicity.backend import retry
+ 
+ class BotoBackend(duplicity.backend.Backend):
+     """
+@@ -294,6 +295,24 @@
+         self.bucket.delete_key(self.key_prefix + filename)
+         log.Debug("Deleted %s/%s" % (self.straight_url, filename))
+ 
++    @retry
++    def _query_file_info(self, filename, raise_errors=False):
++        try:
++            key = self.bucket.lookup(self.key_prefix + filename)
++            if key is None:
++                return {'size': -1}
++            return {'size': key.size}
++        except Exception, e:
++            log.Warn("Query %s/%s failed: %s"
++                     "" % (self.straight_url,
++                           filename,
++                           str(e)))
++            self.resetConnection()
++            if raise_errors:
++                raise e
++            else:
++                return {'size': None}
++
+ duplicity.backend.register_backend("s3", BotoBackend)
+ duplicity.backend.register_backend("s3+http", BotoBackend)
+ 
+diff -urNad '--exclude=CVS' '--exclude=.svn' '--exclude=.git' '--exclude=.arch' '--exclude=.hg' '--exclude=_darcs' '--exclude=.bzr' duplicity-0.6.15~/src/backends/cloudfilesbackend.py duplicity-0.6.15/src/backends/cloudfilesbackend.py
+--- duplicity-0.6.15~/src/backends/cloudfilesbackend.py	2011-09-08 09:07:22.005181450 -0400
++++ duplicity-0.6.15/src/backends/cloudfilesbackend.py	2011-09-08 09:10:07.001183197 -0400
+@@ -26,6 +26,7 @@
+ from duplicity import log
+ from duplicity.errors import * #@UnusedWildImport
+ from duplicity.util import exception_traceback
++from duplicity.backend import retry
+ 
+ class CloudFilesBackend(duplicity.backend.Backend):
+     """
+@@ -146,4 +147,22 @@
+             self.container.delete_object(file)
+             log.Debug("Deleted '%s/%s'" % (self.container, file))
+ 
++    @retry
++    def _query_file_info(self, filename, raise_errors=False):
++        from cloudfiles.errors import NoSuchObject
++        try:
++            sobject = self.container.get_object(filename)
++            return {'size': sobject.size}
++        except NoSuchObject:
++            return {'size': -1}
++        except Exception, e:
++            log.Warn("Error querying '%s/%s': %s"
++                     "" % (self.container,
++                           filename,
++                           str(e)))
++            if raise_errors:
++                raise e
++            else:
++                return {'size': None}
++
+ duplicity.backend.register_backend("cf+http", CloudFilesBackend)
+diff -urNad '--exclude=CVS' '--exclude=.svn' '--exclude=.git' '--exclude=.arch' '--exclude=.hg' '--exclude=_darcs' '--exclude=.bzr' duplicity-0.6.15~/src/backends/giobackend.py duplicity-0.6.15/src/backends/giobackend.py
+--- duplicity-0.6.15~/src/backends/giobackend.py	2011-08-19 14:26:58.000000000 -0400
++++ duplicity-0.6.15/src/backends/giobackend.py	2011-09-08 09:10:07.001183197 -0400
+@@ -164,3 +164,20 @@
+             self.handle_error(raise_errors, e, 'delete',
+                               target_file.get_parse_name())
+             return
++
++    @retry
++    def _query_file_info(self, filename, raise_errors=False):
++        """Query attributes on filename"""
++        target_file = self.remote_file.get_child(filename)
++        attrs = gio.FILE_ATTRIBUTE_STANDARD_SIZE
++        try:
++            info = target_file.query_info(attrs, gio.FILE_QUERY_INFO_NONE)
++            return {'size': info.get_size()}
++        except Exception, e:
++            if isinstance(e, gio.Error):
++                if e.code == gio.ERROR_NOT_FOUND:
++                    return {'size': -1} # early exit, no need to retry
++            if raise_errors:
++                raise e
++            else:
++                return {'size': None}
+diff -urNad '--exclude=CVS' '--exclude=.svn' '--exclude=.git' '--exclude=.arch' '--exclude=.hg' '--exclude=_darcs' '--exclude=.bzr' duplicity-0.6.15~/src/backends/localbackend.py duplicity-0.6.15/src/backends/localbackend.py
+--- duplicity-0.6.15~/src/backends/localbackend.py	2011-08-19 14:26:58.000000000 -0400
++++ duplicity-0.6.15/src/backends/localbackend.py	2011-09-08 09:10:07.001183197 -0400
+@@ -57,7 +57,7 @@
+             code = log.ErrorCode.backend_no_space
+         extra = ' '.join([util.escape(x) for x in [file1, file2] if x])
+         extra = ' '.join([op, extra])
+-        if op != 'delete':
++        if op != 'delete' and op != 'query':
+             log.FatalError(str(e), code, extra)
+         else:
+             log.Warn(str(e), code, extra)
+@@ -110,5 +110,17 @@
+         except Exception, e:
+             self.handle_error(e, 'delete', self.remote_pathdir.append(filename).name)
+ 
++    def _query_file_info(self, filename):
++        """Query attributes on filename"""
++        try:
++            target_file = self.remote_pathdir.append(filename)
++            if not os.path.exists(target_file.name):
++                return {'size': -1}
++            target_file.setdata()
++            size = target_file.getsize()
++            return {'size': size}
++        except Exception, e:
++            self.handle_error(e, 'query', target_file.name)
++            return {'size': None}
+ 
+ duplicity.backend.register_backend("file", LocalBackend)
+diff -urNad '--exclude=CVS' '--exclude=.svn' '--exclude=.git' '--exclude=.arch' '--exclude=.hg' '--exclude=_darcs' '--exclude=.bzr' duplicity-0.6.15~/src/backends/u1backend.py duplicity-0.6.15/src/backends/u1backend.py
+--- duplicity-0.6.15~/src/backends/u1backend.py	2011-08-19 14:26:58.000000000 -0400
++++ duplicity-0.6.15/src/backends/u1backend.py	2011-09-08 09:10:07.001183197 -0400
+@@ -98,17 +98,15 @@
+         import urllib
+         return urllib.quote(url, safe="/~")
+ 
+-    def handle_error(self, raise_error, op, headers, file1=None, file2=None, ignore=None):
++    def parse_error(self, headers, ignore=None):
+         from duplicity import log
+-        from duplicity import util
+-        import json
+ 
+         status = int(headers[0].get('status'))
+         if status >= 200 and status < 300:
+-            return
++            return None
+ 
+         if ignore and status in ignore:
+-            return
++            return None
+ 
+         if status == 400:
+             code = log.ErrorCode.backend_permission_denied
+@@ -118,6 +116,18 @@
+             code = log.ErrorCode.backend_no_space
+         else:
+             code = log.ErrorCode.backend_error
++        return code
++
++    def handle_error(self, raise_error, op, headers, file1=None, file2=None, ignore=None):
++        from duplicity import log
++        from duplicity import util
++        import json
++
++        code = self.parse_error(headers, ignore)
++        if code is None:
++            return
++
++        status = int(headers[0].get('status'))
+ 
+         if file1:
+             file1 = file1.encode("utf8")
+@@ -222,5 +232,27 @@
+         answer = auth.request(remote_full, http_method="DELETE")
+         self.handle_error(raise_errors, 'delete', answer, remote_full, ignore=[404])
+ 
++    @retry
++    def _query_file_info(self, filename, raise_errors=False):
++        """Query attributes on filename"""
++        import json
++        import ubuntuone.couch.auth as auth
++        from duplicity import log
++        remote_full = self.meta_base + self.quote(filename)
++        answer = auth.request(remote_full)
++
++        code = self.parse_error(answer)
++        if code is not None:
++            if code == log.ErrorCode.backend_not_found:
++                return {'size': -1}
++            elif raise_errors:
++                self.handle_error(raise_errors, 'query', answer, remote_full, filename)
++            else:
++                return {'size': None}
++
++        node = json.loads(answer[1])
++        size = node.get('size')
++        return {'size': size}
++
+ duplicity.backend.register_backend("u1", U1Backend)
+ duplicity.backend.register_backend("u1+http", U1Backend)
+diff -urNad '--exclude=CVS' '--exclude=.svn' '--exclude=.git' '--exclude=.arch' '--exclude=.hg' '--exclude=_darcs' '--exclude=.bzr' duplicity-0.6.15~/src/commandline.py duplicity-0.6.15/src/commandline.py
+--- duplicity-0.6.15~/src/commandline.py	2011-08-19 14:26:58.000000000 -0400
++++ duplicity-0.6.15/src/commandline.py	2011-09-08 09:10:07.005183197 -0400
+@@ -292,6 +292,10 @@
+     parser.add_option("--fail-on-volume", type="int",
+                       help=optparse.SUPPRESS_HELP)
+ 
++    # used in testing only - skips upload for a given volume
++    parser.add_option("--skip-volume", type="int",
++                      help=optparse.SUPPRESS_HELP)
++
+     # If set, restore only the subdirectory or file specified, not the
+     # whole root.
+     # TRANSL: Used in usage help to represent a Unix-style path name.  Example:
+diff -urNad '--exclude=CVS' '--exclude=.svn' '--exclude=.git' '--exclude=.arch' '--exclude=.hg' '--exclude=_darcs' '--exclude=.bzr' duplicity-0.6.15~/src/globals.py duplicity-0.6.15/src/globals.py
+--- duplicity-0.6.15~/src/globals.py	2011-08-19 14:27:00.000000000 -0400
++++ duplicity-0.6.15/src/globals.py	2011-09-08 09:10:07.005183197 -0400
+@@ -200,6 +200,9 @@
+ # used in testing only - raises exception after volume
+ fail_on_volume = 0
+ 
++# used in testing only - skips uploading a particular volume
++skip_volume = 0
++
+ # ignore (some) errors during operations; supposed to make it more
+ # likely that you are able to restore data under problematic
+ # circumstances. the default should absolutely always be True unless
+diff -urNad '--exclude=CVS' '--exclude=.svn' '--exclude=.git' '--exclude=.arch' '--exclude=.hg' '--exclude=_darcs' '--exclude=.bzr' duplicity-0.6.15~/src/log.py duplicity-0.6.15/src/log.py
+--- duplicity-0.6.15~/src/log.py	2011-08-19 14:26:58.000000000 -0400
++++ duplicity-0.6.15/src/log.py	2011-09-08 09:10:07.005183197 -0400
+@@ -189,6 +189,7 @@
+     gio_not_available = 40
+     source_dir_mismatch = 42 # 41 is reserved for par2
+     ftps_lftp_missing = 43
++    volume_wrong_size = 44
+ 
+     # 50->69 reserved for backend errors
+     backend_error = 50
--- duplicity-0.6.15.orig/debian/examples/system-backup
+++ duplicity-0.6.15/debian/examples/system-backup
@@ -0,0 +1,116 @@
+#!/bin/bash
+#
+# Copyright (C) 2007 Francois Marier
+#
+# This backup script is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License as
+# published by the Free Software Foundation; either version 2 of the
+# License, or (at your option) any later version.
+#
+# This script is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this script; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+# 02110-1301, USA.
+#
+# Usage:
+#   backup                                  automatic backup
+#   backup --full                           force a full backup
+#   backup --list-current-files             list the files currently backed up
+#   backup --file-to-restore directory/foo  restore the given file as "./restored_foo"
+#
+# Note that the following files should be in the same directory as this script:
+#
+#   include      Files and directories to include in the backup (one per line)
+#   exclude      From the directories previously mentioned, which ones to omit
+#   id_rsa       Private ssh key for the backup host
+#   id_rsa.pub   Public ssh key for the backup host (copied there as .ssh/authorized_keys)
+#   known_hosts  Contains the fingerprints for the backup host

+# -----------START OF USER CONFIGURATION------------

+# HINT: Generate a good passphrase using "pwgen -s 16"
+GPG_PASSWORD=""

+# The directory where this script and all the configuration files are located
+BACKUP_HOME="/home/my_username/.backup"

+# The name of the database to backup
+WIKIDB="wikidb"

+# The MySQL root password
+MYSQL_PASSWORD="mysql_root_password"

+# Bandwidth limit for the SCP transfers, in Kbit/s (passed to scp -l)
+SCP_LIMIT="520"

+# How long to keep old backups (duplicity time format, e.g. 9D = 9 days)
+OLDAGE="9D"

+# Which host (using ssh) to copy the backup to
+SSH_HOST="username@some_host.somewhere.net"

+# -----------END OF USER CONFIGURATION------------

+# Internal variables
+SSH_IDENTITY="IdentityFile=$BACKUP_HOME/id_rsa"
+SSH_HOSTKEY="UserKnownHostsFile=$BACKUP_HOME/known_hosts"
+SSH="ssh -o $SSH_IDENTITY -o $SSH_HOSTKEY"
+SCP="scp -q -l $SCP_LIMIT -o $SSH_IDENTITY -o $SSH_HOSTKEY"
+SFTP="sftp -o $SSH_IDENTITY -o $SSH_HOSTKEY"
+INCLUDE_FILE="$BACKUP_HOME/include"
+EXCLUDE_FILE="$BACKUP_HOME/exclude"
+DUMP_FILE="$BACKUP_HOME/$WIKIDB-dump.sql"
+PKG_FILE="$BACKUP_HOME/dpkg-selections"

+# Create the backup directory in case it doesn't exist
+$SSH $SSH_HOST mkdir -p $HOSTNAME

+# If the list of files has been requested, only do that
+if [ "$1" = "--list-current-files" ]; then
+    SCP="scp -q -o $SSH_IDENTITY -o $SSH_HOSTKEY"
+    PASSPHRASE=$GPG_PASSWORD duplicity --list-current-files --ssh-command "$SSH" --scp-command "$SCP" --sftp-command "$SFTP" scp://$SSH_HOST/$HOSTNAME
+    exit 0

+# Restore the given file
+elif [ "$1" = "--file-to-restore" ]; then
+    if [ "$2" = "" ]; then
+        echo "You must specify a file to restore"
+        exit 2
+    fi
+    SCP="scp -q -o $SSH_IDENTITY -o $SSH_HOSTKEY"
+    PASSPHRASE=$GPG_PASSWORD duplicity --ssh-command "$SSH" --scp-command "$SCP" --sftp-command "$SFTP" --file-to-restore "$2" scp://$SSH_HOST/$HOSTNAME "restored_$(basename "$2")"
+    exit 0

+# Catch invalid arguments
+elif [ "$1" != "--full" -a "$1" != "" ]; then
+    echo "Invalid argument: $1"
+    exit 1
+fi

+# Delete files related to failed backups
+PASSPHRASE=$GPG_PASSWORD duplicity --cleanup --verbosity 1 --sftp-command "$SFTP" scp://$SSH_HOST/$HOSTNAME

+# Delete old expired backups
+PASSPHRASE=$GPG_PASSWORD duplicity --force --remove-older-than $OLDAGE --verbosity 1 --sftp-command "$SFTP" scp://$SSH_HOST/$HOSTNAME

+# Dump the wiki DB and the list of installed Debian packages
+mysqldump --opt $WIKIDB -uroot -p$MYSQL_PASSWORD > $DUMP_FILE
+dpkg --get-selections > $PKG_FILE

+# Check whether a full backup was requested
+FULLBACKUP=""
+if [ "$1" = "--full" ]; then
+    FULLBACKUP="--full"
+fi

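+# Note (illustrative): duplicity evaluates --include/--exclude options in
+# the order given and the first matching condition wins, so the trailing
+# --exclude '**' in the command below turns the include lists into a
+# whitelist.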
+# Do the actual backup using Duplicity
+PASSPHRASE=$GPG_PASSWORD duplicity $FULLBACKUP --no-print-statistics --remove-older-than $OLDAGE --verbosity 1 --exclude-device-files --include $PKG_FILE --include $DUMP_FILE --exclude-globbing-filelist $EXCLUDE_FILE --include-globbing-filelist $INCLUDE_FILE --exclude '**' --ssh-command "$SSH" --scp-command "$SCP" --sftp-command "$SFTP" / scp://$SSH_HOST/$HOSTNAME

+# Clean up the temporary files
+rm -f $DUMP_FILE
+rm -f $PKG_FILE
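
+# A possible cron setup for unattended runs (hypothetical; adjust the path
+# to wherever this script is installed, e.g. $BACKUP_HOME/backup):
+#
+#   30 2 * * 1-6  /home/my_username/.backup/backup
+#   30 2 * * 0    /home/my_username/.backup/backup --full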