mirror of
https://github.com/python/cpython
synced 2024-10-17 10:23:24 +00:00
00ee7baf49
which unfortunately means the errors from the bytes type change somewhat: bytes([300]) still raises a ValueError, but bytes([10**100]) now raises a TypeError (either that, or bytes(1.0) also raises a ValueError -- PyNumber_AsSsize_t() can only raise one type of exception.) Merged revisions 51188-51433 via svnmerge from svn+ssh://pythondev@svn.python.org/python/trunk ........ r51189 | kurt.kaiser | 2006-08-10 19:11:09 +0200 (Thu, 10 Aug 2006) | 4 lines Retrieval of previous shell command was not always preserving indentation since 1.2a1) Patch 1528468 Tal Einat. ........ r51190 | guido.van.rossum | 2006-08-10 19:41:07 +0200 (Thu, 10 Aug 2006) | 3 lines Chris McDonough's patch to defend against certain DoS attacks on FieldStorage. SF bug #1112549. ........ r51191 | guido.van.rossum | 2006-08-10 19:42:50 +0200 (Thu, 10 Aug 2006) | 2 lines News item for SF bug 1112549. ........ r51192 | guido.van.rossum | 2006-08-10 20:09:25 +0200 (Thu, 10 Aug 2006) | 2 lines Fix title -- it's rc1, not beta3. ........ r51194 | martin.v.loewis | 2006-08-10 21:04:00 +0200 (Thu, 10 Aug 2006) | 3 lines Update dangling references to the 3.2 database to mention that this is UCD 4.1 now. ........ r51195 | tim.peters | 2006-08-11 00:45:34 +0200 (Fri, 11 Aug 2006) | 6 lines Followup to bug #1069160. PyThreadState_SetAsyncExc(): internal correctness changes wrt refcount safety and deadlock avoidance. Also added a basic test case (relying on ctypes) and repaired the docs. ........ r51196 | tim.peters | 2006-08-11 00:48:45 +0200 (Fri, 11 Aug 2006) | 2 lines Whitespace normalization. ........ r51197 | tim.peters | 2006-08-11 01:22:13 +0200 (Fri, 11 Aug 2006) | 5 lines Whitespace normalization broke test_cgi, because a line of quoted test data relied on preserving a single trailing blank. Changed the string from raw to regular, and forced in the trailing blank via an explicit \x20 escape. ........ r51198 | tim.peters | 2006-08-11 02:49:01 +0200 (Fri, 11 Aug 2006) | 10 lines test_PyThreadState_SetAsyncExc(): This is failing on some 64-bit boxes. I have no idea what the ctypes docs mean by "integers", and blind-guessing here that it intended to mean the signed C "int" type, in which case perhaps I can repair this by feeding the thread id argument to type ctypes.c_long(). Also made the worker thread daemonic, so it doesn't hang Python shutdown if the test continues to fail. ........ r51199 | tim.peters | 2006-08-11 05:49:10 +0200 (Fri, 11 Aug 2006) | 6 lines force_test_exit(): This has been completely ineffective at stopping test_signal from hanging forever on the Tru64 buildbot. That could be because there's no such thing as signal.SIGALARM. Changed to the idiotic (but standard) signal.SIGALRM instead, and added some more debug output. ........ r51202 | neal.norwitz | 2006-08-11 08:09:41 +0200 (Fri, 11 Aug 2006) | 6 lines Fix the failures on cygwin (2006-08-10 fixed the actual locking issue). The first hunk changes the colon to an ! like other Windows variants. We need to always wait on the child so the lock gets released and no other tests fail. This is the try/finally in the second hunk. ........ r51205 | georg.brandl | 2006-08-11 09:15:38 +0200 (Fri, 11 Aug 2006) | 3 lines Add Chris McDonough (latest cgi.py patch) ........ r51206 | georg.brandl | 2006-08-11 09:26:10 +0200 (Fri, 11 Aug 2006) | 3 lines logging's atexit hook now runs even if the rest of the module has already been cleaned up. ........ r51212 | thomas.wouters | 2006-08-11 17:02:39 +0200 (Fri, 11 Aug 2006) | 4 lines Add ignore of *.pyc and *.pyo to Lib/xml/etree/. ........ r51215 | thomas.heller | 2006-08-11 21:55:35 +0200 (Fri, 11 Aug 2006) | 7 lines When a ctypes C callback function is called, zero out the result storage before converting the result to C data. See the comment in the code for details. Provide a better context for errors when the conversion of a callback function's result cannot be converted. ........ r51218 | neal.norwitz | 2006-08-12 03:43:40 +0200 (Sat, 12 Aug 2006) | 6 lines Klocwork made another run and found a bunch more problems. This is the first batch of fixes that should be easy to verify based on context. This fixes problem numbers: 220 (ast), 323-324 (symtable), 321-322 (structseq), 215 (array), 210 (hotshot), 182 (codecs), 209 (etree). ........ r51219 | neal.norwitz | 2006-08-12 03:45:47 +0200 (Sat, 12 Aug 2006) | 9 lines Even though _Py_Mangle() isn't truly public anyone can call it and there was no verification that privateobj was a PyString. If it wasn't a string, this could have allowed a NULL pointer to creep in below and crash. I wonder if this should be PyString_CheckExact? Must identifiers be strings or can they be subclasses? Klocwork #275 ........ r51220 | neal.norwitz | 2006-08-12 03:46:42 +0200 (Sat, 12 Aug 2006) | 5 lines It's highly unlikely, though possible for PyEval_Get*() to return NULLs. So be safe and do an XINCREF. Klocwork # 221-222. ........ r51221 | neal.norwitz | 2006-08-12 03:47:59 +0200 (Sat, 12 Aug 2006) | 7 lines This code is actually not used unless WITHOUT_COMPLEX is defined. However, there was no error checking that PyFloat_FromDouble returned a valid pointer. I believe this change is correct as it seemed to follow other code in the area. Klocwork # 292. ........ r51222 | neal.norwitz | 2006-08-12 03:49:12 +0200 (Sat, 12 Aug 2006) | 5 lines Handle NULL nodes while parsing. I'm not entirely sure this is correct. There might be something else that needs to be done to setup the error. Klocwork #295. ........ r51223 | neal.norwitz | 2006-08-12 03:50:38 +0200 (Sat, 12 Aug 2006) | 6 lines If _stat_float_times is false, we will try to INCREF ival which could be NULL. Return early in that case. The caller checks for PyErr_Occurred so this should be ok. Klocwork #297 ........ r51224 | neal.norwitz | 2006-08-12 03:51:12 +0200 (Sat, 12 Aug 2006) | 3 lines Move the assert which checks for a NULL pointer first. Klocwork #274. ........ r51225 | neal.norwitz | 2006-08-12 03:53:28 +0200 (Sat, 12 Aug 2006) | 5 lines Try to handle a malloc failure. I'm not entirely sure this is correct. There might be something else we need to do to handle the exception. Klocwork # 212-213 ........ r51226 | neal.norwitz | 2006-08-12 03:57:47 +0200 (Sat, 12 Aug 2006) | 6 lines I'm not sure why this code allocates this string for the error message. I think it would be better to always use snprintf and have the format limit the size of the name appropriately (like %.200s). Klocwork #340 ........ r51227 | neal.norwitz | 2006-08-12 04:06:34 +0200 (Sat, 12 Aug 2006) | 3 lines Check returned pointer is valid. Klocwork #233 ........ r51228 | neal.norwitz | 2006-08-12 04:12:30 +0200 (Sat, 12 Aug 2006) | 1 line Whoops, how did that get in there. :-) Revert all the parts of 51227 that were not supposed to go it. Only Modules/_ctypes/cfields.c was supposed to be changed ........ r51229 | neal.norwitz | 2006-08-12 04:33:36 +0200 (Sat, 12 Aug 2006) | 4 lines Don't deref v if it's NULL. Klocwork #214 ........ r51230 | neal.norwitz | 2006-08-12 05:16:54 +0200 (Sat, 12 Aug 2006) | 5 lines Check return of PyMem_MALLOC (garbage) is non-NULL. Check seq in both portions of if/else. Klocwork #289-290. ........ r51231 | neal.norwitz | 2006-08-12 05:17:41 +0200 (Sat, 12 Aug 2006) | 4 lines PyModule_GetDict() can fail, produce fatal errors if this happens on startup. Klocwork #298-299. ........ r51232 | neal.norwitz | 2006-08-12 05:18:50 +0200 (Sat, 12 Aug 2006) | 5 lines Verify verdat which is returned from malloc is not NULL. Ensure we don't pass NULL to free. Klocwork #306 (at least the first part, checking malloc) ........ r51233 | tim.peters | 2006-08-12 06:42:47 +0200 (Sat, 12 Aug 2006) | 35 lines test_signal: Signal handling on the Tru64 buildbot appears to be utterly insane. Plug some theoretical insecurities in the test script: - Verify that the SIGALRM handler was actually installed. - Don't call alarm() before the handler is installed. - Move everything that can fail inside the try/finally, so the test cleans up after itself more often. - Try sending all the expected signals in force_test_exit(), not just SIGALRM. Since that was fixed to actually send SIGALRM (instead of invisibly dying with an AttributeError), we've seen that sending SIGALRM alone does not stop this from hanging. - Move the "kill the child" business into the finally clause, so the child doesn't survive test failure to send SIGALRM to other tests later (there are also baffling SIGALRM-related failures in test_socket). - Cancel the alarm in the finally clause -- if the test dies early, we again don't want SIGALRM showing up to confuse a later test. Alas, this still relies on timing luck wrt the spawned script that sends the test signals, but it's hard to see how waiting for seconds can so often be so unlucky. test_threadedsignals: curiously, this test never fails on Tru64, but doesn't normally signal SIGALRM. Anyway, fixed an obvious (but probably inconsequential) logic error. ........ r51234 | tim.peters | 2006-08-12 07:17:41 +0200 (Sat, 12 Aug 2006) | 8 lines Ah, fudge. One of the prints here actually "shouldn't be" protected by "if verbose:", which caused the test to fail on all non-Windows boxes. Note that I deliberately didn't convert this to unittest yet, because I expect it would be even harder to debug this on Tru64 after conversion. ........ r51235 | georg.brandl | 2006-08-12 10:32:02 +0200 (Sat, 12 Aug 2006) | 3 lines Repair logging test spew caused by rev. 51206. ........ r51236 | neal.norwitz | 2006-08-12 19:03:09 +0200 (Sat, 12 Aug 2006) | 8 lines Patch #1538606, Patch to fix __index__() clipping. I modified this patch some by fixing style, some error checking, and adding XXX comments. This patch requires review and some changes are to be expected. I'm checking in now to get the greatest possible review and establish a baseline for moving forward. I don't want this to hold up release if possible. ........ r51238 | neal.norwitz | 2006-08-12 20:44:06 +0200 (Sat, 12 Aug 2006) | 10 lines Fix a couple of bugs exposed by the new __index__ code. The 64-bit buildbots were failing due to inappropriate clipping of numbers larger than 2**31 with new-style classes. (typeobject.c) In reviewing the code for classic classes, there were 2 problems. Any negative value return could be returned. Always return -1 if there was an error. Also make the checks similar with the new-style classes. I believe this is correct for 32 and 64 bit boxes, including Windows64. Add a test of classic classes too. ........ r51240 | neal.norwitz | 2006-08-13 02:20:49 +0200 (Sun, 13 Aug 2006) | 1 line SF bug #1539336, distutils example code missing ........ r51245 | neal.norwitz | 2006-08-13 20:10:10 +0200 (Sun, 13 Aug 2006) | 6 lines Move/copy assert for tstate != NULL before first use. Verify that PyEval_Get{Globals,Locals} returned valid pointers. Klocwork 231-232 ........ r51246 | neal.norwitz | 2006-08-13 20:10:28 +0200 (Sun, 13 Aug 2006) | 5 lines Handle a whole lot of failures from PyString_FromInternedString(). Should fix most of Klocwork 234-272. ........ r51247 | neal.norwitz | 2006-08-13 20:10:47 +0200 (Sun, 13 Aug 2006) | 8 lines cpathname could be NULL if it was longer than MAXPATHLEN. Don't try to write the .pyc to NULL. Check results of PyList_GetItem() and PyModule_GetDict() are not NULL. Klocwork 282, 283, 285 ........ r51248 | neal.norwitz | 2006-08-13 20:11:08 +0200 (Sun, 13 Aug 2006) | 6 lines Fix segfault when doing string formatting on subclasses of long if __oct__, __hex__ don't return a string. Klocwork 308 ........ r51250 | neal.norwitz | 2006-08-13 20:11:27 +0200 (Sun, 13 Aug 2006) | 5 lines Check return result of PyModule_GetDict(). Fix a bunch of refleaks in the init of the module. This would only be found when running python -v. ........ r51251 | neal.norwitz | 2006-08-13 20:11:43 +0200 (Sun, 13 Aug 2006) | 5 lines Handle malloc and fopen failures more gracefully. Klocwork 180-181 ........ r51252 | neal.norwitz | 2006-08-13 20:12:03 +0200 (Sun, 13 Aug 2006) | 7 lines It's very unlikely, though possible that source is not a string. Verify that PyString_AsString() returns a valid pointer. (The problem can arise when zlib.decompress doesn't return a string.) Klocwork 346 ........ r51253 | neal.norwitz | 2006-08-13 20:12:26 +0200 (Sun, 13 Aug 2006) | 5 lines Handle failures from lookup. Klocwork 341-342 ........ r51254 | neal.norwitz | 2006-08-13 20:12:45 +0200 (Sun, 13 Aug 2006) | 6 lines Handle failure from PyModule_GetDict() (Klocwork 208). Fix a bunch of refleaks in the init of the module. This would only be found when running python -v. ........ r51255 | neal.norwitz | 2006-08-13 20:13:02 +0200 (Sun, 13 Aug 2006) | 4 lines Really address the issue of where to place the assert for leftblock. (Followup of Klocwork 274) ........ r51256 | neal.norwitz | 2006-08-13 20:13:36 +0200 (Sun, 13 Aug 2006) | 4 lines Handle malloc failure. Klocwork 281 ........ r51258 | neal.norwitz | 2006-08-13 20:40:39 +0200 (Sun, 13 Aug 2006) | 4 lines Handle alloca failures. Klocwork 225-228 ........ r51259 | neal.norwitz | 2006-08-13 20:41:15 +0200 (Sun, 13 Aug 2006) | 1 line Get rid of compiler warning ........ r51261 | neal.norwitz | 2006-08-14 02:51:15 +0200 (Mon, 14 Aug 2006) | 1 line Ignore pgen.exe and kill_python.exe for cygwin ........ r51262 | neal.norwitz | 2006-08-14 02:59:03 +0200 (Mon, 14 Aug 2006) | 4 lines Can't return NULL from a void function. If there is a memory error, about the best we can do is call PyErr_WriteUnraisable and go on. We won't be able to do the call below either, so verify delstr is valid. ........ r51263 | neal.norwitz | 2006-08-14 03:49:54 +0200 (Mon, 14 Aug 2006) | 1 line Update purify doc some. ........ r51264 | thomas.heller | 2006-08-14 09:13:05 +0200 (Mon, 14 Aug 2006) | 2 lines Remove unused, buggy test function. Fixes klockwork issue #207. ........ r51265 | thomas.heller | 2006-08-14 09:14:09 +0200 (Mon, 14 Aug 2006) | 2 lines Check for NULL return value from new_CArgObject(). Fixes klockwork issues #183, #184, #185. ........ r51266 | thomas.heller | 2006-08-14 09:50:14 +0200 (Mon, 14 Aug 2006) | 2 lines Check for NULL return value of GenericCData_new(). Fixes klockwork issues #188, #189. ........ r51274 | thomas.heller | 2006-08-14 12:02:24 +0200 (Mon, 14 Aug 2006) | 2 lines Revert the change that tries to zero out a closure's result storage area because the size if unknown in source/callproc.c. ........ r51276 | marc-andre.lemburg | 2006-08-14 12:55:19 +0200 (Mon, 14 Aug 2006) | 11 lines Slightly revised version of patch #1538956: Replace UnicodeDecodeErrors raised during == and != compares of Unicode and other objects with a new UnicodeWarning. All other comparisons continue to raise exceptions. Exceptions other than UnicodeDecodeErrors are also left untouched. ........ r51277 | thomas.heller | 2006-08-14 13:17:48 +0200 (Mon, 14 Aug 2006) | 13 lines Apply the patch #1532975 plus ideas from the patch #1533481. ctypes instances no longer have the internal and undocumented '_as_parameter_' attribute which was used to adapt them to foreign function calls; this mechanism is replaced by a function pointer in the type's stgdict. In the 'from_param' class methods, try the _as_parameter_ attribute if other conversions are not possible. This makes the documented _as_parameter_ mechanism work as intended. Change the ctypes version number to 1.0.1. ........ r51278 | marc-andre.lemburg | 2006-08-14 13:44:34 +0200 (Mon, 14 Aug 2006) | 3 lines Readd NEWS items that were accidentally removed by r51276. ........ r51279 | georg.brandl | 2006-08-14 14:36:06 +0200 (Mon, 14 Aug 2006) | 3 lines Improve markup in PyUnicode_RichCompare. ........ r51280 | marc-andre.lemburg | 2006-08-14 14:57:27 +0200 (Mon, 14 Aug 2006) | 3 lines Correct an accidentally removed previous patch. ........ r51281 | thomas.heller | 2006-08-14 18:17:41 +0200 (Mon, 14 Aug 2006) | 3 lines Patch #1536908: Add support for AMD64 / OpenBSD. Remove the -no-stack-protector compiler flag for OpenBSD as it has been reported to be unneeded. ........ r51282 | thomas.heller | 2006-08-14 18:20:04 +0200 (Mon, 14 Aug 2006) | 1 line News item for rev 51281. ........ r51283 | georg.brandl | 2006-08-14 22:25:39 +0200 (Mon, 14 Aug 2006) | 3 lines Fix refleak introduced in rev. 51248. ........ r51284 | georg.brandl | 2006-08-14 23:34:08 +0200 (Mon, 14 Aug 2006) | 5 lines Make tabnanny recognize IndentationErrors raised by tokenize. Add a test to test_inspect to make sure indented source is recognized correctly. (fixes #1224621) ........ r51285 | georg.brandl | 2006-08-14 23:42:55 +0200 (Mon, 14 Aug 2006) | 3 lines Patch #1535500: fix segfault in BZ2File.writelines and make sure it raises the correct exceptions. ........ r51287 | georg.brandl | 2006-08-14 23:45:32 +0200 (Mon, 14 Aug 2006) | 3 lines Add an additional test: BZ2File write methods should raise IOError when file is read-only. ........ r51289 | georg.brandl | 2006-08-14 23:55:28 +0200 (Mon, 14 Aug 2006) | 3 lines Patch #1536071: trace.py should now find the full module name of a file correctly even on Windows. ........ r51290 | georg.brandl | 2006-08-15 00:01:24 +0200 (Tue, 15 Aug 2006) | 3 lines Cookie.py shouldn't "bogusly" use string._idmap. ........ r51291 | georg.brandl | 2006-08-15 00:10:24 +0200 (Tue, 15 Aug 2006) | 3 lines Patch #1511317: don't crash on invalid hostname info ........ r51292 | tim.peters | 2006-08-15 02:25:04 +0200 (Tue, 15 Aug 2006) | 2 lines Whitespace normalization. ........ r51293 | neal.norwitz | 2006-08-15 06:14:57 +0200 (Tue, 15 Aug 2006) | 3 lines Georg fixed one of my bugs, so I'll repay him with 2 NEWS entries. Now we're even. :-) ........ r51295 | neal.norwitz | 2006-08-15 06:58:28 +0200 (Tue, 15 Aug 2006) | 8 lines Fix the test for SocketServer so it should pass on cygwin and not fail sporadically on other platforms. This is really a band-aid that doesn't fix the underlying issue in SocketServer. It's not clear if it's worth it to fix SocketServer, however, I opened a bug to track it: http://python.org/sf/1540386 ........ r51296 | neal.norwitz | 2006-08-15 06:59:30 +0200 (Tue, 15 Aug 2006) | 3 lines Update the docstring to use a version a little newer than 1999. This was taken from a Debian patch. Should we update the version for each release? ........ r51298 | neal.norwitz | 2006-08-15 08:29:03 +0200 (Tue, 15 Aug 2006) | 2 lines Subclasses of int/long are allowed to define an __index__. ........ r51300 | thomas.heller | 2006-08-15 15:07:21 +0200 (Tue, 15 Aug 2006) | 1 line Check for NULL return value from new_CArgObject calls. ........ r51303 | kurt.kaiser | 2006-08-16 05:15:26 +0200 (Wed, 16 Aug 2006) | 2 lines The 'with' statement is now a Code Context block opener ........ r51304 | anthony.baxter | 2006-08-16 05:42:26 +0200 (Wed, 16 Aug 2006) | 1 line preparing for 2.5c1 ........ r51305 | anthony.baxter | 2006-08-16 05:58:37 +0200 (Wed, 16 Aug 2006) | 1 line preparing for 2.5c1 - no, really this time ........ r51306 | kurt.kaiser | 2006-08-16 07:01:42 +0200 (Wed, 16 Aug 2006) | 9 lines Patch #1540892: site.py Quitter() class attempts to close sys.stdin before raising SystemExit, allowing IDLE to honor quit() and exit(). M Lib/site.py M Lib/idlelib/PyShell.py M Lib/idlelib/CREDITS.txt M Lib/idlelib/NEWS.txt M Misc/NEWS ........ r51307 | ka-ping.yee | 2006-08-16 09:02:50 +0200 (Wed, 16 Aug 2006) | 6 lines Update code and tests to support the 'bytes_le' attribute (for little-endian byte order on Windows), and to work around clocks with low resolution yielding duplicate UUIDs. Anthony Baxter has approved this change. ........ r51308 | kurt.kaiser | 2006-08-16 09:04:17 +0200 (Wed, 16 Aug 2006) | 2 lines Get quit() and exit() to work cleanly when not using subprocess. ........ r51309 | marc-andre.lemburg | 2006-08-16 10:13:26 +0200 (Wed, 16 Aug 2006) | 2 lines Revert to having static version numbers again. ........ r51310 | martin.v.loewis | 2006-08-16 14:55:10 +0200 (Wed, 16 Aug 2006) | 2 lines Build _hashlib on Windows. Build OpenSSL with masm assembler code. Fixes #1535502. ........ r51311 | thomas.heller | 2006-08-16 15:03:11 +0200 (Wed, 16 Aug 2006) | 6 lines Add commented assert statements to check that the result of PyObject_stgdict() and PyType_stgdict() calls are non-NULL before dereferencing the result. Hopefully this fixes what klocwork is complaining about. Fix a few other nits as well. ........ r51312 | anthony.baxter | 2006-08-16 15:08:25 +0200 (Wed, 16 Aug 2006) | 1 line news entry for 51307 ........ r51313 | andrew.kuchling | 2006-08-16 15:22:20 +0200 (Wed, 16 Aug 2006) | 1 line Add UnicodeWarning ........ r51314 | andrew.kuchling | 2006-08-16 15:41:52 +0200 (Wed, 16 Aug 2006) | 1 line Bump document version to 1.0; remove pystone paragraph ........ r51315 | andrew.kuchling | 2006-08-16 15:51:32 +0200 (Wed, 16 Aug 2006) | 1 line Link to docs; remove an XXX comment ........ r51316 | martin.v.loewis | 2006-08-16 15:58:51 +0200 (Wed, 16 Aug 2006) | 1 line Make cl build step compile-only (/c). Remove libs from source list. ........ r51317 | thomas.heller | 2006-08-16 16:07:44 +0200 (Wed, 16 Aug 2006) | 5 lines The __repr__ method of a NULL py_object does no longer raise an exception. Remove a stray '?' character from the exception text when the value is retrieved of such an object. Includes tests. ........ r51318 | andrew.kuchling | 2006-08-16 16:18:23 +0200 (Wed, 16 Aug 2006) | 1 line Update bug/patch counts ........ r51319 | andrew.kuchling | 2006-08-16 16:21:14 +0200 (Wed, 16 Aug 2006) | 1 line Wording/typo fixes ........ r51320 | thomas.heller | 2006-08-16 17:10:12 +0200 (Wed, 16 Aug 2006) | 9 lines Remove the special casing of Py_None when converting the return value of the Python part of a callback function to C. If it cannot be converted, call PyErr_WriteUnraisable with the exception we got. Before, arbitrary data has been passed to the calling C code in this case. (I'm not really sure the NEWS entry is understandable, but I cannot find better words) ........ r51321 | marc-andre.lemburg | 2006-08-16 18:11:01 +0200 (Wed, 16 Aug 2006) | 2 lines Add NEWS item mentioning the reverted distutils version number patch. ........ r51322 | fredrik.lundh | 2006-08-16 18:47:07 +0200 (Wed, 16 Aug 2006) | 5 lines SF#1534630 ignore data that arrives before the opening start tag ........ r51324 | andrew.kuchling | 2006-08-16 19:11:18 +0200 (Wed, 16 Aug 2006) | 1 line Grammar fix ........ r51328 | thomas.heller | 2006-08-16 20:02:11 +0200 (Wed, 16 Aug 2006) | 12 lines Tutorial: Clarify somewhat how parameters are passed to functions (especially explain what integer means). Correct the table - Python integers and longs can both be used. Further clarification to the table comparing ctypes types, Python types, and C types. Reference: Replace integer by C ``int`` where it makes sense. ........ r51329 | kurt.kaiser | 2006-08-16 23:45:59 +0200 (Wed, 16 Aug 2006) | 8 lines File menu hotkeys: there were three 'p' assignments. Reassign the 'Save Copy As' and 'Print' hotkeys to 'y' and 't'. Change the Shell menu hotkey from 's' to 'l'. M Bindings.py M PyShell.py M NEWS.txt ........ r51330 | neil.schemenauer | 2006-08-17 01:38:05 +0200 (Thu, 17 Aug 2006) | 3 lines Fix a bug in the ``compiler`` package that caused invalid code to be generated for generator expressions. ........ r51342 | martin.v.loewis | 2006-08-17 21:19:32 +0200 (Thu, 17 Aug 2006) | 3 lines Merge 51340 and 51341 from 2.5 branch: Leave tk build directory to restore original path. Invoke debug mk1mf.pl after running Configure. ........ r51354 | martin.v.loewis | 2006-08-18 05:47:18 +0200 (Fri, 18 Aug 2006) | 3 lines Bug #1541863: uuid.uuid1 failed to generate unique identifiers on systems with low clock resolution. ........ r51355 | neal.norwitz | 2006-08-18 05:57:54 +0200 (Fri, 18 Aug 2006) | 1 line Add template for 2.6 on HEAD ........ r51356 | neal.norwitz | 2006-08-18 06:01:38 +0200 (Fri, 18 Aug 2006) | 1 line More post-release wibble ........ r51357 | neal.norwitz | 2006-08-18 06:58:33 +0200 (Fri, 18 Aug 2006) | 1 line Try to get Windows bots working again ........ r51358 | neal.norwitz | 2006-08-18 07:10:00 +0200 (Fri, 18 Aug 2006) | 1 line Try to get Windows bots working again. Take 2 ........ r51359 | neal.norwitz | 2006-08-18 07:39:20 +0200 (Fri, 18 Aug 2006) | 1 line Try to get Unix bots install working again. ........ r51360 | neal.norwitz | 2006-08-18 07:41:46 +0200 (Fri, 18 Aug 2006) | 1 line Set version to 2.6a0, seems more consistent. ........ r51362 | neal.norwitz | 2006-08-18 08:14:52 +0200 (Fri, 18 Aug 2006) | 1 line More version wibble ........ r51364 | georg.brandl | 2006-08-18 09:27:59 +0200 (Fri, 18 Aug 2006) | 4 lines Bug #1541682: Fix example in the "Refcount details" API docs. Additionally, remove a faulty example showing PySequence_SetItem applied to a newly created list object and add notes that this isn't a good idea. ........ r51366 | anthony.baxter | 2006-08-18 09:29:02 +0200 (Fri, 18 Aug 2006) | 3 lines Updating IDLE's version number to match Python's (as per python-dev discussion). ........ r51367 | anthony.baxter | 2006-08-18 09:30:07 +0200 (Fri, 18 Aug 2006) | 1 line RPM specfile updates ........ r51368 | georg.brandl | 2006-08-18 09:35:47 +0200 (Fri, 18 Aug 2006) | 2 lines Typo in tp_clear docs. ........ r51378 | andrew.kuchling | 2006-08-18 15:57:13 +0200 (Fri, 18 Aug 2006) | 1 line Minor edits ........ r51379 | thomas.heller | 2006-08-18 16:38:46 +0200 (Fri, 18 Aug 2006) | 6 lines Add asserts to check for 'impossible' NULL values, with comments. In one place where I'n not 1000% sure about the non-NULL, raise a RuntimeError for safety. This should fix the klocwork issues that Neal sent me. If so, it should be applied to the release25-maint branch also. ........ r51400 | neal.norwitz | 2006-08-19 06:22:33 +0200 (Sat, 19 Aug 2006) | 5 lines Move initialization of interned strings to before allocating the object so we don't leak op. (Fixes an earlier patch to this code) Klockwork #350 ........ r51401 | neal.norwitz | 2006-08-19 06:23:04 +0200 (Sat, 19 Aug 2006) | 4 lines Move assert to after NULL check, otherwise we deref NULL in the assert. Klocwork #307 ........ r51402 | neal.norwitz | 2006-08-19 06:25:29 +0200 (Sat, 19 Aug 2006) | 2 lines SF #1542693: Remove semi-colon at end of PyImport_ImportModuleEx macro ........ r51403 | neal.norwitz | 2006-08-19 06:28:55 +0200 (Sat, 19 Aug 2006) | 6 lines Move initialization to after the asserts for non-NULL values. Klocwork 286-287. (I'm not backporting this, but if someone wants to, feel free.) ........ r51404 | neal.norwitz | 2006-08-19 06:52:03 +0200 (Sat, 19 Aug 2006) | 6 lines Handle PyString_FromInternedString() failing (unlikely, but possible). Klocwork #325 (I'm not backporting this, but if someone wants to, feel free.) ........ r51416 | georg.brandl | 2006-08-20 15:15:39 +0200 (Sun, 20 Aug 2006) | 2 lines Patch #1542948: fix urllib2 header casing issue. With new test. ........ r51428 | jeremy.hylton | 2006-08-21 18:19:37 +0200 (Mon, 21 Aug 2006) | 3 lines Move peephole optimizer to separate file. ........ r51429 | jeremy.hylton | 2006-08-21 18:20:29 +0200 (Mon, 21 Aug 2006) | 2 lines Move peephole optimizer to separate file. (Forgot .h in previous checkin.) ........ r51432 | neal.norwitz | 2006-08-21 19:59:46 +0200 (Mon, 21 Aug 2006) | 5 lines Fix bug #1543303, tarfile adds padding that breaks gunzip. Patch # 1543897. Will backport to 2.5 ........ r51433 | neal.norwitz | 2006-08-21 20:01:30 +0200 (Mon, 21 Aug 2006) | 2 lines Add assert to make Klocwork happy (#276) ........
2150 lines
72 KiB
Python
2150 lines
72 KiB
Python
#!/usr/bin/env python
|
|
# -*- coding: iso-8859-1 -*-
|
|
#-------------------------------------------------------------------
|
|
# tarfile.py
|
|
#-------------------------------------------------------------------
|
|
# Copyright (C) 2002 Lars Gustäbel <lars@gustaebel.de>
|
|
# All rights reserved.
|
|
#
|
|
# Permission is hereby granted, free of charge, to any person
|
|
# obtaining a copy of this software and associated documentation
|
|
# files (the "Software"), to deal in the Software without
|
|
# restriction, including without limitation the rights to use,
|
|
# copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
# copies of the Software, and to permit persons to whom the
|
|
# Software is furnished to do so, subject to the following
|
|
# conditions:
|
|
#
|
|
# The above copyright notice and this permission notice shall be
|
|
# included in all copies or substantial portions of the Software.
|
|
#
|
|
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
|
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
|
|
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
|
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
|
|
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
|
|
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
|
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
|
# OTHER DEALINGS IN THE SOFTWARE.
|
|
#
|
|
"""Read from and write to tar format archives.
|
|
"""
|
|
|
|
__version__ = "$Revision$"
|
|
# $Source$
|
|
|
|
version = "0.8.0"
|
|
__author__ = "Lars Gustäbel (lars@gustaebel.de)"
|
|
__date__ = "$Date$"
|
|
__cvsid__ = "$Id$"
|
|
__credits__ = "Gustavo Niemeyer, Niels Gustäbel, Richard Townsend."
|
|
|
|
#---------
|
|
# Imports
|
|
#---------
|
|
import sys
|
|
import os
|
|
import shutil
|
|
import stat
|
|
import errno
|
|
import time
|
|
import struct
|
|
|
|
if sys.platform == 'mac':
|
|
# This module needs work for MacOS9, especially in the area of pathname
|
|
# handling. In many places it is assumed a simple substitution of / by the
|
|
# local os.path.sep is good enough to convert pathnames, but this does not
|
|
# work with the mac rooted:path:name versus :nonrooted:path:name syntax
|
|
raise ImportError, "tarfile does not work for platform==mac"
|
|
|
|
try:
|
|
import grp, pwd
|
|
except ImportError:
|
|
grp = pwd = None
|
|
|
|
# from tarfile import *
|
|
__all__ = ["TarFile", "TarInfo", "is_tarfile", "TarError"]
|
|
|
|
#---------------------------------------------------------
|
|
# tar constants
|
|
#---------------------------------------------------------
|
|
NUL = "\0" # the null character
|
|
BLOCKSIZE = 512 # length of processing blocks
|
|
RECORDSIZE = BLOCKSIZE * 20 # length of records
|
|
MAGIC = "ustar" # magic tar string
|
|
VERSION = "00" # version number
|
|
|
|
LENGTH_NAME = 100 # maximum length of a filename
|
|
LENGTH_LINK = 100 # maximum length of a linkname
|
|
LENGTH_PREFIX = 155 # maximum length of the prefix field
|
|
MAXSIZE_MEMBER = 077777777777L # maximum size of a file (11 octal digits)
|
|
|
|
REGTYPE = "0" # regular file
|
|
AREGTYPE = "\0" # regular file
|
|
LNKTYPE = "1" # link (inside tarfile)
|
|
SYMTYPE = "2" # symbolic link
|
|
CHRTYPE = "3" # character special device
|
|
BLKTYPE = "4" # block special device
|
|
DIRTYPE = "5" # directory
|
|
FIFOTYPE = "6" # fifo special device
|
|
CONTTYPE = "7" # contiguous file
|
|
|
|
GNUTYPE_LONGNAME = "L" # GNU tar extension for longnames
|
|
GNUTYPE_LONGLINK = "K" # GNU tar extension for longlink
|
|
GNUTYPE_SPARSE = "S" # GNU tar extension for sparse file
|
|
|
|
#---------------------------------------------------------
|
|
# tarfile constants
|
|
#---------------------------------------------------------
|
|
SUPPORTED_TYPES = (REGTYPE, AREGTYPE, LNKTYPE, # file types that tarfile
|
|
SYMTYPE, DIRTYPE, FIFOTYPE, # can cope with.
|
|
CONTTYPE, CHRTYPE, BLKTYPE,
|
|
GNUTYPE_LONGNAME, GNUTYPE_LONGLINK,
|
|
GNUTYPE_SPARSE)
|
|
|
|
REGULAR_TYPES = (REGTYPE, AREGTYPE, # file types that somehow
|
|
CONTTYPE, GNUTYPE_SPARSE) # represent regular files
|
|
|
|
#---------------------------------------------------------
|
|
# Bits used in the mode field, values in octal.
|
|
#---------------------------------------------------------
|
|
S_IFLNK = 0120000 # symbolic link
|
|
S_IFREG = 0100000 # regular file
|
|
S_IFBLK = 0060000 # block device
|
|
S_IFDIR = 0040000 # directory
|
|
S_IFCHR = 0020000 # character device
|
|
S_IFIFO = 0010000 # fifo
|
|
|
|
TSUID = 04000 # set UID on execution
|
|
TSGID = 02000 # set GID on execution
|
|
TSVTX = 01000 # reserved
|
|
|
|
TUREAD = 0400 # read by owner
|
|
TUWRITE = 0200 # write by owner
|
|
TUEXEC = 0100 # execute/search by owner
|
|
TGREAD = 0040 # read by group
|
|
TGWRITE = 0020 # write by group
|
|
TGEXEC = 0010 # execute/search by group
|
|
TOREAD = 0004 # read by other
|
|
TOWRITE = 0002 # write by other
|
|
TOEXEC = 0001 # execute/search by other
|
|
|
|
#---------------------------------------------------------
|
|
# Some useful functions
|
|
#---------------------------------------------------------
|
|
|
|
def stn(s, length):
|
|
"""Convert a python string to a null-terminated string buffer.
|
|
"""
|
|
return s[:length-1] + (length - len(s) - 1) * NUL + NUL
|
|
|
|
def nti(s):
|
|
"""Convert a number field to a python number.
|
|
"""
|
|
# There are two possible encodings for a number field, see
|
|
# itn() below.
|
|
if s[0] != chr(0200):
|
|
n = int(s.rstrip(NUL) or "0", 8)
|
|
else:
|
|
n = 0L
|
|
for i in xrange(len(s) - 1):
|
|
n <<= 8
|
|
n += ord(s[i + 1])
|
|
return n
|
|
|
|
def itn(n, digits=8, posix=False):
|
|
"""Convert a python number to a number field.
|
|
"""
|
|
# POSIX 1003.1-1988 requires numbers to be encoded as a string of
|
|
# octal digits followed by a null-byte, this allows values up to
|
|
# (8**(digits-1))-1. GNU tar allows storing numbers greater than
|
|
# that if necessary. A leading 0200 byte indicates this particular
|
|
# encoding, the following digits-1 bytes are a big-endian
|
|
# representation. This allows values up to (256**(digits-1))-1.
|
|
if 0 <= n < 8 ** (digits - 1):
|
|
s = "%0*o" % (digits - 1, n) + NUL
|
|
else:
|
|
if posix:
|
|
raise ValueError("overflow in number field")
|
|
|
|
if n < 0:
|
|
# XXX We mimic GNU tar's behaviour with negative numbers,
|
|
# this could raise OverflowError.
|
|
n = struct.unpack("L", struct.pack("l", n))[0]
|
|
|
|
s = ""
|
|
for i in xrange(digits - 1):
|
|
s = chr(n & 0377) + s
|
|
n >>= 8
|
|
s = chr(0200) + s
|
|
return s
|
|
|
|
def calc_chksums(buf):
|
|
"""Calculate the checksum for a member's header by summing up all
|
|
characters except for the chksum field which is treated as if
|
|
it was filled with spaces. According to the GNU tar sources,
|
|
some tars (Sun and NeXT) calculate chksum with signed char,
|
|
which will be different if there are chars in the buffer with
|
|
the high bit set. So we calculate two checksums, unsigned and
|
|
signed.
|
|
"""
|
|
unsigned_chksum = 256 + sum(struct.unpack("148B", buf[:148]) + struct.unpack("356B", buf[156:512]))
|
|
signed_chksum = 256 + sum(struct.unpack("148b", buf[:148]) + struct.unpack("356b", buf[156:512]))
|
|
return unsigned_chksum, signed_chksum
|
|
|
|
def copyfileobj(src, dst, length=None):
|
|
"""Copy length bytes from fileobj src to fileobj dst.
|
|
If length is None, copy the entire content.
|
|
"""
|
|
if length == 0:
|
|
return
|
|
if length is None:
|
|
shutil.copyfileobj(src, dst)
|
|
return
|
|
|
|
BUFSIZE = 16 * 1024
|
|
blocks, remainder = divmod(length, BUFSIZE)
|
|
for b in xrange(blocks):
|
|
buf = src.read(BUFSIZE)
|
|
if len(buf) < BUFSIZE:
|
|
raise IOError("end of file reached")
|
|
dst.write(buf)
|
|
|
|
if remainder != 0:
|
|
buf = src.read(remainder)
|
|
if len(buf) < remainder:
|
|
raise IOError("end of file reached")
|
|
dst.write(buf)
|
|
return
|
|
|
|
filemode_table = (
|
|
((S_IFLNK, "l"),
|
|
(S_IFREG, "-"),
|
|
(S_IFBLK, "b"),
|
|
(S_IFDIR, "d"),
|
|
(S_IFCHR, "c"),
|
|
(S_IFIFO, "p")),
|
|
|
|
((TUREAD, "r"),),
|
|
((TUWRITE, "w"),),
|
|
((TUEXEC|TSUID, "s"),
|
|
(TSUID, "S"),
|
|
(TUEXEC, "x")),
|
|
|
|
((TGREAD, "r"),),
|
|
((TGWRITE, "w"),),
|
|
((TGEXEC|TSGID, "s"),
|
|
(TSGID, "S"),
|
|
(TGEXEC, "x")),
|
|
|
|
((TOREAD, "r"),),
|
|
((TOWRITE, "w"),),
|
|
((TOEXEC|TSVTX, "t"),
|
|
(TSVTX, "T"),
|
|
(TOEXEC, "x"))
|
|
)
|
|
|
|
def filemode(mode):
|
|
"""Convert a file's mode to a string of the form
|
|
-rwxrwxrwx.
|
|
Used by TarFile.list()
|
|
"""
|
|
perm = []
|
|
for table in filemode_table:
|
|
for bit, char in table:
|
|
if mode & bit == bit:
|
|
perm.append(char)
|
|
break
|
|
else:
|
|
perm.append("-")
|
|
return "".join(perm)
|
|
|
|
if os.sep != "/":
|
|
normpath = lambda path: os.path.normpath(path).replace(os.sep, "/")
|
|
else:
|
|
normpath = os.path.normpath
|
|
|
|
class TarError(Exception):
|
|
"""Base exception."""
|
|
pass
|
|
class ExtractError(TarError):
|
|
"""General exception for extract errors."""
|
|
pass
|
|
class ReadError(TarError):
|
|
"""Exception for unreadble tar archives."""
|
|
pass
|
|
class CompressionError(TarError):
|
|
"""Exception for unavailable compression methods."""
|
|
pass
|
|
class StreamError(TarError):
|
|
"""Exception for unsupported operations on stream-like TarFiles."""
|
|
pass
|
|
|
|
#---------------------------
|
|
# internal stream interface
|
|
#---------------------------
|
|
class _LowLevelFile:
|
|
"""Low-level file object. Supports reading and writing.
|
|
It is used instead of a regular file object for streaming
|
|
access.
|
|
"""
|
|
|
|
def __init__(self, name, mode):
|
|
mode = {
|
|
"r": os.O_RDONLY,
|
|
"w": os.O_WRONLY | os.O_CREAT | os.O_TRUNC,
|
|
}[mode]
|
|
if hasattr(os, "O_BINARY"):
|
|
mode |= os.O_BINARY
|
|
self.fd = os.open(name, mode)
|
|
|
|
def close(self):
|
|
os.close(self.fd)
|
|
|
|
def read(self, size):
|
|
return os.read(self.fd, size)
|
|
|
|
def write(self, s):
|
|
os.write(self.fd, s)
|
|
|
|
class _Stream:
|
|
"""Class that serves as an adapter between TarFile and
|
|
a stream-like object. The stream-like object only
|
|
needs to have a read() or write() method and is accessed
|
|
blockwise. Use of gzip or bzip2 compression is possible.
|
|
A stream-like object could be for example: sys.stdin,
|
|
sys.stdout, a socket, a tape device etc.
|
|
|
|
_Stream is intended to be used only internally.
|
|
"""
|
|
|
|
def __init__(self, name, mode, comptype, fileobj, bufsize):
|
|
"""Construct a _Stream object.
|
|
"""
|
|
self._extfileobj = True
|
|
if fileobj is None:
|
|
fileobj = _LowLevelFile(name, mode)
|
|
self._extfileobj = False
|
|
|
|
if comptype == '*':
|
|
# Enable transparent compression detection for the
|
|
# stream interface
|
|
fileobj = _StreamProxy(fileobj)
|
|
comptype = fileobj.getcomptype()
|
|
|
|
self.name = name or ""
|
|
self.mode = mode
|
|
self.comptype = comptype
|
|
self.fileobj = fileobj
|
|
self.bufsize = bufsize
|
|
self.buf = ""
|
|
self.pos = 0L
|
|
self.closed = False
|
|
|
|
if comptype == "gz":
|
|
try:
|
|
import zlib
|
|
except ImportError:
|
|
raise CompressionError("zlib module is not available")
|
|
self.zlib = zlib
|
|
self.crc = zlib.crc32("")
|
|
if mode == "r":
|
|
self._init_read_gz()
|
|
else:
|
|
self._init_write_gz()
|
|
|
|
if comptype == "bz2":
|
|
try:
|
|
import bz2
|
|
except ImportError:
|
|
raise CompressionError("bz2 module is not available")
|
|
if mode == "r":
|
|
self.dbuf = ""
|
|
self.cmp = bz2.BZ2Decompressor()
|
|
else:
|
|
self.cmp = bz2.BZ2Compressor()
|
|
|
|
def __del__(self):
|
|
if hasattr(self, "closed") and not self.closed:
|
|
self.close()
|
|
|
|
def _init_write_gz(self):
|
|
"""Initialize for writing with gzip compression.
|
|
"""
|
|
self.cmp = self.zlib.compressobj(9, self.zlib.DEFLATED,
|
|
-self.zlib.MAX_WBITS,
|
|
self.zlib.DEF_MEM_LEVEL,
|
|
0)
|
|
timestamp = struct.pack("<L", long(time.time()))
|
|
self.__write("\037\213\010\010%s\002\377" % timestamp)
|
|
if self.name.endswith(".gz"):
|
|
self.name = self.name[:-3]
|
|
self.__write(self.name + NUL)
|
|
|
|
def write(self, s):
|
|
"""Write string s to the stream.
|
|
"""
|
|
if self.comptype == "gz":
|
|
self.crc = self.zlib.crc32(s, self.crc)
|
|
self.pos += len(s)
|
|
if self.comptype != "tar":
|
|
s = self.cmp.compress(s)
|
|
self.__write(s)
|
|
|
|
def __write(self, s):
|
|
"""Write string s to the stream if a whole new block
|
|
is ready to be written.
|
|
"""
|
|
self.buf += s
|
|
while len(self.buf) > self.bufsize:
|
|
self.fileobj.write(self.buf[:self.bufsize])
|
|
self.buf = self.buf[self.bufsize:]
|
|
|
|
def close(self):
|
|
"""Close the _Stream object. No operation should be
|
|
done on it afterwards.
|
|
"""
|
|
if self.closed:
|
|
return
|
|
|
|
if self.mode == "w" and self.comptype != "tar":
|
|
self.buf += self.cmp.flush()
|
|
|
|
if self.mode == "w" and self.buf:
|
|
self.fileobj.write(self.buf)
|
|
self.buf = ""
|
|
if self.comptype == "gz":
|
|
# The native zlib crc is an unsigned 32-bit integer, but
|
|
# the Python wrapper implicitly casts that to a signed C
|
|
# long. So, on a 32-bit box self.crc may "look negative",
|
|
# while the same crc on a 64-bit box may "look positive".
|
|
# To avoid irksome warnings from the `struct` module, force
|
|
# it to look positive on all boxes.
|
|
self.fileobj.write(struct.pack("<L", self.crc & 0xffffffffL))
|
|
self.fileobj.write(struct.pack("<L", self.pos & 0xffffFFFFL))
|
|
|
|
if not self._extfileobj:
|
|
self.fileobj.close()
|
|
|
|
self.closed = True
|
|
|
|
def _init_read_gz(self):
|
|
"""Initialize for reading a gzip compressed fileobj.
|
|
"""
|
|
self.cmp = self.zlib.decompressobj(-self.zlib.MAX_WBITS)
|
|
self.dbuf = ""
|
|
|
|
# taken from gzip.GzipFile with some alterations
|
|
if self.__read(2) != "\037\213":
|
|
raise ReadError("not a gzip file")
|
|
if self.__read(1) != "\010":
|
|
raise CompressionError("unsupported compression method")
|
|
|
|
flag = ord(self.__read(1))
|
|
self.__read(6)
|
|
|
|
if flag & 4:
|
|
xlen = ord(self.__read(1)) + 256 * ord(self.__read(1))
|
|
self.read(xlen)
|
|
if flag & 8:
|
|
while True:
|
|
s = self.__read(1)
|
|
if not s or s == NUL:
|
|
break
|
|
if flag & 16:
|
|
while True:
|
|
s = self.__read(1)
|
|
if not s or s == NUL:
|
|
break
|
|
if flag & 2:
|
|
self.__read(2)
|
|
|
|
def tell(self):
|
|
"""Return the stream's file pointer position.
|
|
"""
|
|
return self.pos
|
|
|
|
def seek(self, pos=0):
|
|
"""Set the stream's file pointer to pos. Negative seeking
|
|
is forbidden.
|
|
"""
|
|
if pos - self.pos >= 0:
|
|
blocks, remainder = divmod(pos - self.pos, self.bufsize)
|
|
for i in xrange(blocks):
|
|
self.read(self.bufsize)
|
|
self.read(remainder)
|
|
else:
|
|
raise StreamError("seeking backwards is not allowed")
|
|
return self.pos
|
|
|
|
def read(self, size=None):
|
|
"""Return the next size number of bytes from the stream.
|
|
If size is not defined, return all bytes of the stream
|
|
up to EOF.
|
|
"""
|
|
if size is None:
|
|
t = []
|
|
while True:
|
|
buf = self._read(self.bufsize)
|
|
if not buf:
|
|
break
|
|
t.append(buf)
|
|
buf = "".join(t)
|
|
else:
|
|
buf = self._read(size)
|
|
self.pos += len(buf)
|
|
return buf
|
|
|
|
def _read(self, size):
|
|
"""Return size bytes from the stream.
|
|
"""
|
|
if self.comptype == "tar":
|
|
return self.__read(size)
|
|
|
|
c = len(self.dbuf)
|
|
t = [self.dbuf]
|
|
while c < size:
|
|
buf = self.__read(self.bufsize)
|
|
if not buf:
|
|
break
|
|
buf = self.cmp.decompress(buf)
|
|
t.append(buf)
|
|
c += len(buf)
|
|
t = "".join(t)
|
|
self.dbuf = t[size:]
|
|
return t[:size]
|
|
|
|
def __read(self, size):
|
|
"""Return size bytes from stream. If internal buffer is empty,
|
|
read another block from the stream.
|
|
"""
|
|
c = len(self.buf)
|
|
t = [self.buf]
|
|
while c < size:
|
|
buf = self.fileobj.read(self.bufsize)
|
|
if not buf:
|
|
break
|
|
t.append(buf)
|
|
c += len(buf)
|
|
t = "".join(t)
|
|
self.buf = t[size:]
|
|
return t[:size]
|
|
# class _Stream
|
|
|
|
class _StreamProxy(object):
|
|
"""Small proxy class that enables transparent compression
|
|
detection for the Stream interface (mode 'r|*').
|
|
"""
|
|
|
|
def __init__(self, fileobj):
|
|
self.fileobj = fileobj
|
|
self.buf = self.fileobj.read(BLOCKSIZE)
|
|
|
|
def read(self, size):
|
|
self.read = self.fileobj.read
|
|
return self.buf
|
|
|
|
def getcomptype(self):
|
|
if self.buf.startswith("\037\213\010"):
|
|
return "gz"
|
|
if self.buf.startswith("BZh91"):
|
|
return "bz2"
|
|
return "tar"
|
|
|
|
def close(self):
|
|
self.fileobj.close()
|
|
# class StreamProxy
|
|
|
|
class _BZ2Proxy(object):
|
|
"""Small proxy class that enables external file object
|
|
support for "r:bz2" and "w:bz2" modes. This is actually
|
|
a workaround for a limitation in bz2 module's BZ2File
|
|
class which (unlike gzip.GzipFile) has no support for
|
|
a file object argument.
|
|
"""
|
|
|
|
blocksize = 16 * 1024
|
|
|
|
def __init__(self, fileobj, mode):
|
|
self.fileobj = fileobj
|
|
self.mode = mode
|
|
self.init()
|
|
|
|
def init(self):
|
|
import bz2
|
|
self.pos = 0
|
|
if self.mode == "r":
|
|
self.bz2obj = bz2.BZ2Decompressor()
|
|
self.fileobj.seek(0)
|
|
self.buf = ""
|
|
else:
|
|
self.bz2obj = bz2.BZ2Compressor()
|
|
|
|
def read(self, size):
|
|
b = [self.buf]
|
|
x = len(self.buf)
|
|
while x < size:
|
|
try:
|
|
raw = self.fileobj.read(self.blocksize)
|
|
data = self.bz2obj.decompress(raw)
|
|
b.append(data)
|
|
except EOFError:
|
|
break
|
|
x += len(data)
|
|
self.buf = "".join(b)
|
|
|
|
buf = self.buf[:size]
|
|
self.buf = self.buf[size:]
|
|
self.pos += len(buf)
|
|
return buf
|
|
|
|
def seek(self, pos):
|
|
if pos < self.pos:
|
|
self.init()
|
|
self.read(pos - self.pos)
|
|
|
|
def tell(self):
|
|
return self.pos
|
|
|
|
def write(self, data):
|
|
self.pos += len(data)
|
|
raw = self.bz2obj.compress(data)
|
|
self.fileobj.write(raw)
|
|
|
|
def close(self):
|
|
if self.mode == "w":
|
|
raw = self.bz2obj.flush()
|
|
self.fileobj.write(raw)
|
|
self.fileobj.close()
|
|
# class _BZ2Proxy
|
|
|
|
#------------------------
|
|
# Extraction file object
|
|
#------------------------
|
|
class ExFileObject(object):
|
|
"""File-like object for reading an archive member.
|
|
Is returned by TarFile.extractfile(). Support for
|
|
sparse files included.
|
|
"""
|
|
|
|
def __init__(self, tarfile, tarinfo):
|
|
self.fileobj = tarfile.fileobj
|
|
self.name = tarinfo.name
|
|
self.mode = "r"
|
|
self.closed = False
|
|
self.offset = tarinfo.offset_data
|
|
self.size = tarinfo.size
|
|
self.pos = 0L
|
|
self.linebuffer = ""
|
|
if tarinfo.issparse():
|
|
self.sparse = tarinfo.sparse
|
|
self.read = self._readsparse
|
|
else:
|
|
self.read = self._readnormal
|
|
|
|
def __read(self, size):
|
|
"""Overloadable read method.
|
|
"""
|
|
return self.fileobj.read(size)
|
|
|
|
def readline(self, size=-1):
|
|
"""Read a line with approx. size. If size is negative,
|
|
read a whole line. readline() and read() must not
|
|
be mixed up (!).
|
|
"""
|
|
if size < 0:
|
|
size = sys.maxint
|
|
|
|
nl = self.linebuffer.find("\n")
|
|
if nl >= 0:
|
|
nl = min(nl, size)
|
|
else:
|
|
size -= len(self.linebuffer)
|
|
while (nl < 0 and size > 0):
|
|
buf = self.read(min(size, 100))
|
|
if not buf:
|
|
break
|
|
self.linebuffer += buf
|
|
size -= len(buf)
|
|
nl = self.linebuffer.find("\n")
|
|
if nl == -1:
|
|
s = self.linebuffer
|
|
self.linebuffer = ""
|
|
return s
|
|
buf = self.linebuffer[:nl]
|
|
self.linebuffer = self.linebuffer[nl + 1:]
|
|
while buf[-1:] == "\r":
|
|
buf = buf[:-1]
|
|
return buf + "\n"
|
|
|
|
def readlines(self):
|
|
"""Return a list with all (following) lines.
|
|
"""
|
|
result = []
|
|
while True:
|
|
line = self.readline()
|
|
if not line: break
|
|
result.append(line)
|
|
return result
|
|
|
|
def _readnormal(self, size=None):
|
|
"""Read operation for regular files.
|
|
"""
|
|
if self.closed:
|
|
raise ValueError("file is closed")
|
|
self.fileobj.seek(self.offset + self.pos)
|
|
bytesleft = self.size - self.pos
|
|
if size is None:
|
|
bytestoread = bytesleft
|
|
else:
|
|
bytestoread = min(size, bytesleft)
|
|
self.pos += bytestoread
|
|
return self.__read(bytestoread)
|
|
|
|
def _readsparse(self, size=None):
|
|
"""Read operation for sparse files.
|
|
"""
|
|
if self.closed:
|
|
raise ValueError("file is closed")
|
|
|
|
if size is None:
|
|
size = self.size - self.pos
|
|
|
|
data = []
|
|
while size > 0:
|
|
buf = self._readsparsesection(size)
|
|
if not buf:
|
|
break
|
|
size -= len(buf)
|
|
data.append(buf)
|
|
return "".join(data)
|
|
|
|
def _readsparsesection(self, size):
|
|
"""Read a single section of a sparse file.
|
|
"""
|
|
section = self.sparse.find(self.pos)
|
|
|
|
if section is None:
|
|
return ""
|
|
|
|
toread = min(size, section.offset + section.size - self.pos)
|
|
if isinstance(section, _data):
|
|
realpos = section.realpos + self.pos - section.offset
|
|
self.pos += toread
|
|
self.fileobj.seek(self.offset + realpos)
|
|
return self.__read(toread)
|
|
else:
|
|
self.pos += toread
|
|
return NUL * toread
|
|
|
|
def tell(self):
|
|
"""Return the current file position.
|
|
"""
|
|
return self.pos
|
|
|
|
def seek(self, pos, whence=0):
|
|
"""Seek to a position in the file.
|
|
"""
|
|
self.linebuffer = ""
|
|
if whence == 0:
|
|
self.pos = min(max(pos, 0), self.size)
|
|
if whence == 1:
|
|
if pos < 0:
|
|
self.pos = max(self.pos + pos, 0)
|
|
else:
|
|
self.pos = min(self.pos + pos, self.size)
|
|
if whence == 2:
|
|
self.pos = max(min(self.size + pos, self.size), 0)
|
|
|
|
def close(self):
|
|
"""Close the file object.
|
|
"""
|
|
self.closed = True
|
|
|
|
def __iter__(self):
|
|
"""Get an iterator over the file object.
|
|
"""
|
|
if self.closed:
|
|
raise ValueError("I/O operation on closed file")
|
|
return self
|
|
|
|
def next(self):
|
|
"""Get the next item from the file iterator.
|
|
"""
|
|
result = self.readline()
|
|
if not result:
|
|
raise StopIteration
|
|
return result
|
|
|
|
#class ExFileObject
|
|
|
|
#------------------
|
|
# Exported Classes
|
|
#------------------
|
|
class TarInfo(object):
|
|
"""Informational class which holds the details about an
|
|
archive member given by a tar header block.
|
|
TarInfo objects are returned by TarFile.getmember(),
|
|
TarFile.getmembers() and TarFile.gettarinfo() and are
|
|
usually created internally.
|
|
"""
|
|
|
|
def __init__(self, name=""):
|
|
"""Construct a TarInfo object. name is the optional name
|
|
of the member.
|
|
"""
|
|
|
|
self.name = name # member name (dirnames must end with '/')
|
|
self.mode = 0666 # file permissions
|
|
self.uid = 0 # user id
|
|
self.gid = 0 # group id
|
|
self.size = 0 # file size
|
|
self.mtime = 0 # modification time
|
|
self.chksum = 0 # header checksum
|
|
self.type = REGTYPE # member type
|
|
self.linkname = "" # link name
|
|
self.uname = "user" # user name
|
|
self.gname = "group" # group name
|
|
self.devmajor = 0 # device major number
|
|
self.devminor = 0 # device minor number
|
|
self.prefix = "" # prefix to filename or information
|
|
# about sparse files
|
|
|
|
self.offset = 0 # the tar header starts here
|
|
self.offset_data = 0 # the file's data starts here
|
|
|
|
def __repr__(self):
|
|
return "<%s %r at %#x>" % (self.__class__.__name__,self.name,id(self))
|
|
|
|
@classmethod
|
|
def frombuf(cls, buf):
|
|
"""Construct a TarInfo object from a 512 byte string buffer.
|
|
"""
|
|
if len(buf) != BLOCKSIZE:
|
|
raise ValueError("truncated header")
|
|
if buf.count(NUL) == BLOCKSIZE:
|
|
raise ValueError("empty header")
|
|
|
|
tarinfo = cls()
|
|
tarinfo.buf = buf
|
|
tarinfo.name = buf[0:100].rstrip(NUL)
|
|
tarinfo.mode = nti(buf[100:108])
|
|
tarinfo.uid = nti(buf[108:116])
|
|
tarinfo.gid = nti(buf[116:124])
|
|
tarinfo.size = nti(buf[124:136])
|
|
tarinfo.mtime = nti(buf[136:148])
|
|
tarinfo.chksum = nti(buf[148:156])
|
|
tarinfo.type = buf[156:157]
|
|
tarinfo.linkname = buf[157:257].rstrip(NUL)
|
|
tarinfo.uname = buf[265:297].rstrip(NUL)
|
|
tarinfo.gname = buf[297:329].rstrip(NUL)
|
|
tarinfo.devmajor = nti(buf[329:337])
|
|
tarinfo.devminor = nti(buf[337:345])
|
|
tarinfo.prefix = buf[345:500]
|
|
|
|
if tarinfo.chksum not in calc_chksums(buf):
|
|
raise ValueError("invalid header")
|
|
return tarinfo
|
|
|
|
def tobuf(self, posix=False):
|
|
"""Return a tar header block as a 512 byte string.
|
|
"""
|
|
parts = [
|
|
stn(self.name, 100),
|
|
itn(self.mode & 07777, 8, posix),
|
|
itn(self.uid, 8, posix),
|
|
itn(self.gid, 8, posix),
|
|
itn(self.size, 12, posix),
|
|
itn(self.mtime, 12, posix),
|
|
" ", # checksum field
|
|
self.type,
|
|
stn(self.linkname, 100),
|
|
stn(MAGIC, 6),
|
|
stn(VERSION, 2),
|
|
stn(self.uname, 32),
|
|
stn(self.gname, 32),
|
|
itn(self.devmajor, 8, posix),
|
|
itn(self.devminor, 8, posix),
|
|
stn(self.prefix, 155)
|
|
]
|
|
|
|
buf = struct.pack("%ds" % BLOCKSIZE, "".join(parts))
|
|
chksum = calc_chksums(buf)[0]
|
|
buf = buf[:148] + "%06o\0" % chksum + buf[155:]
|
|
self.buf = buf
|
|
return buf
|
|
|
|
def isreg(self):
|
|
return self.type in REGULAR_TYPES
|
|
def isfile(self):
|
|
return self.isreg()
|
|
def isdir(self):
|
|
return self.type == DIRTYPE
|
|
def issym(self):
|
|
return self.type == SYMTYPE
|
|
def islnk(self):
|
|
return self.type == LNKTYPE
|
|
def ischr(self):
|
|
return self.type == CHRTYPE
|
|
def isblk(self):
|
|
return self.type == BLKTYPE
|
|
def isfifo(self):
|
|
return self.type == FIFOTYPE
|
|
def issparse(self):
|
|
return self.type == GNUTYPE_SPARSE
|
|
def isdev(self):
|
|
return self.type in (CHRTYPE, BLKTYPE, FIFOTYPE)
|
|
# class TarInfo
|
|
|
|
class TarFile(object):
|
|
"""The TarFile Class provides an interface to tar archives.
|
|
"""
|
|
|
|
debug = 0 # May be set from 0 (no msgs) to 3 (all msgs)
|
|
|
|
dereference = False # If true, add content of linked file to the
|
|
# tar file, else the link.
|
|
|
|
ignore_zeros = False # If true, skips empty or invalid blocks and
|
|
# continues processing.
|
|
|
|
errorlevel = 0 # If 0, fatal errors only appear in debug
|
|
# messages (if debug >= 0). If > 0, errors
|
|
# are passed to the caller as exceptions.
|
|
|
|
posix = False # If True, generates POSIX.1-1990-compliant
|
|
# archives (no GNU extensions!)
|
|
|
|
fileobject = ExFileObject
|
|
|
|
def __init__(self, name=None, mode="r", fileobj=None):
|
|
"""Open an (uncompressed) tar archive `name'. `mode' is either 'r' to
|
|
read from an existing archive, 'a' to append data to an existing
|
|
file or 'w' to create a new file overwriting an existing one. `mode'
|
|
defaults to 'r'.
|
|
If `fileobj' is given, it is used for reading or writing data. If it
|
|
can be determined, `mode' is overridden by `fileobj's mode.
|
|
`fileobj' is not closed, when TarFile is closed.
|
|
"""
|
|
self.name = name
|
|
|
|
if len(mode) > 1 or mode not in "raw":
|
|
raise ValueError("mode must be 'r', 'a' or 'w'")
|
|
self._mode = mode
|
|
self.mode = {"r": "rb", "a": "r+b", "w": "wb"}[mode]
|
|
|
|
if not fileobj:
|
|
fileobj = file(self.name, self.mode)
|
|
self._extfileobj = False
|
|
else:
|
|
if self.name is None and hasattr(fileobj, "name"):
|
|
self.name = fileobj.name
|
|
if hasattr(fileobj, "mode"):
|
|
self.mode = fileobj.mode
|
|
self._extfileobj = True
|
|
self.fileobj = fileobj
|
|
|
|
# Init datastructures
|
|
self.closed = False
|
|
self.members = [] # list of members as TarInfo objects
|
|
self._loaded = False # flag if all members have been read
|
|
self.offset = 0L # current position in the archive file
|
|
self.inodes = {} # dictionary caching the inodes of
|
|
# archive members already added
|
|
|
|
if self._mode == "r":
|
|
self.firstmember = None
|
|
self.firstmember = self.next()
|
|
|
|
if self._mode == "a":
|
|
# Move to the end of the archive,
|
|
# before the first empty block.
|
|
self.firstmember = None
|
|
while True:
|
|
try:
|
|
tarinfo = self.next()
|
|
except ReadError:
|
|
self.fileobj.seek(0)
|
|
break
|
|
if tarinfo is None:
|
|
self.fileobj.seek(- BLOCKSIZE, 1)
|
|
break
|
|
|
|
if self._mode in "aw":
|
|
self._loaded = True
|
|
|
|
#--------------------------------------------------------------------------
|
|
# Below are the classmethods which act as alternate constructors to the
|
|
# TarFile class. The open() method is the only one that is needed for
|
|
# public use; it is the "super"-constructor and is able to select an
|
|
# adequate "sub"-constructor for a particular compression using the mapping
|
|
# from OPEN_METH.
|
|
#
|
|
# This concept allows one to subclass TarFile without losing the comfort of
|
|
# the super-constructor. A sub-constructor is registered and made available
|
|
# by adding it to the mapping in OPEN_METH.
|
|
|
|
@classmethod
|
|
def open(cls, name=None, mode="r", fileobj=None, bufsize=20*512):
|
|
"""Open a tar archive for reading, writing or appending. Return
|
|
an appropriate TarFile class.
|
|
|
|
mode:
|
|
'r' or 'r:*' open for reading with transparent compression
|
|
'r:' open for reading exclusively uncompressed
|
|
'r:gz' open for reading with gzip compression
|
|
'r:bz2' open for reading with bzip2 compression
|
|
'a' or 'a:' open for appending
|
|
'w' or 'w:' open for writing without compression
|
|
'w:gz' open for writing with gzip compression
|
|
'w:bz2' open for writing with bzip2 compression
|
|
|
|
'r|*' open a stream of tar blocks with transparent compression
|
|
'r|' open an uncompressed stream of tar blocks for reading
|
|
'r|gz' open a gzip compressed stream of tar blocks
|
|
'r|bz2' open a bzip2 compressed stream of tar blocks
|
|
'w|' open an uncompressed stream for writing
|
|
'w|gz' open a gzip compressed stream for writing
|
|
'w|bz2' open a bzip2 compressed stream for writing
|
|
"""
|
|
|
|
if not name and not fileobj:
|
|
raise ValueError("nothing to open")
|
|
|
|
if mode in ("r", "r:*"):
|
|
# Find out which *open() is appropriate for opening the file.
|
|
for comptype in cls.OPEN_METH:
|
|
func = getattr(cls, cls.OPEN_METH[comptype])
|
|
try:
|
|
return func(name, "r", fileobj)
|
|
except (ReadError, CompressionError):
|
|
continue
|
|
raise ReadError("file could not be opened successfully")
|
|
|
|
elif ":" in mode:
|
|
filemode, comptype = mode.split(":", 1)
|
|
filemode = filemode or "r"
|
|
comptype = comptype or "tar"
|
|
|
|
# Select the *open() function according to
|
|
# given compression.
|
|
if comptype in cls.OPEN_METH:
|
|
func = getattr(cls, cls.OPEN_METH[comptype])
|
|
else:
|
|
raise CompressionError("unknown compression type %r" % comptype)
|
|
return func(name, filemode, fileobj)
|
|
|
|
elif "|" in mode:
|
|
filemode, comptype = mode.split("|", 1)
|
|
filemode = filemode or "r"
|
|
comptype = comptype or "tar"
|
|
|
|
if filemode not in "rw":
|
|
raise ValueError("mode must be 'r' or 'w'")
|
|
|
|
t = cls(name, filemode,
|
|
_Stream(name, filemode, comptype, fileobj, bufsize))
|
|
t._extfileobj = False
|
|
return t
|
|
|
|
elif mode in "aw":
|
|
return cls.taropen(name, mode, fileobj)
|
|
|
|
raise ValueError("undiscernible mode")
|
|
|
|
@classmethod
|
|
def taropen(cls, name, mode="r", fileobj=None):
|
|
"""Open uncompressed tar archive name for reading or writing.
|
|
"""
|
|
if len(mode) > 1 or mode not in "raw":
|
|
raise ValueError("mode must be 'r', 'a' or 'w'")
|
|
return cls(name, mode, fileobj)
|
|
|
|
@classmethod
|
|
def gzopen(cls, name, mode="r", fileobj=None, compresslevel=9):
|
|
"""Open gzip compressed tar archive name for reading or writing.
|
|
Appending is not allowed.
|
|
"""
|
|
if len(mode) > 1 or mode not in "rw":
|
|
raise ValueError("mode must be 'r' or 'w'")
|
|
|
|
try:
|
|
import gzip
|
|
gzip.GzipFile
|
|
except (ImportError, AttributeError):
|
|
raise CompressionError("gzip module is not available")
|
|
|
|
pre, ext = os.path.splitext(name)
|
|
pre = os.path.basename(pre)
|
|
if ext == ".tgz":
|
|
ext = ".tar"
|
|
if ext == ".gz":
|
|
ext = ""
|
|
tarname = pre + ext
|
|
|
|
if fileobj is None:
|
|
fileobj = file(name, mode + "b")
|
|
|
|
if mode != "r":
|
|
name = tarname
|
|
|
|
try:
|
|
t = cls.taropen(tarname, mode,
|
|
gzip.GzipFile(name, mode, compresslevel, fileobj)
|
|
)
|
|
except IOError:
|
|
raise ReadError("not a gzip file")
|
|
t._extfileobj = False
|
|
return t
|
|
|
|
@classmethod
|
|
def bz2open(cls, name, mode="r", fileobj=None, compresslevel=9):
|
|
"""Open bzip2 compressed tar archive name for reading or writing.
|
|
Appending is not allowed.
|
|
"""
|
|
if len(mode) > 1 or mode not in "rw":
|
|
raise ValueError("mode must be 'r' or 'w'.")
|
|
|
|
try:
|
|
import bz2
|
|
except ImportError:
|
|
raise CompressionError("bz2 module is not available")
|
|
|
|
pre, ext = os.path.splitext(name)
|
|
pre = os.path.basename(pre)
|
|
if ext == ".tbz2":
|
|
ext = ".tar"
|
|
if ext == ".bz2":
|
|
ext = ""
|
|
tarname = pre + ext
|
|
|
|
if fileobj is not None:
|
|
fileobj = _BZ2Proxy(fileobj, mode)
|
|
else:
|
|
fileobj = bz2.BZ2File(name, mode, compresslevel=compresslevel)
|
|
|
|
try:
|
|
t = cls.taropen(tarname, mode, fileobj)
|
|
except IOError:
|
|
raise ReadError("not a bzip2 file")
|
|
t._extfileobj = False
|
|
return t
|
|
|
|
# All *open() methods are registered here.
|
|
OPEN_METH = {
|
|
"tar": "taropen", # uncompressed tar
|
|
"gz": "gzopen", # gzip compressed tar
|
|
"bz2": "bz2open" # bzip2 compressed tar
|
|
}
|
|
|
|
#--------------------------------------------------------------------------
|
|
# The public methods which TarFile provides:
|
|
|
|
def close(self):
|
|
"""Close the TarFile. In write-mode, two finishing zero blocks are
|
|
appended to the archive.
|
|
"""
|
|
if self.closed:
|
|
return
|
|
|
|
if self._mode in "aw":
|
|
self.fileobj.write(NUL * (BLOCKSIZE * 2))
|
|
self.offset += (BLOCKSIZE * 2)
|
|
# fill up the end with zero-blocks
|
|
# (like option -b20 for tar does)
|
|
blocks, remainder = divmod(self.offset, RECORDSIZE)
|
|
if remainder > 0:
|
|
self.fileobj.write(NUL * (RECORDSIZE - remainder))
|
|
|
|
if not self._extfileobj:
|
|
self.fileobj.close()
|
|
self.closed = True
|
|
|
|
def getmember(self, name):
|
|
"""Return a TarInfo object for member `name'. If `name' can not be
|
|
found in the archive, KeyError is raised. If a member occurs more
|
|
than once in the archive, its last occurence is assumed to be the
|
|
most up-to-date version.
|
|
"""
|
|
tarinfo = self._getmember(name)
|
|
if tarinfo is None:
|
|
raise KeyError("filename %r not found" % name)
|
|
return tarinfo
|
|
|
|
def getmembers(self):
|
|
"""Return the members of the archive as a list of TarInfo objects. The
|
|
list has the same order as the members in the archive.
|
|
"""
|
|
self._check()
|
|
if not self._loaded: # if we want to obtain a list of
|
|
self._load() # all members, we first have to
|
|
# scan the whole archive.
|
|
return self.members
|
|
|
|
def getnames(self):
|
|
"""Return the members of the archive as a list of their names. It has
|
|
the same order as the list returned by getmembers().
|
|
"""
|
|
return [tarinfo.name for tarinfo in self.getmembers()]
|
|
|
|
def gettarinfo(self, name=None, arcname=None, fileobj=None):
|
|
"""Create a TarInfo object for either the file `name' or the file
|
|
object `fileobj' (using os.fstat on its file descriptor). You can
|
|
modify some of the TarInfo's attributes before you add it using
|
|
addfile(). If given, `arcname' specifies an alternative name for the
|
|
file in the archive.
|
|
"""
|
|
self._check("aw")
|
|
|
|
# When fileobj is given, replace name by
|
|
# fileobj's real name.
|
|
if fileobj is not None:
|
|
name = fileobj.name
|
|
|
|
# Building the name of the member in the archive.
|
|
# Backward slashes are converted to forward slashes,
|
|
# Absolute paths are turned to relative paths.
|
|
if arcname is None:
|
|
arcname = name
|
|
arcname = normpath(arcname)
|
|
drv, arcname = os.path.splitdrive(arcname)
|
|
while arcname[0:1] == "/":
|
|
arcname = arcname[1:]
|
|
|
|
# Now, fill the TarInfo object with
|
|
# information specific for the file.
|
|
tarinfo = TarInfo()
|
|
|
|
# Use os.stat or os.lstat, depending on platform
|
|
# and if symlinks shall be resolved.
|
|
if fileobj is None:
|
|
if hasattr(os, "lstat") and not self.dereference:
|
|
statres = os.lstat(name)
|
|
else:
|
|
statres = os.stat(name)
|
|
else:
|
|
statres = os.fstat(fileobj.fileno())
|
|
linkname = ""
|
|
|
|
stmd = statres.st_mode
|
|
if stat.S_ISREG(stmd):
|
|
inode = (statres.st_ino, statres.st_dev)
|
|
if not self.dereference and \
|
|
statres.st_nlink > 1 and inode in self.inodes:
|
|
# Is it a hardlink to an already
|
|
# archived file?
|
|
type = LNKTYPE
|
|
linkname = self.inodes[inode]
|
|
else:
|
|
# The inode is added only if its valid.
|
|
# For win32 it is always 0.
|
|
type = REGTYPE
|
|
if inode[0]:
|
|
self.inodes[inode] = arcname
|
|
elif stat.S_ISDIR(stmd):
|
|
type = DIRTYPE
|
|
if arcname[-1:] != "/":
|
|
arcname += "/"
|
|
elif stat.S_ISFIFO(stmd):
|
|
type = FIFOTYPE
|
|
elif stat.S_ISLNK(stmd):
|
|
type = SYMTYPE
|
|
linkname = os.readlink(name)
|
|
elif stat.S_ISCHR(stmd):
|
|
type = CHRTYPE
|
|
elif stat.S_ISBLK(stmd):
|
|
type = BLKTYPE
|
|
else:
|
|
return None
|
|
|
|
# Fill the TarInfo object with all
|
|
# information we can get.
|
|
tarinfo.name = arcname
|
|
tarinfo.mode = stmd
|
|
tarinfo.uid = statres.st_uid
|
|
tarinfo.gid = statres.st_gid
|
|
if stat.S_ISREG(stmd):
|
|
tarinfo.size = statres.st_size
|
|
else:
|
|
tarinfo.size = 0L
|
|
tarinfo.mtime = statres.st_mtime
|
|
tarinfo.type = type
|
|
tarinfo.linkname = linkname
|
|
if pwd:
|
|
try:
|
|
tarinfo.uname = pwd.getpwuid(tarinfo.uid)[0]
|
|
except KeyError:
|
|
pass
|
|
if grp:
|
|
try:
|
|
tarinfo.gname = grp.getgrgid(tarinfo.gid)[0]
|
|
except KeyError:
|
|
pass
|
|
|
|
if type in (CHRTYPE, BLKTYPE):
|
|
if hasattr(os, "major") and hasattr(os, "minor"):
|
|
tarinfo.devmajor = os.major(statres.st_rdev)
|
|
tarinfo.devminor = os.minor(statres.st_rdev)
|
|
return tarinfo
|
|
|
|
def list(self, verbose=True):
|
|
"""Print a table of contents to sys.stdout. If `verbose' is False, only
|
|
the names of the members are printed. If it is True, an `ls -l'-like
|
|
output is produced.
|
|
"""
|
|
self._check()
|
|
|
|
for tarinfo in self:
|
|
if verbose:
|
|
print filemode(tarinfo.mode),
|
|
print "%s/%s" % (tarinfo.uname or tarinfo.uid,
|
|
tarinfo.gname or tarinfo.gid),
|
|
if tarinfo.ischr() or tarinfo.isblk():
|
|
print "%10s" % ("%d,%d" \
|
|
% (tarinfo.devmajor, tarinfo.devminor)),
|
|
else:
|
|
print "%10d" % tarinfo.size,
|
|
print "%d-%02d-%02d %02d:%02d:%02d" \
|
|
% time.localtime(tarinfo.mtime)[:6],
|
|
|
|
print tarinfo.name,
|
|
|
|
if verbose:
|
|
if tarinfo.issym():
|
|
print "->", tarinfo.linkname,
|
|
if tarinfo.islnk():
|
|
print "link to", tarinfo.linkname,
|
|
print
|
|
|
|
def add(self, name, arcname=None, recursive=True):
|
|
"""Add the file `name' to the archive. `name' may be any type of file
|
|
(directory, fifo, symbolic link, etc.). If given, `arcname'
|
|
specifies an alternative name for the file in the archive.
|
|
Directories are added recursively by default. This can be avoided by
|
|
setting `recursive' to False.
|
|
"""
|
|
self._check("aw")
|
|
|
|
if arcname is None:
|
|
arcname = name
|
|
|
|
# Skip if somebody tries to archive the archive...
|
|
if self.name is not None \
|
|
and os.path.abspath(name) == os.path.abspath(self.name):
|
|
self._dbg(2, "tarfile: Skipped %r" % name)
|
|
return
|
|
|
|
# Special case: The user wants to add the current
|
|
# working directory.
|
|
if name == ".":
|
|
if recursive:
|
|
if arcname == ".":
|
|
arcname = ""
|
|
for f in os.listdir("."):
|
|
self.add(f, os.path.join(arcname, f))
|
|
return
|
|
|
|
self._dbg(1, name)
|
|
|
|
# Create a TarInfo object from the file.
|
|
tarinfo = self.gettarinfo(name, arcname)
|
|
|
|
if tarinfo is None:
|
|
self._dbg(1, "tarfile: Unsupported type %r" % name)
|
|
return
|
|
|
|
# Append the tar header and data to the archive.
|
|
if tarinfo.isreg():
|
|
f = file(name, "rb")
|
|
self.addfile(tarinfo, f)
|
|
f.close()
|
|
|
|
elif tarinfo.isdir():
|
|
self.addfile(tarinfo)
|
|
if recursive:
|
|
for f in os.listdir(name):
|
|
self.add(os.path.join(name, f), os.path.join(arcname, f))
|
|
|
|
else:
|
|
self.addfile(tarinfo)
|
|
|
|
def addfile(self, tarinfo, fileobj=None):
|
|
"""Add the TarInfo object `tarinfo' to the archive. If `fileobj' is
|
|
given, tarinfo.size bytes are read from it and added to the archive.
|
|
You can create TarInfo objects using gettarinfo().
|
|
On Windows platforms, `fileobj' should always be opened with mode
|
|
'rb' to avoid irritation about the file size.
|
|
"""
|
|
self._check("aw")
|
|
|
|
tarinfo.name = normpath(tarinfo.name)
|
|
if tarinfo.isdir():
|
|
# directories should end with '/'
|
|
tarinfo.name += "/"
|
|
|
|
if tarinfo.linkname:
|
|
tarinfo.linkname = normpath(tarinfo.linkname)
|
|
|
|
if tarinfo.size > MAXSIZE_MEMBER:
|
|
if self.posix:
|
|
raise ValueError("file is too large (>= 8 GB)")
|
|
else:
|
|
self._dbg(2, "tarfile: Created GNU tar largefile header")
|
|
|
|
|
|
if len(tarinfo.linkname) > LENGTH_LINK:
|
|
if self.posix:
|
|
raise ValueError("linkname is too long (>%d)" % (LENGTH_LINK))
|
|
else:
|
|
self._create_gnulong(tarinfo.linkname, GNUTYPE_LONGLINK)
|
|
tarinfo.linkname = tarinfo.linkname[:LENGTH_LINK -1]
|
|
self._dbg(2, "tarfile: Created GNU tar extension LONGLINK")
|
|
|
|
if len(tarinfo.name) > LENGTH_NAME:
|
|
if self.posix:
|
|
prefix = tarinfo.name[:LENGTH_PREFIX + 1]
|
|
while prefix and prefix[-1] != "/":
|
|
prefix = prefix[:-1]
|
|
|
|
name = tarinfo.name[len(prefix):]
|
|
prefix = prefix[:-1]
|
|
|
|
if not prefix or len(name) > LENGTH_NAME:
|
|
raise ValueError("name is too long (>%d)" % (LENGTH_NAME))
|
|
|
|
tarinfo.name = name
|
|
tarinfo.prefix = prefix
|
|
else:
|
|
self._create_gnulong(tarinfo.name, GNUTYPE_LONGNAME)
|
|
tarinfo.name = tarinfo.name[:LENGTH_NAME - 1]
|
|
self._dbg(2, "tarfile: Created GNU tar extension LONGNAME")
|
|
|
|
self.fileobj.write(tarinfo.tobuf(self.posix))
|
|
self.offset += BLOCKSIZE
|
|
|
|
# If there's data to follow, append it.
|
|
if fileobj is not None:
|
|
copyfileobj(fileobj, self.fileobj, tarinfo.size)
|
|
blocks, remainder = divmod(tarinfo.size, BLOCKSIZE)
|
|
if remainder > 0:
|
|
self.fileobj.write(NUL * (BLOCKSIZE - remainder))
|
|
blocks += 1
|
|
self.offset += blocks * BLOCKSIZE
|
|
|
|
self.members.append(tarinfo)
|
|
|
|
def extractall(self, path=".", members=None):
|
|
"""Extract all members from the archive to the current working
|
|
directory and set owner, modification time and permissions on
|
|
directories afterwards. `path' specifies a different directory
|
|
to extract to. `members' is optional and must be a subset of the
|
|
list returned by getmembers().
|
|
"""
|
|
directories = []
|
|
|
|
if members is None:
|
|
members = self
|
|
|
|
for tarinfo in members:
|
|
if tarinfo.isdir():
|
|
# Extract directory with a safe mode, so that
|
|
# all files below can be extracted as well.
|
|
try:
|
|
os.makedirs(os.path.join(path, tarinfo.name), 0777)
|
|
except EnvironmentError:
|
|
pass
|
|
directories.append(tarinfo)
|
|
else:
|
|
self.extract(tarinfo, path)
|
|
|
|
# Reverse sort directories.
|
|
directories.sort(lambda a, b: cmp(a.name, b.name))
|
|
directories.reverse()
|
|
|
|
# Set correct owner, mtime and filemode on directories.
|
|
for tarinfo in directories:
|
|
path = os.path.join(path, tarinfo.name)
|
|
try:
|
|
self.chown(tarinfo, path)
|
|
self.utime(tarinfo, path)
|
|
self.chmod(tarinfo, path)
|
|
except ExtractError, e:
|
|
if self.errorlevel > 1:
|
|
raise
|
|
else:
|
|
self._dbg(1, "tarfile: %s" % e)
|
|
|
|
def extract(self, member, path=""):
|
|
"""Extract a member from the archive to the current working directory,
|
|
using its full name. Its file information is extracted as accurately
|
|
as possible. `member' may be a filename or a TarInfo object. You can
|
|
specify a different directory using `path'.
|
|
"""
|
|
self._check("r")
|
|
|
|
if isinstance(member, TarInfo):
|
|
tarinfo = member
|
|
else:
|
|
tarinfo = self.getmember(member)
|
|
|
|
# Prepare the link target for makelink().
|
|
if tarinfo.islnk():
|
|
tarinfo._link_target = os.path.join(path, tarinfo.linkname)
|
|
|
|
try:
|
|
self._extract_member(tarinfo, os.path.join(path, tarinfo.name))
|
|
except EnvironmentError, e:
|
|
if self.errorlevel > 0:
|
|
raise
|
|
else:
|
|
if e.filename is None:
|
|
self._dbg(1, "tarfile: %s" % e.strerror)
|
|
else:
|
|
self._dbg(1, "tarfile: %s %r" % (e.strerror, e.filename))
|
|
except ExtractError, e:
|
|
if self.errorlevel > 1:
|
|
raise
|
|
else:
|
|
self._dbg(1, "tarfile: %s" % e)
|
|
|
|
def extractfile(self, member):
|
|
"""Extract a member from the archive as a file object. `member' may be
|
|
a filename or a TarInfo object. If `member' is a regular file, a
|
|
file-like object is returned. If `member' is a link, a file-like
|
|
object is constructed from the link's target. If `member' is none of
|
|
the above, None is returned.
|
|
The file-like object is read-only and provides the following
|
|
methods: read(), readline(), readlines(), seek() and tell()
|
|
"""
|
|
self._check("r")
|
|
|
|
if isinstance(member, TarInfo):
|
|
tarinfo = member
|
|
else:
|
|
tarinfo = self.getmember(member)
|
|
|
|
if tarinfo.isreg():
|
|
return self.fileobject(self, tarinfo)
|
|
|
|
elif tarinfo.type not in SUPPORTED_TYPES:
|
|
# If a member's type is unknown, it is treated as a
|
|
# regular file.
|
|
return self.fileobject(self, tarinfo)
|
|
|
|
elif tarinfo.islnk() or tarinfo.issym():
|
|
if isinstance(self.fileobj, _Stream):
|
|
# A small but ugly workaround for the case that someone tries
|
|
# to extract a (sym)link as a file-object from a non-seekable
|
|
# stream of tar blocks.
|
|
raise StreamError("cannot extract (sym)link as file object")
|
|
else:
|
|
# A (sym)link's file object is its target's file object.
|
|
return self.extractfile(self._getmember(tarinfo.linkname,
|
|
tarinfo))
|
|
else:
|
|
# If there's no data associated with the member (directory, chrdev,
|
|
# blkdev, etc.), return None instead of a file object.
|
|
return None
|
|
|
|
def _extract_member(self, tarinfo, targetpath):
|
|
"""Extract the TarInfo object tarinfo to a physical
|
|
file called targetpath.
|
|
"""
|
|
# Fetch the TarInfo object for the given name
|
|
# and build the destination pathname, replacing
|
|
# forward slashes to platform specific separators.
|
|
if targetpath[-1:] == "/":
|
|
targetpath = targetpath[:-1]
|
|
targetpath = os.path.normpath(targetpath)
|
|
|
|
# Create all upper directories.
|
|
upperdirs = os.path.dirname(targetpath)
|
|
if upperdirs and not os.path.exists(upperdirs):
|
|
ti = TarInfo()
|
|
ti.name = upperdirs
|
|
ti.type = DIRTYPE
|
|
ti.mode = 0777
|
|
ti.mtime = tarinfo.mtime
|
|
ti.uid = tarinfo.uid
|
|
ti.gid = tarinfo.gid
|
|
ti.uname = tarinfo.uname
|
|
ti.gname = tarinfo.gname
|
|
try:
|
|
self._extract_member(ti, ti.name)
|
|
except:
|
|
pass
|
|
|
|
if tarinfo.islnk() or tarinfo.issym():
|
|
self._dbg(1, "%s -> %s" % (tarinfo.name, tarinfo.linkname))
|
|
else:
|
|
self._dbg(1, tarinfo.name)
|
|
|
|
if tarinfo.isreg():
|
|
self.makefile(tarinfo, targetpath)
|
|
elif tarinfo.isdir():
|
|
self.makedir(tarinfo, targetpath)
|
|
elif tarinfo.isfifo():
|
|
self.makefifo(tarinfo, targetpath)
|
|
elif tarinfo.ischr() or tarinfo.isblk():
|
|
self.makedev(tarinfo, targetpath)
|
|
elif tarinfo.islnk() or tarinfo.issym():
|
|
self.makelink(tarinfo, targetpath)
|
|
elif tarinfo.type not in SUPPORTED_TYPES:
|
|
self.makeunknown(tarinfo, targetpath)
|
|
else:
|
|
self.makefile(tarinfo, targetpath)
|
|
|
|
self.chown(tarinfo, targetpath)
|
|
if not tarinfo.issym():
|
|
self.chmod(tarinfo, targetpath)
|
|
self.utime(tarinfo, targetpath)
|
|
|
|
#--------------------------------------------------------------------------
|
|
# Below are the different file methods. They are called via
|
|
# _extract_member() when extract() is called. They can be replaced in a
|
|
# subclass to implement other functionality.
|
|
|
|
def makedir(self, tarinfo, targetpath):
|
|
"""Make a directory called targetpath.
|
|
"""
|
|
try:
|
|
os.mkdir(targetpath)
|
|
except EnvironmentError, e:
|
|
if e.errno != errno.EEXIST:
|
|
raise
|
|
|
|
def makefile(self, tarinfo, targetpath):
|
|
"""Make a file called targetpath.
|
|
"""
|
|
source = self.extractfile(tarinfo)
|
|
target = file(targetpath, "wb")
|
|
copyfileobj(source, target)
|
|
source.close()
|
|
target.close()
|
|
|
|
def makeunknown(self, tarinfo, targetpath):
|
|
"""Make a file from a TarInfo object with an unknown type
|
|
at targetpath.
|
|
"""
|
|
self.makefile(tarinfo, targetpath)
|
|
self._dbg(1, "tarfile: Unknown file type %r, " \
|
|
"extracted as regular file." % tarinfo.type)
|
|
|
|
def makefifo(self, tarinfo, targetpath):
|
|
"""Make a fifo called targetpath.
|
|
"""
|
|
if hasattr(os, "mkfifo"):
|
|
os.mkfifo(targetpath)
|
|
else:
|
|
raise ExtractError("fifo not supported by system")
|
|
|
|
def makedev(self, tarinfo, targetpath):
|
|
"""Make a character or block device called targetpath.
|
|
"""
|
|
if not hasattr(os, "mknod") or not hasattr(os, "makedev"):
|
|
raise ExtractError("special devices not supported by system")
|
|
|
|
mode = tarinfo.mode
|
|
if tarinfo.isblk():
|
|
mode |= stat.S_IFBLK
|
|
else:
|
|
mode |= stat.S_IFCHR
|
|
|
|
os.mknod(targetpath, mode,
|
|
os.makedev(tarinfo.devmajor, tarinfo.devminor))
|
|
|
|
def makelink(self, tarinfo, targetpath):
|
|
"""Make a (symbolic) link called targetpath. If it cannot be created
|
|
(platform limitation), we try to make a copy of the referenced file
|
|
instead of a link.
|
|
"""
|
|
linkpath = tarinfo.linkname
|
|
try:
|
|
if tarinfo.issym():
|
|
os.symlink(linkpath, targetpath)
|
|
else:
|
|
# See extract().
|
|
os.link(tarinfo._link_target, targetpath)
|
|
except AttributeError:
|
|
if tarinfo.issym():
|
|
linkpath = os.path.join(os.path.dirname(tarinfo.name),
|
|
linkpath)
|
|
linkpath = normpath(linkpath)
|
|
|
|
try:
|
|
self._extract_member(self.getmember(linkpath), targetpath)
|
|
except (EnvironmentError, KeyError), e:
|
|
linkpath = os.path.normpath(linkpath)
|
|
try:
|
|
shutil.copy2(linkpath, targetpath)
|
|
except EnvironmentError, e:
|
|
raise IOError("link could not be created")
|
|
|
|
def chown(self, tarinfo, targetpath):
|
|
"""Set owner of targetpath according to tarinfo.
|
|
"""
|
|
if pwd and hasattr(os, "geteuid") and os.geteuid() == 0:
|
|
# We have to be root to do so.
|
|
try:
|
|
g = grp.getgrnam(tarinfo.gname)[2]
|
|
except KeyError:
|
|
try:
|
|
g = grp.getgrgid(tarinfo.gid)[2]
|
|
except KeyError:
|
|
g = os.getgid()
|
|
try:
|
|
u = pwd.getpwnam(tarinfo.uname)[2]
|
|
except KeyError:
|
|
try:
|
|
u = pwd.getpwuid(tarinfo.uid)[2]
|
|
except KeyError:
|
|
u = os.getuid()
|
|
try:
|
|
if tarinfo.issym() and hasattr(os, "lchown"):
|
|
os.lchown(targetpath, u, g)
|
|
else:
|
|
if sys.platform != "os2emx":
|
|
os.chown(targetpath, u, g)
|
|
except EnvironmentError, e:
|
|
raise ExtractError("could not change owner")
|
|
|
|
def chmod(self, tarinfo, targetpath):
|
|
"""Set file permissions of targetpath according to tarinfo.
|
|
"""
|
|
if hasattr(os, 'chmod'):
|
|
try:
|
|
os.chmod(targetpath, tarinfo.mode)
|
|
except EnvironmentError, e:
|
|
raise ExtractError("could not change mode")
|
|
|
|
def utime(self, tarinfo, targetpath):
|
|
"""Set modification time of targetpath according to tarinfo.
|
|
"""
|
|
if not hasattr(os, 'utime'):
|
|
return
|
|
if sys.platform == "win32" and tarinfo.isdir():
|
|
# According to msdn.microsoft.com, it is an error (EACCES)
|
|
# to use utime() on directories.
|
|
return
|
|
try:
|
|
os.utime(targetpath, (tarinfo.mtime, tarinfo.mtime))
|
|
except EnvironmentError, e:
|
|
raise ExtractError("could not change modification time")
|
|
|
|
#--------------------------------------------------------------------------
|
|
def next(self):
|
|
"""Return the next member of the archive as a TarInfo object, when
|
|
TarFile is opened for reading. Return None if there is no more
|
|
available.
|
|
"""
|
|
self._check("ra")
|
|
if self.firstmember is not None:
|
|
m = self.firstmember
|
|
self.firstmember = None
|
|
return m
|
|
|
|
# Read the next block.
|
|
self.fileobj.seek(self.offset)
|
|
while True:
|
|
buf = self.fileobj.read(BLOCKSIZE)
|
|
if not buf:
|
|
return None
|
|
|
|
try:
|
|
tarinfo = TarInfo.frombuf(buf)
|
|
|
|
# Set the TarInfo object's offset to the current position of the
|
|
# TarFile and set self.offset to the position where the data blocks
|
|
# should begin.
|
|
tarinfo.offset = self.offset
|
|
self.offset += BLOCKSIZE
|
|
|
|
tarinfo = self.proc_member(tarinfo)
|
|
|
|
except ValueError, e:
|
|
if self.ignore_zeros:
|
|
self._dbg(2, "0x%X: empty or invalid block: %s" %
|
|
(self.offset, e))
|
|
self.offset += BLOCKSIZE
|
|
continue
|
|
else:
|
|
if self.offset == 0:
|
|
raise ReadError("empty, unreadable or compressed "
|
|
"file: %s" % e)
|
|
return None
|
|
break
|
|
|
|
# Some old tar programs represent a directory as a regular
|
|
# file with a trailing slash.
|
|
if tarinfo.isreg() and tarinfo.name.endswith("/"):
|
|
tarinfo.type = DIRTYPE
|
|
|
|
# The prefix field is used for filenames > 100 in
|
|
# the POSIX standard.
|
|
# name = prefix + '/' + name
|
|
tarinfo.name = normpath(os.path.join(tarinfo.prefix.rstrip(NUL),
|
|
tarinfo.name))
|
|
|
|
# Directory names should have a '/' at the end.
|
|
if tarinfo.isdir():
|
|
tarinfo.name += "/"
|
|
|
|
self.members.append(tarinfo)
|
|
return tarinfo
|
|
|
|
#--------------------------------------------------------------------------
|
|
# The following are methods that are called depending on the type of a
|
|
# member. The entry point is proc_member() which is called with a TarInfo
|
|
# object created from the header block from the current offset. The
|
|
# proc_member() method can be overridden in a subclass to add custom
|
|
# proc_*() methods. A proc_*() method MUST implement the following
|
|
# operations:
|
|
# 1. Set tarinfo.offset_data to the position where the data blocks begin,
|
|
# if there is data that follows.
|
|
# 2. Set self.offset to the position where the next member's header will
|
|
# begin.
|
|
# 3. Return tarinfo or another valid TarInfo object.
|
|
def proc_member(self, tarinfo):
|
|
"""Choose the right processing method for tarinfo depending
|
|
on its type and call it.
|
|
"""
|
|
if tarinfo.type in (GNUTYPE_LONGNAME, GNUTYPE_LONGLINK):
|
|
return self.proc_gnulong(tarinfo)
|
|
elif tarinfo.type == GNUTYPE_SPARSE:
|
|
return self.proc_sparse(tarinfo)
|
|
else:
|
|
return self.proc_builtin(tarinfo)
|
|
|
|
def proc_builtin(self, tarinfo):
|
|
"""Process a builtin type member or an unknown member
|
|
which will be treated as a regular file.
|
|
"""
|
|
tarinfo.offset_data = self.offset
|
|
if tarinfo.isreg() or tarinfo.type not in SUPPORTED_TYPES:
|
|
# Skip the following data blocks.
|
|
self.offset += self._block(tarinfo.size)
|
|
return tarinfo
|
|
|
|
def proc_gnulong(self, tarinfo):
|
|
"""Process the blocks that hold a GNU longname
|
|
or longlink member.
|
|
"""
|
|
buf = ""
|
|
count = tarinfo.size
|
|
while count > 0:
|
|
block = self.fileobj.read(BLOCKSIZE)
|
|
buf += block
|
|
self.offset += BLOCKSIZE
|
|
count -= BLOCKSIZE
|
|
|
|
# Fetch the next header and process it.
|
|
b = self.fileobj.read(BLOCKSIZE)
|
|
t = TarInfo.frombuf(b)
|
|
t.offset = self.offset
|
|
self.offset += BLOCKSIZE
|
|
next = self.proc_member(t)
|
|
|
|
# Patch the TarInfo object from the next header with
|
|
# the longname information.
|
|
next.offset = tarinfo.offset
|
|
if tarinfo.type == GNUTYPE_LONGNAME:
|
|
next.name = buf.rstrip(NUL)
|
|
elif tarinfo.type == GNUTYPE_LONGLINK:
|
|
next.linkname = buf.rstrip(NUL)
|
|
|
|
return next
|
|
|
|
def proc_sparse(self, tarinfo):
|
|
"""Process a GNU sparse header plus extra headers.
|
|
"""
|
|
buf = tarinfo.buf
|
|
sp = _ringbuffer()
|
|
pos = 386
|
|
lastpos = 0L
|
|
realpos = 0L
|
|
# There are 4 possible sparse structs in the
|
|
# first header.
|
|
for i in xrange(4):
|
|
try:
|
|
offset = nti(buf[pos:pos + 12])
|
|
numbytes = nti(buf[pos + 12:pos + 24])
|
|
except ValueError:
|
|
break
|
|
if offset > lastpos:
|
|
sp.append(_hole(lastpos, offset - lastpos))
|
|
sp.append(_data(offset, numbytes, realpos))
|
|
realpos += numbytes
|
|
lastpos = offset + numbytes
|
|
pos += 24
|
|
|
|
isextended = ord(buf[482])
|
|
origsize = nti(buf[483:495])
|
|
|
|
# If the isextended flag is given,
|
|
# there are extra headers to process.
|
|
while isextended == 1:
|
|
buf = self.fileobj.read(BLOCKSIZE)
|
|
self.offset += BLOCKSIZE
|
|
pos = 0
|
|
for i in xrange(21):
|
|
try:
|
|
offset = nti(buf[pos:pos + 12])
|
|
numbytes = nti(buf[pos + 12:pos + 24])
|
|
except ValueError:
|
|
break
|
|
if offset > lastpos:
|
|
sp.append(_hole(lastpos, offset - lastpos))
|
|
sp.append(_data(offset, numbytes, realpos))
|
|
realpos += numbytes
|
|
lastpos = offset + numbytes
|
|
pos += 24
|
|
isextended = ord(buf[504])
|
|
|
|
if lastpos < origsize:
|
|
sp.append(_hole(lastpos, origsize - lastpos))
|
|
|
|
tarinfo.sparse = sp
|
|
|
|
tarinfo.offset_data = self.offset
|
|
self.offset += self._block(tarinfo.size)
|
|
tarinfo.size = origsize
|
|
|
|
# Clear the prefix field so that it is not used
|
|
# as a pathname in next().
|
|
tarinfo.prefix = ""
|
|
|
|
return tarinfo
|
|
|
|
#--------------------------------------------------------------------------
|
|
# Little helper methods:
|
|
|
|
def _block(self, count):
|
|
"""Round up a byte count by BLOCKSIZE and return it,
|
|
e.g. _block(834) => 1024.
|
|
"""
|
|
blocks, remainder = divmod(count, BLOCKSIZE)
|
|
if remainder:
|
|
blocks += 1
|
|
return blocks * BLOCKSIZE
|
|
|
|
def _getmember(self, name, tarinfo=None):
|
|
"""Find an archive member by name from bottom to top.
|
|
If tarinfo is given, it is used as the starting point.
|
|
"""
|
|
# Ensure that all members have been loaded.
|
|
members = self.getmembers()
|
|
|
|
if tarinfo is None:
|
|
end = len(members)
|
|
else:
|
|
end = members.index(tarinfo)
|
|
|
|
for i in xrange(end - 1, -1, -1):
|
|
if name == members[i].name:
|
|
return members[i]
|
|
|
|
def _load(self):
|
|
"""Read through the entire archive file and look for readable
|
|
members.
|
|
"""
|
|
while True:
|
|
tarinfo = self.next()
|
|
if tarinfo is None:
|
|
break
|
|
self._loaded = True
|
|
|
|
def _check(self, mode=None):
|
|
"""Check if TarFile is still open, and if the operation's mode
|
|
corresponds to TarFile's mode.
|
|
"""
|
|
if self.closed:
|
|
raise IOError("%s is closed" % self.__class__.__name__)
|
|
if mode is not None and self._mode not in mode:
|
|
raise IOError("bad operation for mode %r" % self._mode)
|
|
|
|
def __iter__(self):
|
|
"""Provide an iterator object.
|
|
"""
|
|
if self._loaded:
|
|
return iter(self.members)
|
|
else:
|
|
return TarIter(self)
|
|
|
|
def _create_gnulong(self, name, type):
|
|
"""Write a GNU longname/longlink member to the TarFile.
|
|
It consists of an extended tar header, with the length
|
|
of the longname as size, followed by data blocks,
|
|
which contain the longname as a null terminated string.
|
|
"""
|
|
name += NUL
|
|
|
|
tarinfo = TarInfo()
|
|
tarinfo.name = "././@LongLink"
|
|
tarinfo.type = type
|
|
tarinfo.mode = 0
|
|
tarinfo.size = len(name)
|
|
|
|
# write extended header
|
|
self.fileobj.write(tarinfo.tobuf())
|
|
self.offset += BLOCKSIZE
|
|
# write name blocks
|
|
self.fileobj.write(name)
|
|
blocks, remainder = divmod(tarinfo.size, BLOCKSIZE)
|
|
if remainder > 0:
|
|
self.fileobj.write(NUL * (BLOCKSIZE - remainder))
|
|
blocks += 1
|
|
self.offset += blocks * BLOCKSIZE
|
|
|
|
def _dbg(self, level, msg):
|
|
"""Write debugging output to sys.stderr.
|
|
"""
|
|
if level <= self.debug:
|
|
print >> sys.stderr, msg
|
|
# class TarFile
|
|
|
|
class TarIter:
|
|
"""Iterator Class.
|
|
|
|
for tarinfo in TarFile(...):
|
|
suite...
|
|
"""
|
|
|
|
def __init__(self, tarfile):
|
|
"""Construct a TarIter object.
|
|
"""
|
|
self.tarfile = tarfile
|
|
self.index = 0
|
|
def __iter__(self):
|
|
"""Return iterator object.
|
|
"""
|
|
return self
|
|
def next(self):
|
|
"""Return the next item using TarFile's next() method.
|
|
When all members have been read, set TarFile as _loaded.
|
|
"""
|
|
# Fix for SF #1100429: Under rare circumstances it can
|
|
# happen that getmembers() is called during iteration,
|
|
# which will cause TarIter to stop prematurely.
|
|
if not self.tarfile._loaded:
|
|
tarinfo = self.tarfile.next()
|
|
if not tarinfo:
|
|
self.tarfile._loaded = True
|
|
raise StopIteration
|
|
else:
|
|
try:
|
|
tarinfo = self.tarfile.members[self.index]
|
|
except IndexError:
|
|
raise StopIteration
|
|
self.index += 1
|
|
return tarinfo
|
|
|
|
# Helper classes for sparse file support
|
|
class _section:
|
|
"""Base class for _data and _hole.
|
|
"""
|
|
def __init__(self, offset, size):
|
|
self.offset = offset
|
|
self.size = size
|
|
def __contains__(self, offset):
|
|
return self.offset <= offset < self.offset + self.size
|
|
|
|
class _data(_section):
|
|
"""Represent a data section in a sparse file.
|
|
"""
|
|
def __init__(self, offset, size, realpos):
|
|
_section.__init__(self, offset, size)
|
|
self.realpos = realpos
|
|
|
|
class _hole(_section):
|
|
"""Represent a hole section in a sparse file.
|
|
"""
|
|
pass
|
|
|
|
class _ringbuffer(list):
|
|
"""Ringbuffer class which increases performance
|
|
over a regular list.
|
|
"""
|
|
def __init__(self):
|
|
self.idx = 0
|
|
def find(self, offset):
|
|
idx = self.idx
|
|
while True:
|
|
item = self[idx]
|
|
if offset in item:
|
|
break
|
|
idx += 1
|
|
if idx == len(self):
|
|
idx = 0
|
|
if idx == self.idx:
|
|
# End of File
|
|
return None
|
|
self.idx = idx
|
|
return item
|
|
|
|
#---------------------------------------------
|
|
# zipfile compatible TarFile class
|
|
#---------------------------------------------
|
|
TAR_PLAIN = 0 # zipfile.ZIP_STORED
|
|
TAR_GZIPPED = 8 # zipfile.ZIP_DEFLATED
|
|
class TarFileCompat:
|
|
"""TarFile class compatible with standard module zipfile's
|
|
ZipFile class.
|
|
"""
|
|
def __init__(self, file, mode="r", compression=TAR_PLAIN):
|
|
if compression == TAR_PLAIN:
|
|
self.tarfile = TarFile.taropen(file, mode)
|
|
elif compression == TAR_GZIPPED:
|
|
self.tarfile = TarFile.gzopen(file, mode)
|
|
else:
|
|
raise ValueError("unknown compression constant")
|
|
if mode[0:1] == "r":
|
|
members = self.tarfile.getmembers()
|
|
for m in members:
|
|
m.filename = m.name
|
|
m.file_size = m.size
|
|
m.date_time = time.gmtime(m.mtime)[:6]
|
|
def namelist(self):
|
|
return map(lambda m: m.name, self.infolist())
|
|
def infolist(self):
|
|
return filter(lambda m: m.type in REGULAR_TYPES,
|
|
self.tarfile.getmembers())
|
|
def printdir(self):
|
|
self.tarfile.list()
|
|
def testzip(self):
|
|
return
|
|
def getinfo(self, name):
|
|
return self.tarfile.getmember(name)
|
|
def read(self, name):
|
|
return self.tarfile.extractfile(self.tarfile.getmember(name)).read()
|
|
def write(self, filename, arcname=None, compress_type=None):
|
|
self.tarfile.add(filename, arcname)
|
|
def writestr(self, zinfo, bytes):
|
|
try:
|
|
from cStringIO import StringIO
|
|
except ImportError:
|
|
from StringIO import StringIO
|
|
import calendar
|
|
zinfo.name = zinfo.filename
|
|
zinfo.size = zinfo.file_size
|
|
zinfo.mtime = calendar.timegm(zinfo.date_time)
|
|
self.tarfile.addfile(zinfo, StringIO(bytes))
|
|
def close(self):
|
|
self.tarfile.close()
|
|
#class TarFileCompat
|
|
|
|
#--------------------
|
|
# exported functions
|
|
#--------------------
|
|
def is_tarfile(name):
|
|
"""Return True if name points to a tar archive that we
|
|
are able to handle, else return False.
|
|
"""
|
|
try:
|
|
t = open(name)
|
|
t.close()
|
|
return True
|
|
except TarError:
|
|
return False
|
|
|
|
open = TarFile.open
|