More trivial comment -> docstring transformations by Ka-Ping Yee, who writes:

Here is batch 2, as a big collection of CVS context diffs.
Along with moving comments into docstrings, i've added a
couple of missing docstrings and attempted to make sure more
module docstrings begin with a one-line summary.

I did not add docstrings to the methods in profile.py for
fear of upsetting any careful optimizations there, though
i did move class documentation into class docstrings.

The convention i'm using is to leave credits/version/copyright
type of stuff in # comments, and move the rest of the descriptive
stuff about module usage into module docstrings.  Hope this is
okay.
Guido van Rossum 2000-02-04 15:10:34 +00:00
parent 8b6323d3ef
commit 54f22ed30b
30 changed files with 1547 additions and 1792 deletions
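
As a concrete illustration of the convention described in the message above -- credits stay in # comments, while the descriptive text moves into the module docstring -- here is a small before/after sketch for a hypothetical module (not one of the files changed below):

    # Before (hypothetical module, for illustration only)
    # Module 'spam' -- common operations on spam
    # Written by A. Hypothetical Contributor
    import string

    # After
    """Module 'spam' -- common operations on spam"""
    # Written by A. Hypothetical Contributor
    import string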

@@ -1,64 +1,64 @@
# Module 'dospath' -- common operations on DOS pathnames
"""Module 'dospath' -- common operations on DOS pathnames"""
import os
import stat
import string
# Normalize the case of a pathname.
# On MS-DOS it maps the pathname to lowercase, turns slashes into
# backslashes.
# Other normalizations (such as optimizing '../' away) are not allowed
# (this is done by normpath).
# Previously, this version mapped invalid consecutive characters to a
# single '_', but this has been removed. This functionality should
# possibly be added as a new function.
def normcase(s):
return string.lower(string.replace(s, "/", "\\"))
"""Normalize the case of a pathname.
On MS-DOS it maps the pathname to lowercase, turns slashes into
backslashes.
Other normalizations (such as optimizing '../' away) are not allowed
(this is done by normpath).
Previously, this version mapped invalid consecutive characters to a
single '_', but this has been removed. This functionality should
possibly be added as a new function."""
return string.lower(string.replace(s, "/", "\\"))
# Return wheter a path is absolute.
# Trivial in Posix, harder on the Mac or MS-DOS.
# For DOS it is absolute if it starts with a slash or backslash (current
# volume), or if a pathname after the volume letter and colon starts with
# a slash or backslash.
def isabs(s):
s = splitdrive(s)[1]
return s != '' and s[:1] in '/\\'
"""Return whether a path is absolute.
Trivial in Posix, harder on the Mac or MS-DOS.
For DOS it is absolute if it starts with a slash or backslash (current
volume), or if a pathname after the volume letter and colon starts with
a slash or backslash."""
s = splitdrive(s)[1]
return s != '' and s[:1] in '/\\'
# Join two (or more) paths.
def join(a, *p):
path = a
for b in p:
if isabs(b):
path = b
elif path == '' or path[-1:] in '/\\':
path = path + b
else:
path = path + os.sep + b
return path
"""Join two (or more) paths."""
path = a
for b in p:
if isabs(b):
path = b
elif path == '' or path[-1:] in '/\\':
path = path + b
else:
path = path + os.sep + b
return path
# Split a path in a drive specification (a drive letter followed by a
# colon) and the path specification.
# It is always true that drivespec + pathspec == p
def splitdrive(p):
if p[1:2] == ':':
return p[0:2], p[2:]
return '', p
"""Split a path into a drive specification (a drive letter followed
by a colon) and path specification.
It is always true that drivespec + pathspec == p."""
if p[1:2] == ':':
return p[0:2], p[2:]
return '', p
# Split a path in head (everything up to the last '/') and tail (the
# rest). After the trailing '/' is stripped, the invariant
# join(head, tail) == p holds.
# The resulting head won't end in '/' unless it is the root.
def split(p):
"""Split a path into head (everything up to the last '/') and tail
(the rest). After the trailing '/' is stripped, the invariant
join(head, tail) == p holds.
The resulting head won't end in '/' unless it is the root."""
d, p = splitdrive(p)
# set i to index beyond p's last slash
i = len(p)
@@ -73,47 +73,47 @@ def split(p):
return d + head, tail
# Split a path in root and extension.
# The extension is everything starting at the first dot in the last
# pathname component; the root is everything before that.
# It is always true that root + ext == p.
def splitext(p):
root, ext = '', ''
for c in p:
if c in '/\\':
root, ext = root + ext + c, ''
elif c == '.' or ext:
ext = ext + c
else:
root = root + c
return root, ext
"""Split a path into root and extension.
The extension is everything starting at the first dot in the last
pathname component; the root is everything before that.
It is always true that root + ext == p."""
root, ext = '', ''
for c in p:
if c in '/\\':
root, ext = root + ext + c, ''
elif c == '.' or ext:
ext = ext + c
else:
root = root + c
return root, ext
# Return the tail (basename) part of a path.
def basename(p):
return split(p)[1]
"""Return the tail (basename) part of a path."""
return split(p)[1]
# Return the head (dirname) part of a path.
def dirname(p):
return split(p)[0]
"""Return the head (dirname) part of a path."""
return split(p)[0]
# Return the longest prefix of all list elements.
def commonprefix(m):
if not m: return ''
prefix = m[0]
for item in m:
for i in range(len(prefix)):
if prefix[:i+1] <> item[:i+1]:
prefix = prefix[:i]
if i == 0: return ''
break
return prefix
"""Return the longest prefix of all list elements."""
if not m: return ''
prefix = m[0]
for item in m:
for i in range(len(prefix)):
if prefix[:i+1] <> item[:i+1]:
prefix = prefix[:i]
if i == 0: return ''
break
return prefix
# Get size, mtime, atime of files.
@@ -134,200 +134,196 @@ def getatime(filename):
return st[stat.ST_MTIME]
# Is a path a symbolic link?
# This will always return false on systems where posix.lstat doesn't exist.
def islink(path):
return 0
"""Is a path a symbolic link?
This will always return false on systems where posix.lstat doesn't exist."""
return 0
# Does a path exist?
# This is false for dangling symbolic links.
def exists(path):
try:
st = os.stat(path)
except os.error:
return 0
return 1
"""Does a path exist?
This is false for dangling symbolic links."""
try:
st = os.stat(path)
except os.error:
return 0
return 1
# Is a path a dos directory?
# This follows symbolic links, so both islink() and isdir() can be true
# for the same path.
def isdir(path):
try:
st = os.stat(path)
except os.error:
return 0
return stat.S_ISDIR(st[stat.ST_MODE])
"""Is a path a dos directory?"""
try:
st = os.stat(path)
except os.error:
return 0
return stat.S_ISDIR(st[stat.ST_MODE])
# Is a path a regular file?
# This follows symbolic links, so both islink() and isdir() can be true
# for the same path.
def isfile(path):
try:
st = os.stat(path)
except os.error:
return 0
return stat.S_ISREG(st[stat.ST_MODE])
"""Is a path a regular file?"""
try:
st = os.stat(path)
except os.error:
return 0
return stat.S_ISREG(st[stat.ST_MODE])
# Is a path a mount point?
# XXX This degenerates in: 'is this the root?' on DOS
def ismount(path):
return isabs(splitdrive(path)[1])
"""Is a path a mount point?"""
# XXX This degenerates in: 'is this the root?' on DOS
return isabs(splitdrive(path)[1])
# Directory tree walk.
# For each directory under top (including top itself, but excluding
# '.' and '..'), func(arg, dirname, filenames) is called, where
# dirname is the name of the directory and filenames is the list
# files files (and subdirectories etc.) in the directory.
# The func may modify the filenames list, to implement a filter,
# or to impose a different order of visiting.
def walk(top, func, arg):
try:
names = os.listdir(top)
except os.error:
return
func(arg, top, names)
exceptions = ('.', '..')
for name in names:
if name not in exceptions:
name = join(top, name)
if isdir(name):
walk(name, func, arg)
"""Directory tree walk.
For each directory under top (including top itself, but excluding
'.' and '..'), func(arg, dirname, filenames) is called, where
dirname is the name of the directory and filenames is the list
files files (and subdirectories etc.) in the directory.
The func may modify the filenames list, to implement a filter,
or to impose a different order of visiting."""
try:
names = os.listdir(top)
except os.error:
return
func(arg, top, names)
exceptions = ('.', '..')
for name in names:
if name not in exceptions:
name = join(top, name)
if isdir(name):
walk(name, func, arg)
# Expand paths beginning with '~' or '~user'.
# '~' means $HOME; '~user' means that user's home directory.
# If the path doesn't begin with '~', or if the user or $HOME is unknown,
# the path is returned unchanged (leaving error reporting to whatever
# function is called with the expanded path as argument).
# See also module 'glob' for expansion of *, ? and [...] in pathnames.
# (A function should also be defined to do full *sh-style environment
# variable expansion.)
def expanduser(path):
if path[:1] <> '~':
return path
i, n = 1, len(path)
while i < n and path[i] not in '/\\':
i = i+1
if i == 1:
if not os.environ.has_key('HOME'):
return path
userhome = os.environ['HOME']
else:
return path
return userhome + path[i:]
"""Expand paths beginning with '~' or '~user'.
'~' means $HOME; '~user' means that user's home directory.
If the path doesn't begin with '~', or if the user or $HOME is unknown,
the path is returned unchanged (leaving error reporting to whatever
function is called with the expanded path as argument).
See also module 'glob' for expansion of *, ? and [...] in pathnames.
(A function should also be defined to do full *sh-style environment
variable expansion.)"""
if path[:1] <> '~':
return path
i, n = 1, len(path)
while i < n and path[i] not in '/\\':
i = i+1
if i == 1:
if not os.environ.has_key('HOME'):
return path
userhome = os.environ['HOME']
else:
return path
return userhome + path[i:]
# Expand paths containing shell variable substitutions.
# The following rules apply:
# - no expansion within single quotes
# - no escape character, except for '$$' which is translated into '$'
# - ${varname} is accepted.
# - varnames can be made out of letters, digits and the character '_'
# XXX With COMMAND.COM you can use any characters in a variable name,
# XXX except '^|<>='.
varchars = string.letters + string.digits + '_-'
def expandvars(path):
if '$' not in path:
return path
res = ''
index = 0
pathlen = len(path)
while index < pathlen:
c = path[index]
if c == '\'': # no expansion within single quotes
path = path[index + 1:]
pathlen = len(path)
try:
index = string.index(path, '\'')
res = res + '\'' + path[:index + 1]
except string.index_error:
res = res + path
index = pathlen -1
elif c == '$': # variable or '$$'
if path[index + 1:index + 2] == '$':
res = res + c
index = index + 1
elif path[index + 1:index + 2] == '{':
path = path[index+2:]
pathlen = len(path)
try:
index = string.index(path, '}')
var = path[:index]
if os.environ.has_key(var):
res = res + os.environ[var]
except string.index_error:
res = res + path
index = pathlen - 1
else:
var = ''
index = index + 1
c = path[index:index + 1]
while c != '' and c in varchars:
var = var + c
index = index + 1
c = path[index:index + 1]
if os.environ.has_key(var):
res = res + os.environ[var]
if c != '':
res = res + c
else:
res = res + c
index = index + 1
return res
"""Expand paths containing shell variable substitutions.
The following rules apply:
- no expansion within single quotes
- no escape character, except for '$$' which is translated into '$'
- ${varname} is accepted.
- varnames can be made out of letters, digits and the character '_'"""
# XXX With COMMAND.COM you can use any characters in a variable name,
# XXX except '^|<>='.
if '$' not in path:
return path
res = ''
index = 0
pathlen = len(path)
while index < pathlen:
c = path[index]
if c == '\'': # no expansion within single quotes
path = path[index + 1:]
pathlen = len(path)
try:
index = string.index(path, '\'')
res = res + '\'' + path[:index + 1]
except string.index_error:
res = res + path
index = pathlen -1
elif c == '$': # variable or '$$'
if path[index + 1:index + 2] == '$':
res = res + c
index = index + 1
elif path[index + 1:index + 2] == '{':
path = path[index+2:]
pathlen = len(path)
try:
index = string.index(path, '}')
var = path[:index]
if os.environ.has_key(var):
res = res + os.environ[var]
except string.index_error:
res = res + path
index = pathlen - 1
else:
var = ''
index = index + 1
c = path[index:index + 1]
while c != '' and c in varchars:
var = var + c
index = index + 1
c = path[index:index + 1]
if os.environ.has_key(var):
res = res + os.environ[var]
if c != '':
res = res + c
else:
res = res + c
index = index + 1
return res
# Normalize a path, e.g. A//B, A/./B and A/foo/../B all become A/B.
# Also, components of the path are silently truncated to 8+3 notation.
def normpath(path):
path = string.replace(path, "/", "\\")
prefix, path = splitdrive(path)
while path[:1] == os.sep:
prefix = prefix + os.sep
path = path[1:]
comps = string.splitfields(path, os.sep)
i = 0
while i < len(comps):
if comps[i] == '.':
del comps[i]
elif comps[i] == '..' and i > 0 and \
comps[i-1] not in ('', '..'):
del comps[i-1:i+1]
i = i-1
elif comps[i] == '' and i > 0 and comps[i-1] <> '':
del comps[i]
elif '.' in comps[i]:
comp = string.splitfields(comps[i], '.')
comps[i] = comp[0][:8] + '.' + comp[1][:3]
i = i+1
elif len(comps[i]) > 8:
comps[i] = comps[i][:8]
i = i+1
else:
i = i+1
# If the path is now empty, substitute '.'
if not prefix and not comps:
comps.append('.')
return prefix + string.joinfields(comps, os.sep)
"""Normalize a path, e.g. A//B, A/./B and A/foo/../B all become A/B.
Also, components of the path are silently truncated to 8+3 notation."""
path = string.replace(path, "/", "\\")
prefix, path = splitdrive(path)
while path[:1] == os.sep:
prefix = prefix + os.sep
path = path[1:]
comps = string.splitfields(path, os.sep)
i = 0
while i < len(comps):
if comps[i] == '.':
del comps[i]
elif comps[i] == '..' and i > 0 and \
comps[i-1] not in ('', '..'):
del comps[i-1:i+1]
i = i-1
elif comps[i] == '' and i > 0 and comps[i-1] <> '':
del comps[i]
elif '.' in comps[i]:
comp = string.splitfields(comps[i], '.')
comps[i] = comp[0][:8] + '.' + comp[1][:3]
i = i+1
elif len(comps[i]) > 8:
comps[i] = comps[i][:8]
i = i+1
else:
i = i+1
# If the path is now empty, substitute '.'
if not prefix and not comps:
comps.append('.')
return prefix + string.joinfields(comps, os.sep)
# Return an absolute path.
def abspath(path):
"""Return an absolute path."""
if not isabs(path):
path = join(os.getcwd(), path)
return normpath(path)
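
One practical effect of this transformation, sketched below in the print-statement syntax used throughout these files: text that previously lived only in # comments is now reachable at runtime through __doc__ (the output lines are copied from the docstrings shown above).

    >>> import dospath
    >>> print dospath.__doc__
    Module 'dospath' -- common operations on DOS pathnames
    >>> print dospath.basename.__doc__
    Return the tail (basename) part of a path.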

@@ -1,318 +1,57 @@
"""Utilities for comparing files and directories.
"""Compare files."""
Classes:
dircmp
Functions:
cmp(f1, f2, shallow=1, use_statcache=0) -> int
cmpfiles(a, b, common) -> ([], [], [])
"""
import os
import stat
import statcache
import os, stat, statcache
_cache = {}
BUFSIZE=8*1024
def cmp(f1, f2, shallow=1,use_statcache=0):
"""Compare two files.
def cmp(f1, f2, shallow=1,use_statcache=0):
"""Compare two files.
Arguments:
Arguments:
f1 -- First file name
f1 -- First file name
f2 -- Second file name
f2 -- Second file name
shallow -- Just check stat signature (do not read the files).
defaults to 1.
shallow -- Just check stat signature (do not read the files).
defaults to 1.
use_statcache -- Do not stat() each file directly: go through
the statcache module for more efficiency.
use_statcache -- Do not stat() each file directly: go through
the statcache module for more efficiency.
Return value:
Return value:
integer -- 1 if the files are the same, 0 otherwise.
integer -- 1 if the files are the same, 0 otherwise.
This function uses a cache for past comparisons and the results,
with a cache invalidation mechanism relying on stale signatures.
Of course, if 'use_statcache' is true, this mechanism is defeated,
and the cache will never grow stale.
This function uses a cache for past comparisons and the results,
with a cache invalidation mechanism relying on stale signatures.
Of course, if 'use_statcache' is true, this mechanism is defeated,
and the cache will never grow stale.
"""
stat_function = (os.stat, statcache.stat)[use_statcache]
s1, s2 = _sig(stat_function(f1)), _sig(stat_function(f2))
if s1[0]!=stat.S_IFREG or s2[0]!=stat.S_IFREG: return 0
if shallow and s1 == s2: return 1
if s1[1]!=s2[1]: return 0
"""
stat_function = (os.stat, statcache.stat)[use_statcache]
s1, s2 = _sig(stat_function(f1)), _sig(stat_function(f2))
if s1[0]!=stat.S_IFREG or s2[0]!=stat.S_IFREG: return 0
if shallow and s1 == s2: return 1
if s1[1]!=s2[1]: return 0
result = _cache.get((f1, f2))
if result and (s1, s2)==result[:2]:
return result[2]
outcome = _do_cmp(f1, f2)
_cache[f1, f2] = s1, s2, outcome
return outcome
result = _cache.get((f1, f2))
if result and (s1, s2)==result[:2]:
return result[2]
outcome = _do_cmp(f1, f2)
_cache[f1, f2] = s1, s2, outcome
return outcome
def _sig(st):
return (stat.S_IFMT(st[stat.ST_MODE]),
st[stat.ST_SIZE],
st[stat.ST_MTIME])
return (stat.S_IFMT(st[stat.ST_MODE]),
st[stat.ST_SIZE],
st[stat.ST_MTIME])
def _do_cmp(f1, f2):
bufsize = BUFSIZE
fp1 , fp2 = open(f1, 'rb'), open(f2, 'rb')
while 1:
b1, b2 = fp1.read(bufsize), fp2.read(bufsize)
if b1!=b2: return 0
if not b1: return 1
# Directory comparison class.
#
class dircmp:
"""A class that manages the comparison of 2 directories.
dircmp(a,b,ignore=None,hide=None)
A and B are directories.
IGNORE is a list of names to ignore,
defaults to ['RCS', 'CVS', 'tags'].
HIDE is a list of names to hide,
defaults to [os.curdir, os.pardir].
High level usage:
x = dircmp(dir1, dir2)
x.report() -> prints a report on the differences between dir1 and dir2
or
x.report_partial_closure() -> prints report on differences between dir1
and dir2, and reports on common immediate subdirectories.
x.report_full_closure() -> like report_partial_closure,
but fully recursive.
Attributes:
left_list, right_list: The files in dir1 and dir2,
filtered by hide and ignore.
common: a list of names in both dir1 and dir2.
left_only, right_only: names only in dir1, dir2.
common_dirs: subdirectories in both dir1 and dir2.
common_files: files in both dir1 and dir2.
common_funny: names in both dir1 and dir2 where the type differs between
dir1 and dir2, or the name is not stat-able.
same_files: list of identical files.
diff_files: list of filenames which differ.
funny_files: list of files which could not be compared.
subdirs: a dictionary of dircmp objects, keyed by names in common_dirs.
"""
def __init__(self, a, b, ignore=None, hide=None): # Initialize
self.left = a
self.right = b
if hide is None:
self.hide = [os.curdir, os.pardir] # Names never to be shown
else:
self.hide = hide
if ignore is None:
self.ignore = ['RCS', 'CVS', 'tags'] # Names ignored in comparison
else:
self.ignore = ignore
def phase0(self): # Compare everything except common subdirectories
self.left_list = _filter(os.listdir(self.left),
self.hide+self.ignore)
self.right_list = _filter(os.listdir(self.right),
self.hide+self.ignore)
self.left_list.sort()
self.right_list.sort()
__p4_attrs = ('subdirs',)
__p3_attrs = ('same_files', 'diff_files', 'funny_files')
__p2_attrs = ('common_dirs', 'common_files', 'common_funny')
__p1_attrs = ('common', 'left_only', 'right_only')
__p0_attrs = ('left_list', 'right_list')
def __getattr__(self, attr):
if attr in self.__p4_attrs:
self.phase4()
elif attr in self.__p3_attrs:
self.phase3()
elif attr in self.__p2_attrs:
self.phase2()
elif attr in self.__p1_attrs:
self.phase1()
elif attr in self.__p0_attrs:
self.phase0()
else:
raise AttributeError, attr
return getattr(self, attr)
def phase1(self): # Compute common names
a_only, b_only = [], []
common = {}
b = {}
for fnm in self.right_list:
b[fnm] = 1
for x in self.left_list:
if b.get(x, 0):
common[x] = 1
else:
a_only.append(x)
for x in self.right_list:
if common.get(x, 0):
pass
else:
b_only.append(x)
self.common = common.keys()
self.left_only = a_only
self.right_only = b_only
def phase2(self): # Distinguish files, directories, funnies
self.common_dirs = []
self.common_files = []
self.common_funny = []
for x in self.common:
a_path = os.path.join(self.left, x)
b_path = os.path.join(self.right, x)
ok = 1
try:
a_stat = statcache.stat(a_path)
except os.error, why:
# print 'Can\'t stat', a_path, ':', why[1]
ok = 0
try:
b_stat = statcache.stat(b_path)
except os.error, why:
# print 'Can\'t stat', b_path, ':', why[1]
ok = 0
if ok:
a_type = stat.S_IFMT(a_stat[stat.ST_MODE])
b_type = stat.S_IFMT(b_stat[stat.ST_MODE])
if a_type <> b_type:
self.common_funny.append(x)
elif stat.S_ISDIR(a_type):
self.common_dirs.append(x)
elif stat.S_ISREG(a_type):
self.common_files.append(x)
else:
self.common_funny.append(x)
else:
self.common_funny.append(x)
def phase3(self): # Find out differences between common files
xx = cmpfiles(self.left, self.right, self.common_files)
self.same_files, self.diff_files, self.funny_files = xx
def phase4(self): # Find out differences between common subdirectories
# A new dircmp object is created for each common subdirectory,
# these are stored in a dictionary indexed by filename.
# The hide and ignore properties are inherited from the parent
self.subdirs = {}
for x in self.common_dirs:
a_x = os.path.join(self.left, x)
b_x = os.path.join(self.right, x)
self.subdirs[x] = dircmp(a_x, b_x, self.ignore, self.hide)
def phase4_closure(self): # Recursively call phase4() on subdirectories
self.phase4()
for x in self.subdirs.keys():
self.subdirs[x].phase4_closure()
def report(self): # Print a report on the differences between a and b
# Output format is purposely lousy
print 'diff', self.left, self.right
if self.left_only:
self.left_only.sort()
print 'Only in', self.left, ':', self.left_only
if self.right_only:
self.right_only.sort()
print 'Only in', self.right, ':', self.right_only
if self.same_files:
self.same_files.sort()
print 'Identical files :', self.same_files
if self.diff_files:
self.diff_files.sort()
print 'Differing files :', self.diff_files
if self.funny_files:
self.funny_files.sort()
print 'Trouble with common files :', self.funny_files
if self.common_dirs:
self.common_dirs.sort()
print 'Common subdirectories :', self.common_dirs
if self.common_funny:
self.common_funny.sort()
print 'Common funny cases :', self.common_funny
def report_partial_closure(self): # Print reports on self and on subdirs
self.report()
for x in self.subdirs.keys():
print
self.subdirs[x].report()
def report_full_closure(self): # Report on self and subdirs recursively
self.report()
for x in self.subdirs.keys():
print
self.subdirs[x].report_full_closure()
# Compare common files in two directories.
# Return:
# - files that compare equal
# - files that compare different
# - funny cases (can't stat etc.)
#
def cmpfiles(a, b, common):
"""Compare common files in two directories.
cmpfiles(a,b,common)
A and B are directory names
COMMON is a list of file names
returns a tuple of three lists:
files that compare equal
files that are different
filenames that aren't regular files."""
res = ([], [], [])
for x in common:
res[_cmp(os.path.join(a, x), os.path.join(b, x))].append(x)
return res
# Compare two files.
# Return:
# 0 for equal
# 1 for different
# 2 for funny cases (can't stat, etc.)
#
def _cmp(a, b):
try:
return not abs(cmp(a, b))
except os.error:
return 2
# Return a copy with items that occur in skip removed.
#
def _filter(list, skip):
result = []
for item in list:
if item not in skip: result.append(item)
return result
# Demonstration and testing.
#
def demo():
import sys
import getopt
options, args = getopt.getopt(sys.argv[1:], 'r')
if len(args) <> 2: raise getopt.error, 'need exactly two args'
dd = dircmp(args[0], args[1])
if ('-r', '') in options:
dd.report_full_closure()
else:
dd.report()
if __name__ == '__main__':
demo()
bufsize = BUFSIZE
fp1 , fp2 = open(f1, 'rb'), open(f2, 'rb')
while 1:
b1, b2 = fp1.read(bufsize), fp2.read(bufsize)
if b1!=b2: return 0
if not b1: return 1

@@ -22,120 +22,120 @@
# \4 exponent part (empty or begins with 'e' or 'E')
try:
class NotANumber(ValueError):
pass
class NotANumber(ValueError):
pass
except TypeError:
NotANumber = 'fpformat.NotANumber'
NotANumber = 'fpformat.NotANumber'
# Return (sign, intpart, fraction, expo) or raise an exception:
# sign is '+' or '-'
# intpart is 0 or more digits beginning with a nonzero
# fraction is 0 or more digits
# expo is an integer
def extract(s):
res = decoder.match(s)
if res is None: raise NotANumber, s
sign, intpart, fraction, exppart = res.group(1,2,3,4)
if sign == '+': sign = ''
if fraction: fraction = fraction[1:]
if exppart: expo = int(exppart[1:])
else: expo = 0
return sign, intpart, fraction, expo
"""Return (sign, intpart, fraction, expo) or raise an exception:
sign is '+' or '-'
intpart is 0 or more digits beginning with a nonzero
fraction is 0 or more digits
expo is an integer"""
res = decoder.match(s)
if res is None: raise NotANumber, s
sign, intpart, fraction, exppart = res.group(1,2,3,4)
if sign == '+': sign = ''
if fraction: fraction = fraction[1:]
if exppart: expo = int(exppart[1:])
else: expo = 0
return sign, intpart, fraction, expo
# Remove the exponent by changing intpart and fraction
def unexpo(intpart, fraction, expo):
if expo > 0: # Move the point left
f = len(fraction)
intpart, fraction = intpart + fraction[:expo], fraction[expo:]
if expo > f:
intpart = intpart + '0'*(expo-f)
elif expo < 0: # Move the point right
i = len(intpart)
intpart, fraction = intpart[:expo], intpart[expo:] + fraction
if expo < -i:
fraction = '0'*(-expo-i) + fraction
return intpart, fraction
"""Remove the exponent by changing intpart and fraction."""
if expo > 0: # Move the point left
f = len(fraction)
intpart, fraction = intpart + fraction[:expo], fraction[expo:]
if expo > f:
intpart = intpart + '0'*(expo-f)
elif expo < 0: # Move the point right
i = len(intpart)
intpart, fraction = intpart[:expo], intpart[expo:] + fraction
if expo < -i:
fraction = '0'*(-expo-i) + fraction
return intpart, fraction
# Round or extend the fraction to size digs
def roundfrac(intpart, fraction, digs):
f = len(fraction)
if f <= digs:
return intpart, fraction + '0'*(digs-f)
i = len(intpart)
if i+digs < 0:
return '0'*-digs, ''
total = intpart + fraction
nextdigit = total[i+digs]
if nextdigit >= '5': # Hard case: increment last digit, may have carry!
n = i + digs - 1
while n >= 0:
if total[n] != '9': break
n = n-1
else:
total = '0' + total
i = i+1
n = 0
total = total[:n] + chr(ord(total[n]) + 1) + '0'*(len(total)-n-1)
intpart, fraction = total[:i], total[i:]
if digs >= 0:
return intpart, fraction[:digs]
else:
return intpart[:digs] + '0'*-digs, ''
"""Round or extend the fraction to size digs."""
f = len(fraction)
if f <= digs:
return intpart, fraction + '0'*(digs-f)
i = len(intpart)
if i+digs < 0:
return '0'*-digs, ''
total = intpart + fraction
nextdigit = total[i+digs]
if nextdigit >= '5': # Hard case: increment last digit, may have carry!
n = i + digs - 1
while n >= 0:
if total[n] != '9': break
n = n-1
else:
total = '0' + total
i = i+1
n = 0
total = total[:n] + chr(ord(total[n]) + 1) + '0'*(len(total)-n-1)
intpart, fraction = total[:i], total[i:]
if digs >= 0:
return intpart, fraction[:digs]
else:
return intpart[:digs] + '0'*-digs, ''
# Format x as [-]ddd.ddd with 'digs' digits after the point
# and at least one digit before.
# If digs <= 0, the point is suppressed.
def fix(x, digs):
if type(x) != type(''): x = `x`
try:
sign, intpart, fraction, expo = extract(x)
except NotANumber:
return x
intpart, fraction = unexpo(intpart, fraction, expo)
intpart, fraction = roundfrac(intpart, fraction, digs)
while intpart and intpart[0] == '0': intpart = intpart[1:]
if intpart == '': intpart = '0'
if digs > 0: return sign + intpart + '.' + fraction
else: return sign + intpart
"""Format x as [-]ddd.ddd with 'digs' digits after the point
and at least one digit before.
If digs <= 0, the point is suppressed."""
if type(x) != type(''): x = `x`
try:
sign, intpart, fraction, expo = extract(x)
except NotANumber:
return x
intpart, fraction = unexpo(intpart, fraction, expo)
intpart, fraction = roundfrac(intpart, fraction, digs)
while intpart and intpart[0] == '0': intpart = intpart[1:]
if intpart == '': intpart = '0'
if digs > 0: return sign + intpart + '.' + fraction
else: return sign + intpart
# Format x as [-]d.dddE[+-]ddd with 'digs' digits after the point
# and exactly one digit before.
# If digs is <= 0, one digit is kept and the point is suppressed.
def sci(x, digs):
if type(x) != type(''): x = `x`
sign, intpart, fraction, expo = extract(x)
if not intpart:
while fraction and fraction[0] == '0':
fraction = fraction[1:]
expo = expo - 1
if fraction:
intpart, fraction = fraction[0], fraction[1:]
expo = expo - 1
else:
intpart = '0'
else:
expo = expo + len(intpart) - 1
intpart, fraction = intpart[0], intpart[1:] + fraction
digs = max(0, digs)
intpart, fraction = roundfrac(intpart, fraction, digs)
if len(intpart) > 1:
intpart, fraction, expo = \
intpart[0], intpart[1:] + fraction[:-1], \
expo + len(intpart) - 1
s = sign + intpart
if digs > 0: s = s + '.' + fraction
e = `abs(expo)`
e = '0'*(3-len(e)) + e
if expo < 0: e = '-' + e
else: e = '+' + e
return s + 'e' + e
"""Format x as [-]d.dddE[+-]ddd with 'digs' digits after the point
and exactly one digit before.
If digs is <= 0, one digit is kept and the point is suppressed."""
if type(x) != type(''): x = `x`
sign, intpart, fraction, expo = extract(x)
if not intpart:
while fraction and fraction[0] == '0':
fraction = fraction[1:]
expo = expo - 1
if fraction:
intpart, fraction = fraction[0], fraction[1:]
expo = expo - 1
else:
intpart = '0'
else:
expo = expo + len(intpart) - 1
intpart, fraction = intpart[0], intpart[1:] + fraction
digs = max(0, digs)
intpart, fraction = roundfrac(intpart, fraction, digs)
if len(intpart) > 1:
intpart, fraction, expo = \
intpart[0], intpart[1:] + fraction[:-1], \
expo + len(intpart) - 1
s = sign + intpart
if digs > 0: s = s + '.' + fraction
e = `abs(expo)`
e = '0'*(3-len(e)) + e
if expo < 0: e = '-' + e
else: e = '+' + e
return s + 'e' + e
# Interactive test run
def test():
try:
while 1:
x, digs = input('Enter (x, digs): ')
print x, fix(x, digs), sci(x, digs)
except (EOFError, KeyboardInterrupt):
pass
"""Interactive test run."""
try:
while 1:
x, digs = input('Enter (x, digs): ')
print x, fix(x, digs), sci(x, digs)
except (EOFError, KeyboardInterrupt):
pass
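
A brief interactive sketch of fix() and sci() as documented above; the expected outputs follow from the stated rules (the requested number of digits after the point, and a signed three-digit exponent) rather than from re-running this exact revision.

    >>> import fpformat
    >>> fpformat.fix('3.14159', 2)    # two digits after the point
    '3.14'
    >>> fpformat.sci(110.0, 2)        # one digit before the point
    '1.10e+002'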

@@ -1,4 +1,4 @@
# Gopher protocol client interface
"""Gopher protocol client interface."""
import string
@@ -29,180 +29,180 @@
A_WHOIS = 'w'
A_QUERY = 'q'
A_GIF = 'g'
A_HTML = 'h' # HTML file
A_WWW = 'w' # WWW address
A_HTML = 'h' # HTML file
A_WWW = 'w' # WWW address
A_PLUS_IMAGE = ':'
A_PLUS_MOVIE = ';'
A_PLUS_SOUND = '<'
# Function mapping all file types to strings; unknown types become TYPE='x'
_names = dir()
_type_to_name_map = {}
def type_to_name(gtype):
global _type_to_name_map
if _type_to_name_map=={}:
for name in _names:
if name[:2] == 'A_':
_type_to_name_map[eval(name)] = name[2:]
if _type_to_name_map.has_key(gtype):
return _type_to_name_map[gtype]
return 'TYPE=' + `gtype`
"""Map all file types to strings; unknown types become TYPE='x'."""
global _type_to_name_map
if _type_to_name_map=={}:
for name in _names:
if name[:2] == 'A_':
_type_to_name_map[eval(name)] = name[2:]
if _type_to_name_map.has_key(gtype):
return _type_to_name_map[gtype]
return 'TYPE=' + `gtype`
# Names for characters and strings
CRLF = '\r\n'
TAB = '\t'
# Send a selector to a given host and port, return a file with the reply
def send_selector(selector, host, port = 0):
import socket
import string
if not port:
i = string.find(host, ':')
if i >= 0:
host, port = host[:i], string.atoi(host[i+1:])
if not port:
port = DEF_PORT
elif type(port) == type(''):
port = string.atoi(port)
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect(host, port)
s.send(selector + CRLF)
s.shutdown(1)
return s.makefile('rb')
"""Send a selector to a given host and port, return a file with the reply."""
import socket
import string
if not port:
i = string.find(host, ':')
if i >= 0:
host, port = host[:i], string.atoi(host[i+1:])
if not port:
port = DEF_PORT
elif type(port) == type(''):
port = string.atoi(port)
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect(host, port)
s.send(selector + CRLF)
s.shutdown(1)
return s.makefile('rb')
# Send a selector and a query string
def send_query(selector, query, host, port = 0):
return send_selector(selector + '\t' + query, host, port)
"""Send a selector and a query string."""
return send_selector(selector + '\t' + query, host, port)
# Takes a path as returned by urlparse and returns the appropriate selector
def path_to_selector(path):
if path=="/":
return "/"
else:
return path[2:] # Cuts initial slash and data type identifier
"""Takes a path as returned by urlparse and returns the appropriate selector."""
if path=="/":
return "/"
else:
return path[2:] # Cuts initial slash and data type identifier
# Takes a path as returned by urlparse and maps it to a string
# See section 3.4 of RFC 1738 for details
def path_to_datatype_name(path):
if path=="/":
# No way to tell, although "INDEX" is likely
return "TYPE='unknown'"
else:
return type_to_name(path[1])
"""Takes a path as returned by urlparse and maps it to a string.
See section 3.4 of RFC 1738 for details."""
if path=="/":
# No way to tell, although "INDEX" is likely
return "TYPE='unknown'"
else:
return type_to_name(path[1])
# The following functions interpret the data returned by the gopher
# server according to the expected type, e.g. textfile or directory
# Get a directory in the form of a list of entries
def get_directory(f):
import string
list = []
while 1:
line = f.readline()
if not line:
print '(Unexpected EOF from server)'
break
if line[-2:] == CRLF:
line = line[:-2]
elif line[-1:] in CRLF:
line = line[:-1]
if line == '.':
break
if not line:
print '(Empty line from server)'
continue
gtype = line[0]
parts = string.splitfields(line[1:], TAB)
if len(parts) < 4:
print '(Bad line from server:', `line`, ')'
continue
if len(parts) > 4:
if parts[4:] != ['+']:
print '(Extra info from server:',
print parts[4:], ')'
else:
parts.append('')
parts.insert(0, gtype)
list.append(parts)
return list
"""Get a directory in the form of a list of entries."""
import string
list = []
while 1:
line = f.readline()
if not line:
print '(Unexpected EOF from server)'
break
if line[-2:] == CRLF:
line = line[:-2]
elif line[-1:] in CRLF:
line = line[:-1]
if line == '.':
break
if not line:
print '(Empty line from server)'
continue
gtype = line[0]
parts = string.splitfields(line[1:], TAB)
if len(parts) < 4:
print '(Bad line from server:', `line`, ')'
continue
if len(parts) > 4:
if parts[4:] != ['+']:
print '(Extra info from server:',
print parts[4:], ')'
else:
parts.append('')
parts.insert(0, gtype)
list.append(parts)
return list
# Get a text file as a list of lines, with trailing CRLF stripped
def get_textfile(f):
list = []
get_alt_textfile(f, list.append)
return list
"""Get a text file as a list of lines, with trailing CRLF stripped."""
list = []
get_alt_textfile(f, list.append)
return list
# Get a text file and pass each line to a function, with trailing CRLF stripped
def get_alt_textfile(f, func):
while 1:
line = f.readline()
if not line:
print '(Unexpected EOF from server)'
break
if line[-2:] == CRLF:
line = line[:-2]
elif line[-1:] in CRLF:
line = line[:-1]
if line == '.':
break
if line[:2] == '..':
line = line[1:]
func(line)
"""Get a text file and pass each line to a function, with trailing CRLF stripped."""
while 1:
line = f.readline()
if not line:
print '(Unexpected EOF from server)'
break
if line[-2:] == CRLF:
line = line[:-2]
elif line[-1:] in CRLF:
line = line[:-1]
if line == '.':
break
if line[:2] == '..':
line = line[1:]
func(line)
# Get a binary file as one solid data block
def get_binary(f):
data = f.read()
return data
"""Get a binary file as one solid data block."""
data = f.read()
return data
# Get a binary file and pass each block to a function
def get_alt_binary(f, func, blocksize):
while 1:
data = f.read(blocksize)
if not data:
break
func(data)
"""Get a binary file and pass each block to a function."""
while 1:
data = f.read(blocksize)
if not data:
break
func(data)
# Trivial test program
def test():
import sys
import getopt
opts, args = getopt.getopt(sys.argv[1:], '')
selector = DEF_SELECTOR
type = selector[0]
host = DEF_HOST
port = DEF_PORT
if args:
host = args[0]
args = args[1:]
if args:
type = args[0]
args = args[1:]
if len(type) > 1:
type, selector = type[0], type
else:
selector = ''
if args:
selector = args[0]
args = args[1:]
query = ''
if args:
query = args[0]
args = args[1:]
if type == A_INDEX:
f = send_query(selector, query, host)
else:
f = send_selector(selector, host)
if type == A_TEXT:
list = get_textfile(f)
for item in list: print item
elif type in (A_MENU, A_INDEX):
list = get_directory(f)
for item in list: print item
else:
data = get_binary(f)
print 'binary data:', len(data), 'bytes:', `data[:100]`[:40]
"""Trivial test program."""
import sys
import getopt
opts, args = getopt.getopt(sys.argv[1:], '')
selector = DEF_SELECTOR
type = selector[0]
host = DEF_HOST
port = DEF_PORT
if args:
host = args[0]
args = args[1:]
if args:
type = args[0]
args = args[1:]
if len(type) > 1:
type, selector = type[0], type
else:
selector = ''
if args:
selector = args[0]
args = args[1:]
query = ''
if args:
query = args[0]
args = args[1:]
if type == A_INDEX:
f = send_query(selector, query, host)
else:
f = send_selector(selector, host)
if type == A_TEXT:
list = get_textfile(f)
for item in list: print item
elif type in (A_MENU, A_INDEX):
list = get_directory(f)
for item in list: print item
else:
data = get_binary(f)
print 'binary data:', len(data), 'bytes:', `data[:100]`[:40]
# Run the test when run as script
if __name__ == '__main__':
test()
test()
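
The test() function above already exercises these helpers; a minimal directory-listing sketch, assuming a reachable Gopher server (the host name below is hypothetical), looks like this:

    f = send_selector('', 'gopher.example.org')    # hypothetical host; empty selector = root menu
    for entry in get_directory(f):
        # entry is [type, display string, selector, host, port, ...]
        print entry[1], '->', entry[2]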

@@ -1,15 +1,15 @@
"""This module implements a function that reads and writes a gzipped file.
The user of the file doesn't have to worry about the compression,
but random access is not allowed."""
# based on Andrew Kuchling's minigzip.py distributed with the zlib module
import time
import string
import zlib
import struct
import __builtin__
# implements a python function that reads and writes a gzipped file
# the user of the file doesn't have to worry about the compression,
# but random access is not allowed
# based on Andrew Kuchling's minigzip.py distributed with the zlib module
FTEXT, FHCRC, FEXTRA, FNAME, FCOMMENT = 1, 2, 4, 8, 16
READ, WRITE = 1, 2

@@ -1,3 +1,5 @@
"""HTML character entity references."""
entitydefs = {
'AElig': '\306', # latin capital letter AE = latin capital ligature AE, U+00C6 ISOlat1
'Aacute': '\301', # latin capital letter A with acute, U+00C1 ISOlat1

@@ -1,4 +1,4 @@
# Recognizing image files based on their first few bytes.
"""Recognize image file formats based on their first few bytes."""
#-------------------------#
@@ -6,25 +6,25 @@
#-------------------------#
def what(file, h=None):
if h is None:
if type(file) == type(''):
f = open(file, 'rb')
h = f.read(32)
else:
location = file.tell()
h = file.read(32)
file.seek(location)
f = None
else:
f = None
try:
for tf in tests:
res = tf(h, f)
if res:
return res
finally:
if f: f.close()
return None
if h is None:
if type(file) == type(''):
f = open(file, 'rb')
h = f.read(32)
else:
location = file.tell()
h = file.read(32)
file.seek(location)
f = None
else:
f = None
try:
for tf in tests:
res = tf(h, f)
if res:
return res
finally:
if f: f.close()
return None
#---------------------------------#
@@ -34,81 +34,81 @@ def what(file, h=None):
tests = []
def test_rgb(h, f):
# SGI image library
if h[:2] == '\001\332':
return 'rgb'
"""SGI image library"""
if h[:2] == '\001\332':
return 'rgb'
tests.append(test_rgb)
def test_gif(h, f):
# GIF ('87 and '89 variants)
if h[:6] in ('GIF87a', 'GIF89a'):
return 'gif'
"""GIF ('87 and '89 variants)"""
if h[:6] in ('GIF87a', 'GIF89a'):
return 'gif'
tests.append(test_gif)
def test_pbm(h, f):
# PBM (portable bitmap)
if len(h) >= 3 and \
h[0] == 'P' and h[1] in '14' and h[2] in ' \t\n\r':
return 'pbm'
"""PBM (portable bitmap)"""
if len(h) >= 3 and \
h[0] == 'P' and h[1] in '14' and h[2] in ' \t\n\r':
return 'pbm'
tests.append(test_pbm)
def test_pgm(h, f):
# PGM (portable graymap)
if len(h) >= 3 and \
h[0] == 'P' and h[1] in '25' and h[2] in ' \t\n\r':
return 'pgm'
"""PGM (portable graymap)"""
if len(h) >= 3 and \
h[0] == 'P' and h[1] in '25' and h[2] in ' \t\n\r':
return 'pgm'
tests.append(test_pgm)
def test_ppm(h, f):
# PPM (portable pixmap)
if len(h) >= 3 and \
h[0] == 'P' and h[1] in '36' and h[2] in ' \t\n\r':
return 'ppm'
"""PPM (portable pixmap)"""
if len(h) >= 3 and \
h[0] == 'P' and h[1] in '36' and h[2] in ' \t\n\r':
return 'ppm'
tests.append(test_ppm)
def test_tiff(h, f):
# TIFF (can be in Motorola or Intel byte order)
if h[:2] in ('MM', 'II'):
return 'tiff'
"""TIFF (can be in Motorola or Intel byte order)"""
if h[:2] in ('MM', 'II'):
return 'tiff'
tests.append(test_tiff)
def test_rast(h, f):
# Sun raster file
if h[:4] == '\x59\xA6\x6A\x95':
return 'rast'
"""Sun raster file"""
if h[:4] == '\x59\xA6\x6A\x95':
return 'rast'
tests.append(test_rast)
def test_xbm(h, f):
# X bitmap (X10 or X11)
s = '#define '
if h[:len(s)] == s:
return 'xbm'
"""X bitmap (X10 or X11)"""
s = '#define '
if h[:len(s)] == s:
return 'xbm'
tests.append(test_xbm)
def test_jpeg(h, f):
# JPEG data in JFIF format
if h[6:10] == 'JFIF':
return 'jpeg'
"""JPEG data in JFIF format"""
if h[6:10] == 'JFIF':
return 'jpeg'
tests.append(test_jpeg)
def test_bmp(h, f):
if h[:2] == 'BM':
return 'bmp'
if h[:2] == 'BM':
return 'bmp'
tests.append(test_bmp)
def test_png(h, f):
if h[:8] == "\211PNG\r\n\032\n":
return 'png'
if h[:8] == "\211PNG\r\n\032\n":
return 'png'
tests.append(test_png)
@@ -117,37 +117,37 @@ def test_png(h, f):
#--------------------#
def test():
import sys
recursive = 0
if sys.argv[1:] and sys.argv[1] == '-r':
del sys.argv[1:2]
recursive = 1
try:
if sys.argv[1:]:
testall(sys.argv[1:], recursive, 1)
else:
testall(['.'], recursive, 1)
except KeyboardInterrupt:
sys.stderr.write('\n[Interrupted]\n')
sys.exit(1)
import sys
recursive = 0
if sys.argv[1:] and sys.argv[1] == '-r':
del sys.argv[1:2]
recursive = 1
try:
if sys.argv[1:]:
testall(sys.argv[1:], recursive, 1)
else:
testall(['.'], recursive, 1)
except KeyboardInterrupt:
sys.stderr.write('\n[Interrupted]\n')
sys.exit(1)
def testall(list, recursive, toplevel):
import sys
import os
for filename in list:
if os.path.isdir(filename):
print filename + '/:',
if recursive or toplevel:
print 'recursing down:'
import glob
names = glob.glob(os.path.join(filename, '*'))
testall(names, recursive, 0)
else:
print '*** directory (use -r) ***'
else:
print filename + ':',
sys.stdout.flush()
try:
print what(filename)
except IOError:
print '*** not found ***'
import sys
import os
for filename in list:
if os.path.isdir(filename):
print filename + '/:',
if recursive or toplevel:
print 'recursing down:'
import glob
names = glob.glob(os.path.join(filename, '*'))
testall(names, recursive, 0)
else:
print '*** directory (use -r) ***'
else:
print filename + ':',
sys.stdout.flush()
try:
print what(filename)
except IOError:
print '*** not found ***'
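
Because what() accepts the header bytes directly through its h argument, the detectors above can be checked without any file on disk; the padding below only mimics the 32 bytes what() would normally read from a file.

    >>> what(None, h='GIF89a' + '\0' * 26)
    'gif'
    >>> what(None, h='\211PNG\r\n\032\n' + '\0' * 24)
    'png'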

@@ -1,13 +1,14 @@
#! /usr/bin/env python
#
# Keywords (from "graminit.c")
#
# This file is automatically generated; please don't muck it up!
#
# To update the symbols in this file, 'cd' to the top directory of
# the python source tree after building the interpreter and run:
#
# python Lib/keyword.py
"""Keywords (from "graminit.c")
This file is automatically generated; please don't muck it up!
To update the symbols in this file, 'cd' to the top directory of
the python source tree after building the interpreter and run:
python Lib/keyword.py
"""
kwlist = [
#--start keywords--
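
Since the generated table is an ordinary importable list, it can be queried directly; the 1 below is the integer truth value returned by Python of this era.

    >>> import keyword
    >>> 'print' in keyword.kwlist
    1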

@@ -1,18 +1,20 @@
# Cache lines from files.
# This is intended to read lines from modules imported -- hence if a filename
# is not found, it will look down the module search path for a file by
# that name.
"""Cache lines from files.
This is intended to read lines from modules imported -- hence if a filename
is not found, it will look down the module search path for a file by
that name.
"""
import sys
import os
from stat import *
def getline(filename, lineno):
lines = getlines(filename)
if 1 <= lineno <= len(lines):
return lines[lineno-1]
else:
return ''
lines = getlines(filename)
if 1 <= lineno <= len(lines):
return lines[lineno-1]
else:
return ''
# The cache
@@ -20,71 +22,71 @@ def getline(filename, lineno):
cache = {} # The cache
# Clear the cache entirely
def clearcache():
global cache
cache = {}
"""Clear the cache entirely."""
global cache
cache = {}
# Get the lines for a file from the cache.
# Update the cache if it doesn't contain an entry for this file already.
def getlines(filename):
if cache.has_key(filename):
return cache[filename][2]
else:
return updatecache(filename)
"""Get the lines for a file from the cache.
Update the cache if it doesn't contain an entry for this file already."""
if cache.has_key(filename):
return cache[filename][2]
else:
return updatecache(filename)
# Discard cache entries that are out of date.
# (This is not checked upon each call!)
def checkcache():
for filename in cache.keys():
size, mtime, lines, fullname = cache[filename]
try:
stat = os.stat(fullname)
except os.error:
del cache[filename]
continue
if size <> stat[ST_SIZE] or mtime <> stat[ST_MTIME]:
del cache[filename]
"""Discard cache entries that are out of date.
(This is not checked upon each call!)"""
for filename in cache.keys():
size, mtime, lines, fullname = cache[filename]
try:
stat = os.stat(fullname)
except os.error:
del cache[filename]
continue
if size <> stat[ST_SIZE] or mtime <> stat[ST_MTIME]:
del cache[filename]
# Update a cache entry and return its list of lines.
# If something's wrong, print a message, discard the cache entry,
# and return an empty list.
def updatecache(filename):
if cache.has_key(filename):
del cache[filename]
if not filename or filename[0] + filename[-1] == '<>':
return []
fullname = filename
try:
stat = os.stat(fullname)
except os.error, msg:
# Try looking through the module search path
basename = os.path.split(filename)[1]
for dirname in sys.path:
fullname = os.path.join(dirname, basename)
try:
stat = os.stat(fullname)
break
except os.error:
pass
else:
# No luck
## print '*** Cannot stat', filename, ':', msg
return []
try:
fp = open(fullname, 'r')
lines = fp.readlines()
fp.close()
except IOError, msg:
## print '*** Cannot open', fullname, ':', msg
return []
size, mtime = stat[ST_SIZE], stat[ST_MTIME]
cache[filename] = size, mtime, lines, fullname
return lines
"""Update a cache entry and return its list of lines.
If something's wrong, print a message, discard the cache entry,
and return an empty list."""
if cache.has_key(filename):
del cache[filename]
if not filename or filename[0] + filename[-1] == '<>':
return []
fullname = filename
try:
stat = os.stat(fullname)
except os.error, msg:
# Try looking through the module search path
basename = os.path.split(filename)[1]
for dirname in sys.path:
fullname = os.path.join(dirname, basename)
try:
stat = os.stat(fullname)
break
except os.error:
pass
else:
# No luck
## print '*** Cannot stat', filename, ':', msg
return []
try:
fp = open(fullname, 'r')
lines = fp.readlines()
fp.close()
except IOError, msg:
## print '*** Cannot open', fullname, ':', msg
return []
size, mtime = stat[ST_SIZE], stat[ST_MTIME]
cache[filename] = size, mtime, lines, fullname
return lines
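
A small usage sketch of the cache interface defined above; the filename is hypothetical, and per updatecache() a file that cannot be found simply yields an empty result.

    lines = getlines('spam.py')       # 'spam.py' is a hypothetical filename
    first = getline('spam.py', 1)     # '' if the file or the line number is missing
    clearcache()                      # discard all cached entries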

@@ -1,4 +1,4 @@
# module 'macpath' -- pathname (or -related) operations for the Macintosh
"""Pathname and path-related operations for the Macintosh."""
import string
import os
@@ -10,77 +10,77 @@
normcase = string.lower
# Return true if a path is absolute.
# On the Mac, relative paths begin with a colon,
# but as a special case, paths with no colons at all are also relative.
# Anything else is absolute (the string up to the first colon is the
# volume name).
def isabs(s):
return ':' in s and s[0] <> ':'
"""Return true if a path is absolute.
On the Mac, relative paths begin with a colon,
but as a special case, paths with no colons at all are also relative.
Anything else is absolute (the string up to the first colon is the
volume name)."""
return ':' in s and s[0] <> ':'
def join(s, *p):
path = s
for t in p:
if (not s) or isabs(t):
path = t
continue
if t[:1] == ':':
t = t[1:]
if ':' not in path:
path = ':' + path
if path[-1:] <> ':':
path = path + ':'
path = path + t
return path
path = s
for t in p:
if (not s) or isabs(t):
path = t
continue
if t[:1] == ':':
t = t[1:]
if ':' not in path:
path = ':' + path
if path[-1:] <> ':':
path = path + ':'
path = path + t
return path
# Split a pathname in two parts: the directory leading up to the final bit,
# and the basename (the filename, without colons, in that directory).
# The result (s, t) is such that join(s, t) yields the original argument.
def split(s):
if ':' not in s: return '', s
colon = 0
for i in range(len(s)):
if s[i] == ':': colon = i+1
path, file = s[:colon-1], s[colon:]
if path and not ':' in path:
path = path + ':'
return path, file
"""Split a pathname into two parts: the directory leading up to the final
bit, and the basename (the filename, without colons, in that directory).
The result (s, t) is such that join(s, t) yields the original argument."""
if ':' not in s: return '', s
colon = 0
for i in range(len(s)):
if s[i] == ':': colon = i+1
path, file = s[:colon-1], s[colon:]
if path and not ':' in path:
path = path + ':'
return path, file
# Split a path in root and extension.
# The extension is everything starting at the last dot in the last
# pathname component; the root is everything before that.
# It is always true that root + ext == p.
def splitext(p):
root, ext = '', ''
for c in p:
if c == ':':
root, ext = root + ext + c, ''
elif c == '.':
if ext:
root, ext = root + ext, c
else:
ext = c
elif ext:
ext = ext + c
else:
root = root + c
return root, ext
"""Split a path into root and extension.
The extension is everything starting at the last dot in the last
pathname component; the root is everything before that.
It is always true that root + ext == p."""
root, ext = '', ''
for c in p:
if c == ':':
root, ext = root + ext + c, ''
elif c == '.':
if ext:
root, ext = root + ext, c
else:
ext = c
elif ext:
ext = ext + c
else:
root = root + c
return root, ext
# Split a pathname into a drive specification and the rest of the
# path. Useful on DOS/Windows/NT; on the Mac, the drive is always
# empty (don't use the volume name -- it doesn't have the same
# syntactic and semantic oddities as DOS drive letters, such as there
# being a separate current directory per drive).
def splitdrive(p):
return '', p
"""Split a pathname into a drive specification and the rest of the
path. Useful on DOS/Windows/NT; on the Mac, the drive is always
empty (don't use the volume name -- it doesn't have the same
syntactic and semantic oddities as DOS drive letters, such as there
being a separate current directory per drive)."""
return '', p
# Short interfaces to split()
@@ -89,14 +89,14 @@ def dirname(s): return split(s)[0]
def basename(s): return split(s)[1]
# Return true if the pathname refers to an existing directory.
def isdir(s):
try:
st = os.stat(s)
except os.error:
return 0
return S_ISDIR(st[ST_MODE])
"""Return true if the pathname refers to an existing directory."""
try:
st = os.stat(s)
except os.error:
return 0
return S_ISDIR(st[ST_MODE])
# Get size, mtime, atime of files.
@@ -117,105 +117,103 @@ def getatime(filename):
return st[ST_MTIME]
# Return true if the pathname refers to a symbolic link.
# (Always false on the Mac, until we understand Aliases.)
def islink(s):
return 0
"""Return true if the pathname refers to a symbolic link.
(Always false on the Mac, until we understand Aliases.)"""
return 0
# Return true if the pathname refers to an existing regular file.
def isfile(s):
try:
st = os.stat(s)
except os.error:
return 0
return S_ISREG(st[ST_MODE])
"""Return true if the pathname refers to an existing regular file."""
try:
st = os.stat(s)
except os.error:
return 0
return S_ISREG(st[ST_MODE])
# Return true if the pathname refers to an existing file or directory.
def exists(s):
try:
st = os.stat(s)
except os.error:
return 0
return 1
"""Return true if the pathname refers to an existing file or directory."""
try:
st = os.stat(s)
except os.error:
return 0
return 1
#
# dummy expandvars to retain interface-compatability with other
# operating systems.
def expandvars(path):
return path
"""Dummy to retain interface-compatibility with other operating systems."""
return path
#
# dummy expanduser to retain interface-compatability with other
# operating systems.
def expanduser(path):
return path
# Normalize a pathname: get rid of '::' sequences by backing up,
# e.g., 'foo:bar::bletch' becomes 'foo:bletch'.
# Raise the exception norm_error below if backing up is impossible,
# e.g., for '::foo'.
# XXX The Unix version doesn't raise an exception but simply
# returns an unnormalized path. Should do so here too.
"""Dummy to retain interface-compatibility with other operating systems."""
return path
norm_error = 'macpath.norm_error: path cannot be normalized'
def normpath(s):
import string
if ':' not in s:
return ':' + s
f = string.splitfields(s, ':')
pre = []
post = []
if not f[0]:
pre = f[:1]
f = f[1:]
if not f[len(f)-1]:
post = f[-1:]
f = f[:-1]
res = []
for seg in f:
if seg:
res.append(seg)
else:
if not res: raise norm_error, 'path starts with ::'
del res[len(res)-1]
if not (pre or res):
raise norm_error, 'path starts with volume::'
if pre: res = pre + res
if post: res = res + post
s = res[0]
for seg in res[1:]:
s = s + ':' + seg
return s
"""Normalize a pathname: get rid of '::' sequences by backing up,
e.g., 'foo:bar::bletch' becomes 'foo:bletch'.
Raise the exception norm_error below if backing up is impossible,
e.g., for '::foo'."""
# XXX The Unix version doesn't raise an exception but simply
# returns an unnormalized path. Should do so here too.
import string
if ':' not in s:
return ':' + s
f = string.splitfields(s, ':')
pre = []
post = []
if not f[0]:
pre = f[:1]
f = f[1:]
if not f[len(f)-1]:
post = f[-1:]
f = f[:-1]
res = []
for seg in f:
if seg:
res.append(seg)
else:
if not res: raise norm_error, 'path starts with ::'
del res[len(res)-1]
if not (pre or res):
raise norm_error, 'path starts with volume::'
if pre: res = pre + res
if post: res = res + post
s = res[0]
for seg in res[1:]:
s = s + ':' + seg
return s
# Directory tree walk.
# For each directory under top (including top itself),
# func(arg, dirname, filenames) is called, where
# dirname is the name of the directory and filenames is the list
# of files (and subdirectories etc.) in the directory.
# The func may modify the filenames list, to implement a filter,
# or to impose a different order of visiting.
def walk(top, func, arg):
try:
names = os.listdir(top)
except os.error:
return
func(arg, top, names)
for name in names:
name = join(top, name)
if isdir(name):
walk(name, func, arg)
"""Directory tree walk.
For each directory under top (including top itself),
func(arg, dirname, filenames) is called, where
dirname is the name of the directory and filenames is the list
of files (and subdirectories etc.) in the directory.
The func may modify the filenames list, to implement a filter,
or to impose a different order of visiting."""
try:
names = os.listdir(top)
except os.error:
return
func(arg, top, names)
for name in names:
name = join(top, name)
if isdir(name):
walk(name, func, arg)
# Return an absolute path.
def abspath(path):
"""Return an absolute path."""
if not isabs(path):
path = join(os.getcwd(), path)
return normpath(path)

@@ -9,8 +9,11 @@
def getcaps():
"""Return a dictionary containing the mailcap database.
The dictionary maps a MIME type (in all lowercase,
e.g. 'text/plain') to a list of corresponding mailcap entries.
The dictionary maps a MIME type (in all lowercase, e.g. 'text/plain')
to a list of dictionaries corresponding to mailcap entries. The list
collects all the entries for that MIME type from all available mailcap
files. Each dictionary contains key-value pairs for that MIME type,
where the viewing command is stored with the key "view".
"""
caps = {}
@@ -48,6 +51,14 @@ def listmailcapfiles():
# Part 2: the parser.
def readmailcapfile(fp):
"""Read a mailcap file and return a dictionary keyed by MIME type.
Each MIME type is mapped to an entry consisting of a list of
dictionaries; the list will contain more than one such dictionary
if a given MIME type appears more than once in the mailcap file.
Each dictionary contains key-value pairs for that MIME type, where
the viewing command is stored with the key "view".
"""
caps = {}
while 1:
line = fp.readline()
@@ -78,6 +89,11 @@ def readmailcapfile(fp):
return caps
def parseline(line):
"""Parse one entry in a mailcap file and return a dictionary.
The viewing command is stored as the value with the key "view",
and the rest of the fields produce key-value pairs in the dict.
"""
fields = []
i, n = 0, len(line)
while i < n:
@@ -104,6 +120,7 @@ def parseline(line):
return key, fields
def parsefield(line, i, n):
"""Separate one key-value pair in a mailcap entry."""
start = i
while i < n:
c = line[i]
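
A minimal sketch of how the dictionary returned by getcaps() might be consulted, following the key names described in the docstrings above (the MIME type is only an example):

    caps = getcaps()
    for entry in caps.get('text/plain', []):
        print entry['view']           # the viewing command for this entry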

@@ -1,57 +1,58 @@
# MH interface -- purely object-oriented (well, almost)
#
# Executive summary:
#
# import mhlib
#
# mh = mhlib.MH() # use default mailbox directory and profile
# mh = mhlib.MH(mailbox) # override mailbox location (default from profile)
# mh = mhlib.MH(mailbox, profile) # override mailbox and profile
#
# mh.error(format, ...) # print error message -- can be overridden
# s = mh.getprofile(key) # profile entry (None if not set)
# path = mh.getpath() # mailbox pathname
# name = mh.getcontext() # name of current folder
# mh.setcontext(name) # set name of current folder
#
# list = mh.listfolders() # names of top-level folders
# list = mh.listallfolders() # names of all folders, including subfolders
# list = mh.listsubfolders(name) # direct subfolders of given folder
# list = mh.listallsubfolders(name) # all subfolders of given folder
#
# mh.makefolder(name) # create new folder
# mh.deletefolder(name) # delete folder -- must have no subfolders
#
# f = mh.openfolder(name) # new open folder object
#
# f.error(format, ...) # same as mh.error(format, ...)
# path = f.getfullname() # folder's full pathname
# path = f.getsequencesfilename() # full pathname of folder's sequences file
# path = f.getmessagefilename(n) # full pathname of message n in folder
#
# list = f.listmessages() # list of messages in folder (as numbers)
# n = f.getcurrent() # get current message
# f.setcurrent(n) # set current message
# list = f.parsesequence(seq) # parse msgs syntax into list of messages
# n = f.getlast() # get last message (0 if no messagse)
# f.setlast(n) # set last message (internal use only)
#
# dict = f.getsequences() # dictionary of sequences in folder {name: list}
# f.putsequences(dict) # write sequences back to folder
#
# f.createmessage(n, fp) # add message from file f as number n
# f.removemessages(list) # remove messages in list from folder
# f.refilemessages(list, tofolder) # move messages in list to other folder
# f.movemessage(n, tofolder, ton) # move one message to a given destination
# f.copymessage(n, tofolder, ton) # copy one message to a given destination
#
# m = f.openmessage(n) # new open message object (costs a file descriptor)
# m is a derived class of mimetools.Message(rfc822.Message), with:
# s = m.getheadertext() # text of message's headers
# s = m.getheadertext(pred) # text of message's headers, filtered by pred
# s = m.getbodytext() # text of message's body, decoded
# s = m.getbodytext(0) # text of message's body, not decoded
#
"""MH interface -- purely object-oriented (well, almost)
Executive summary:
import mhlib
mh = mhlib.MH() # use default mailbox directory and profile
mh = mhlib.MH(mailbox) # override mailbox location (default from profile)
mh = mhlib.MH(mailbox, profile) # override mailbox and profile
mh.error(format, ...) # print error message -- can be overridden
s = mh.getprofile(key) # profile entry (None if not set)
path = mh.getpath() # mailbox pathname
name = mh.getcontext() # name of current folder
mh.setcontext(name) # set name of current folder
list = mh.listfolders() # names of top-level folders
list = mh.listallfolders() # names of all folders, including subfolders
list = mh.listsubfolders(name) # direct subfolders of given folder
list = mh.listallsubfolders(name) # all subfolders of given folder
mh.makefolder(name) # create new folder
mh.deletefolder(name) # delete folder -- must have no subfolders
f = mh.openfolder(name) # new open folder object
f.error(format, ...) # same as mh.error(format, ...)
path = f.getfullname() # folder's full pathname
path = f.getsequencesfilename() # full pathname of folder's sequences file
path = f.getmessagefilename(n) # full pathname of message n in folder
list = f.listmessages() # list of messages in folder (as numbers)
n = f.getcurrent() # get current message
f.setcurrent(n) # set current message
list = f.parsesequence(seq) # parse msgs syntax into list of messages
n = f.getlast() # get last message (0 if no messages)
f.setlast(n) # set last message (internal use only)
dict = f.getsequences() # dictionary of sequences in folder {name: list}
f.putsequences(dict) # write sequences back to folder
f.createmessage(n, fp) # add message from file f as number n
f.removemessages(list) # remove messages in list from folder
f.refilemessages(list, tofolder) # move messages in list to other folder
f.movemessage(n, tofolder, ton) # move one message to a given destination
f.copymessage(n, tofolder, ton) # copy one message to a given destination
m = f.openmessage(n) # new open message object (costs a file descriptor)
m is a derived class of mimetools.Message(rfc822.Message), with:
s = m.getheadertext() # text of message's headers
s = m.getheadertext(pred) # text of message's headers, filtered by pred
s = m.getbodytext() # text of message's body, decoded
s = m.getbodytext(0) # text of message's body, not decoded
"""
# XXX To do, functionality:
# - annotate messages
# - send messages
@ -87,16 +88,15 @@
Error = 'mhlib.Error'
# Class representing a particular collection of folders.
# Optional constructor arguments are the pathname for the directory
# containing the collection, and the MH profile to use.
# If either is omitted or empty a default is used; the default
# directory is taken from the MH profile if it is specified there.
class MH:
"""Class representing a particular collection of folders.
Optional constructor arguments are the pathname for the directory
containing the collection, and the MH profile to use.
If either is omitted or empty a default is used; the default
directory is taken from the MH profile if it is specified there."""
# Constructor
def __init__(self, path = None, profile = None):
"""Constructor."""
if not profile: profile = MH_PROFILE
self.profile = os.path.expanduser(profile)
if not path: path = self.getprofile('Path')
@ -107,38 +107,38 @@ def __init__(self, path = None, profile = None):
if not os.path.isdir(path): raise Error, 'MH() path not found'
self.path = path
# String representation
def __repr__(self):
"""String representation."""
return 'MH(%s, %s)' % (`self.path`, `self.profile`)
# Routine to print an error. May be overridden by a derived class
def error(self, msg, *args):
"""Routine to print an error. May be overridden by a derived class."""
sys.stderr.write('MH error: %s\n' % (msg % args))
# Return a profile entry, None if not found
def getprofile(self, key):
"""Return a profile entry, None if not found."""
return pickline(self.profile, key)
# Return the path (the name of the collection's directory)
def getpath(self):
"""Return the path (the name of the collection's directory)."""
return self.path
# Return the name of the current folder
def getcontext(self):
"""Return the name of the current folder."""
context = pickline(os.path.join(self.getpath(), 'context'),
'Current-Folder')
if not context: context = 'inbox'
return context
# Set the name of the current folder
def setcontext(self, context):
"""Set the name of the current folder."""
fn = os.path.join(self.getpath(), 'context')
f = open(fn, "w")
f.write("Current-Folder: %s\n" % context)
f.close()
# Return the names of the top-level folders
def listfolders(self):
"""Return the names of the top-level folders."""
folders = []
path = self.getpath()
for name in os.listdir(path):
@ -148,9 +148,9 @@ def listfolders(self):
folders.sort()
return folders
# Return the names of the subfolders in a given folder
# (prefixed with the given folder name)
def listsubfolders(self, name):
"""Return the names of the subfolders in a given folder
(prefixed with the given folder name)."""
fullname = os.path.join(self.path, name)
# Get the link count so we can avoid listing folders
# that have no subfolders.
@ -173,12 +173,12 @@ def listsubfolders(self, name):
subfolders.sort()
return subfolders
# Return the names of all folders, including subfolders, recursively
def listallfolders(self):
"""Return the names of all folders and subfolders, recursively."""
return self.listallsubfolders('')
# Return the names of subfolders in a given folder, recursively
def listallsubfolders(self, name):
"""Return the names of subfolders in a given folder, recursively."""
fullname = os.path.join(self.path, name)
# Get the link count so we can avoid listing folders
# that have no subfolders.
@ -206,13 +206,12 @@ def listallsubfolders(self, name):
subfolders.sort()
return subfolders
# Return a new Folder object for the named folder
def openfolder(self, name):
"""Return a new Folder object for the named folder."""
return Folder(self, name)
# Create a new folder. This raises os.error if the folder
# cannot be created
def makefolder(self, name):
"""Create a new folder (or raise os.error if it cannot be created)."""
protect = pickline(self.profile, 'Folder-Protect')
if protect and isnumeric(protect):
mode = string.atoi(protect, 8)
@ -220,10 +219,9 @@ def makefolder(self, name):
mode = FOLDER_PROTECT
os.mkdir(os.path.join(self.getpath(), name), mode)
# Delete a folder. This removes files in the folder but not
# subdirectories. If deleting the folder itself fails it
# raises os.error
def deletefolder(self, name):
"""Delete a folder. This removes files in the folder but not
subdirectories. Raise os.error if deleting the folder itself fails."""
fullname = os.path.join(self.getpath(), name)
for subname in os.listdir(fullname):
fullsubname = os.path.join(fullname, subname)
@ -235,52 +233,51 @@ def deletefolder(self, name):
os.rmdir(fullname)
# Class representing a particular folder
numericprog = re.compile('^[1-9][0-9]*$')
def isnumeric(str):
return numericprog.match(str) is not None
class Folder:
"""Class representing a particular folder."""
# Constructor
def __init__(self, mh, name):
"""Constructor."""
self.mh = mh
self.name = name
if not os.path.isdir(self.getfullname()):
raise Error, 'no folder %s' % name
# String representation
def __repr__(self):
"""String representation."""
return 'Folder(%s, %s)' % (`self.mh`, `self.name`)
# Error message handler
def error(self, *args):
"""Error message handler."""
apply(self.mh.error, args)
# Return the full pathname of the folder
def getfullname(self):
"""Return the full pathname of the folder."""
return os.path.join(self.mh.path, self.name)
# Return the full pathname of the folder's sequences file
def getsequencesfilename(self):
"""Return the full pathname of the folder's sequences file."""
return os.path.join(self.getfullname(), MH_SEQUENCES)
# Return the full pathname of a message in the folder
def getmessagefilename(self, n):
"""Return the full pathname of a message in the folder."""
return os.path.join(self.getfullname(), str(n))
# Return list of direct subfolders
def listsubfolders(self):
"""Return list of direct subfolders."""
return self.mh.listsubfolders(self.name)
# Return list of all subfolders
def listallsubfolders(self):
"""Return list of all subfolders."""
return self.mh.listallsubfolders(self.name)
# Return the list of messages currently present in the folder.
# As a side effect, set self.last to the last message (or 0)
def listmessages(self):
"""Return the list of messages currently present in the folder.
As a side effect, set self.last to the last message (or 0)."""
messages = []
match = numericprog.match
append = messages.append
@ -295,8 +292,8 @@ def listmessages(self):
self.last = 0
return messages
# Return the set of sequences for the folder
def getsequences(self):
"""Return the set of sequences for the folder."""
sequences = {}
fullname = self.getsequencesfilename()
try:
@ -315,8 +312,8 @@ def getsequences(self):
sequences[key] = value
return sequences
# Write the set of sequences back to the folder
def putsequences(self, sequences):
"""Write the set of sequences back to the folder."""
fullname = self.getsequencesfilename()
f = None
for key in sequences.keys():
@ -332,23 +329,23 @@ def putsequences(self, sequences):
else:
f.close()
# Return the current message. Raise KeyError when there is none
def getcurrent(self):
"""Return the current message. Raise KeyError when there is none."""
seqs = self.getsequences()
try:
return max(seqs['cur'])
except (ValueError, KeyError):
raise Error, "no cur message"
# Set the current message
def setcurrent(self, n):
"""Set the current message."""
updateline(self.getsequencesfilename(), 'cur', str(n), 0)
# Parse an MH sequence specification into a message list.
# Attempt to mimic mh-sequence(5) as close as possible.
# Also attempt to mimic observed behavior regarding which
# conditions cause which error messages
def parsesequence(self, seq):
"""Parse an MH sequence specification into a message list.
Attempt to mimic mh-sequence(5) as closely as possible.
Also attempt to mimic observed behavior regarding which
conditions cause which error messages."""
# XXX Still not complete (see mh-format(5)).
# Missing are:
# - 'prev', 'next' as count
@ -428,8 +425,8 @@ def parsesequence(self, seq):
else:
return [n]
# Internal: parse a message number (or cur, first, etc.)
def _parseindex(self, seq, all):
"""Internal: parse a message number (or cur, first, etc.)."""
if isnumeric(seq):
try:
return string.atoi(seq)
@ -459,12 +456,12 @@ def _parseindex(self, seq, all):
raise Error, "no prev message"
raise Error, None
# Open a message -- returns a Message object
def openmessage(self, n):
"""Open a message -- returns a Message object."""
return Message(self, n)
# Remove one or more messages -- may raise os.error
def removemessages(self, list):
"""Remove one or more messages -- may raise os.error."""
errors = []
deleted = []
for n in list:
@ -488,9 +485,9 @@ def removemessages(self, list):
else:
raise os.error, ('multiple errors:', errors)
# Refile one or more messages -- may raise os.error.
# 'tofolder' is an open folder object
def refilemessages(self, list, tofolder, keepsequences=0):
"""Refile one or more messages -- may raise os.error.
'tofolder' is an open folder object."""
errors = []
refiled = {}
for n in list:
@ -523,8 +520,8 @@ def refilemessages(self, list, tofolder, keepsequences=0):
else:
raise os.error, ('multiple errors:', errors)
# Helper for refilemessages() to copy sequences
def _copysequences(self, fromfolder, refileditems):
"""Helper for refilemessages() to copy sequences."""
fromsequences = fromfolder.getsequences()
tosequences = self.getsequences()
changed = 0
@ -544,9 +541,9 @@ def _copysequences(self, fromfolder, refileditems):
if changed:
self.putsequences(tosequences)
# Move one message over a specific destination message,
# which may or may not already exist.
def movemessage(self, n, tofolder, ton):
"""Move one message over a specific destination message,
which may or may not already exist."""
path = self.getmessagefilename(n)
# Open it to check that it exists
f = open(path)
@ -576,9 +573,9 @@ def movemessage(self, n, tofolder, ton):
os.unlink(path)
self.removefromallsequences([n])
# Copy one message over a specific destination message,
# which may or may not already exist.
def copymessage(self, n, tofolder, ton):
"""Copy one message over a specific destination message,
which may or may not already exist."""
path = self.getmessagefilename(n)
# Open it to check that it exists
f = open(path)
@ -602,8 +599,8 @@ def copymessage(self, n, tofolder, ton):
except os.error:
pass
# Create a message, with text from the open file txt.
def createmessage(self, n, txt):
"""Create a message, with text from the open file txt."""
path = self.getmessagefilename(n)
backuppath = self.getmessagefilename(',%d' % n)
try:
@ -628,9 +625,9 @@ def createmessage(self, n, txt):
except os.error:
pass
# Remove one or more messages from all sequeuces (including last)
# -- but not from 'cur'!!!
def removefromallsequences(self, list):
"""Remove one or more messages from all sequeuces (including last)
-- but not from 'cur'!!!"""
if hasattr(self, 'last') and self.last in list:
del self.last
sequences = self.getsequences()
@ -647,14 +644,14 @@ def removefromallsequences(self, list):
if changed:
self.putsequences(sequences)
# Return the last message number
def getlast(self):
"""Return the last message number."""
if not hasattr(self, 'last'):
messages = self.listmessages()
return self.last
# Set the last message number
def setlast(self, last):
"""Set the last message number."""
if last is None:
if hasattr(self, 'last'):
del self.last
@ -663,8 +660,8 @@ def setlast(self, last):
class Message(mimetools.Message):
# Constructor
def __init__(self, f, n, fp = None):
"""Constructor."""
self.folder = f
self.number = n
if not fp:
@ -672,15 +669,15 @@ def __init__(self, f, n, fp = None):
fp = open(path, 'r')
mimetools.Message.__init__(self, fp)
# String representation
def __repr__(self):
"""String representation."""
return 'Message(%s, %s)' % (repr(self.folder), self.number)
# Return the message's header text as a string. If an
# argument is specified, it is used as a filter predicate to
# decide which headers to return (its argument is the header
# name converted to lower case).
def getheadertext(self, pred = None):
"""Return the message's header text as a string. If an
argument is specified, it is used as a filter predicate to
decide which headers to return (its argument is the header
name converted to lower case)."""
if not pred:
return string.joinfields(self.headers, '')
headers = []
@ -693,11 +690,11 @@ def getheadertext(self, pred = None):
if hit: headers.append(line)
return string.joinfields(headers, '')
# Return the message's body text as string. This undoes a
# Content-Transfer-Encoding, but does not interpret other MIME
# features (e.g. multipart messages). To suppress to
# decoding, pass a 0 as argument
def getbodytext(self, decode = 1):
"""Return the message's body text as string. This undoes a
Content-Transfer-Encoding, but does not interpret other MIME
features (e.g. multipart messages). To suppress decoding,
pass 0 as an argument."""
self.fp.seek(self.startofbody)
encoding = self.getencoding()
if not decode or encoding in ('', '7bit', '8bit', 'binary'):
@ -707,10 +704,10 @@ def getbodytext(self, decode = 1):
mimetools.decode(self.fp, output, encoding)
return output.getvalue()
# Only for multipart messages: return the message's body as a
# list of SubMessage objects. Each submessage object behaves
# (almost) as a Message object.
def getbodyparts(self):
"""Only for multipart messages: return the message's body as a
list of SubMessage objects. Each submessage object behaves
(almost) as a Message object."""
if self.getmaintype() != 'multipart':
raise Error, 'Content-Type is not multipart/*'
bdry = self.getparam('boundary')
@ -727,8 +724,8 @@ def getbodyparts(self):
mf.pop()
return parts
# Return body, either a string or a list of messages
def getbody(self):
"""Return body, either a string or a list of messages."""
if self.getmaintype() == 'multipart':
return self.getbodyparts()
else:
@ -737,8 +734,8 @@ def getbody(self):
class SubMessage(Message):
# Constructor
def __init__(self, f, n, fp):
"""Constructor."""
Message.__init__(self, f, n, fp)
if self.getmaintype() == 'multipart':
self.body = Message.getbodyparts(self)
@ -747,8 +744,8 @@ def __init__(self, f, n, fp):
self.bodyencoded = Message.getbodytext(self, decode=0)
# XXX If this is big, should remember file pointers
# String representation
def __repr__(self):
"""String representation."""
f, n, fp = self.folder, self.number, self.fp
return 'SubMessage(%s, %s, %s)' % (f, n, fp)
@ -766,28 +763,28 @@ def getbody(self):
return self.body
# Class implementing sets of integers.
#
# This is an efficient representation for sets consisting of several
# continuous ranges, e.g. 1-100,200-400,402-1000 is represented
# internally as a list of three pairs: [(1,100), (200,400),
# (402,1000)]. The internal representation is always kept normalized.
#
# The constructor has up to three arguments:
# - the string used to initialize the set (default ''),
# - the separator between ranges (default ',')
# - the separator between begin and end of a range (default '-')
# The separators must be strings (not regexprs) and should be different.
#
# The tostring() function yields a string that can be passed to another
# IntSet constructor; __repr__() is a valid IntSet constructor itself.
#
# XXX The default begin/end separator means that negative numbers are
# not supported very well.
#
# XXX There are currently no operations to remove set elements.
class IntSet:
"""Class implementing sets of integers.
This is an efficient representation for sets consisting of several
continuous ranges, e.g. 1-100,200-400,402-1000 is represented
internally as a list of three pairs: [(1,100), (200,400),
(402,1000)]. The internal representation is always kept normalized.
The constructor has up to three arguments:
- the string used to initialize the set (default ''),
- the separator between ranges (default ',')
- the separator between begin and end of a range (default '-')
The separators must be strings (not regexprs) and should be different.
The tostring() function yields a string that can be passed to another
IntSet constructor; __repr__() is a valid IntSet constructor itself.
"""
# XXX The default begin/end separator means that negative numbers are
# not supported very well.
#
# XXX There are currently no operations to remove set elements.
def __init__(self, data = None, sep = ',', rng = '-'):
self.pairs = []
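A small sketch of the IntSet interface the class docstring describes; only the constructor, tostring() and __repr__() mentioned there are used, and the values are taken from the docstring's own example.

# Illustrative only.
s = IntSet('1-100,200-400,402-1000')
print s.tostring()      # an equivalent normalized string, reusable as input
print `s`               # repr() is itself a valid IntSet constructor call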

View file

@ -1,4 +1,4 @@
# Various tools used by MIME-reading or MIME-writing programs.
"""Various tools used by MIME-reading or MIME-writing programs."""
import os
@ -7,10 +7,9 @@
import tempfile
# A derived class of rfc822.Message that knows about MIME headers and
# contains some hooks for decoding encoded and multipart messages.
class Message(rfc822.Message):
"""A derived class of rfc822.Message that knows about MIME headers and
contains some hooks for decoding encoded and multipart messages."""
def __init__(self, fp, seekable = 1):
rfc822.Message.__init__(self, fp, seekable)
@ -96,17 +95,17 @@ def getsubtype(self):
# -----------------
# Return a random string usable as a multipart boundary.
# The method used is so that it is *very* unlikely that the same
# string of characters will every occur again in the Universe,
# so the caller needn't check the data it is packing for the
# occurrence of the boundary.
#
# The boundary contains dots so you have to quote it in the header.
_prefix = None
def choose_boundary():
"""Return a random string usable as a multipart boundary.
The method used is so that it is *very* unlikely that the same
string of characters will ever occur again in the Universe,
so the caller needn't check the data it is packing for the
occurrence of the boundary.
The boundary contains dots so you have to quote it in the header."""
global _prefix
import time
import random
@ -131,6 +130,7 @@ def choose_boundary():
# Subroutines for decoding some common content-transfer-types
def decode(input, output, encoding):
"""Decode common content-transfer-encodings (base64, quopri, uuencode)."""
if encoding == 'base64':
import base64
return base64.decode(input, output)
@ -147,6 +147,7 @@ def decode(input, output, encoding):
'unknown Content-Transfer-Encoding: %s' % encoding
def encode(input, output, encoding):
"""Encode common content-transfer-encodings (base64, quopri, uuencode)."""
if encoding == 'base64':
import base64
return base64.encode(input, output)
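A minimal sketch of decode() as described above, using StringIO objects in place of real message files; the sample data is just 'hello world' in base64.

# Undo a base64 Content-Transfer-Encoding into a second file object.
import mimetools, StringIO
encoded = StringIO.StringIO('aGVsbG8gd29ybGQ=\n')
decoded = StringIO.StringIO()
mimetools.decode(encoded, decoded, 'base64')
print decoded.getvalue()        # -> 'hello world'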

View file

@ -2,7 +2,7 @@
'''Mimification and unmimification of mail messages.
decode quoted-printable parts of a mail message or encode using
Decode quoted-printable parts of a mail message or encode using
quoted-printable.
Usage:
@ -39,9 +39,8 @@
repl = re.compile('^subject:\\s+re: ', re.I)
class File:
'''A simple fake file object that knows about limited
read-ahead and boundaries.
The only supported method is readline().'''
"""A simple fake file object that knows about limited read-ahead and
boundaries. The only supported method is readline()."""
def __init__(self, file, boundary):
self.file = file
@ -87,7 +86,7 @@ def readline(self):
self.peek = None
def mime_decode(line):
'''Decode a single line of quoted-printable text to 8bit.'''
"""Decode a single line of quoted-printable text to 8bit."""
newline = ''
pos = 0
while 1:
@ -100,7 +99,7 @@ def mime_decode(line):
return newline + line[pos:]
def mime_decode_header(line):
'''Decode a header line to 8bit.'''
"""Decode a header line to 8bit."""
newline = ''
pos = 0
while 1:
@ -115,7 +114,7 @@ def mime_decode_header(line):
return newline + line[pos:]
def unmimify_part(ifile, ofile, decode_base64 = 0):
'''Convert a quoted-printable part of a MIME mail message to 8bit.'''
"""Convert a quoted-printable part of a MIME mail message to 8bit."""
multipart = None
quoted_printable = 0
is_base64 = 0
@ -200,7 +199,7 @@ def unmimify_part(ifile, ofile, decode_base64 = 0):
ofile.write(pref + line)
def unmimify(infile, outfile, decode_base64 = 0):
'''Convert quoted-printable parts of a MIME mail message to 8bit.'''
"""Convert quoted-printable parts of a MIME mail message to 8bit."""
if type(infile) == type(''):
ifile = open(infile)
if type(outfile) == type('') and infile == outfile:
@ -221,8 +220,8 @@ def unmimify(infile, outfile, decode_base64 = 0):
mime_header_char = re.compile('[=?\177-\377]') # quote these in header
def mime_encode(line, header):
'''Code a single line as quoted-printable.
If header is set, quote some extra characters.'''
"""Code a single line as quoted-printable.
If header is set, quote some extra characters."""
if header:
reg = mime_header_char
else:
@ -255,7 +254,7 @@ def mime_encode(line, header):
mime_header = re.compile('([ \t(]|^)([-a-zA-Z0-9_+]*[\177-\377][-a-zA-Z0-9_+\177-\377]*)([ \t)]|\n)')
def mime_encode_header(line):
'''Code a single header line as quoted-printable.'''
"""Code a single header line as quoted-printable."""
newline = ''
pos = 0
while 1:
@ -273,7 +272,7 @@ def mime_encode_header(line):
iso_char = re.compile('[\177-\377]')
def mimify_part(ifile, ofile, is_mime):
'''Convert an 8bit part of a MIME mail message to quoted-printable.'''
"""Convert an 8bit part of a MIME mail message to quoted-printable."""
has_cte = is_qp = is_base64 = 0
multipart = None
must_quote_body = must_quote_header = has_iso_chars = 0
@ -408,7 +407,7 @@ def mimify_part(ifile, ofile, is_mime):
ofile.write(line)
def mimify(infile, outfile):
'''Convert 8bit parts of a MIME mail message to quoted-printable.'''
"""Convert 8bit parts of a MIME mail message to quoted-printable."""
if type(infile) == type(''):
ifile = open(infile)
if type(outfile) == type('') and infile == outfile:
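A short sketch of the two module-level entry points shown above; the file names are purely illustrative.

# Quote 8bit parts as quoted-printable, then undo it again.
import mimify
mimify.mimify('message.txt', 'message.qp')
mimify.unmimify('message.qp', 'message.8bit')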

View file

@ -1,28 +1,31 @@
# A class that makes each part of a multipart message "feel" like an
# ordinary file, as long as you use fp.readline(). Allows recursive
# use, for nested multipart messages. Probably best used together
# with module mimetools.
#
# Suggested use:
#
# real_fp = open(...)
# fp = MultiFile(real_fp)
#
# "read some lines from fp"
# fp.push(separator)
# while 1:
# "read lines from fp until it returns an empty string" (A)
# if not fp.next(): break
# fp.pop()
# "read remaining lines from fp until it returns an empty string"
#
# The latter sequence may be used recursively at (A).
# It is also allowed to use multiple push()...pop() sequences.
#
# If seekable is given as 0, the class code will not do the bookeeping
# it normally attempts in order to make seeks relative to the beginning of the
# current file part. This may be useful when using MultiFile with a non-
# seekable stream object.
"""A readline()-style interface to the parts of a multipart message.
The MultiFile class makes each part of a multipart message "feel" like
an ordinary file, as long as you use fp.readline(). Allows recursive
use, for nested multipart messages. Probably best used together
with module mimetools.
Suggested use:
real_fp = open(...)
fp = MultiFile(real_fp)
"read some lines from fp"
fp.push(separator)
while 1:
"read lines from fp until it returns an empty string" (A)
if not fp.next(): break
fp.pop()
"read remaining lines from fp until it returns an empty string"
The latter sequence may be used recursively at (A).
It is also allowed to use multiple push()...pop() sequences.
If seekable is given as 0, the class code will not do the bookkeeping
it normally attempts in order to make seeks relative to the beginning of the
current file part. This may be useful when using MultiFile with a non-
seekable stream object.
"""
import sys
import string
@ -30,9 +33,9 @@
Error = 'multifile.Error'
class MultiFile:
#
seekable = 0
#
def __init__(self, fp, seekable=1):
self.fp = fp
self.stack = [] # Grows down
@ -42,12 +45,12 @@ def __init__(self, fp, seekable=1):
self.seekable = 1
self.start = self.fp.tell()
self.posstack = [] # Grows down
#
def tell(self):
if self.level > 0:
return self.lastpos
return self.fp.tell() - self.start
#
def seek(self, pos, whence=0):
here = self.tell()
if whence:
@ -64,7 +67,7 @@ def seek(self, pos, whence=0):
self.fp.seek(pos + self.start)
self.level = 0
self.last = 0
#
def readline(self):
if self.level > 0:
return ''
@ -105,7 +108,7 @@ def readline(self):
if self.level > 1:
raise Error,'Missing endmarker in MultiFile.readline()'
return ''
#
def readlines(self):
list = []
while 1:
@ -113,10 +116,10 @@ def readlines(self):
if not line: break
list.append(line)
return list
#
def read(self): # Note: no size argument -- read until EOF only!
return string.joinfields(self.readlines(), '')
#
def next(self):
while self.readline(): pass
if self.level > 1 or self.last:
@ -126,7 +129,7 @@ def next(self):
if self.seekable:
self.start = self.fp.tell()
return 1
#
def push(self, sep):
if self.level > 0:
raise Error, 'bad MultiFile.push() call'
@ -134,7 +137,7 @@ def push(self, sep):
if self.seekable:
self.posstack.insert(0, self.start)
self.start = self.fp.tell()
#
def pop(self):
if self.stack == []:
raise Error, 'bad MultiFile.pop() call'
@ -149,12 +152,12 @@ def pop(self):
del self.posstack[0]
if self.level > 0:
self.lastpos = abslastpos - self.start
#
def is_data(self, line):
return line[:2] <> '--'
#
def section_divider(self, str):
return "--" + str
#
def end_marker(self, str):
return "--" + str + "--"

View file

@ -1,58 +1,51 @@
# Mutual exclusion -- for use with module sched
"""Mutual exclusion -- for use with module sched
A mutex has two pieces of state -- a 'locked' bit and a queue.
When the mutex is not locked, the queue is empty.
Otherwise, the queue contains 0 or more (function, argument) pairs
representing functions (or methods) waiting to acquire the lock.
When the mutex is unlocked while the queue is not empty,
the first queue entry is removed and its function(argument) pair called,
implying it now has the lock.
Of course, no multi-threading is implied -- hence the funny interface
for lock, where a function is called once the lock is acquired.
"""
# A mutex has two pieces of state -- a 'locked' bit and a queue.
# When the mutex is not locked, the queue is empty.
# Otherwise, the queue contains 0 or more (function, argument) pairs
# representing functions (or methods) waiting to acquire the lock.
# When the mutex is unlocked while the queue is not empty,
# the first queue entry is removed and its function(argument) pair called,
# implying it now has the lock.
#
# Of course, no multi-threading is implied -- hence the funny interface
# for lock, where a function is called once the lock is aquired.
#
class mutex:
#
# Create a new mutex -- initially unlocked
#
def __init__(self):
"""Create a new mutex -- initially unlocked."""
self.locked = 0
self.queue = []
#
# Test the locked bit of the mutex
#
def test(self):
"""Test the locked bit of the mutex."""
return self.locked
#
# Atomic test-and-set -- grab the lock if it is not set,
# return true if it succeeded
#
def testandset(self):
"""Atomic test-and-set -- grab the lock if it is not set,
return true if it succeeded."""
if not self.locked:
self.locked = 1
return 1
else:
return 0
#
# Lock a mutex, call the function with supplied argument
# when it is acquired.
# If the mutex is already locked, place function and argument
# in the queue.
#
def lock(self, function, argument):
"""Lock a mutex, call the function with supplied argument
when it is acquired. If the mutex is already locked, place
function and argument in the queue."""
if self.testandset():
function(argument)
else:
self.queue.append((function, argument))
#
# Unlock a mutex. If the queue is not empty, call the next
# function with its argument.
#
def unlock(self):
"""Unlock a mutex. If the queue is not empty, call the next
function with its argument."""
if self.queue:
function, argument = self.queue[0]
del self.queue[0]
function(argument)
else:
self.locked = 0
#
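A tiny sketch of the call-when-acquired interface described in the docstrings above.

# The second lock() call is queued and only runs once unlock() is called.
import mutex
m = mutex.mutex()

def job(name):
    print 'running', name

m.lock(job, 'first')    # mutex was free: job('first') runs, lock is now held
m.lock(job, 'second')   # lock is held: (job, 'second') is queued instead
m.unlock()              # dequeues and calls job('second'); lock stays held
m.unlock()              # queue empty now: the mutex really becomes unlocked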

View file

@ -1,3 +1,5 @@
"""An object-oriented interface to .netrc files."""
# Module and documentation by Eric S. Raymond, 21 Dec 1998
import os, shlex
@ -63,7 +65,7 @@ def __init__(self, file=None):
raise SyntaxError, "bad follower token %s, file %s, line %d"%(tt,file,lexer.lineno)
def authenticators(self, host):
"Return a (user, account, password) tuple for given host."
"""Return a (user, account, password) tuple for given host."""
if self.hosts.has_key(host):
return self.hosts[host]
elif self.hosts.has_key('default'):
@ -72,7 +74,7 @@ def authenticators(self, host):
return None
def __repr__(self):
"Dump the class data in the format of a .netrc file"
"""Dump the class data in the format of a .netrc file."""
rep = ""
for host in self.hosts.keys():
attrs = self.hosts[host]
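A short sketch of the lookup interface shown above; the host name is illustrative and a readable .netrc file is assumed.

# Look up credentials for a host from the user's .netrc file.
import netrc
auth = netrc.netrc()                            # parses ~/.netrc by default
login = auth.authenticators('ftp.example.com')
if login:
    user, account, password = login
    print 'user for ftp.example.com is', user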

View file

@ -1,31 +1,31 @@
# An NNTP client class. Based on RFC 977: Network News Transfer
# Protocol, by Brian Kantor and Phil Lapsley.
"""An NNTP client class based on RFC 977: Network News Transfer Protocol.
Example:
# Example:
#
# >>> from nntplib import NNTP
# >>> s = NNTP('news')
# >>> resp, count, first, last, name = s.group('comp.lang.python')
# >>> print 'Group', name, 'has', count, 'articles, range', first, 'to', last
# Group comp.lang.python has 51 articles, range 5770 to 5821
# >>> resp, subs = s.xhdr('subject', first + '-' + last)
# >>> resp = s.quit()
# >>>
#
# Here 'resp' is the server response line.
# Error responses are turned into exceptions.
#
# To post an article from a file:
# >>> f = open(filename, 'r') # file containing article, including header
# >>> resp = s.post(f)
# >>>
#
# For descriptions of all methods, read the comments in the code below.
# Note that all arguments and return values representing article numbers
# are strings, not numbers, since they are rarely used for calculations.
>>> from nntplib import NNTP
>>> s = NNTP('news')
>>> resp, count, first, last, name = s.group('comp.lang.python')
>>> print 'Group', name, 'has', count, 'articles, range', first, 'to', last
Group comp.lang.python has 51 articles, range 5770 to 5821
>>> resp, subs = s.xhdr('subject', first + '-' + last)
>>> resp = s.quit()
>>>
# (xover, xgtitle, xpath, date methods by Kevan Heydon)
Here 'resp' is the server response line.
Error responses are turned into exceptions.
To post an article from a file:
>>> f = open(filename, 'r') # file containing article, including header
>>> resp = s.post(f)
>>>
For descriptions of all methods, read the comments in the code below.
Note that all arguments and return values representing article numbers
are strings, not numbers, since they are rarely used for calculations.
"""
# RFC 977 by Brian Kantor and Phil Lapsley.
# xover, xgtitle, xpath, date methods by Kevan Heydon
# Imports
@ -35,7 +35,6 @@
# Exception raised when an error or invalid response is received
error_reply = 'nntplib.error_reply' # unexpected [123]xx reply
error_temp = 'nntplib.error_temp' # 4xx errors
error_perm = 'nntplib.error_perm' # 5xx errors
@ -59,11 +58,11 @@
class NNTP:
# Initialize an instance. Arguments:
# - host: hostname to connect to
# - port: port to connect to (default the standard NNTP port)
def __init__(self, host, port = NNTP_PORT, user=None, password=None):
"""Initialize an instance. Arguments:
- host: hostname to connect to
- port: port to connect to (default the standard NNTP port)"""
self.host = host
self.port = port
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
@ -82,38 +81,38 @@ def __init__(self, host, port = NNTP_PORT, user=None, password=None):
if resp[:3] != '281':
raise error_perm, resp
# Get the welcome message from the server
# (this is read and squirreled away by __init__()).
# If the response code is 200, posting is allowed;
# if it 201, posting is not allowed
def getwelcome(self):
"""Get the welcome message from the server
(this is read and squirreled away by __init__()).
If the response code is 200, posting is allowed;
if it is 201, posting is not allowed."""
if self.debugging: print '*welcome*', `self.welcome`
return self.welcome
# Set the debugging level. Argument level means:
# 0: no debugging output (default)
# 1: print commands and responses but not body text etc.
# 2: also print raw lines read and sent before stripping CR/LF
def set_debuglevel(self, level):
"""Set the debugging level. Argument 'level' means:
0: no debugging output (default)
1: print commands and responses but not body text etc.
2: also print raw lines read and sent before stripping CR/LF"""
self.debugging = level
debug = set_debuglevel
# Internal: send one line to the server, appending CRLF
def putline(self, line):
"""Internal: send one line to the server, appending CRLF."""
line = line + CRLF
if self.debugging > 1: print '*put*', `line`
self.sock.send(line)
# Internal: send one command to the server (through putline())
def putcmd(self, line):
"""Internal: send one command to the server (through putline())."""
if self.debugging: print '*cmd*', `line`
self.putline(line)
# Internal: return one line from the server, stripping CRLF.
# Raise EOFError if the connection is closed
def getline(self):
"""Internal: return one line from the server, stripping CRLF.
Raise EOFError if the connection is closed."""
line = self.file.readline()
if self.debugging > 1:
print '*get*', `line`
@ -122,9 +121,9 @@ def getline(self):
elif line[-1:] in CRLF: line = line[:-1]
return line
# Internal: get a response from the server.
# Raise various errors if the response indicates an error
def getresp(self):
"""Internal: get a response from the server.
Raise various errors if the response indicates an error."""
resp = self.getline()
if self.debugging: print '*resp*', `resp`
c = resp[:1]
@ -136,9 +135,9 @@ def getresp(self):
raise error_proto, resp
return resp
# Internal: get a response plus following text from the server.
# Raise various errors if the response indicates an error
def getlongresp(self):
"""Internal: get a response plus following text from the server.
Raise various errors if the response indicates an error."""
resp = self.getresp()
if resp[:3] not in LONGRESP:
raise error_reply, resp
@ -152,59 +151,59 @@ def getlongresp(self):
list.append(line)
return resp, list
# Internal: send a command and get the response
def shortcmd(self, line):
"""Internal: send a command and get the response."""
self.putcmd(line)
return self.getresp()
# Internal: send a command and get the response plus following text
def longcmd(self, line):
"""Internal: send a command and get the response plus following text."""
self.putcmd(line)
return self.getlongresp()
# Process a NEWGROUPS command. Arguments:
# - date: string 'yymmdd' indicating the date
# - time: string 'hhmmss' indicating the time
# Return:
# - resp: server response if succesful
# - list: list of newsgroup names
def newgroups(self, date, time):
"""Process a NEWGROUPS command. Arguments:
- date: string 'yymmdd' indicating the date
- time: string 'hhmmss' indicating the time
Return:
- resp: server response if successful
- list: list of newsgroup names"""
return self.longcmd('NEWGROUPS ' + date + ' ' + time)
# Process a NEWNEWS command. Arguments:
# - group: group name or '*'
# - date: string 'yymmdd' indicating the date
# - time: string 'hhmmss' indicating the time
# Return:
# - resp: server response if succesful
# - list: list of article ids
def newnews(self, group, date, time):
"""Process a NEWNEWS command. Arguments:
- group: group name or '*'
- date: string 'yymmdd' indicating the date
- time: string 'hhmmss' indicating the time
Return:
- resp: server response if successful
- list: list of article ids"""
cmd = 'NEWNEWS ' + group + ' ' + date + ' ' + time
return self.longcmd(cmd)
# Process a LIST command. Return:
# - resp: server response if succesful
# - list: list of (group, last, first, flag) (strings)
def list(self):
"""Process a LIST command. Return:
- resp: server response if successful
- list: list of (group, last, first, flag) (strings)"""
resp, list = self.longcmd('LIST')
for i in range(len(list)):
# Parse lines into "group last first flag"
list[i] = tuple(string.split(list[i]))
return resp, list
# Process a GROUP command. Argument:
# - group: the group name
# Returns:
# - resp: server response if succesful
# - count: number of articles (string)
# - first: first article number (string)
# - last: last article number (string)
# - name: the group name
def group(self, name):
"""Process a GROUP command. Argument:
- group: the group name
Returns:
- resp: server response if successful
- count: number of articles (string)
- first: first article number (string)
- last: last article number (string)
- name: the group name"""
resp = self.shortcmd('GROUP ' + name)
if resp[:3] <> '211':
raise error_reply, resp
@ -221,15 +220,15 @@ def group(self, name):
name = string.lower(words[4])
return resp, count, first, last, name
# Process a HELP command. Returns:
# - resp: server response if succesful
# - list: list of strings
def help(self):
"""Process a HELP command. Returns:
- resp: server response if successful
- list: list of strings"""
return self.longcmd('HELP')
# Internal: parse the response of a STAT, NEXT or LAST command
def statparse(self, resp):
"""Internal: parse the response of a STAT, NEXT or LAST command."""
if resp[:2] <> '22':
raise error_reply, resp
words = string.split(resp)
@ -242,84 +241,82 @@ def statparse(self, resp):
id = words[2]
return resp, nr, id
# Internal: process a STAT, NEXT or LAST command
def statcmd(self, line):
"""Internal: process a STAT, NEXT or LAST command."""
resp = self.shortcmd(line)
return self.statparse(resp)
# Process a STAT command. Argument:
# - id: article number or message id
# Returns:
# - resp: server response if succesful
# - nr: the article number
# - id: the article id
def stat(self, id):
"""Process a STAT command. Argument:
- id: article number or message id
Returns:
- resp: server response if successful
- nr: the article number
- id: the article id"""
return self.statcmd('STAT ' + id)
# Process a NEXT command. No arguments. Return as for STAT
def next(self):
"""Process a NEXT command. No arguments. Return as for STAT."""
return self.statcmd('NEXT')
# Process a LAST command. No arguments. Return as for STAT
def last(self):
"""Process a LAST command. No arguments. Return as for STAT."""
return self.statcmd('LAST')
# Internal: process a HEAD, BODY or ARTICLE command
def artcmd(self, line):
"""Internal: process a HEAD, BODY or ARTICLE command."""
resp, list = self.longcmd(line)
resp, nr, id = self.statparse(resp)
return resp, nr, id, list
# Process a HEAD command. Argument:
# - id: article number or message id
# Returns:
# - resp: server response if succesful
# - nr: article number
# - id: message id
# - list: the lines of the article's header
def head(self, id):
"""Process a HEAD command. Argument:
- id: article number or message id
Returns:
- resp: server response if successful
- nr: article number
- id: message id
- list: the lines of the article's header"""
return self.artcmd('HEAD ' + id)
# Process a BODY command. Argument:
# - id: article number or message id
# Returns:
# - resp: server response if succesful
# - nr: article number
# - id: message id
# - list: the lines of the article's body
def body(self, id):
"""Process a BODY command. Argument:
- id: article number or message id
Returns:
- resp: server response if successful
- nr: article number
- id: message id
- list: the lines of the article's body"""
return self.artcmd('BODY ' + id)
# Process an ARTICLE command. Argument:
# - id: article number or message id
# Returns:
# - resp: server response if succesful
# - nr: article number
# - id: message id
# - list: the lines of the article
def article(self, id):
"""Process an ARTICLE command. Argument:
- id: article number or message id
Returns:
- resp: server response if successful
- nr: article number
- id: message id
- list: the lines of the article"""
return self.artcmd('ARTICLE ' + id)
# Process a SLAVE command. Returns:
# - resp: server response if succesful
def slave(self):
"""Process a SLAVE command. Returns:
- resp: server response if successful"""
return self.shortcmd('SLAVE')
# Process an XHDR command (optional server extension). Arguments:
# - hdr: the header type (e.g. 'subject')
# - str: an article nr, a message id, or a range nr1-nr2
# Returns:
# - resp: server response if succesful
# - list: list of (nr, value) strings
def xhdr(self, hdr, str):
"""Process an XHDR command (optional server extension). Arguments:
- hdr: the header type (e.g. 'subject')
- str: an article nr, a message id, or a range nr1-nr2
Returns:
- resp: server response if successful
- list: list of (nr, value) strings"""
pat = re.compile('^([0-9]+) ?(.*)\n?')
resp, lines = self.longcmd('XHDR ' + hdr + ' ' + str)
for i in range(len(lines)):
@ -329,14 +326,15 @@ def xhdr(self, hdr, str):
lines[i] = m.group(1, 2)
return resp, lines
# Process an XOVER command (optional server extension) Arguments:
# - start: start of range
# - end: end of range
# Returns:
# - resp: server response if succesful
# - list: list of (art-nr, subject, poster, date, id, refrences, size, lines)
def xover(self,start,end):
"""Process an XOVER command (optional server extension) Arguments:
- start: start of range
- end: end of range
Returns:
- resp: server response if successful
- list: list of (art-nr, subject, poster, date,
id, references, size, lines)"""
resp, lines = self.longcmd('XOVER ' + start + '-' + end)
xover_lines = []
for line in lines:
@ -354,13 +352,13 @@ def xover(self,start,end):
raise error_data,line
return resp,xover_lines
# Process an XGTITLE command (optional server extension) Arguments:
# - group: group name wildcard (i.e. news.*)
# Returns:
# - resp: server response if succesful
# - list: list of (name,title) strings
def xgtitle(self, group):
"""Process an XGTITLE command (optional server extension) Arguments:
- group: group name wildcard (i.e. news.*)
Returns:
- resp: server response if successful
- list: list of (name,title) strings"""
line_pat = re.compile("^([^ \t]+)[ \t]+(.*)$")
resp, raw_lines = self.longcmd('XGTITLE ' + group)
lines = []
@ -370,13 +368,13 @@ def xgtitle(self, group):
lines.append(match.group(1, 2))
return resp, lines
# Process an XPATH command (optional server extension) Arguments:
# - id: Message id of article
# Returns:
# resp: server response if succesful
# path: directory path to article
def xpath(self,id):
"""Process an XPATH command (optional server extension) Arguments:
- id: Message id of article
Returns:
resp: server response if successful
path: directory path to article"""
resp = self.shortcmd("XPATH " + id)
if resp[:3] <> '223':
raise error_reply, resp
@ -387,14 +385,14 @@ def xpath(self,id):
else:
return resp, path
# Process the DATE command. Arguments:
# None
# Returns:
# resp: server response if succesful
# date: Date suitable for newnews/newgroups commands etc.
# time: Time suitable for newnews/newgroups commands etc.
def date (self):
"""Process the DATE command. Arguments:
None
Returns:
resp: server response if successful
date: Date suitable for newnews/newgroups commands etc.
time: Time suitable for newnews/newgroups commands etc."""
resp = self.shortcmd("DATE")
if resp[:3] <> '111':
raise error_reply, resp
@ -408,12 +406,12 @@ def date (self):
return resp, date, time
# Process a POST command. Arguments:
# - f: file containing the article
# Returns:
# - resp: server response if succesful
def post(self, f):
"""Process a POST command. Arguments:
- f: file containing the article
Returns:
- resp: server response if successful"""
resp = self.shortcmd('POST')
# Raises error_??? if posting is not allowed
if resp[0] <> '3':
@ -430,14 +428,14 @@ def post(self, f):
self.putline('.')
return self.getresp()
# Process an IHAVE command. Arguments:
# - id: message-id of the article
# - f: file containing the article
# Returns:
# - resp: server response if succesful
# Note that if the server refuses the article an exception is raised
def ihave(self, id, f):
"""Process an IHAVE command. Arguments:
- id: message-id of the article
- f: file containing the article
Returns:
- resp: server response if successful
Note that if the server refuses the article an exception is raised."""
resp = self.shortcmd('IHAVE ' + id)
# Raises error_??? if the server already has it
if resp[0] <> '3':
@ -454,10 +452,10 @@ def ihave(self, id, f):
self.putline('.')
return self.getresp()
# Process a QUIT command and close the socket. Returns:
# - resp: server response if succesful
def quit(self):
"""Process a QUIT command and close the socket. Returns:
- resp: server response if successful"""
resp = self.shortcmd('QUIT')
self.file.close()
self.sock.close()
@ -465,8 +463,8 @@ def quit(self):
return resp
# Minimal test function
def _test():
"""Minimal test function."""
s = NNTP('news')
resp, count, first, last, name = s.group('comp.lang.python')
print resp
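A small follow-on to the docstring's own example, exercising the xover extension documented above; the server name is illustrative.

# Fetch overview data for the first few articles in a group.
from nntplib import NNTP
s = NNTP('news')
resp, count, first, last, name = s.group('comp.lang.python')
resp, overviews = s.xover(first, last)
for (nr, subject, poster, date, id, refs, size, lines) in overviews[:5]:
    print nr, subject
s.quit()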

View file

@ -1,6 +1,4 @@
#
# nturl2path convert a NT pathname to a file URL and
# vice versa
"""Convert a NT pathname to a file URL and vice versa."""
def url2pathname(url):
""" Convert a URL to a DOS path...
@ -34,7 +32,6 @@ def url2pathname(url):
return path
def pathname2url(p):
""" Convert a DOS path name to a file url...
C:\foo\bar\spam.foo
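A hedged sketch of the two converters above; the exact URL spelling depends on the module version, so only the round trip is shown.

# Round-trip an illustrative DOS path through the converters above.
import nturl2path
url = nturl2path.pathname2url(r'C:\foo\bar\spam.foo')
path = nturl2path.url2pathname(url)     # expected to match the original path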

View file

@ -1,21 +1,22 @@
# os.py -- either mac, dos or posix depending on what system we're on.
"""os.py -- either mac, dos or posix depending on what system we're on.
# This exports:
# - all functions from either posix or mac, e.g., os.unlink, os.stat, etc.
# - os.path is either module posixpath or macpath
# - os.name is either 'posix' or 'mac'
# - os.curdir is a string representing the current directory ('.' or ':')
# - os.pardir is a string representing the parent directory ('..' or '::')
# - os.sep is the (or a most common) pathname separator ('/' or ':' or '\\')
# - os.altsep is the alternatte pathname separator (None or '/')
# - os.pathsep is the component separator used in $PATH etc
# - os.defpath is the default search path for executables
This exports:
- all functions from either posix or mac, e.g., os.unlink, os.stat, etc.
- os.path is either module posixpath or macpath
- os.name is either 'posix' or 'mac'
- os.curdir is a string representing the current directory ('.' or ':')
- os.pardir is a string representing the parent directory ('..' or '::')
- os.sep is the (or a most common) pathname separator ('/' or ':' or '\\')
- os.altsep is the alternate pathname separator (None or '/')
- os.pathsep is the component separator used in $PATH etc
- os.defpath is the default search path for executables
# Programs that import and use 'os' stand a better chance of being
# portable between different platforms. Of course, they must then
# only use functions that are defined by all platforms (e.g., unlink
# and opendir), and leave all pathname manipulation to os.path
# (e.g., split and join).
Programs that import and use 'os' stand a better chance of being
portable between different platforms. Of course, they must then
only use functions that are defined by all platforms (e.g., unlink
and opendir), and leave all pathname manipulation to os.path
(e.g., split and join).
"""
import sys
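A minimal sketch of the portability pattern the docstring recommends: use os and os.path rather than hard-coded separators.

# Build and manipulate a pathname without hard-coding '/' or ':'.
import os
name = os.path.join(os.curdir, 'spam.txt')
if os.path.exists(name):
    os.unlink(name)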

View file

@ -1,6 +1,6 @@
#! /usr/bin/env python
# pdb.py -- finally, a Python debugger!
"""pdb.py -- finally, a Python debugger!"""
# (See pdb.doc for documentation.)
@ -106,18 +106,18 @@ def execRcLines(self):
# Override Bdb methods (except user_call, for now)
def user_line(self, frame):
# This function is called when we stop or break at this line
"""This function is called when we stop or break at this line."""
self.interaction(frame, None)
def user_return(self, frame, return_value):
# This function is called when a return trap is set here
"""This function is called when a return trap is set here."""
frame.f_locals['__return__'] = return_value
print '--Return--'
self.interaction(frame, None)
def user_exception(self, frame, (exc_type, exc_value, exc_traceback)):
# This function is called if an exception occurs,
# but only if we are to stop at or just below this level
"""This function is called if an exception occurs,
but only if we are to stop at or just below this level."""
frame.f_locals['__exception__'] = exc_type, exc_value
if type(exc_type) == type(''):
exc_type_name = exc_type
@ -148,7 +148,7 @@ def default(self, line):
print '***', exc_type_name + ':', v
def precmd(self, line):
# Handle alias expansion and ';;' separator
"""Handle alias expansion and ';;' separator."""
if not line:
return line
args = string.split(line)
@ -262,7 +262,7 @@ def do_break(self, arg, temporary = 0):
# To be overridden in derived debuggers
def defaultFile(self):
# Produce a reasonable default
"""Produce a reasonable default."""
filename = self.curframe.f_code.co_filename
if filename == '<string>' and mainpyfile:
filename = mainpyfile
@ -384,7 +384,7 @@ def do_condition(self, arg):
print 'is now unconditional.'
def do_ignore(self,arg):
# arg is bp number followed by ignore count
"""arg is bp number followed by ignore count."""
args = string.split(arg)
bpnum = int(string.strip(args[0]))
try:
@ -406,10 +406,10 @@ def do_ignore(self,arg):
print bpnum, 'is reached.'
def do_clear(self, arg):
# Three possibilities, tried in this order:
# clear -> clear all breaks, ask for confirmation
# clear file:lineno -> clear all breaks at file:lineno
# clear bpno bpno ... -> clear breakpoints by number
"""Three possibilities, tried in this order:
clear -> clear all breaks, ask for confirmation
clear file:lineno -> clear all breaks at file:lineno
clear bpno bpno ... -> clear breakpoints by number"""
if not arg:
try:
reply = raw_input('Clear all breaks? ')
@ -851,9 +851,8 @@ def help_unalias(self):
def help_pdb(self):
help()
# Helper function for break/clear parsing -- may be overridden
def lookupmodule(self, filename):
"""Helper function for break/clear parsing -- may be overridden."""
root, ext = os.path.splitext(filename)
if ext == '':
filename = filename + '.py'
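For context, a sketch of the usual module-level entry points into this debugger; both are long-standing pdb functions.

# Start the debugger on a statement, or post-mortem the last traceback.
import pdb
pdb.run('x = 1 + 1')    # prompts at the first line of the statement
# pdb.pm()              # inspect the most recent unhandled exception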

View file

@ -1,4 +1,4 @@
"""create portable serialized representations of Python objects.
"""Create portable serialized representations of Python objects.
See module cPickle for a (much) faster implementation.
See module copy_reg for a mechanism for registering custom picklers.
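A one-line sketch of the round trip the docstring promises; the sample object is arbitrary.

# Serialize to a portable string representation and back.
import pickle
data = {'spam': 1, 'eggs': [2, 3]}
s = pickle.dumps(data)
assert pickle.loads(s) == data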

View file

@ -1,63 +1,62 @@
# Conversion pipeline templates
# =============================
"""Conversion pipeline templates.
The problem:
------------
Suppose you have some data that you want to convert to another format
(e.g. from GIF image format to PPM image format). Maybe the
conversion involves several steps (e.g. piping it through compress or
uuencode). Some of the conversion steps may require that their input
is a disk file, others may be able to read standard input; similarly for
their output. The input to the entire conversion may also be read
from a disk file or from an open file, and similarly for its output.
The module lets you construct a pipeline template by sticking one or
more conversion steps together. It will take care of creating and
removing temporary files if they are necessary to hold intermediate
data. You can then use the template to do conversions from many
different sources to many different destinations. The temporary
file names used are different each time the template is used.
The templates are objects so you can create templates for many
different conversion steps and store them in a dictionary, for
instance.
# The problem:
# ------------
#
# Suppose you have some data that you want to convert to another format
# (e.g. from GIF image format to PPM image format). Maybe the
# conversion involves several steps (e.g. piping it through compress or
# uuencode). Some of the conversion steps may require that their input
# is a disk file, others may be able to read standard input; similar for
# their output. The input to the entire conversion may also be read
# from a disk file or from an open file, and similar for its output.
#
# The module lets you construct a pipeline template by sticking one or
# more conversion steps together. It will take care of creating and
# removing temporary files if they are necessary to hold intermediate
# data. You can then use the template to do conversions from many
# different sources to many different destinations. The temporary
# file names used are different each time the template is used.
#
# The templates are objects so you can create templates for many
# different conversion steps and store them in a dictionary, for
# instance.
Directions:
-----------
To create a template:
t = Template()
# Directions:
# -----------
#
# To create a template:
# t = Template()
#
# To add a conversion step to a template:
# t.append(command, kind)
# where kind is a string of two characters: the first is '-' if the
# command reads its standard input or 'f' if it requires a file; the
# second likewise for the output. The command must be valid /bin/sh
# syntax. If input or output files are required, they are passed as
# $IN and $OUT; otherwise, it must be possible to use the command in
# a pipeline.
#
# To add a conversion step at the beginning:
# t.prepend(command, kind)
#
# To convert a file to another file using a template:
# sts = t.copy(infile, outfile)
# If infile or outfile are the empty string, standard input is read or
# standard output is written, respectively. The return value is the
# exit status of the conversion pipeline.
#
# To open a file for reading or writing through a conversion pipeline:
# fp = t.open(file, mode)
# where mode is 'r' to read the file, or 'w' to write it -- just like
# for the built-in function open() or for os.popen().
#
# To create a new template object initialized to a given one:
# t2 = t.clone()
#
# For an example, see the function test() at the end of the file.
To add a conversion step to a template:
t.append(command, kind)
where kind is a string of two characters: the first is '-' if the
command reads its standard input or 'f' if it requires a file; the
second likewise for the output. The command must be valid /bin/sh
syntax. If input or output files are required, they are passed as
$IN and $OUT; otherwise, it must be possible to use the command in
a pipeline.
To add a conversion step at the beginning:
t.prepend(command, kind)
To convert a file to another file using a template:
sts = t.copy(infile, outfile)
If infile or outfile are the empty string, standard input is read or
standard output is written, respectively. The return value is the
exit status of the conversion pipeline.
To open a file for reading or writing through a conversion pipeline:
fp = t.open(file, mode)
where mode is 'r' to read the file, or 'w' to write it -- just like
for the built-in function open() or for os.popen().
To create a new template object initialized to a given one:
t2 = t.clone()
For an example, see the function test() at the end of the file.
"""
import sys
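A minimal sketch of the usage described in the Directions above; the 'tr' command and the file names are illustrative, and the kind string '--' marks a step that reads standard input and writes standard output:

import pipes

t = pipes.Template()
t.append('tr a-z A-Z', '--')                 # read stdin, write stdout
sts = t.copy('infile.txt', 'outfile.txt')    # exit status of the pipeline
f = t.open('infile.txt', 'r')                # read infile.txt through the pipeline
print f.read()
f.close()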
@ -81,37 +80,36 @@
SOURCE, SINK]
# A pipeline template is a Template object:
class Template:
"""Class representing a pipeline template."""
# Template() returns a fresh pipeline template
def __init__(self):
"""Template() returns a fresh pipeline template."""
self.debugging = 0
self.reset()
# t.__repr__() implements `t`
def __repr__(self):
"""t.__repr__() implements `t`."""
return '<Template instance, steps=' + `self.steps` + '>'
# t.reset() restores a pipeline template to its initial state
def reset(self):
"""t.reset() restores a pipeline template to its initial state."""
self.steps = []
# t.clone() returns a new pipeline template with identical
# initial state as the current one
def clone(self):
"""t.clone() returns a new pipeline template with identical
initial state as the current one."""
t = Template()
t.steps = self.steps[:]
t.debugging = self.debugging
return t
# t.debug(flag) turns debugging on or off
def debug(self, flag):
"""t.debug(flag) turns debugging on or off."""
self.debugging = flag
# t.append(cmd, kind) adds a new step at the end
def append(self, cmd, kind):
"""t.append(cmd, kind) adds a new step at the end."""
if type(cmd) <> type(''):
raise TypeError, \
'Template.append: cmd must be a string'
@ -132,8 +130,8 @@ def append(self, cmd, kind):
'Template.append: missing $OUT in cmd'
self.steps.append((cmd, kind))
# t.prepend(cmd, kind) adds a new step at the front
def prepend(self, cmd, kind):
"""t.prepend(cmd, kind) adds a new step at the front."""
if type(cmd) <> type(''):
raise TypeError, \
'Template.prepend: cmd must be a string'
@ -154,9 +152,9 @@ def prepend(self, cmd, kind):
'Template.prepend: missing $OUT in cmd'
self.steps.insert(0, (cmd, kind))
# t.open(file, rw) returns a pipe or file object open for
# reading or writing; the file is the other end of the pipeline
def open(self, file, rw):
"""t.open(file, rw) returns a pipe or file object open for
reading or writing; the file is the other end of the pipeline."""
if rw == 'r':
return self.open_r(file)
if rw == 'w':
@ -164,10 +162,9 @@ def open(self, file, rw):
raise ValueError, \
'Template.open: rw must be \'r\' or \'w\', not ' + `rw`
# t.open_r(file) and t.open_w(file) implement
# t.open(file, 'r') and t.open(file, 'w') respectively
def open_r(self, file):
"""t.open_r(file) and t.open_w(file) implement
t.open(file, 'r') and t.open(file, 'w') respectively."""
if self.steps == []:
return open(file, 'r')
if self.steps[-1][1] == SINK:

View file

@ -1,3 +1,11 @@
"""Spawn a command with pipes to its stdin, stdout, and optionally stderr.
The normal os.popen(cmd, mode) call spawns a shell command and provides a
file interface to just the input or output of the process depending on
whether mode is 'r' or 'w'. This module provides the functions popen2(cmd)
and popen3(cmd) which return two or three pipes to the spawned command.
"""
import os
import sys
import string
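A small sketch of the two-pipe case the module docstring describes; the 'tr' command is just a stand-in:

import popen2

fromchild, tochild = popen2.popen2('tr a-z A-Z')
tochild.write('hello\n')
tochild.close()                  # signal end of input to the child
print fromchild.read()           # 'HELLO'
fromchild.close()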
@ -11,7 +19,15 @@ def _cleanup():
inst.poll()
class Popen3:
"""Class representing a child process. Normally instances are created
by the factory functions popen2() and popen3()."""
def __init__(self, cmd, capturestderr=0, bufsize=-1):
"""The parameter 'cmd' is the shell command to execute in a
sub-process. The 'capturestderr' flag, if true, specifies that
the object should capture standard error output of the child process.
The default is false. If the 'bufsize' parameter is specified, it
specifies the size of the I/O buffers to/from the child process."""
if type(cmd) == type(''):
cmd = ['/bin/sh', '-c', cmd]
p2cread, p2cwrite = os.pipe()
@ -51,7 +67,10 @@ def __init__(self, cmd, capturestderr=0, bufsize=-1):
self.childerr = None
self.sts = -1 # Child not completed yet
_active.append(self)
def poll(self):
"""Return the exit status of the child process if it has finished,
or -1 if it hasn't finished yet."""
if self.sts < 0:
try:
pid, sts = os.waitpid(self.pid, os.WNOHANG)
@ -61,7 +80,9 @@ def poll(self):
except os.error:
pass
return self.sts
def wait(self):
"""Wait for and return the exit status of the child process."""
pid, sts = os.waitpid(self.pid, 0)
if pid == self.pid:
self.sts = sts
@ -69,11 +90,17 @@ def wait(self):
return self.sts
def popen2(cmd, bufsize=-1):
"""Execute the shell command 'cmd' in a sub-process. If 'bufsize' is
specified, it sets the buffer size for the I/O pipes. The file objects
(child_stdout, child_stdin) are returned."""
_cleanup()
inst = Popen3(cmd, 0, bufsize)
return inst.fromchild, inst.tochild
def popen3(cmd, bufsize=-1):
"""Execute the shell command 'cmd' in a sub-process. If 'bufsize' is
specified, it sets the buffer size for the I/O pipes. The file objects
(child_stdout, child_stdin, child_stderr) are returned."""
_cleanup()
inst = Popen3(cmd, 1, bufsize)
return inst.fromchild, inst.tochild, inst.childerr
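And a sketch using the class directly, which also exposes poll() and wait(); the command here is purely illustrative:

import popen2

proc = popen2.Popen3('ls /no/such/path', 1)   # capturestderr=1
print proc.childerr.read()                    # the child's error output
print proc.wait()                             # exit status once it finishes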

View file

@ -1,64 +1,61 @@
#
# Start of posixfile.py
#
"""Extended file operations available in POSIX.
#
# Extended file operations
#
# f = posixfile.open(filename, [mode, [bufsize]])
# will create a new posixfile object
#
# f = posixfile.fileopen(fileobject)
# will create a posixfile object from a builtin file object
#
# f.file()
# will return the original builtin file object
#
# f.dup()
# will return a new file object based on a new filedescriptor
#
# f.dup2(fd)
# will return a new file object based on the given filedescriptor
#
# f.flags(mode)
# will turn on the associated flag (merge)
# mode can contain the following characters:
#
# (character representing a flag)
# a append only flag
# c close on exec flag
# n no delay flag
# s synchronization flag
# (modifiers)
# ! turn flags 'off' instead of default 'on'
# = copy flags 'as is' instead of default 'merge'
# ? return a string in which the characters represent the flags
# that are set
#
# note: - the '!' and '=' modifiers are mutually exclusive.
# - the '?' modifier will return the status of the flags after they
# have been changed by other characters in the mode string
#
# f.lock(mode [, len [, start [, whence]]])
# will (un)lock a region
# mode can contain the following characters:
#
# (character representing type of lock)
# u unlock
# r read lock
# w write lock
# (modifiers)
# | wait until the lock can be granted
# ? return the first lock conflicting with the requested lock
# or 'None' if there is no conflict. The lock returned is in the
# format (mode, len, start, whence, pid) where mode is a
# character representing the type of lock ('r' or 'w')
#
# note: - the '?' modifier prevents a region from being locked; it is
# query only
#
f = posixfile.open(filename, [mode, [bufsize]])
will create a new posixfile object
f = posixfile.fileopen(fileobject)
will create a posixfile object from a builtin file object
f.file()
will return the original builtin file object
f.dup()
will return a new file object based on a new filedescriptor
f.dup2(fd)
will return a new file object based on the given filedescriptor
f.flags(mode)
will turn on the associated flag (merge)
mode can contain the following characters:
(character representing a flag)
a append only flag
c close on exec flag
n no delay flag
s synchronization flag
(modifiers)
! turn flags 'off' instead of default 'on'
= copy flags 'as is' instead of default 'merge'
? return a string in which the characters represent the flags
that are set
note: - the '!' and '=' modifiers are mutually exclusive.
- the '?' modifier will return the status of the flags after they
have been changed by other characters in the mode string
f.lock(mode [, len [, start [, whence]]])
will (un)lock a region
mode can contain the following characters:
(character representing type of lock)
u unlock
r read lock
w write lock
(modifiers)
| wait until the lock can be granted
? return the first lock conflicting with the requested lock
or 'None' if there is no conflict. The lock returned is in the
format (mode, len, start, whence, pid) where mode is a
character representing the type of lock ('r' or 'w')
note: - the '?' modifier prevents a region from being locked; it is
query only
"""
class _posixfile_:
"""File wrapper class that provides extra POSIX file routines."""
states = ['open', 'closed']
#
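A short sketch of the flag and lock calls documented above; the file name is illustrative:

import posixfile

f = posixfile.open('/tmp/scratch', 'w')
print f.flags('?')       # show which flags are currently set
f.flags('a')             # merge in the append-only flag
f.lock('w|')             # write-lock the region, waiting until granted
f.lock('u')              # release the lock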
@ -215,13 +212,12 @@ def lock(self, how, *args):
else:
return 'w', l_len, l_start, l_whence, l_pid
#
# Public routine to obtain a posixfile object
#
def open(name, mode='r', bufsize=-1):
"""Public routine to open a file as a posixfile object."""
return _posixfile_().open(name, mode, bufsize)
def fileopen(file):
"""Public routine to get a posixfile object from a Python file object."""
return _posixfile_().fileopen(file)
#

View file

@ -1,13 +1,13 @@
# Module 'posixpath' -- common operations on Posix pathnames.
# Some of this can actually be useful on non-Posix systems too, e.g.
# for manipulation of the pathname component of URLs.
# The "os.path" name is an alias for this module on Posix systems;
# on other systems (e.g. Mac, Windows), os.path provides the same
# operations in a manner specific to that platform, and is an alias
# to another module (e.g. macpath, ntpath).
"""Common pathname manipulations, Posix version.
Instead of importing this module
directly, import os and refer to this module as os.path.
"""Common operations on Posix pathnames.
Instead of importing this module directly, import os and refer to
this module as os.path. The "os.path" name is an alias for this
module on Posix systems; on other systems (e.g. Mac, Windows),
os.path provides the same operations in a manner specific to that
platform, and is an alias to another module (e.g. macpath, ntpath).
Some of this can actually be useful on non-Posix systems too, e.g.
for manipulation of the pathname component of URLs.
"""
import os
@ -369,8 +369,8 @@ def normpath(path):
return slashes + string.joinfields(comps, '/')
# Return an absolute path.
def abspath(path):
"""Return an absolute path."""
if not isabs(path):
path = join(os.getcwd(), path)
return normpath(path)
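For example, abspath() as shown above composes join(), os.getcwd() and normpath(); the second result assumes the process happens to be running in /home/user:

import posixpath

print posixpath.normpath('foo//bar/../baz')   # 'foo/baz'
print posixpath.abspath('foo/../baz')         # '/home/user/baz' (assuming that cwd)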

View file

@ -7,6 +7,7 @@
#
# See profile.doc for more information
"""Class for profiling Python code."""
# Copyright 1994, by InfoSeek Corporation, all rights reserved.
# Written by James Roskind
@ -79,44 +80,43 @@ def help():
print 'along the Python search path'
#**************************************************************************
# class Profile documentation:
#**************************************************************************
# self.cur is always a tuple. Each such tuple corresponds to a stack
# frame that is currently active (self.cur[-2]). The following are the
# definitions of its members. We use this external "parallel stack" to
# avoid contaminating the program that we are profiling. (old profiler
# used to write into the frames local dictionary!!) Derived classes
# can change the definition of some entries, as long as they leave
# [-2:] intact.
#
# [ 0] = Time that needs to be charged to the parent frame's function. It is
# used so that a function call will not have to access the timing data
# for the parents frame.
# [ 1] = Total time spent in this frame's function, excluding time in
# subfunctions
# [ 2] = Cumulative time spent in this frame's function, including time in
# all subfunctions to this frame.
# [-3] = Name of the function that corresonds to this frame.
# [-2] = Actual frame that we correspond to (used to sync exception handling)
# [-1] = Our parent 6-tuple (corresonds to frame.f_back)
#**************************************************************************
# Timing data for each function is stored as a 5-tuple in the dictionary
# self.timings[]. The index is always the name stored in self.cur[4].
# The following are the definitions of the members:
#
# [0] = The number of times this function was called, not counting direct
# or indirect recursion,
# [1] = Number of times this function appears on the stack, minus one
# [2] = Total time spent internal to this function
# [3] = Cumulative time that this function was present on the stack. In
# non-recursive functions, this is the total execution time from start
# to finish of each invocation of a function, including time spent in
# all subfunctions.
# [5] = A dictionary indicating for each function name, the number of times
# it was called by us.
#**************************************************************************
class Profile:
"""Profiler class.
self.cur is always a tuple. Each such tuple corresponds to a stack
frame that is currently active (self.cur[-2]). The following are the
definitions of its members. We use this external "parallel stack" to
avoid contaminating the program that we are profiling. (old profiler
used to write into the frame's local dictionary!!) Derived classes
can change the definition of some entries, as long as they leave
[-2:] intact.
[ 0] = Time that needs to be charged to the parent frame's function.
It is used so that a function call will not have to access the
timing data for the parent frame.
[ 1] = Total time spent in this frame's function, excluding time in
subfunctions
[ 2] = Cumulative time spent in this frame's function, including time in
all subfunctions to this frame.
[-3] = Name of the function that corresponds to this frame.
[-2] = Actual frame that we correspond to (used to sync exception handling)
[-1] = Our parent 6-tuple (corresponds to frame.f_back)
Timing data for each function is stored as a 5-tuple in the dictionary
self.timings[]. The index is always the name stored in self.cur[4].
The following are the definitions of the members:
[0] = The number of times this function was called, not counting direct
or indirect recursion,
[1] = Number of times this function appears on the stack, minus one
[2] = Total time spent internal to this function
[3] = Cumulative time that this function was present on the stack. In
non-recursive functions, this is the total execution time from start
to finish of each invocation of a function, including time spent in
all subfunctions.
[4] = A dictionary indicating, for each function name, the number of times
it was called by us.
"""
def __init__(self, timer=None):
self.timings = {}
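The usual entry point for this class is the module-level run() helper (defined elsewhere in this file); a minimal sketch, with the profiled expression purely illustrative:

import profile

def fib(n):
    if n < 2:
        return n
    return fib(n-1) + fib(n-2)

profile.run('fib(15)')      # prints a report of per-function timings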
@ -449,19 +449,16 @@ def profiler_simulation(self, x, y, z):
#****************************************************************************
# OldProfile class documentation
#****************************************************************************
#
# The following derived profiler simulates the old style profile, providing
# errant results on recursive functions. The reason for the usefulnes of this
# profiler is that it runs faster (i.e., less overhead). It still creates
# all the caller stats, and is quite useful when there is *no* recursion
# in the user's code.
#
# This code also shows how easy it is to create a modified profiler.
#****************************************************************************
class OldProfile(Profile):
"""A derived profiler that simulates the old style profile, providing
errant results on recursive functions. The reason for the usefulness of
this profiler is that it runs faster (i.e., less overhead). It still
creates all the caller stats, and is quite useful when there is *no*
recursion in the user's code.
This code also shows how easy it is to create a modified profiler.
"""
def trace_dispatch_exception(self, frame, t):
rt, rtt, rct, rfn, rframe, rcur = self.cur
if rcur and not rframe is frame:
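In the same spirit as the derived profilers above, a hypothetical subclass that only swaps in a different timer via the timer argument accepted by __init__ (the class name below is an assumption, not part of the diff):

import time
import profile

class WallClockProfile(profile.Profile):
    """Hypothetical subclass: same dispatch machinery, wall-clock timer."""
    def __init__(self):
        profile.Profile.__init__(self, time.time)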
@ -509,16 +506,13 @@ def snapshot_stats(self):
#****************************************************************************
# HotProfile class documentation
#****************************************************************************
#
# This profiler is the fastest derived profile example. It does not
# calculate caller-callee relationships, and does not calculate cumulative
# time under a function. It only calculates time spent in a function, so
# it runs very quickly (re: very low overhead)
#****************************************************************************
class HotProfile(Profile):
"""The fastest derived profile example. It does not calculate
caller-callee relationships, and does not calculate cumulative
time under a function. It only calculates time spent in a
function, so it runs very quickly due to its very low overhead.
"""
def trace_dispatch_exception(self, frame, t):
rt, rtt, rfn, rframe, rcur = self.cur
if rcur and not rframe is frame:

View file

@ -1,4 +1,5 @@
#
"""Class for printing reports on profiled python code."""
# Class for printing reports on profiled python code. rev 1.0 4/1/94
#
# Based on prior profile module by Sjoerd Mullender...
@ -37,41 +38,38 @@
import marshal
import re
#**************************************************************************
# Class Stats documentation
#**************************************************************************
# This class is used for creating reports from data generated by the
# Profile class. It is a "friend" of that class, and imports data either
# by direct access to members of Profile class, or by reading in a dictionary
# that was emitted (via marshal) from the Profile class.
#
# The big change from the previous Profiler (in terms of raw functionality)
# is that an "add()" method has been provided to combine Stats from
# several distinct profile runs. Both the constructor and the add()
# method now take arbitrarilly many file names as arguments.
#
# All the print methods now take an argument that indicats how many lines
# to print. If the arg is a floating point number between 0 and 1.0, then
# it is taken as a decimal percentage of the availabel lines to be printed
# (e.g., .1 means print 10% of all available lines). If it is an integer,
# it is taken to mean the number of lines of data that you wish to have
# printed.
#
# The sort_stats() method now processes some additionaly options (i.e., in
# addition to the old -1, 0, 1, or 2). It takes an arbitrary number of quoted
# strings to select the sort order. For example sort_stats('time', 'name')
# sorts on the major key of "internal function time", and on the minor
# key of 'the name of the function'. Look at the two tables in sort_stats()
# and get_sort_arg_defs(self) for more examples.
#
# All methods now return "self", so you can string together commands like:
# Stats('foo', 'goo').strip_dirs().sort_stats('calls').\
# print_stats(5).print_callers(5)
#
#**************************************************************************
import fpformat
class Stats:
"""This class is used for creating reports from data generated by the
Profile class. It is a "friend" of that class, and imports data either
by direct access to members of the Profile class, or by reading in a dictionary
that was emitted (via marshal) from the Profile class.
The big change from the previous Profiler (in terms of raw functionality)
is that an "add()" method has been provided to combine Stats from
several distinct profile runs. Both the constructor and the add()
method now take arbitrarily many file names as arguments.
All the print methods now take an argument that indicates how many lines
to print. If the arg is a floating point number between 0 and 1.0, then
it is taken as a decimal percentage of the available lines to be printed
(e.g., .1 means print 10% of all available lines). If it is an integer,
it is taken to mean the number of lines of data that you wish to have
printed.
The sort_stats() method now processes some additional options (i.e., in
addition to the old -1, 0, 1, or 2). It takes an arbitrary number of quoted
strings to select the sort order. For example sort_stats('time', 'name')
sorts on the major key of "internal function time", and on the minor
key of 'the name of the function'. Look at the two tables in sort_stats()
and get_sort_arg_defs(self) for more examples.
All methods now return "self", so you can string together commands like:
Stats('foo', 'goo').strip_dirs().sort_stats('calls').\
print_stats(5).print_callers(5)
"""
def __init__(self, *args):
if not len(args):
arg = None
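A sketch of the chained style the docstring describes; 'profile.out' is a hypothetical dump file written by the Profile class:

import pstats

p = pstats.Stats('profile.out')
p.strip_dirs().sort_stats('time', 'name').print_stats(.1)   # top 10% by internal time
p.print_callers(5)                                          # five lines of caller data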
@ -182,8 +180,8 @@ def add(self, *arg_list):
"time" : (((2,-1), ), "internal time"),\
}
# Expand all abbreviations that are unique
def get_sort_arg_defs(self):
"""Expand all abbreviations that are unique."""
if not self.sort_arg_dict:
self.sort_arg_dict = dict = {}
std_list = dict.keys()
@ -289,9 +287,9 @@ def calc_callees(self):
all_callees[func2][func] = callers[func2]
return
#******************************************************************
#******************************************************************
# The following functions support actual printing of reports
#******************************************************************
#******************************************************************
# Optional "amount" is either a line count, or a percentage of lines.
@ -447,17 +445,14 @@ def ignore(self):
pass # has no return value, so use at end of line :-)
#**************************************************************************
# class TupleComp Documentation
#**************************************************************************
# This class provides a generic function for comparing any two tuples.
# Each instance records a list of tuple-indicies (from most significant
# to least significant), and sort direction (ascending or decending) for
# each tuple-index. The compare functions can then be used as the function
# argument to the system sort() function when a list of tuples need to be
# sorted in the instances order.
#**************************************************************************
class TupleComp:
"""This class provides a generic function for comparing any two tuples.
Each instance records a list of tuple-indices (from most significant
to least significant), and sort direction (ascending or descending) for
each tuple-index. The compare functions can then be used as the function
argument to the system sort() function when a list of tuples needs to be
sorted in the instance's order."""
def __init__(self, comp_select_list):
self.comp_select_list = comp_select_list
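A rough sketch of the idea; the real class's member layout is not shown in this hunk, so the (index, direction) pair format and the names below are assumptions:

class _TupleCompSketch:
    # comp_select_list: (index, direction) pairs, most significant first;
    # direction is 1 for ascending, -1 for descending
    def __init__(self, comp_select_list):
        self.comp_select_list = comp_select_list
    def compare(self, left, right):
        for index, direction in self.comp_select_list:
            if left[index] < right[index]:
                return -direction
            if left[index] > right[index]:
                return direction
        return 0

entries = [(3, 0.5), (1, 2.0), (3, 0.1)]
entries.sort(_TupleCompSketch([(0, 1), (1, -1)]).compare)
print entries       # ascending on field 0, descending on field 1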
@ -495,16 +490,16 @@ def func_split(func_name):
# such as callers and callees.
#**************************************************************************
# Add together all the stats for two profile entries
def add_func_stats(target, source):
"""Add together all the stats for two profile entries."""
cc, nc, tt, ct, callers = source
t_cc, t_nc, t_tt, t_ct, t_callers = target
return (cc+t_cc, nc+t_nc, tt+t_tt, ct+t_ct, \
add_callers(t_callers, callers))
# Combine two caller lists in a single list.
def add_callers(target, source):
"""Combine two caller lists in a single list."""
new_callers = {}
for func in target.keys():
new_callers[func] = target[func]
@ -515,8 +510,8 @@ def add_callers(target, source):
new_callers[func] = source[func]
return new_callers
# Sum the caller statistics to get total number of calls recieved
def count_calls(callers):
"""Sum the caller statistics to get total number of calls received."""
nc = 0
for func in callers.keys():
nc = nc + callers[func]
@ -529,4 +524,3 @@ def count_calls(callers):
def f8(x):
return string.rjust(fpformat.fix(x, 3), 8)

View file

@ -1,4 +1,4 @@
# pty.py -- Pseudo terminal utilities.
"""Pseudo terminal utilities."""
# Bugs: No signal handling. Doesn't set slave termios and window size.
# Only tested on Linux.
@ -16,8 +16,9 @@
CHILD = 0
# Open pty master. Returns (master_fd, tty_name). SGI and Linux/BSD version.
def master_open():
"""Open pty master and return (master_fd, tty_name).
SGI and Linux/BSD version."""
try:
import sgi
except ImportError:
@ -38,14 +39,15 @@ def master_open():
return (fd, '/dev/tty' + x + y)
raise os.error, 'out of pty devices'
# Open the pty slave. Acquire the controlling terminal.
# Returns file descriptor. Linux version. (Should be universal? --Guido)
def slave_open(tty_name):
"""Open the pty slave and acquire the controlling terminal.
Return the file descriptor. Linux version."""
# (Should be universal? --Guido)
return os.open(tty_name, FCNTL.O_RDWR)
# Fork and make the child a session leader with a controlling terminal.
# Returns (pid, master_fd)
def fork():
"""Fork and make the child a session leader with a controlling terminal.
Return (pid, master_fd)."""
master_fd, tty_name = master_open()
pid = os.fork()
if pid == CHILD:
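A minimal sketch of fork() as documented above; the exec'd command is illustrative:

import os
import pty

pid, master_fd = pty.fork()
if pid == 0:                         # child: now a session leader on the new tty
    os.execvp('ls', ['ls'])
else:                                # parent: the master side of the pty
    print os.read(master_fd, 1024)
    os.waitpid(pid, 0)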
@ -66,21 +68,21 @@ def fork():
# Parent and child process.
return pid, master_fd
# Write all the data to a descriptor.
def writen(fd, data):
"""Write all the data to a descriptor."""
while data != '':
n = os.write(fd, data)
data = data[n:]
# Default read function.
def read(fd):
"""Default read function."""
return os.read(fd, 1024)
# Parent copy loop.
# Copies
# pty master -> standard output (master_read)
# standard input -> pty master (stdin_read)
def copy(master_fd, master_read=read, stdin_read=read):
"""Parent copy loop.
Copies
pty master -> standard output (master_read)
standard input -> pty master (stdin_read)"""
while 1:
rfds, wfds, xfds = select(
[master_fd, STDIN_FILENO], [], [])
@ -91,8 +93,8 @@ def copy(master_fd, master_read=read, stdin_read=read):
data = stdin_read(STDIN_FILENO)
writen(master_fd, data)
# Create a spawned process.
def spawn(argv, master_read=read, stdin_read=read):
"""Create a spawned process."""
if type(argv) == type(''):
argv = (argv,)
pid, master_fd = fork()

View file

@ -7,7 +7,7 @@
MAGIC = imp.get_magic()
def wr_long(f, x):
"Internal; write a 32-bit int to a file in little-endian order."
"""Internal; write a 32-bit int to a file in little-endian order."""
f.write(chr( x & 0xff))
f.write(chr((x >> 8) & 0xff))
f.write(chr((x >> 16) & 0xff))