class ServerHTMLDoc(pydoc.HTMLDoc):
"""Class used to generate pydoc HTML document for a server"""
-
+
def markup(self, text, escape=None, funcs={}, classes={}, methods={}):
"""Mark up some plain text, given a context of symbols to look for.
Each context dictionary maps object names to anchor names."""
here = end
results.append(escape(text[here:]))
return ''.join(results)
-
+
def docroutine(self, object, name=None, mod=None,
funcs={}, classes={}, methods={}, cl=None):
"""Produce HTML documentation for a function or method object."""
note = ''
title = '<a name="%s"><strong>%s</strong></a>' % (anchor, name)
-
+
if inspect.ismethod(object):
args, varargs, varkw, defaults = inspect.getargspec(object.im_func)
# exclude the argument bound to the instance, it will be
docstring = object[1] or ""
else:
docstring = pydoc.getdoc(object)
-
+
decl = title + argspec + (note and self.grey(
'<font face="helvetica, arial">%s</font>' % note))
for key, value in methods.items():
fdict[key] = '#-' + key
fdict[value] = fdict[key]
-
- head = '<big><big><strong>%s</strong></big></big>' % server_name
+
+ head = '<big><big><strong>%s</strong></big></big>' % server_name
result = self.heading(head, '#ffffff', '#7799ee')
-
+
doc = self.markup(package_documentation, self.preformat, fdict)
doc = doc and '<tt>%s</tt>' % doc
result = result + '<p>%s</p>\n' % doc
This class is designed as mix-in and should not
be constructed directly.
"""
-
+
def __init__(self):
# setup variables used for HTML documentation
self.server_name = 'XML-RPC Server Documentation'
argument string used in the documentation and the
_methodHelp(method_name) method to provide the help text used
in the documentation."""
-
+
methods = {}
for method_name in self.system_listMethods():
self.server_documentation,
methods
)
-
+
return documenter.page(self.server_title, documentation)
class DocXMLRPCRequestHandler(SimpleXMLRPCRequestHandler):
Interpret all HTTP GET requests as requests for server
documentation.
"""
-
+
response = self.server.generate_html_documentation()
self.send_response(200)
self.send_header("Content-type", "text/html")
logRequests=1):
SimpleXMLRPCServer.__init__(self, addr, requestHandler, logRequests)
XMLRPCDocGenerator.__init__(self)
-
+
class DocCGIXMLRPCRequestHandler( CGIXMLRPCRequestHandler,
XMLRPCDocGenerator):
"""Handler for XML-RPC data and documentation requests passed through
Converts an angle in degrees to an angle in radians"""
import math
- return deg * math.pi / 180
-
+ return deg * math.pi / 180
+
server = DocXMLRPCServer(("localhost", 8000))
server.set_server_title("Math Server")
server.register_function(deg_to_rad)
server.register_introspection_functions()
- server.serve_forever()
\ No newline at end of file
+ server.serve_forever()
def pattern(self, format):
"""Return re pattern for the format string.
-
+
Need to make sure that any characters that might be interpreted as
regex syntax is escaped.
-
+
"""
processed_format = ''
# The sub() call escapes all characters that might be misconstrued
return key
else:
raise ValueError("value not in list")
-
import sys
del sys.modules[__name__]
raise
-
+
# bsddb3 calls it db, but provide _db for backwards compatibility
db = _db = _bsddb
__version__ = db.__version__
if db.version() >= (4,1):
def set_encrypt(self, *args, **kwargs):
return apply(self._cobj.set_encrypt, args, kwargs)
-
#---------------------------------------------------------------------------
-
-
-
self.restval = restval # for writing short dicts
if extrasaction.lower() not in ("raise", "ignore"):
raise ValueError, \
- ("extrasaction (%s) must be 'raise' or 'ignore'" %
+ ("extrasaction (%s) must be 'raise' or 'ignore'" %
extrasaction)
self.extrasaction = extrasaction
self.writer = writer(f, dialect, *args)
for k in rowdict.keys():
if k not in self.fieldnames:
raise ValueError, "dict contains fields not in fieldnames"
- return [rowdict.get(key, self.restval) for key in self.fieldnames]
+ return [rowdict.get(key, self.restval) for key in self.fieldnames]
def writerow(self, rowdict):
return self.writer.writerow(self._dict_to_list(rowdict))
"""
Takes a file-like object and returns a dialect (or None)
"""
-
+
self.fileobj = fileobj
-
+
data = fileobj.read(self.sample)
quotechar, delimiter, skipinitialspace = self._guessQuoteAndDelimiter(data)
def hasHeaders(self):
return self._hasHeaders(self.fileobj, self.dialect)
-
+
def register_dialect(self, name = 'sniffed'):
csv.register_dialect(name, self.dialect)
-
+
def _guessQuoteAndDelimiter(self, data):
"""
matches = regexp.findall(data)
if matches:
break
-
+
if not matches:
return ('', None, 0) # (quotechar, delimiter, skipinitialspace)
# there is *no* delimiter, it's a single column of quoted data
delim = ''
skipinitialspace = 0
-
+
return (quotechar, delim, skipinitialspace)
e.g. "x occurred 5 times in 10 rows, 6 times in 1000 rows,
7 times in 2 rows"
3) use the mode of the meta-frequency to determine the /expected/
- frequency for that character
- 4) find out how often the character actually meets that goal
- 5) the character that best meets its goal is the delimiter
+ frequency for that character
+ 4) find out how often the character actually meets that goal
+ 5) the character that best meets its goal is the delimiter
For performance reasons, the data is evaluated in chunks, so it can
try and evaluate the smallest portion of the data possible, evaluating
- additional chunks as necessary.
+ additional chunks as necessary.
"""
-
+
data = filter(None, data.split('\n'))
ascii = [chr(c) for c in range(127)] # 7-bit ASCII
# be a string in which case the length of the string is the determining factor: if
# all of the rows except for the first are the same length, it's a header.
# Finally, a 'vote' is taken at the end for each column, adding or subtracting from
- # the likelihood of the first row being a header.
+ # the likelihood of the first row being a header.
def seval(item):
"""
return eval(item.replace('(', '').replace(')', ''))
fileobj.seek(0) # rewind the fileobj - this might not work for some file-like objects...
-
+
reader = csv.reader(fileobj,
delimiter = dialect.delimiter,
quotechar = dialect.quotechar,
hasHeader -= 1
return hasHeader > 0
-
-
-
# Register the search_function in the Python codec registry
codecs.register(search_function)
-
continue
newlabel.append(stringprep.map_table_b2(c))
label = u"".join(newlabel)
-
+
# Normalize
label = unicodedata.normalize("NFKC", label)
-
+
# Prohibit
for c in label:
if stringprep.in_table_c12(c) or \
# Step 8: return the result of step 5
return result
-
+
### Codec APIs
class Codec(codecs.Codec):
return ".".join(result), len(input)
def decode(self,input,errors='strict'):
-
+
if errors != 'strict':
raise UnicodeError, "Unsupported error handling "+errors
##################### Encoding #####################################
def segregate(str):
- """3.1 Basic code point segregation"""
+ """3.1 Basic code point segregation"""
base = []
extended = {}
for c in str:
oldindex = index
delta = 0
oldchar = char
-
+
return result
def T(j, bias):
divisions += 36
bias = divisions + (36 * delta // (delta + 38))
return bias
-
+
def generate_integers(baselen, deltas):
"""3.4 Bias adaptation"""
return extpos, result
w = w * (36 - t)
j += 1
-
+
def insertion_sort(base, extended, errors):
"""3.2 Insertion unsort coding"""
base = unicode(base, "ascii", errors)
extended = extended.upper()
return insertion_sort(base, extended, errors)
-
+
### Codec APIs
class Codec(codecs.Codec):
# This is some sort of declaration; in "HTML as
# deployed," this should only be the document type
# declaration ("<!DOCTYPE html...>").
- # ISO 8879:1986, however, has more complex
+ # ISO 8879:1986, however, has more complex
# declaration syntax for elements in <!...>, including:
# --comment--
# [marked section]
- # name in the following list: ENTITY, DOCTYPE, ELEMENT,
- # ATTLIST, NOTATION, SHORTREF, USEMAP,
+ # name in the following list: ENTITY, DOCTYPE, ELEMENT,
+ # ATTLIST, NOTATION, SHORTREF, USEMAP,
# LINKTYPE, LINK, IDLINK, USELINK, SYSTEM
rawdata = self.rawdata
j = i + 2
j = match.start(0)
self.unknown_decl(rawdata[i+3: j])
return match.end(0)
-
+
# Internal -- parse comment, return length or -1 if not terminated
def parse_comment(self, i, report=1):
rawdata = self.rawdata
# which will become a factory function when there are many Option
# classes.
make_option = Option
-
_libc_search = re.compile(r'(__libc_init)'
'|'
- '(GLIBC_([0-9.]+))'
- '|'
+ '(GLIBC_([0-9.]+))'
+ '|'
'(libc(_\w+)?\.so(?:\.(\d[0-9.]*))?)')
def libc_ver(executable=sys.executable,lib='',version='',
Note that the function has intimate knowledge of how different
libc versions add symbols to the executable is probably only
- useable for executables compiled using gcc.
+ useable for executables compiled using gcc.
The file is read and scanned in chunks of chunksize bytes.
def _dist_try_harder(distname,version,id):
- """ Tries some special tricks to get the distribution
+ """ Tries some special tricks to get the distribution
information in case the default method fails.
Currently supports older SuSE Linux, Caldera OpenLinux and
""" Tries to figure out the OS version used and returns
a tuple (system,release,version).
-
+
It uses the "ver" shell command for this which is known
to exists on Windows, DOS and OS/2. XXX Others too ?
RegQueryValueEx(keyCurVer,'SystemRoot')
except:
return release,version,csd,ptype
-
+
# Parse values
#subversion = _win32_getvalue(keyCurVer,
# 'SubVersionNumber',
0x80:'final'}.get(stage,'')
versioninfo = (version,stage,nonrel)
if sysa:
- machine = {0x1: '68k',
+ machine = {0x1: '68k',
0x2: 'PowerPC'}.get(sysa,'')
return release,versioninfo,machine
return default
def java_ver(release='',vendor='',vminfo=('','',''),osinfo=('','','')):
-
+
""" Version interface for JPython.
Returns a tuple (release,vendor,vminfo,osinfo) with vminfo being
os_name = _java_getprop('java.os.name',os_name)
os_version = _java_getprop('java.os.version',os_version)
osinfo = os_name,os_version,os_arch
-
+
return release,vendor,vminfo,osinfo
### System name aliasing
# Older installations can only query longs
size = struct.calcsize('l')
bits = str(size*8) + 'bit'
-
+
# Get data from the 'file' system command
output = _syscmd_file(executable,'')
if not output and \
executable == sys.executable:
# "file" command did not return anything; we'll try to provide
- # some sensible defaults then...
+ # some sensible defaults then...
if _default_architecture.has_key(sys.platform):
b,l = _default_architecture[sys.platform]
if b:
# Split the output into a list of strings omitting the filename
fileout = _architecture_split(output)[1:]
-
+
if 'executable' not in fileout:
# Format not supported
return bits,linkage
return bits,linkage
### Portable uname() interface
-
+
_uname_cache = None
def uname():
release,version,csd,ptype = win32_ver()
if release and version:
use_syscmd_ver = 0
-
+
# Try the 'ver' system command available on some
# platforms
if use_syscmd_ver:
""" Returns a single string identifying the underlying platform
with as much useful information as possible (but no more :).
-
+
The output is intended to be human readable rather than
machine parseable. It may look different on different
platforms and this is intended.
else:
bits,linkage = architecture(sys.executable)
platform = _platform(system,release,machine,processor,bits,linkage)
-
+
if aliased:
_platform_aliased_cache = platform
elif terse:
if __name__ == '__main__':
# Default is to print the aliased verbose platform string
- terse = ('terse' in sys.argv or '--terse' in sys.argv)
+ terse = ('terse' in sys.argv or '--terse' in sys.argv)
aliased = (not 'nonaliased' in sys.argv and not '--nonaliased' in sys.argv)
print platform(aliased,terse)
sys.exit(0)
break
continue
if rawdata.startswith("<!--", i):
- # Strictly speaking, a comment is --.*--
- # within a declaration tag <!...>.
- # This should be removed,
- # and comments handled only in parse_declaration.
+ # Strictly speaking, a comment is --.*--
+ # within a declaration tag <!...>.
+ # This should be removed,
+ # and comments handled only in parse_declaration.
k = self.parse_comment(i)
if k < 0: break
i = k
d[key] = data # store data at key (overwrites old data if
# using an existing key)
- data = d[key] # retrieve a COPY of the data at key (raise
+ data = d[key] # retrieve a COPY of the data at key (raise
# KeyError if no such key) -- NOTE that this
# access returns a *copy* of the entry!
del d[key] # delete data stored at key (raises KeyError
def in_table_d2(code):
    """RFC 3454 table D.2: characters whose bidirectional category is L."""
    bidi = unicodedata.bidirectional(code)
    return bidi == "L"
-
self.checkraises(TypeError, 'xyz', 'decode', 42)
self.checkraises(TypeError, 'xyz', 'encode', 42)
-
def callback():
idents.append(thread.get_ident())
-
+
_testcapi._test_thread_state(callback)
time.sleep(1)
# Check our main thread is in the list exactly 3 times.
have_thread_state = True
except AttributeError:
have_thread_state = False
-
+
if have_thread_state:
TestThreadState()
#(L) 3<nen>B<gumi><kinpachi><sensei>
(u"\u0033\u5E74\u0042\u7D44\u91D1\u516B\u5148\u751F",
"3B-ww4c5e180e575a65lsy2b"),
-
+
# (M) <amuro><namie>-with-SUPER-MONKEYS
(u"\u5B89\u5BA4\u5948\u7F8E\u6075\u002D\u0077\u0069\u0074"
u"\u0068\u002D\u0053\u0055\u0050\u0045\u0052\u002D\u004D"
('\xe2\xbf\xb5',
None),
# 3.33 Display property character U+0341.
- ('\xcd\x81',
+ ('\xcd\x81',
'\xcc\x81'),
# 3.34 Left-to-right mark U+200E.
('\xe2\x80\x8e',
class Test_Csv(unittest.TestCase):
"""
- Test the underlying C csv parser in ways that are not appropriate
+ Test the underlying C csv parser in ways that are not appropriate
from the high level interface. Further tests of this nature are done
in TestDialectRegistry.
"""
obj.dialect.delimiter = '\t'
self.assertEqual(obj.dialect.delimiter, '\t')
self.assertRaises(TypeError, delattr, obj.dialect, 'delimiter')
- self.assertRaises(TypeError, setattr, obj.dialect,
+ self.assertRaises(TypeError, setattr, obj.dialect,
'lineterminator', None)
obj.dialect.escapechar = None
self.assertEqual(obj.dialect.escapechar, None)
fileobj = StringIO()
writer = csv.writer(fileobj, **kwargs)
writer.writerow(fields)
- self.assertEqual(fileobj.getvalue(),
+ self.assertEqual(fileobj.getvalue(),
expect + writer.dialect.lineterminator)
def test_write_arg_valid(self):
self.assertRaises(csv.Error, self._write_test, None, '')
self._write_test((), '')
self._write_test([None], '""')
- self.assertRaises(csv.Error, self._write_test,
+ self.assertRaises(csv.Error, self._write_test,
[None], None, quoting = csv.QUOTE_NONE)
# Check that exceptions are passed up the chain
class BadList:
def test_write_quoting(self):
self._write_test(['a','1','p,q'], 'a,1,"p,q"')
- self.assertRaises(csv.Error,
+ self.assertRaises(csv.Error,
self._write_test,
['a','1','p,q'], 'a,1,"p,q"',
quoting = csv.QUOTE_NONE)
self.readerAssertEqual(' "a"', [[' "a"']])
def test_quoted(self):
- self.readerAssertEqual('1,2,3,"I think, therefore I am",5,6',
- [['1', '2', '3',
- 'I think, therefore I am',
+ self.readerAssertEqual('1,2,3,"I think, therefore I am",5,6',
+ [['1', '2', '3',
+ 'I think, therefore I am',
'5', '6']])
def test_quoted_quote(self):
self.readerAssertEqual('1,2,3,"""I see,"" said the blind man","as he picked up his hammer and saw"',
- [['1', '2', '3',
- '"I see," said the blind man',
+ [['1', '2', '3',
+ '"I see," said the blind man',
'as he picked up his hammer and saw']])
def test_quoted_nl(self):
hammer and saw"
9,8,7,6'''
self.readerAssertEqual(input,
- [['1', '2', '3',
- '"I see,"\nsaid the blind man',
+ [['1', '2', '3',
+ '"I see,"\nsaid the blind man',
'as he picked up his\nhammer and saw'],
['9','8','7','6']])
self.failUnlessEqual(42, ll_convert("L", 42))
self.failUnlessEqual(42, ll_convert("L", 42L))
self.assertRaises(OverflowError, ll_convert, "L", VERY_LARGE)
-
+
def test_K(self):
# K return 'unsigned long long', no range checking
self.assertRaises(TypeError, ull_convert, "K", 3.14)
suite.addTest(unittest.makeSuite(PluralFormsTestCase))
suite.addTest(unittest.makeSuite(UnicodeTranslationsTest))
return suite
-
+
def test_main():
    # Entry point for the regression-test driver: run the suite built above.
    run_suite(suite())
    def test_fsref(self):
        # An FSRef built from a path must resolve back to the same
        # (realpath-normalized) pathname.
        fsr = macfs.FSRef(test_support.TESTFN)
        self.assertEqual(os.path.realpath(test_support.TESTFN), fsr.as_pathname())
-
+
def test_fsref_unicode(self):
if sys.getfilesystemencoding():
testfn_unicode = unicode(test_support.TESTFN)
if __name__ == "__main__":
test_main()
-
import aetools
class TestScriptpackages(unittest.TestCase):
-
+
def _test_scriptpackage(self, package, testobject=1):
# Check that we can import the package
mod = __import__(package)
if testobject:
# Test that we can get an application object
obj = mod.application(0)
-
+
    def test__builtinSuites(self):
        # Import check only: no application object is requested here.
        self._test_scriptpackage('_builtinSuites', testobject=0)
-
+
    def test_StdSuites(self):
        # Exercise the StdSuites script package via the shared helper.
        self._test_scriptpackage('StdSuites')
-
+
    def test_SystemEvents(self):
        # Exercise the SystemEvents script package via the shared helper.
        self._test_scriptpackage('SystemEvents')
-
+
    def test_Finder(self):
        # Exercise the Finder script package via the shared helper.
        self._test_scriptpackage('Finder')
-
+
    def test_Terminal(self):
        # Exercise the Terminal script package via the shared helper.
        self._test_scriptpackage('Terminal')
-
+
    def test_Netscape(self):
        # Exercise the Netscape script package via the shared helper.
        self._test_scriptpackage('Netscape')
-
+
    def test_Explorer(self):
        # Exercise the Explorer script package via the shared helper.
        self._test_scriptpackage('Explorer')
-
+
    def test_CodeWarrior(self):
        # Exercise the CodeWarrior script package via the shared helper.
        self._test_scriptpackage('CodeWarrior')
ret.append(tok)
tok = lex.get_token()
return ret
-
+
def testSplitPosix(self):
"""Test data splitting with posix parser"""
- self.splitTest(self.posix_data, comments=True)
+ self.splitTest(self.posix_data, comments=True)
def testCompat(self):
"""Test compatibility interface"""
except KeyboardInterrupt:
if verbose:
print "KeyboardInterrupt (assume the alarm() went off)"
-
# h = sha.sha()
# h.update(data)
# print p,h.hexdigest()
-
-
from os import environ
- # Epoch time of midnight Dec 25th 2002. Never DST in northern
+ # Epoch time of midnight Dec 25th 2002. Never DST in northern
# hemisphere.
- xmas2002 = 1040774400.0
+ xmas2002 = 1040774400.0
# These formats are correct for 2002, and possibly future years
# This format is the 'standard' as documented at:
# http://www.opengroup.org/onlinepubs/007904975/basedefs/xbd_chap08.html
# They are also documented in the tzset(3) man page on most Unix
# systems.
- eastern = 'EST+05EDT,M4.1.0,M10.5.0'
+ eastern = 'EST+05EDT,M4.1.0,M10.5.0'
victoria = 'AEST-10AEDT-11,M10.5.0,M3.5.0'
utc='UTC+0'
self.failUnlessEqual(
time.gmtime(xmas2002), time.localtime(xmas2002)
)
- self.failUnlessEqual(time.daylight, 0)
+ self.failUnlessEqual(time.daylight, 0)
self.failUnlessEqual(time.timezone, 0)
self.failUnlessEqual(time.localtime(xmas2002).tm_isdst, 0)
elif environ.has_key('TZ'):
del environ['TZ']
time.tzset()
-
+
def test_main():
    # Entry point for the regression-test driver: run the TZ/time tests.
    test_support.run_unittest(TimeTestCase)
from test.test_support import verify, TestSkipped, TESTFN_UNICODE
from test.test_support import TESTFN_ENCODING
try:
- TESTFN_ENCODED = TESTFN_UNICODE.encode(TESTFN_ENCODING)
+ TESTFN_ENCODED = TESTFN_UNICODE.encode(TESTFN_ENCODING)
except (UnicodeError, TypeError):
# Either the file system encoding is None, or the file name
# cannot be encoded in the file system encoding.
coverpath = os.path.join(dir, modulename + ".cover")
n_hits, n_lines = self.write_results_file(coverpath, source,
lnotab, count)
-
+
if summary and n_lines:
percent = int(100 * n_hits / n_lines)
sums[modulename] = n_lines, percent, modulename, filename
def globaltrace_countfuncs(self, frame, why, arg):
"""Handler for call events.
-
+
Adds (filename, modulename, funcname) to the self._calledfuncs dict.
"""
if why == 'call':
interface. The factory function can either return the same object,
or a new one (e.g. if that implementation supports some
customization)."""
-
+
registered[name] = factory
def _good_enough(dom, features):
find one with the required feature set. If no implementation can
be found, raise an ImportError. The features list must be a sequence
of (feature, version) pairs which are passed to hasFeature."""
-
+
import os
creator = None
mod = well_known_implementations.get(name)
    def __iter__(self):
        # This object acts as its own iterator.
        return self
-
+
def expandNode(self, node):
event = self.getEvent()
parents = [node]
# processing a META tag)
# read: return the current encoding (possibly established through
# auto-detection.
-# initial value: UTF-8
+# initial value: UTF-8
#
property_interning_dict = "http://www.python.org/sax/properties/interning-dict"
def escape(data, entities={}):
"""Escape &, <, and > in a string of data.
-
+
You can escape other strings of data by passing a dictionary as
the optional entities parameter. The keys and values must all be
strings; each key will be replaced with its corresponding value.
sys.stderr.write("Unable to open %s. " % dbfile)
sys.stderr.write("Check for format or version mismatch.\n")
return 1
-
+
for k in db.keys():
pickle.dump((k, db[k]), pfile, 1==1)
prog = sys.argv[0]
def usage():
- sys.stderr.write(__doc__ % globals())
+ sys.stderr.write(__doc__ % globals())
def main(args):
try:
class CoverageResults:
def __init__(self, counts=None, calledfuncs=None, infile=None,
- outfile=None):
+ outfile=None):
self.counts = counts
if self.counts is None:
self.counts = {}
if type(thingie) is types.DictType:
# backwards compatibility for old trace.py after
# Zooko touched it but before calledfuncs --Zooko
- # 2001-10-24
+ # 2001-10-24
self.update(self.__class__(thingie))
elif type(thingie) is types.TupleType and len(thingie) == 2:
counts, calledfuncs = thingie
pass
except pickle.UnpicklingError:
# backwards compatibility for old trace.py before
- # Zooko touched it --Zooko 2001-10-24
+ # Zooko touched it --Zooko 2001-10-24
self.update(self.__class__(marshal.load(open(self.infile))))
def update(self, other):
if key != 'calledfuncs':
# backwards compatibility for abortive attempt to
# stuff calledfuncs into self.counts, by Zooko
- # --Zooko 2001-10-24
+ # --Zooko 2001-10-24
counts[key] = counts.get(key, 0) + other_counts[key]
for key in other_calledfuncs.keys():
ignoredirs=(), infile=None, outfile=None):
"""
@param count true iff it should count number of times each
- line is executed
+ line is executed
@param trace true iff it should print out each line that is
- being counted
+ being counted
@param countfuncs true iff it should just output a list of
(filename, modulename, funcname,) for functions
that were called at least once; This overrides
- `count' and `trace'
+ `count' and `trace'
@param ignoremods a list of the names of modules to ignore
@param ignoredirs a list of the names of directories to ignore
- all of the (recursive) contents of
+ all of the (recursive) contents of
@param infile file from which to read stored counts to be
- added into the results
+ added into the results
@param outfile file in which to write the results
"""
self.infile = infile
# XXX I wish inspect offered me an optimized
# `getfilename(frame)' to use in place of the presumably
# heavier `getframeinfo()'. --Zooko 2001-10-14
-
+
filename, lineno, funcname, context, lineindex = \
inspect.getframeinfo(frame, 1)
key = filename, lineno
self.counts[key] = self.counts.get(key, 0) + 1
-
+
# XXX not convinced that this memoizing is a performance
# win -- I don't know enough about Python guts to tell.
# --Zooko 2001-10-14
-
+
bname = self.pathtobasename.get(filename)
if bname is None:
-
+
# Using setdefault faster than two separate lines?
# --Zooko 2001-10-14
bname = self.pathtobasename.setdefault(filename,
# heavier `getframeinfo()'. --Zooko 2001-10-14
filename, lineno, funcname, context, lineindex = \
inspect.getframeinfo(frame)
-
+
# XXX not convinced that this memoizing is a performance
# win -- I don't know enough about Python guts to tell.
# --Zooko 2001-10-14