- added READMEs to all examples in each __init__.py and added to sphinx documentation
- added versioning example
- removed vertical/vertical.py, the dictlikes are more straightforward
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
sys.path.insert(0, os.path.abspath('../../lib'))
+sys.path.insert(0, os.path.abspath('../../examples'))
sys.path.insert(0, os.path.abspath('.'))
import sqlalchemy
# General information about the project.
project = u'SQLAlchemy'
-copyright = u'2007, 2008, 2009, the SQLAlchemy authors and contributors'
+copyright = u'2007, 2008, 2009, 2010, the SQLAlchemy authors and contributors'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
--- /dev/null
+.. _examples_toplevel:
+
+Examples
+========
+
+The SQLAlchemy distribution includes a variety of code examples illustrating a select set of patterns, some typical and some not so typical. All are runnable and can be found in the ``/examples`` directory of the distribution. Each example contains a README in its ``__init__.py`` file, each of which is listed below.
+
+Additional SQLAlchemy examples, some user contributed, are available on the wiki at `<http://www.sqlalchemy.org/trac/wiki/UsageRecipes>`_.
+
+Adjacency List
+--------------
+
+Location: /examples/adjacency_list/
+
+.. automodule:: adjacency_list
+
+Associations
+------------
+
+Location: /examples/association/
+
+.. automodule:: association
+
+Attribute Instrumentation
+-------------------------
+
+Location: /examples/custom_attributes/
+
+.. automodule:: custom_attributes
+
+Beaker Caching
+--------------
+
+Location: /examples/beaker_caching/
+
+.. automodule:: beaker_caching
+
+Derived Attributes
+------------------
+
+Location: /examples/derived_attributes/
+
+.. automodule:: derived_attributes
+
+
+Directed Graphs
+---------------
+
+Location: /examples/graphs/
+
+.. automodule:: graphs
+
+Dynamic Relations as Dictionaries
+----------------------------------
+
+Location: /examples/dynamic_dict/
+
+.. automodule:: dynamic_dict
+
+Horizontal Sharding
+-------------------
+
+Location: /examples/sharding
+
+.. automodule:: sharding
+
+Inheritance Mappings
+--------------------
+
+Location: /examples/inheritance/
+
+.. automodule:: inheritance
+
+Large Collections
+-----------------
+
+Location: /examples/large_collection/
+
+.. automodule:: large_collection
+
+Nested Sets
+-----------
+
+Location: /examples/nested_sets/
+
+.. automodule:: nested_sets
+
+Polymorphic Associations
+------------------------
+
+Location: /examples/poly_assoc/
+
+.. automodule:: poly_assoc
+
+PostGIS Integration
+-------------------
+
+Location: /examples/postgis
+
+.. automodule:: postgis
+
+Versioned Objects
+-----------------
+
+Location: /examples/versioning
+
+.. automodule:: versioning
+
+Vertical Attribute Mapping
+--------------------------
+
+Location: /examples/vertical
+
+.. automodule:: vertical
+
+XML Persistence
+---------------
+
+Location: /examples/elementtree/
+
+.. automodule:: elementtree
session
dbengine
metadata
+ examples
reference/index
Indices and tables
Above, a ``Query`` issued for the ``User`` class will use the value of the mapper's ``order_by`` setting if the ``Query`` itself has no ordering specified.
+.. _datamapping_inheritance:
+
Mapping Class Inheritance Hierarchies
--------------------------------------
+"""
+An example of a dictionary-of-dictionaries structure mapped using
+an adjacency list model.
+
+E.g.::
+
+ node = TreeNode('rootnode')
+ node.append('node1')
+ node.append('node3')
+ session.add(node)
+ session.commit()
+
+ dump_tree(node)
+
+"""
+
-"""
-An example of a dictionary-of-dictionaries structure mapped using
-an adjacency list model
-
-"""
-
from sqlalchemy import MetaData, Table, Column, Sequence, ForeignKey,\
Integer, String, create_engine
+"""
+
+Examples illustrating the usage of the "association object" pattern,
+where an intermediary object associates two endpoint objects together.
+
+The first example illustrates a basic association from a User object
+to a collection of Order objects, each of which references a collection of Item objects.
+
+The second example builds upon the first to add the Association Proxy extension.
+
+E.g.::
+
+ # create an order
+ order = Order('john smith')
+
+ # append an OrderItem association via the "itemassociations"
+ # collection with a custom price.
+ order.itemassociations.append(OrderItem(item('MySQL Crowbar'), 10.99))
+
+ # append two more Items via the transparent "items" proxy, which
+ # will create OrderItems automatically using the default price.
+ order.items.append(item('SA Mug'))
+ order.items.append(item('SA Hat'))
+
+"""
\ No newline at end of file
+++ /dev/null
-Illustrates how to embed Beaker cache functionality within
-the Query object, allowing full cache control as well as the
-ability to pull "lazy loaded" attributes from long term cache
-as well.
-
-In this demo, the following techniques are illustrated:
-
- * Using custom subclasses of Query
- * Basic technique of circumventing Query to pull from a
- custom cache source instead of the database.
- * Rudimental caching with Beaker, using "regions" which allow
- global control over a fixed set of configurations.
- * Using custom MapperOption objects to configure options on
- a Query, including the ability to invoke the options
- deep within an object graph when lazy loads occur.
-
-To run, both SQLAlchemy and Beaker (1.4 or greater) must be
-installed or on the current PYTHONPATH. The demo will create a local
-directory for datafiles, insert initial data, and run. Running the
-demo a second time will utilize the cache files already present, and
-exactly one SQL statement against two tables will be emitted - the
-displayed result however will utilize dozens of lazyloads that all
-pull from cache.
-
-Three endpoint scripts, in order of complexity, are run as follows:
-
- python examples/beaker_caching/helloworld.py
-
- python examples/beaker_caching/relation_caching.py
-
- python examples/beaker_caching/advanced.py
-
- python examples/beaker_caching/local_session_caching.py
-
-
-Listing of files:
-
-__init__.py - Establish data / cache file paths, and configurations,
-bootstrap fixture data if necessary.
-
-meta.py - Represent persistence structures which allow the usage of
-Beaker caching with SQLAlchemy. Introduces a query option called
-FromCache.
-
-model.py - The datamodel, which represents Person that has multiple
-Address objects, each with PostalCode, City, Country
-
-fixture_data.py - creates demo PostalCode, Address, Person objects
-in the database.
-
-helloworld.py - the basic idea.
-
-relation_caching.py - Illustrates how to add cache options on
-relation endpoints, so that lazyloads load from cache.
-
-advanced.py - Further examples of how to use FromCache. Combines
-techniques from the first two scripts.
-
-local_session_caching.py - Grok everything so far ? This example
-creates a new Beaker container that will persist data in a dictionary
-which is local to the current session. remove() the session
-and the cache is gone.
\ No newline at end of file
-"""__init__.py
+"""
+Illustrates how to embed Beaker cache functionality within
+the Query object, allowing full cache control as well as the
+ability to pull "lazy loaded" attributes from long term cache
+as well.
-Establish data / cache file paths, and configurations,
-bootstrap fixture data if necessary.
+In this demo, the following techniques are illustrated:
-"""
-import meta, model, fixture_data
-from sqlalchemy import create_engine
-import os
-
-root = "./beaker_data/"
-
-if not os.path.exists(root):
- raw_input("Will create datafiles in %r.\n"
- "To reset the cache + database, delete this directory.\n"
- "Press enter to continue.\n" % root
- )
- os.makedirs(root)
+* Using custom subclasses of Query
+* Basic technique of circumventing Query to pull from a
+ custom cache source instead of the database.
+* Rudimental caching with Beaker, using "regions" which allow
+ global control over a fixed set of configurations.
+* Using custom MapperOption objects to configure options on
+ a Query, including the ability to invoke the options
+ deep within an object graph when lazy loads occur.
+
+E.g.::
+
+ # query for Person objects, specifying cache
+ q = Session.query(Person).options(FromCache("default", "all_people"))
+
+ # specify that each Person's "addresses" collection comes from
+ # cache too
+ q = q.options(FromCache("default", "by_person", Person.addresses))
+
+ # query
+ print q.all()
-dbfile = os.path.join(root, "beaker_demo.db")
-engine = create_engine('sqlite:///%s' % dbfile, echo=True)
-meta.Session.configure(bind=engine)
-
-# configure the "default" cache region.
-meta.cache_manager.regions['default'] ={
-
- # using type 'file' to illustrate
- # serialized persistence. In reality,
- # use memcached. Other backends
- # are much, much slower.
- 'type':'file',
- 'data_dir':root,
- 'expire':3600,
-
- # set start_time to current time
- # to re-cache everything
- # upon application startup
- #'start_time':time.time()
- }
-
-installed = False
-if not os.path.exists(dbfile):
- fixture_data.install()
- installed = True
\ No newline at end of file
+To run, both SQLAlchemy and Beaker (1.4 or greater) must be
+installed or on the current PYTHONPATH. The demo will create a local
+directory for datafiles, insert initial data, and run. Running the
+demo a second time will utilize the cache files already present, and
+exactly one SQL statement against two tables will be emitted - the
+displayed result however will utilize dozens of lazyloads that all
+pull from cache.
+
+Three endpoint scripts, in order of complexity, are run as follows::
+
+ python examples/beaker_caching/helloworld.py
+
+ python examples/beaker_caching/relation_caching.py
+
+ python examples/beaker_caching/advanced.py
+
+ python examples/beaker_caching/local_session_caching.py
+
+
+Listing of files:
+
+ environment.py - Establish data / cache file paths, and configurations,
+ bootstrap fixture data if necessary.
+
+ meta.py - Represent persistence structures which allow the usage of
+ Beaker caching with SQLAlchemy. Introduces a query option called
+ FromCache.
+
+ model.py - The datamodel, which represents Person that has multiple
+ Address objects, each with PostalCode, City, Country
+
+ fixture_data.py - creates demo PostalCode, Address, Person objects
+ in the database.
+
+ helloworld.py - the basic idea.
+
+ relation_caching.py - Illustrates how to add cache options on
+ relation endpoints, so that lazyloads load from cache.
+
+ advanced.py - Further examples of how to use FromCache. Combines
+ techniques from the first two scripts.
+
+    local_session_caching.py - Grok everything so far? This example
+ creates a new Beaker container that will persist data in a dictionary
+ which is local to the current session. remove() the session
+ and the cache is gone.
+
+"""
"""
-import __init__ # if running as a script
+import environment
from model import Person, Address, cache_address_bits
from meta import Session, FromCache
from sqlalchemy.orm import eagerload
--- /dev/null
+"""environment.py
+
+Establish data / cache file paths, and configurations,
+bootstrap fixture data if necessary.
+
+"""
+import meta, model, fixture_data
+from sqlalchemy import create_engine
+import os
+
+root = "./beaker_data/"
+
+if not os.path.exists(root):
+ raw_input("Will create datafiles in %r.\n"
+ "To reset the cache + database, delete this directory.\n"
+ "Press enter to continue.\n" % root
+ )
+ os.makedirs(root)
+
+dbfile = os.path.join(root, "beaker_demo.db")
+engine = create_engine('sqlite:///%s' % dbfile, echo=True)
+meta.Session.configure(bind=engine)
+
+# configure the "default" cache region.
+meta.cache_manager.regions['default'] ={
+
+ # using type 'file' to illustrate
+ # serialized persistence. In reality,
+ # use memcached. Other backends
+ # are much, much slower.
+ 'type':'file',
+ 'data_dir':root,
+ 'expire':3600,
+
+ # set start_time to current time
+ # to re-cache everything
+ # upon application startup
+ #'start_time':time.time()
+ }
+
+installed = False
+if not os.path.exists(dbfile):
+ fixture_data.install()
+ installed = True
\ No newline at end of file
"""
-import __init__ # if running as a script
+import environment
from model import Person
from meta import Session, FromCache
if __name__ == '__main__':
- import __init__ # if running as a script
+ import environment
import meta
# create a Beaker container type called "ext:local_session".
term cache.
"""
-import __init__ # if running as a script
+import environment
from model import Person, Address, cache_address_bits
from meta import Session
from sqlalchemy.orm import eagerload
"To clear the cache, delete the directory %r. \n"\
"This will cause a re-load of cities, postal codes and countries on "\
"the next run.\n"\
- % os.path.join(__init__.root, 'container_file')
+ % os.path.join(environment.root, 'container_file')
+"""
+Two examples illustrating modifications to SQLAlchemy's attribute management system.
+
+``listen_for_events.py`` illustrates the usage of :class:`~sqlalchemy.orm.interfaces.AttributeExtension` to intercept attribute events. It additionally illustrates a way to automatically attach these listeners to all class attributes using a :class:`~sqlalchemy.orm.interfaces.InstrumentationManager`.
+
+``custom_management.py`` illustrates much deeper usage of :class:`~sqlalchemy.orm.interfaces.InstrumentationManager` as well as collection adaptation, to completely change the underlying method used to store state on an object. This example was developed to illustrate techniques which would be used by other third party object instrumentation systems to interact with SQLAlchemy's event system and is only intended for very intricate framework integrations.
+
+"""
\ No newline at end of file
class MyCollectionAdapter(object):
"""An wholly alternative instrumentation implementation."""
+
def __init__(self, key, state, collection):
self.key = key
self.state = state
+"""Illustrates a clever technique using Python descriptors to create custom attributes representing SQL expressions when used at the class level, and Python expressions when used at the instance level. In some cases this technique replaces the need to configure the attribute in the mapping, instead relying upon ordinary Python behavior to create custom expression components.
+
+E.g.::
+
+ class BaseInterval(object):
+ @hybrid
+ def contains(self,point):
+ return (self.start <= point) & (point < self.end)
+
+"""
-"""A couple of helper descriptors to allow to use the same code as query
-criterion creators and as instance code. As this doesn't do advanced
-magic recompiling, you can only use basic expression-like code."""
import new
+"""Illustrates how to place a dictionary-like facade on top of a "dynamic" relation, so
+that dictionary operations (assuming simple string keys) can operate upon a large
+collection without loading the full collection at once.
+
+"""
\ No newline at end of file
-"""Illustrates how to place a dictionary-like facade on top of a dynamic_loader, so\r
-that dictionary operations (assuming simple string keys) can operate upon a large \r
-collection without loading the full collection at once.\r
-\r
-Similar approaches could be taken towards sets and dictionaries with non-string keys \r
-although the hash policy of the members would need to be distilled into a filter() criterion.\r
-\r
-"""\r
-\r
class ProxyDict(object):\r
def __init__(self, parent, collection_name, childclass, keyname):\r
self.parent = parent\r
+"""
+Illustrates three strategies for persisting and querying XML documents as represented by
+ElementTree in a relational database. The techniques do not apply any mappings to the ElementTree objects directly, so are compatible with the native cElementTree as well as lxml, and can be adapted to suit any kind of DOM representation system. Querying along xpath-like strings is illustrated as well.
+
+In order of complexity:
+
+* ``pickle.py`` - Quick and dirty, serialize the whole DOM into a BLOB column. While the example
+ is very brief, it has very limited functionality.
+* ``adjacency_list.py`` - Each DOM node is stored in an individual table row, with attributes
+ represented in a separate table. The nodes are associated in a hierarchy using an adjacency list
+ structure. A query function is introduced which can search for nodes along any path with a given
+ structure of attributes, basically a (very narrow) subset of xpath.
+* ``optimized_al.py`` - Uses the same strategy as ``adjacency_list.py``, but adds a
+ :class:`~sqlalchemy.orm.interfaces.MapperExtension` which optimizes how the hierarchical structure
+ is loaded, such that the full set of DOM nodes are loaded within a single table result set, and
+ are organized hierarchically as they are received during a load.
+
+E.g.::
+
+ # parse an XML file and persist in the database
+ doc = ElementTree.parse("test.xml")
+ session.add(Document(file, doc))
+ session.commit()
+
+ # locate documents with a certain path/attribute structure
+ for document in find_document('/somefile/header/field2[@attr=foo]'):
+ # dump the XML
+ print document
+
+"""
\ No newline at end of file
import sys, os, StringIO, re
-import logging
-logging.basicConfig()
-
-# uncomment to show SQL statements
-#logging.getLogger('sqlalchemy.engine').setLevel(logging.INFO)
-
-# uncomment to show SQL statements and result sets
-#logging.getLogger('sqlalchemy.engine').setLevel(logging.DEBUG)
-
-
from xml.etree import ElementTree
meta = MetaData()
import sys, os, StringIO, re
-import logging
-logging.basicConfig()
-
-# uncomment to show SQL statements
-#logging.getLogger('sqlalchemy.engine').setLevel(logging.INFO)
-
-# uncomment to show SQL statements and result sets
-#logging.getLogger('sqlalchemy.engine').setLevel(logging.DEBUG)
-
-
from xml.etree import ElementTree
meta = MetaData()
import sys, os
-import logging
-logging.basicConfig()
-
-# uncomment to show SQL statements
-#logging.getLogger('sqlalchemy.engine').setLevel(logging.INFO)
-
-# uncomment to show SQL statements and result sets
-#logging.getLogger('sqlalchemy.engine').setLevel(logging.DEBUG)
-
from xml.etree import ElementTree
engine = create_engine('sqlite://')
+"""
+An example of persistence for a directed graph structure. The graph is stored as a collection of edges, each referencing both a "lower" and an "upper" node in a table of nodes. Basic persistence and querying for lower- and upper- neighbors are illustrated::
+
+ n2 = Node(2)
+ n5 = Node(5)
+ n2.add_neighbor(n5)
+ print n2.higher_neighbors()
+
+"""
\ No newline at end of file
--- /dev/null
+"""
+Working examples of single-table, joined-table, and concrete-table inheritance as described in :ref:`datamapping_inheritance`.
+
+"""
\ No newline at end of file
return "Company %s" % self.name
-person_join = people.outerjoin(engineers).outerjoin(managers)
-
person_mapper = mapper(Person, people, polymorphic_on=people.c.type, polymorphic_identity='person')
mapper(Engineer, engineers, inherits=person_mapper, polymorphic_identity='engineer')
mapper(Manager, managers, inherits=person_mapper, polymorphic_identity='manager')
c.employees.append(Manager(name='jsmith', status='ABA', manager_name='manager2'))
session.add(c)
-print session.new
session.flush()
session.expunge_all()
c = session.query(Company).get(1)
for e in c.employees:
- print e, e._sa_instance_state.key
+ print e
session.delete(c)
session.flush()
manager_mapper = mapper(Manager, inherits=person_mapper, polymorphic_identity='manager')
engineer_mapper = mapper(Engineer, inherits=person_mapper, polymorphic_identity='engineer')
-
-
mapper(Company, companies, properties={
'employees': relation(Person, lazy=True, backref='company')
})
c = session.query(Company).get(1)
for e in c.employees:
- print e, e._sa_instance_state.key, e.company
+ print e, e.company
print "\n"
c = session.query(Company).get(1)
for e in c.employees:
- print e, e._sa_instance_state.key
+ print e
session.delete(c)
session.flush()
+"""Large collection example.
+
+Illustrates the options to use with :func:`~sqlalchemy.orm.relation()` when the list of related objects is very large, including:
+
+* "dynamic" relations which query slices of data as accessed
+* how to use ON DELETE CASCADE in conjunction with ``passive_deletes=True`` to greatly improve the performance of related collection deletion.
+
+"""
-"""Large collection example.
-
-Illustrates the options to use on relation() when the list of related objects
-is very large.
-
-"""
from sqlalchemy import (MetaData, Table, Column, Integer, String, ForeignKey,
create_engine)
+"""
+Illustrates a rudimentary way to implement the "nested sets" pattern for hierarchical data using the SQLAlchemy ORM.
+
+"""
\ No newline at end of file
+"""
+Illustrates polymorphic associations, a method of associating a particular child object with many different types of parent object.
+
+This example is based off the original blog post at `<http://techspot.zzzeek.org/?p=13>`_ and illustrates three techniques:
+
+* ``poly_assoc.py`` - imitates the non-foreign-key schema used by Ruby on Rails' Active Record.
+* ``poly_assoc_fk.py`` - Adds a polymorphic association table so that referential integrity can be maintained.
+* ``poly_assoc_generic.py`` - further automates the approach of ``poly_assoc_fk.py`` to also generate the association table definitions automatically.
+
+"""
\ No newline at end of file
+"""A naive example illustrating techniques to help
+embed PostGIS functionality.
+
+This example was originally developed in the hopes that it would be extrapolated into a comprehensive PostGIS integration layer. We are pleased to announce that this has come to fruition as `GeoAlchemy <http://www.geoalchemy.org/>`_.
+
+The example illustrates:
+
+* a DDL extension which allows CREATE/DROP to work in
+ conjunction with AddGeometryColumn/DropGeometryColumn
+
+* a Geometry type, as well as a few subtypes, which
+ convert result row values to a GIS-aware object,
+ and also integrates with the DDL extension.
+
+* a GIS-aware object which stores a raw geometry value
+ and provides a factory for functions such as AsText().
+
+* an ORM comparator which can override standard column
+ methods on mapped objects to produce GIS operators.
+
+* an attribute event listener that intercepts strings
+ and converts to GeomFromText().
+
+* a standalone operator example.
+
+The implementation is limited to only public, well known
+and simple to use extension points.
+
+E.g.::
+
+ print session.query(Road).filter(Road.road_geom.intersects(r1.road_geom)).all()
+
+"""
+
-"""A naive example illustrating techniques to help
-embed PostGIS functionality.
-
-The techniques here could be used by a capable developer
-as the basis for a comprehensive PostGIS SQLAlchemy extension.
-Please note this is an entirely incomplete proof of concept
-only, and PostGIS support is *not* a supported feature
-of SQLAlchemy.
-
-Includes:
-
- * a DDL extension which allows CREATE/DROP to work in
- conjunction with AddGeometryColumn/DropGeometryColumn
-
- * a Geometry type, as well as a few subtypes, which
- convert result row values to a GIS-aware object,
- and also integrates with the DDL extension.
-
- * a GIS-aware object which stores a raw geometry value
- and provides a factory for functions such as AsText().
-
- * an ORM comparator which can override standard column
- methods on mapped objects to produce GIS operators.
-
- * an attribute event listener that intercepts strings
- and converts to GeomFromText().
-
- * a standalone operator example.
-
-The implementation is limited to only public, well known
-and simple to use extension points. Future SQLAlchemy
-expansion points may allow more seamless integration of
-some features.
-
-"""
-
from sqlalchemy.orm.interfaces import AttributeExtension
from sqlalchemy.orm.properties import ColumnProperty
from sqlalchemy.types import TypeEngine
+"""a basic example of using the SQLAlchemy Sharding API.
+Sharding refers to horizontally scaling data across multiple
+databases.
+
+The basic components of a "sharded" mapping are:
+
+* multiple databases, each assigned a 'shard id'
+* a function which can return a single shard id, given an instance
+ to be saved; this is called "shard_chooser"
+* a function which can return a list of shard ids which apply to a particular
+ instance identifier; this is called "id_chooser". If it returns all shard ids,
+ all shards will be searched.
+* a function which can return a list of shard ids to try, given a particular
+ Query ("query_chooser"). If it returns all shard ids, all shards will be
+ queried and the results joined together.
+
+In this example, four sqlite databases will store information about
+weather data on a database-per-continent basis. We provide example shard_chooser, id_chooser and query_chooser functions. The query_chooser illustrates inspection of the SQL expression element in order to attempt to determine a single shard being requested.
+
+"""
-"""a basic example of using the SQLAlchemy Sharding API.
-Sharding refers to horizontally scaling data across multiple
-databases.
-
-In this example, four sqlite databases will store information about
-weather data on a database-per-continent basis.
-
-To set up a sharding system, you need:
- 1. multiple databases, each assined a 'shard id'
- 2. a function which can return a single shard id, given an instance
- to be saved; this is called "shard_chooser"
- 3. a function which can return a list of shard ids which apply to a particular
- instance identifier; this is called "id_chooser". If it returns all shard ids,
- all shards will be searched.
- 4. a function which can return a list of shard ids to try, given a particular
- Query ("query_chooser"). If it returns all shard ids, all shards will be
- queried and the results joined together.
-"""
# step 1. imports
from sqlalchemy import (create_engine, MetaData, Table, Column, Integer,
--- /dev/null
+"""
+Illustrates an extension which creates version tables for entities and stores records for each change. The same idea as Elixir's versioned extension, but more efficient (uses attribute API to get history) and handles class inheritance. The given extensions generate an anonymous "history" class which represents historical versions of the target object.
+
+Usage is illustrated via a unit test module ``test_versioning.py``, which can be run via nose::
+
+ nosetests -w examples/versioning/
+
+A fragment of example usage, using declarative::
+
+ from history_meta import VersionedMeta, VersionedListener
+
+ Base = declarative_base(metaclass=VersionedMeta, bind=engine)
+ Session = sessionmaker(extension=VersionedListener())
+
+ class SomeClass(Base):
+ __tablename__ = 'sometable'
+
+ id = Column(Integer, primary_key=True)
+ name = Column(String(50))
+
+ def __eq__(self, other):
+ assert type(other) is SomeClass and other.id == self.id
+
+ sess = Session()
+ sc = SomeClass(name='sc1')
+ sess.add(sc)
+ sess.commit()
+
+ sc.name = 'sc1modified'
+ sess.commit()
+
+ assert sc.version == 2
+
+ SomeClassHistory = SomeClass.__history_mapper__.class_
+
+ assert sess.query(SomeClassHistory).\\
+ filter(SomeClassHistory.version == 1).\\
+ all() \\
+ == [SomeClassHistory(version=1, name='sc1')]
+
+To apply ``VersionedMeta`` to a subset of classes (probably more typical), the metaclass can be applied on a per-class basis::
+
+ from history_meta import VersionedMeta, VersionedListener
+
+ Base = declarative_base(bind=engine)
+
+ class SomeClass(Base):
+ __tablename__ = 'sometable'
+
+ # ...
+
+ class SomeVersionedClass(Base):
+ __metaclass__ = VersionedMeta
+ __tablename__ = 'someothertable'
+
+ # ...
+
+The ``VersionedMeta`` is a declarative metaclass - to use the extension with plain mappers, the ``_history_mapper`` function can be applied::
+
+ from history_meta import _history_mapper
+
+ m = mapper(SomeClass, sometable)
+ _history_mapper(m)
+
+ SomeHistoryClass = SomeClass.__history_mapper__.class_
+
+"""
\ No newline at end of file
--- /dev/null
+from sqlalchemy.ext.declarative import DeclarativeMeta
+from sqlalchemy.orm import mapper, class_mapper, attributes, object_mapper
+from sqlalchemy.orm.exc import UnmappedClassError, UnmappedColumnError
+from sqlalchemy import Table, Column, ForeignKeyConstraint, Integer
+from sqlalchemy.orm.interfaces import SessionExtension
+
+def col_references_table(col, table):
+ for fk in col.foreign_keys:
+ if fk.references(table):
+ return True
+ return False
+
+def _history_mapper(local_mapper):
+ cls = local_mapper.class_
+
+ # SLIGHT SQLA HACK #1 - set the "active_history" flag
+    # on column-mapped attributes so that the old version
+ # of the info is always loaded (currently sets it on all attributes)
+ for prop in local_mapper.iterate_properties:
+ getattr(local_mapper.class_, prop.key).impl.active_history = True
+
+ super_mapper = local_mapper.inherits
+ super_history_mapper = getattr(cls, '__history_mapper__', None)
+
+ polymorphic_on = None
+ super_fks = []
+ if not super_mapper or local_mapper.local_table is not super_mapper.local_table:
+ cols = []
+ for column in local_mapper.local_table.c:
+ if column.name == 'version':
+ continue
+
+ col = column.copy()
+
+ if super_mapper and col_references_table(column, super_mapper.local_table):
+ super_fks.append((col.key, list(super_history_mapper.base_mapper.local_table.primary_key)[0]))
+
+ cols.append(col)
+
+ if column is local_mapper.polymorphic_on:
+ polymorphic_on = col
+
+ if super_mapper:
+ super_fks.append(('version', super_history_mapper.base_mapper.local_table.c.version))
+ cols.append(Column('version', Integer, primary_key=True))
+ else:
+ cols.append(Column('version', Integer, primary_key=True))
+
+ if super_fks:
+ cols.append(ForeignKeyConstraint(*zip(*super_fks)))
+
+ table = Table(local_mapper.local_table.name + '_history', local_mapper.local_table.metadata,
+ *cols
+ )
+ else:
+ # single table inheritance. take any additional columns that may have
+ # been added and add them to the history table.
+ for column in local_mapper.local_table.c:
+ if column.key not in super_history_mapper.local_table.c:
+ col = column.copy()
+ super_history_mapper.local_table.append_column(col)
+ table = None
+
+ if super_history_mapper:
+ bases = (super_history_mapper.class_,)
+ else:
+ bases = local_mapper.base_mapper.class_.__bases__
+ versioned_cls = type.__new__(type, "%sHistory" % cls.__name__, bases, {})
+
+ m = mapper(
+ versioned_cls,
+ table,
+ inherits=super_history_mapper,
+ polymorphic_on=polymorphic_on,
+ polymorphic_identity=local_mapper.polymorphic_identity
+ )
+ cls.__history_mapper__ = m
+
+ if not super_history_mapper:
+ cls.version = Column('version', Integer, default=1, nullable=False)
+
+
+class VersionedMeta(DeclarativeMeta):
+ def __init__(cls, classname, bases, dict_):
+ DeclarativeMeta.__init__(cls, classname, bases, dict_)
+
+ try:
+ mapper = class_mapper(cls)
+ _history_mapper(mapper)
+ except UnmappedClassError:
+ pass
+
+
+def versioned_objects(iter):
+ for obj in iter:
+ if hasattr(obj, '__history_mapper__'):
+ yield obj
+
+def create_version(obj, session, deleted = False):
+ obj_mapper = object_mapper(obj)
+ history_mapper = obj.__history_mapper__
+ history_cls = history_mapper.class_
+
+ obj_state = attributes.instance_state(obj)
+
+ attr = {}
+
+ obj_changed = False
+
+ for om, hm in zip(obj_mapper.iterate_to_root(), history_mapper.iterate_to_root()):
+ if hm.single:
+ continue
+
+ for hist_col in hm.local_table.c:
+ if hist_col.key == 'version':
+ continue
+
+ obj_col = om.local_table.c[hist_col.key]
+
+ # SLIGHT SQLA HACK #3 - get the value of the
+ # attribute based on the MapperProperty related to the
+ # mapped column. this will allow usage of MapperProperties
+ # that have a different keyname than that of the mapped column.
+ try:
+ prop = obj_mapper._get_col_to_prop(obj_col)
+ except UnmappedColumnError:
+ # in the case of single table inheritance, there may be
+ # columns on the mapped table intended for the subclass only.
+ # the "unmapped" status of the subclass column on the
+ # base class is a feature of the declarative module as of sqla 0.5.2.
+ continue
+
+ # expired object attributes and also deferred cols might not be in the
+ # dict. force it to load no matter what by using getattr().
+ if prop.key not in obj_state.dict:
+ getattr(obj, prop.key)
+
+ a, u, d = attributes.get_history(obj, prop.key)
+
+ if d:
+ attr[hist_col.key] = d[0]
+ obj_changed = True
+ elif u:
+ attr[hist_col.key] = u[0]
+ else:
+ raise Exception("TODO: what makes us arrive here ?")
+
+ if not obj_changed and not deleted:
+ return
+
+ attr['version'] = obj.version
+ hist = history_cls()
+ for key, value in attr.iteritems():
+ setattr(hist, key, value)
+ session.add(hist)
+ obj.version += 1
+
+class VersionedListener(SessionExtension):
+ def before_flush(self, session, flush_context, instances):
+ for obj in versioned_objects(session.dirty):
+ create_version(obj, session)
+ for obj in versioned_objects(session.deleted):
+ create_version(obj, session, deleted = True)
+
--- /dev/null
+from sqlalchemy.ext.declarative import declarative_base
+from history_meta import VersionedMeta, VersionedListener
+from sqlalchemy import create_engine, Column, Integer, String, ForeignKey
+from sqlalchemy.orm import clear_mappers, compile_mappers, sessionmaker, deferred
+from sqlalchemy.test.testing import TestBase, eq_
+from sqlalchemy.test.entities import ComparableEntity
+
+def setup():
+ global engine
+ engine = create_engine('sqlite://', echo=True)
+
+class TestVersioning(TestBase):
+ def setup(self):
+ global Base, Session
+ Base = declarative_base(metaclass=VersionedMeta, bind=engine)
+ Session = sessionmaker(extension=VersionedListener())
+
+ def teardown(self):
+ clear_mappers()
+ Base.metadata.drop_all()
+
+ def create_tables(self):
+ Base.metadata.create_all()
+
+ def test_plain(self):
+ class SomeClass(Base, ComparableEntity):
+ __tablename__ = 'sometable'
+
+ id = Column(Integer, primary_key=True)
+ name = Column(String(50))
+
+ self.create_tables()
+ sess = Session()
+ sc = SomeClass(name='sc1')
+ sess.add(sc)
+ sess.commit()
+
+ sc.name = 'sc1modified'
+ sess.commit()
+
+ assert sc.version == 2
+
+ SomeClassHistory = SomeClass.__history_mapper__.class_
+
+ eq_(
+ sess.query(SomeClassHistory).filter(SomeClassHistory.version == 1).all(),
+ [SomeClassHistory(version=1, name='sc1')]
+ )
+
+ sc.name = 'sc1modified2'
+
+ eq_(
+ sess.query(SomeClassHistory).order_by(SomeClassHistory.version).all(),
+ [
+ SomeClassHistory(version=1, name='sc1'),
+ SomeClassHistory(version=2, name='sc1modified')
+ ]
+ )
+
+ assert sc.version == 3
+
+ sess.commit()
+
+ sc.name = 'temp'
+ sc.name = 'sc1modified2'
+
+ sess.commit()
+
+ eq_(
+ sess.query(SomeClassHistory).order_by(SomeClassHistory.version).all(),
+ [
+ SomeClassHistory(version=1, name='sc1'),
+ SomeClassHistory(version=2, name='sc1modified')
+ ]
+ )
+
+ sess.delete(sc)
+ sess.commit()
+
+ eq_(
+ sess.query(SomeClassHistory).order_by(SomeClassHistory.version).all(),
+ [
+ SomeClassHistory(version=1, name='sc1'),
+ SomeClassHistory(version=2, name='sc1modified'),
+ SomeClassHistory(version=3, name='sc1modified2')
+ ]
+ )
+
+
+
+
+ def test_deferred(self):
+ """test versioning of unloaded, deferred columns."""
+
+ class SomeClass(Base, ComparableEntity):
+ __tablename__ = 'sometable'
+
+ id = Column(Integer, primary_key=True)
+ name = Column(String(50))
+ data = deferred(Column(String(25)))
+
+ self.create_tables()
+ sess = Session()
+ sc = SomeClass(name='sc1', data='somedata')
+ sess.add(sc)
+ sess.commit()
+ sess.close()
+
+ sc = sess.query(SomeClass).first()
+ assert 'data' not in sc.__dict__
+
+ sc.name = 'sc1modified'
+ sess.commit()
+
+ assert sc.version == 2
+
+ SomeClassHistory = SomeClass.__history_mapper__.class_
+
+ eq_(
+ sess.query(SomeClassHistory).filter(SomeClassHistory.version == 1).all(),
+ [SomeClassHistory(version=1, name='sc1', data='somedata')]
+ )
+
+
+ def test_joined_inheritance(self):
+ class BaseClass(Base, ComparableEntity):
+ __tablename__ = 'basetable'
+
+ id = Column(Integer, primary_key=True)
+ name = Column(String(50))
+ type = Column(String(20))
+
+ __mapper_args__ = {'polymorphic_on':type, 'polymorphic_identity':'base'}
+
+ class SubClassSeparatePk(BaseClass):
+ __tablename__ = 'subtable1'
+
+ id = Column(Integer, primary_key=True)
+ base_id = Column(Integer, ForeignKey('basetable.id'))
+ subdata1 = Column(String(50))
+
+ __mapper_args__ = {'polymorphic_identity':'sep'}
+
+ class SubClassSamePk(BaseClass):
+ __tablename__ = 'subtable2'
+
+ id = Column(Integer, ForeignKey('basetable.id'), primary_key=True)
+ subdata2 = Column(String(50))
+
+ __mapper_args__ = {'polymorphic_identity':'same'}
+
+ self.create_tables()
+ sess = Session()
+
+ sep1 = SubClassSeparatePk(name='sep1', subdata1='sep1subdata')
+ base1 = BaseClass(name='base1')
+ same1 = SubClassSamePk(name='same1', subdata2='same1subdata')
+ sess.add_all([sep1, base1, same1])
+ sess.commit()
+
+ base1.name = 'base1mod'
+ same1.subdata2 = 'same1subdatamod'
+ sep1.name ='sep1mod'
+ sess.commit()
+
+ BaseClassHistory = BaseClass.__history_mapper__.class_
+ SubClassSeparatePkHistory = SubClassSeparatePk.__history_mapper__.class_
+ SubClassSamePkHistory = SubClassSamePk.__history_mapper__.class_
+ eq_(
+ sess.query(BaseClassHistory).order_by(BaseClassHistory.id).all(),
+ [
+ SubClassSeparatePkHistory(id=1, name=u'sep1', type=u'sep', version=1),
+ BaseClassHistory(id=2, name=u'base1', type=u'base', version=1),
+ SubClassSamePkHistory(id=3, name=u'same1', type=u'same', version=1)
+ ]
+ )
+
+ same1.subdata2 = 'same1subdatamod2'
+
+ eq_(
+ sess.query(BaseClassHistory).order_by(BaseClassHistory.id, BaseClassHistory.version).all(),
+ [
+ SubClassSeparatePkHistory(id=1, name=u'sep1', type=u'sep', version=1),
+ BaseClassHistory(id=2, name=u'base1', type=u'base', version=1),
+ SubClassSamePkHistory(id=3, name=u'same1', type=u'same', version=1),
+ SubClassSamePkHistory(id=3, name=u'same1', type=u'same', version=2)
+ ]
+ )
+
+ base1.name = 'base1mod2'
+ eq_(
+ sess.query(BaseClassHistory).order_by(BaseClassHistory.id, BaseClassHistory.version).all(),
+ [
+ SubClassSeparatePkHistory(id=1, name=u'sep1', type=u'sep', version=1),
+ BaseClassHistory(id=2, name=u'base1', type=u'base', version=1),
+ BaseClassHistory(id=2, name=u'base1mod', type=u'base', version=2),
+ SubClassSamePkHistory(id=3, name=u'same1', type=u'same', version=1),
+ SubClassSamePkHistory(id=3, name=u'same1', type=u'same', version=2)
+ ]
+ )
+
+ def test_single_inheritance(self):
+ class BaseClass(Base, ComparableEntity):
+ __tablename__ = 'basetable'
+
+ id = Column(Integer, primary_key=True)
+ name = Column(String(50))
+ type = Column(String(50))
+ __mapper_args__ = {'polymorphic_on':type, 'polymorphic_identity':'base'}
+
+ class SubClass(BaseClass):
+
+ subname = Column(String(50))
+ __mapper_args__ = {'polymorphic_identity':'sub'}
+
+ self.create_tables()
+ sess = Session()
+
+ b1 = BaseClass(name='b1')
+ sc = SubClass(name='s1', subname='sc1')
+
+ sess.add_all([b1, sc])
+
+ sess.commit()
+
+ b1.name='b1modified'
+
+ BaseClassHistory = BaseClass.__history_mapper__.class_
+ SubClassHistory = SubClass.__history_mapper__.class_
+
+ eq_(
+ sess.query(BaseClassHistory).order_by(BaseClassHistory.id, BaseClassHistory.version).all(),
+ [BaseClassHistory(id=1, name=u'b1', type=u'base', version=1)]
+ )
+
+ sc.name ='s1modified'
+ b1.name='b1modified2'
+
+ eq_(
+ sess.query(BaseClassHistory).order_by(BaseClassHistory.id, BaseClassHistory.version).all(),
+ [
+ BaseClassHistory(id=1, name=u'b1', type=u'base', version=1),
+ BaseClassHistory(id=1, name=u'b1modified', type=u'base', version=2),
+ SubClassHistory(id=2, name=u's1', type=u'sub', version=1)
+ ]
+ )
+
+
+"""
+Illustrates "vertical table" mappings.
+
+A "vertical table" refers to a technique where individual attributes of an object are stored as distinct rows in a table.
+The "vertical table" technique is used to persist objects which can have a varied set of attributes, at the expense of simple query control and brevity. It is commonly found in content/document management systems in order to represent user-created structures flexibly.
+
+Two variants on the approach are given. In the second, each row references a "datatype" which contains information about the type of information stored in the attribute, such as integer, string, or date.
+
+
+Example::
+
+ shrew = Animal(u'shrew')
+ shrew[u'cuteness'] = 5
+ shrew[u'weasel-like'] = False
+ shrew[u'poisonous'] = True
+
+ session.add(shrew)
+ session.flush()
+
+ q = (session.query(Animal).
+ filter(Animal.facts.any(
+ and_(AnimalFact.key == u'weasel-like',
+ AnimalFact.value == True))))
+ print 'weasel-like animals', q.all()
+
+
+"""
\ No newline at end of file
reading and writing those various '_value' columns and keeps the '.type' up to
date.
-Note: Something much like 'comparable_property' is slated for inclusion in a
- future version of SQLAlchemy.
"""
from sqlalchemy.orm.interfaces import PropComparator
Because the key/value pairs in a vertical scheme are not fixed in advance,
accessing them like a Python dict can be very convenient. The example below
can be used with many common vertical schemas as-is or with minor adaptations.
+
"""
class VerticalProperty(object):
+++ /dev/null
-"""this example illustrates a "vertical table". an object is stored with individual attributes
-represented in distinct database rows. This allows objects to be created with dynamically changing
-fields that are all persisted in a normalized fashion."""
-
-from sqlalchemy import (create_engine, MetaData, Table, Column, Integer, String,
- ForeignKey, PickleType, DateTime, and_)
-from sqlalchemy.orm import mapper, relation, sessionmaker, scoped_session
-from sqlalchemy.orm.collections import mapped_collection
-import datetime
-
-engine = create_engine('sqlite://', echo=False)
-meta = MetaData(engine)
-
-Session = scoped_session(sessionmaker())
-
-# represent Entity objects
-entities = Table('entities', meta,
- Column('entity_id', Integer, primary_key=True),
- Column('title', String(100), nullable=False),
- )
-
-# represent named, typed fields
-entity_fields = Table('entity_fields', meta,
- Column('field_id', Integer, primary_key=True),
- Column('name', String(40), nullable=False),
- Column('datatype', String(30), nullable=False))
-
-# associate a field row with an entity row, including a typed value
-entity_values = Table('entity_values', meta,
- Column('value_id', Integer, primary_key=True),
- Column('field_id', Integer, ForeignKey('entity_fields.field_id'), nullable=False),
- Column('entity_id', Integer, ForeignKey('entities.entity_id'), nullable=False),
- Column('int_value', Integer),
- Column('string_value', String(500)),
- Column('binary_value', PickleType),
- Column('datetime_value', DateTime))
-
-meta.create_all()
-
-class Entity(object):
- """a persistable dynamic object.
-
- Marshalls attributes into a dictionary which is
- mapped to the database.
-
- """
- def __init__(self, **kwargs):
- for k in kwargs:
- setattr(self, k, kwargs[k])
-
- def __getattr__(self, key):
- """Proxy requests for attributes to the underlying _entities dictionary."""
-
- if key[0] == '_':
- return super(Entity, self).__getattr__(key)
- try:
- return self._entities[key].value
- except KeyError:
- raise AttributeError(key)
-
- def __setattr__(self, key, value):
- """Proxy requests for attribute set operations to the underlying _entities dictionary."""
-
- if key[0] == "_" or hasattr(Entity, key):
- object.__setattr__(self, key, value)
- return
-
- try:
- ev = self._entities[key]
- ev.value = value
- except KeyError:
- ev = _EntityValue(key, value)
- self._entities[key] = ev
-
-class _EntityField(object):
- """Represents a field of a particular name and datatype."""
-
- def __init__(self, name, datatype):
- self.name = name
- self.datatype = datatype
-
-class _EntityValue(object):
- """Represents an individual value."""
-
- def __init__(self, key, value):
- datatype = self._figure_datatype(value)
- field = \
- Session.query(_EntityField).filter(
- and_(_EntityField.name==key, _EntityField.datatype==datatype)
- ).first()
-
- if not field:
- field = _EntityField(key, datatype)
- Session.add(field)
-
- self.field = field
- setattr(self, self.field.datatype + "_value", value)
-
- def _figure_datatype(self, value):
- typemap = {
- int:'int',
- str:'string',
- datetime.datetime:'datetime',
- }
- for k in typemap:
- if isinstance(value, k):
- return typemap[k]
- else:
- return 'binary'
-
- def _get_value(self):
- return getattr(self, self.field.datatype + "_value")
-
- def _set_value(self, value):
- setattr(self, self.field.datatype + "_value", value)
- value = property(_get_value, _set_value)
-
- def name(self):
- return self.field.name
- name = property(name)
-
-
-# the mappers are a straightforward eager chain of
-# Entity--(1->many)->EntityValue-(many->1)->EntityField
-# notice that we are identifying each mapper to its connecting
-# relation by just the class itself.
-mapper(_EntityField, entity_fields)
-mapper(
- _EntityValue, entity_values,
- properties = {
- 'field' : relation(_EntityField, lazy=False, cascade='all')
- }
-)
-
-mapper(Entity, entities, properties = {
- '_entities' : relation(
- _EntityValue,
- lazy=False,
- cascade='all',
- collection_class=mapped_collection(lambda entityvalue: entityvalue.field.name)
- )
-})
-
-session = Session()
-entity1 = Entity(
- title = 'this is the first entity',
- name = 'this is the name',
- price = 43,
- data = ('hello', 'there')
-)
-
-entity2 = Entity(
- title = 'this is the second entity',
- name = 'this is another name',
- price = 50,
- data = ('hoo', 'ha')
-)
-
-session.add_all([entity1, entity2])
-session.commit()
-
-for entity in session.query(Entity):
- print "Entity id %d:" % entity.entity_id, entity.title, entity.name, entity.price, entity.data
-
-# perform some changes, add a new Entity
-
-entity1.price = 90
-entity1.title = 'another new title'
-entity2.data = {'oof':5,'lala':8}
-
-entity3 = Entity(
- title = 'third entity',
- name = 'new name',
- price = '$1.95', # note we change 'price' to be a string.
- # this creates a new _EntityField separate from the
- # one used by integer 'price'.
- data = 'some data'
-)
-session.add(entity3)
-
-session.commit()
-
-print "----------------"
-for entity in session.query(Entity):
- print "Entity id %d:" % entity.entity_id, entity.title, entity.name, entity.price, entity.data
-
-print "----------------"
-# illustrate each _EntityField that's been created and list each Entity which uses it
-for ent_id, name, datatype in session.query(_EntityField.field_id, _EntityField.name, _EntityField.datatype):
- print name, datatype, "(Enitites:", ",".join([
- str(entid) for entid in session.query(Entity.entity_id).\
- join(
- (_EntityValue, _EntityValue.entity_id==Entity.entity_id),
- (_EntityField, _EntityField.field_id==_EntityValue.field_id)
- ).filter(_EntityField.field_id==ent_id)
- ]), ")"
-
-# delete all the Entity objects
-for entity in session.query(Entity):
- session.delete(entity)
-session.commit()
context.append('mapper %s' % c_mapper)
if clause is not None:
context.append('SQL expression')
-
+
raise sa_exc.UnboundExecutionError(
"Could not locate a bind configured on %s or this Session" % (
', '.join(context)))
--- /dev/null
+import sqlalchemy as sa
+from sqlalchemy import exc as sa_exc
+
+_repr_stack = set()
+class BasicEntity(object):
+ def __init__(self, **kw):
+ for key, value in kw.iteritems():
+ setattr(self, key, value)
+
+ def __repr__(self):
+ if id(self) in _repr_stack:
+ return object.__repr__(self)
+ _repr_stack.add(id(self))
+ try:
+ return "%s(%s)" % (
+ (self.__class__.__name__),
+ ', '.join(["%s=%r" % (key, getattr(self, key))
+ for key in sorted(self.__dict__.keys())
+ if not key.startswith('_')]))
+ finally:
+ _repr_stack.remove(id(self))
+
+_recursion_stack = set()
+class ComparableEntity(BasicEntity):
+ def __hash__(self):
+ return hash(self.__class__)
+
+ def __ne__(self, other):
+ return not self.__eq__(other)
+
+ def __eq__(self, other):
+        """Deep, sparse compare.
+
+ Deeply compare two entities, following the non-None attributes of the
+ non-persisted object, if possible.
+
+ """
+ if other is self:
+ return True
+ elif not self.__class__ == other.__class__:
+ return False
+
+ if id(self) in _recursion_stack:
+ return True
+ _recursion_stack.add(id(self))
+
+ try:
+ # pick the entity thats not SA persisted as the source
+ try:
+ self_key = sa.orm.attributes.instance_state(self).key
+ except sa.orm.exc.NO_STATE:
+ self_key = None
+
+ if other is None:
+ a = self
+ b = other
+ elif self_key is not None:
+ a = other
+ b = self
+ else:
+ a = self
+ b = other
+
+ for attr in a.__dict__.keys():
+ if attr.startswith('_'):
+ continue
+ value = getattr(a, attr)
+
+ try:
+ # handle lazy loader errors
+ battr = getattr(b, attr)
+ except (AttributeError, sa_exc.UnboundExecutionError):
+ return False
+
+ if hasattr(value, '__iter__'):
+ if list(value) != list(battr):
+ return False
+ else:
+ if value is not None and value != battr:
+ return False
+ return True
+ finally:
+ _recursion_stack.remove(id(self))
[nosetests]
with-sqlalchemy = true
+exclude = ^examples
\ No newline at end of file
from sqlalchemy.test.testing import resolve_artifact_names, adict
from sqlalchemy.test.engines import drop_all_tables
from sqlalchemy.util import function_named
-
-
-_repr_stack = set()
-class BasicEntity(object):
- def __init__(self, **kw):
- for key, value in kw.iteritems():
- setattr(self, key, value)
-
- def __repr__(self):
- if id(self) in _repr_stack:
- return object.__repr__(self)
- _repr_stack.add(id(self))
- try:
- return "%s(%s)" % (
- (self.__class__.__name__),
- ', '.join(["%s=%r" % (key, getattr(self, key))
- for key in sorted(self.__dict__.keys())
- if not key.startswith('_')]))
- finally:
- _repr_stack.remove(id(self))
+from sqlalchemy.test.entities import BasicEntity, ComparableEntity
Entity = BasicEntity
-_recursion_stack = set()
-class ComparableEntity(BasicEntity):
- def __hash__(self):
- return hash(self.__class__)
-
- def __ne__(self, other):
- return not self.__eq__(other)
-
- def __eq__(self, other):
- """'Deep, sparse compare.
-
- Deeply compare two entities, following the non-None attributes of the
- non-persisted object, if possible.
-
- """
- if other is self:
- return True
- elif not self.__class__ == other.__class__:
- return False
-
- if id(self) in _recursion_stack:
- return True
- _recursion_stack.add(id(self))
-
- try:
- # pick the entity thats not SA persisted as the source
- try:
- self_key = sa.orm.attributes.instance_state(self).key
- except sa.orm.exc.NO_STATE:
- self_key = None
-
- if other is None:
- a = self
- b = other
- elif self_key is not None:
- a = other
- b = self
- else:
- a = self
- b = other
-
- for attr in a.__dict__.keys():
- if attr.startswith('_'):
- continue
- value = getattr(a, attr)
-
- try:
- # handle lazy loader errors
- battr = getattr(b, attr)
- except (AttributeError, sa_exc.UnboundExecutionError):
- return False
-
- if hasattr(value, '__iter__'):
- if list(value) != list(battr):
- return False
- else:
- if value is not None and value != battr:
- return False
- return True
- finally:
- _recursion_stack.remove(id(self))
-
class ORMTest(testing.TestBase, testing.AssertsExecutionResults):
__requires__ = ('subqueries',)