From: Mike Bayer Date: Fri, 28 Mar 2014 20:32:11 +0000 (-0400) Subject: - Fixed ORM bug where changing the primary key of an object, then marking X-Git-Tag: rel_0_8_6~5 X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=e789a75f9a32413904d04e39de73be82df6a1bb7;p=thirdparty%2Fsqlalchemy%2Fsqlalchemy.git - Fixed ORM bug where changing the primary key of an object, then marking it for DELETE would fail to target the correct row for DELETE. Then to compound matters, basic "number of rows matched" checks were not being performed. Both issues are fixed, however note that the "rows matched" check requires so-called "sane multi-row count" functionality; the DBAPI's executemany() method must count up the rows matched by individual statements and SQLAlchemy's dialect must mark this feature as supported, currently applies to some mysql dialects, psycopg2, sqlite only. fixes #3006 - Enabled "sane multi-row count" checking for the psycopg2 DBAPI, as this seems to be supported as of psycopg2 2.0.9. Conflicts: test/orm/test_naturalpks.py test/requirements.py --- diff --git a/doc/build/changelog/changelog_08.rst b/doc/build/changelog/changelog_08.rst index f8f4232957..bcd7ca6c9b 100644 --- a/doc/build/changelog/changelog_08.rst +++ b/doc/build/changelog/changelog_08.rst @@ -11,6 +11,28 @@ .. changelog:: :version: 0.8.6 + .. change:: + :tags: bug, orm + :tickets: 3006 + :versions: 0.9.4 + + Fixed ORM bug where changing the primary key of an object, then marking + it for DELETE would fail to target the correct row for DELETE. + Then to compound matters, basic "number of rows matched" checks were + not being performed. Both issues are fixed, however note that the + "rows matched" check requires so-called "sane multi-row count" + functionality; the DBAPI's executemany() method must count up the + rows matched by individual statements and SQLAlchemy's dialect must + mark this feature as supported, currently applies to some mysql dialects, + psycopg2, sqlite only. 
+ + .. change:: + :tags: feature, postgresql + :versions: 0.9.4 + + Enabled "sane multi-row count" checking for the psycopg2 DBAPI, as + this seems to be supported as of psycopg2 2.0.9. + .. change:: :tags: bug, postgresql :tickets: 3000 diff --git a/lib/sqlalchemy/dialects/postgresql/psycopg2.py b/lib/sqlalchemy/dialects/postgresql/psycopg2.py index ed508d9274..b901bcc56c 100644 --- a/lib/sqlalchemy/dialects/postgresql/psycopg2.py +++ b/lib/sqlalchemy/dialects/postgresql/psycopg2.py @@ -346,7 +346,7 @@ class PGDialect_psycopg2(PGDialect): supports_unicode_statements = False # end Py2K default_paramstyle = 'pyformat' - supports_sane_multi_rowcount = False + supports_sane_multi_rowcount = False # set to true based on psycopg2 version execution_ctx_cls = PGExecutionContext_psycopg2 statement_compiler = PGCompiler_psycopg2 preparer = PGIdentifierPreparer_psycopg2 @@ -390,6 +390,9 @@ class PGDialect_psycopg2(PGDialect): self._hstore_oids(connection.connection) \ is not None + # http://initd.org/psycopg/docs/news.html#what-s-new-in-psycopg-2-0-9 + self.supports_sane_multi_rowcount = self.psycopg2_version >= (2, 0, 9) + @classmethod def dbapi(cls): import psycopg2 diff --git a/lib/sqlalchemy/ext/orderinglist.py b/lib/sqlalchemy/ext/orderinglist.py index afec8d15e5..e655099a47 100644 --- a/lib/sqlalchemy/ext/orderinglist.py +++ b/lib/sqlalchemy/ext/orderinglist.py @@ -91,8 +91,27 @@ attribute, so that the ordering is correct when first loaded. ..
warning:: :class:`.OrderingList` only provides limited functionality when a primary - key column or unique column is the target of the sort. Since changing the - order of entries often means that two rows must trade values, this is not + key column or unique column is the target of the sort. The two operations + that are unsupported or are problematic are: + + * two entries must trade values. This is not supported directly in the + case of a primary key or unique constraint because it means at least + one row would need to be temporarily removed first, or changed to + a third, neutral value while the switch occurs. + + * an entry must be deleted in order to make room for a new entry. SQLAlchemy's + unit of work performs all INSERTs before DELETEs within a single flush + by default. A future feature will allow this to be configurable for + specific sets of columns on mappings. + + Additional issues when using primary keys as ordering keys are that UPDATE + or DELETE statements on target rows may fail to fire correctly as orderinglist + may change their primary key value + + + Since changing the + order of entries often means that either rows must trade values, + or rows must be deleted to make way for new inserts, this is not possible when the value is constrained by a primary key or unique constraint, since one of the rows would temporarily have to point to a third available value so that the other row could take its old diff --git a/lib/sqlalchemy/orm/persistence.py b/lib/sqlalchemy/orm/persistence.py index 6b9a6b9a4b..9ad99d0649 100644 --- a/lib/sqlalchemy/orm/persistence.py +++ b/lib/sqlalchemy/orm/persistence.py @@ -441,7 +441,7 @@ def _collect_delete_commands(base_mapper, uowtransaction, table, for col in mapper._pks_by_table[table]: params[col.key] = \ value = \ - mapper._get_state_attr_by_column( + mapper._get_committed_state_attr_by_column( state, state_dict, col) if value is None: raise orm_exc.FlushError( @@ -655,21 +655,19 @@ def 
_emit_delete_statements(base_mapper, uowtransaction, cached_connections, connection = cached_connections[connection] - if need_version_id: - # TODO: need test coverage for this [ticket:1761] + expected = len(del_objects) + rows_matched = -1 + if connection.dialect.supports_sane_multi_rowcount: + c = connection.execute(statement, del_objects) + rows_matched = c.rowcount + elif need_version_id: if connection.dialect.supports_sane_rowcount: - rows = 0 + rows_matched = 0 # execute deletes individually so that versioned # rows can be verified for params in del_objects: c = connection.execute(statement, params) - rows += c.rowcount - if rows != len(del_objects): - raise orm_exc.StaleDataError( - "DELETE statement on table '%s' expected to " - "delete %d row(s); %d were matched." % - (table.description, len(del_objects), c.rowcount) - ) + rows_matched += c.rowcount else: util.warn( "Dialect %s does not support deleted rowcount " @@ -680,6 +678,12 @@ def _emit_delete_statements(base_mapper, uowtransaction, cached_connections, else: connection.execute(statement, del_objects) + if rows_matched > -1 and expected != rows_matched: + raise orm_exc.StaleDataError( + "DELETE statement on table '%s' expected to " + "delete %d row(s); %d were matched." 
% + (table.description, expected, rows_matched) + ) def _finalize_insert_update_commands(base_mapper, uowtransaction, states_to_insert, states_to_update): diff --git a/lib/sqlalchemy/testing/exclusions.py b/lib/sqlalchemy/testing/exclusions.py index 920badd922..87c53cd646 100644 --- a/lib/sqlalchemy/testing/exclusions.py +++ b/lib/sqlalchemy/testing/exclusions.py @@ -66,6 +66,9 @@ class skip_if(object): self._fails_on = skip_if(other, reason) return self + def fails_on_everything_except(self, *dbs): + self._fails_on = skip_if(fails_on_everything_except(*dbs)) + return self class fails_if(skip_if): def __call__(self, fn): diff --git a/lib/sqlalchemy/testing/requirements.py b/lib/sqlalchemy/testing/requirements.py index 055b8ec708..c43a7d0606 100644 --- a/lib/sqlalchemy/testing/requirements.py +++ b/lib/sqlalchemy/testing/requirements.py @@ -46,6 +46,12 @@ class SuiteRequirements(Requirements): return exclusions.open() + @property + def non_updating_cascade(self): + """target database must *not* support ON UPDATE..CASCADE behavior in + foreign keys.""" + return exclusions.closed() + @property def deferrable_fks(self): return exclusions.closed() diff --git a/test/orm/test_naturalpks.py b/test/orm/test_naturalpks.py index d30cdc5989..e11adb5c68 100644 --- a/test/orm/test_naturalpks.py +++ b/test/orm/test_naturalpks.py @@ -388,10 +388,7 @@ class NaturalPKTest(fixtures.MappedTest): def test_manytomany_passive(self): self._test_manytomany(True) - # mysqldb executemany() of the association table fails to - # report the correct row count - @testing.fails_if(lambda: testing.against('mysql') - and not testing.against('+zxjdbc')) + @testing.requires.non_updating_cascade def test_manytomany_nonpassive(self): self._test_manytomany(False) @@ -792,8 +789,7 @@ class CascadeToFKPKTest(fixtures.MappedTest, testing.AssertsCompiledSQL): def test_onetomany_passive(self): self._test_onetomany(True) - # PG etc. 
need passive=True to allow PK->PK cascade - @testing.fails_on_everything_except('sqlite', 'oracle', '+zxjdbc') + @testing.requires.non_updating_cascade def test_onetomany_nonpassive(self): self._test_onetomany(False) @@ -870,7 +866,7 @@ class CascadeToFKPKTest(fixtures.MappedTest, testing.AssertsCompiledSQL): def test_change_m2o_passive(self): self._test_change_m2o(True) - @testing.fails_on_everything_except('sqlite', 'oracle', '+zxjdbc') + @testing.requires.non_updating_cascade def test_change_m2o_nonpassive(self): self._test_change_m2o(False) @@ -1056,8 +1052,7 @@ class JoinedInheritanceTest(fixtures.MappedTest): def test_pk_passive(self): self._test_pk(True) - # PG etc. need passive=True to allow PK->PK cascade - @testing.fails_on_everything_except('sqlite', 'oracle', '+zxjdbc') + @testing.requires.non_updating_cascade def test_pk_nonpassive(self): self._test_pk(False) @@ -1066,8 +1061,7 @@ class JoinedInheritanceTest(fixtures.MappedTest): self._test_fk(True) # PG etc. need passive=True to allow PK->PK cascade - @testing.fails_on_everything_except('sqlite', 'mysql+zxjdbc', 'oracle', - 'postgresql+zxjdbc') + @testing.requires.non_updating_cascade def test_fk_nonpassive(self): self._test_fk(False) diff --git a/test/orm/test_unitofworkv2.py b/test/orm/test_unitofworkv2.py index 7c7337c628..8bdb8cad6b 100644 --- a/test/orm/test_unitofworkv2.py +++ b/test/orm/test_unitofworkv2.py @@ -1203,20 +1203,20 @@ class RowswitchAccountingTest(fixtures.MappedTest): @classmethod def define_tables(cls, metadata): Table('parent', metadata, - Column('id', Integer, primary_key=True) + Column('id', Integer, primary_key=True), + Column('data', Integer) ) Table('child', metadata, - Column('id', Integer, ForeignKey('parent.id'), primary_key=True) + Column('id', Integer, ForeignKey('parent.id'), primary_key=True), + Column('data', Integer) ) - def test_accounting_for_rowswitch(self): + def _fixture(self): parent, child = self.tables.parent, self.tables.child - class Parent(object): - def 
__init__(self, id): - self.id = id - self.child = Child() - class Child(object): + class Parent(fixtures.BasicEntity): + pass + class Child(fixtures.BasicEntity): pass mapper(Parent, parent, properties={ @@ -1225,15 +1225,19 @@ class RowswitchAccountingTest(fixtures.MappedTest): backref="parent") }) mapper(Child, child) + return Parent, Child + + def test_switch_on_update(self): + Parent, Child = self._fixture() sess = create_session(autocommit=False) - p1 = Parent(1) + p1 = Parent(id=1, child=Child()) sess.add(p1) sess.commit() sess.close() - p2 = Parent(1) + p2 = Parent(id=1, child=Child()) p3 = sess.merge(p2) old = attributes.get_history(p3, 'child')[2][0] @@ -1244,7 +1248,7 @@ class RowswitchAccountingTest(fixtures.MappedTest): assert p3.child._sa_instance_state.session_id == sess.hash_key assert p3.child in sess - p4 = Parent(1) + p4 = Parent(id=1, child=Child()) p5 = sess.merge(p4) old = attributes.get_history(p5, 'child')[2][0] @@ -1252,6 +1256,88 @@ class RowswitchAccountingTest(fixtures.MappedTest): sess.flush() + def test_switch_on_delete(self): + Parent, Child = self._fixture() + + sess = Session() + p1 = Parent(id=1, data=2, child=None) + sess.add(p1) + sess.flush() + + p1.id = 5 + sess.delete(p1) + eq_(p1.id, 5) + sess.flush() + + eq_(sess.scalar(self.tables.parent.count()), 0) + + +class BasicStaleChecksTest(fixtures.MappedTest): + @classmethod + def define_tables(cls, metadata): + Table('parent', metadata, + Column('id', Integer, primary_key=True), + Column('data', Integer) + ) + Table('child', metadata, + Column('id', Integer, ForeignKey('parent.id'), primary_key=True), + Column('data', Integer) + ) + + def _fixture(self): + parent, child = self.tables.parent, self.tables.child + + class Parent(fixtures.BasicEntity): + pass + class Child(fixtures.BasicEntity): + pass + + mapper(Parent, parent, properties={ + 'child':relationship(Child, uselist=False, + cascade="all, delete-orphan", + backref="parent") + }) + mapper(Child, child) + return Parent, 
Child + + def test_update_single_missing(self): + Parent, Child = self._fixture() + sess = Session() + p1 = Parent(id=1, data=2) + sess.add(p1) + sess.flush() + + sess.execute(self.tables.parent.delete()) + + p1.data = 3 + assert_raises_message( + orm_exc.StaleDataError, + "UPDATE statement on table 'parent' expected to " + "update 1 row\(s\); 0 were matched.", + sess.flush + ) + + @testing.requires.sane_multi_rowcount + def test_delete_multi_missing(self): + Parent, Child = self._fixture() + sess = Session() + p1 = Parent(id=1, data=2, child=None) + p2 = Parent(id=2, data=3, child=None) + sess.add_all([p1, p2]) + sess.flush() + + sess.execute(self.tables.parent.delete()) + sess.delete(p1) + sess.delete(p2) + + assert_raises_message( + orm_exc.StaleDataError, + "DELETE statement on table 'parent' expected to " + "delete 2 row\(s\); 0 were matched.", + sess.flush + ) + + class BatchInsertsTest(fixtures.MappedTest, testing.AssertsExecutionResults): @classmethod def define_tables(cls, metadata): diff --git a/test/requirements.py b/test/requirements.py index 25b5b72058..b8591f199a 100644 --- a/test/requirements.py +++ b/test/requirements.py @@ -61,6 +61,13 @@ class DefaultRequirements(SuiteRequirements): 'target backend does not support ON UPDATE CASCADE' ) + @property + def non_updating_cascade(self): + """target database must *not* support ON UPDATE..CASCADE behavior in + foreign keys.""" + + return fails_on_everything_except('sqlite', 'oracle', '+zxjdbc') + skip_if('mssql') + @property def deferrable_fks(self): """target database must support deferrable fks""" @@ -427,7 +434,7 @@ class DefaultRequirements(SuiteRequirements): @property def sane_multi_rowcount(self): - return skip_if( + return fails_if( lambda: not self.db.dialect.supports_sane_multi_rowcount, "driver doesn't support 'sane' multi row count" )