]> git.ipfire.org Git - thirdparty/samba.git/commitdiff
repl: Retry replication of the schema on WERR_DS_DRA_SCHEMA_MISMATCH
authorAndrew Bartlett <abartlet@samba.org>
Wed, 4 May 2016 04:19:09 +0000 (16:19 +1200)
committerAndrew Bartlett <abartlet@samba.org>
Mon, 6 Jun 2016 06:50:09 +0000 (08:50 +0200)
This makes us replicate the schema, and then go back to what we asked to replicate
originally, when the schema changes.  This should make the replication much more
robust after schema changes

Signed-off-by: Andrew Bartlett <abartlet@samba.org>
Reviewed-by: Garming Sam <garming@catalyst.net.nz>
source4/dsdb/repl/drepl_out_helpers.c
source4/dsdb/repl/drepl_service.h
source4/dsdb/repl/replicated_objects.c
source4/torture/drs/python/repl_schema.py

index a217e833e73f9198e37931372f0dd57a5cf19c21..54f44c60f7eafd8416698557bd45643dce01ea05 100644 (file)
@@ -772,7 +772,93 @@ static void dreplsrv_op_pull_source_apply_changes_trigger(struct tevent_req *req
                                                 &drsuapi->gensec_skey,
                                                 dsdb_repl_flags,
                                                 state, &objects);
-       if (!W_ERROR_IS_OK(status)) {
+
+       if (W_ERROR_EQUAL(status, WERR_DS_DRA_SCHEMA_MISMATCH)
+           && state->op->source_dsa_retry == NULL) {
+               struct dreplsrv_partition *p;
+
+               /*
+                * Change info sync or extended operation into a fetch
+                * of the schema partition, so we get all the schema
+                * objects we need.
+                *
+                * We don't want to re-do the remote exop,
+                * unless it was REPL_SECRET so we set the
+                * fallback operation to just be a fetch of
+                * the relevant partition.
+                */
+
+
+               if (state->op->extended_op == DRSUAPI_EXOP_REPL_SECRET) {
+                       state->op->extended_op_retry = state->op->extended_op;
+               } else {
+                       state->op->extended_op_retry = DRSUAPI_EXOP_NONE;
+               }
+               state->op->extended_op = DRSUAPI_EXOP_NONE;
+
+               if (ldb_dn_compare(nc_root, partition->dn) == 0) {
+                       state->op->source_dsa_retry = state->op->source_dsa;
+               } else {
+                       status = dreplsrv_partition_find_for_nc(service,
+                                                               NULL, NULL,
+                                                               ldb_dn_get_linearized(nc_root),
+                                                               &p);
+                       if (!W_ERROR_IS_OK(status)) {
+                               DEBUG(2, ("Failed to find requested Naming Context for %s: %s",
+                                         ldb_dn_get_linearized(nc_root),
+                                         win_errstr(status)));
+                               nt_status = werror_to_ntstatus(status);
+                               tevent_req_nterror(req, nt_status);
+                               return;
+                       }
+                       status = dreplsrv_partition_source_dsa_by_guid(p,
+                                                                      &state->op->source_dsa->repsFrom1->source_dsa_obj_guid,
+                                                                      &state->op->source_dsa_retry);
+
+                       if (!W_ERROR_IS_OK(status)) {
+                               struct GUID_txt_buf str;
+                               DEBUG(2, ("Failed to find requested source DSA for %s and %s: %s",
+                                         ldb_dn_get_linearized(nc_root),
+                                         GUID_buf_string(&state->op->source_dsa->repsFrom1->source_dsa_obj_guid, &str),
+                                         win_errstr(status)));
+                               nt_status = werror_to_ntstatus(status);
+                               tevent_req_nterror(req, nt_status);
+                               return;
+                       }
+               }
+
+               /* Find schema naming context to be synchronized first */
+               status = dreplsrv_partition_find_for_nc(service,
+                                                       NULL, NULL,
+                                                       ldb_dn_get_linearized(schema_dn),
+                                                       &p);
+               if (!W_ERROR_IS_OK(status)) {
+                       DEBUG(2, ("Failed to find requested Naming Context for schema: %s",
+                                 win_errstr(status)));
+                       nt_status = werror_to_ntstatus(status);
+                       tevent_req_nterror(req, nt_status);
+                       return;
+               }
+
+               status = dreplsrv_partition_source_dsa_by_guid(p,
+                                                              &state->op->source_dsa->repsFrom1->source_dsa_obj_guid,
+                                                              &state->op->source_dsa);
+               if (!W_ERROR_IS_OK(status)) {
+                       struct GUID_txt_buf str;
+                       DEBUG(2, ("Failed to find requested source DSA for %s and %s: %s",
+                                 ldb_dn_get_linearized(schema_dn),
+                                 GUID_buf_string(&state->op->source_dsa->repsFrom1->source_dsa_obj_guid, &str),
+                                 win_errstr(status)));
+                       nt_status = werror_to_ntstatus(status);
+                       tevent_req_nterror(req, nt_status);
+                       return;
+               }
+               DEBUG(4,("Wrong schema when applying reply GetNCChanges, retrying\n"));
+
+               dreplsrv_op_pull_source_get_changes_trigger(req);
+               return;
+
+       } else if (!W_ERROR_IS_OK(status)) {
                nt_status = werror_to_ntstatus(WERR_BAD_NET_RESP);
                DEBUG(0,("Failed to convert objects: %s/%s\n",
                          win_errstr(status), nt_errstr(nt_status)));
@@ -785,6 +871,7 @@ static void dreplsrv_op_pull_source_apply_changes_trigger(struct tevent_req *req
                                                objects,
                                                &state->op->source_dsa->notify_uSN);
        talloc_free(objects);
+
        if (!W_ERROR_IS_OK(status)) {
                nt_status = werror_to_ntstatus(WERR_BAD_NET_RESP);
                DEBUG(0,("Failed to commit objects: %s/%s\n",
@@ -806,6 +893,19 @@ static void dreplsrv_op_pull_source_apply_changes_trigger(struct tevent_req *req
                return;
        }
 
+       /*
+        * If we had to divert via doing some other thing, such as
+        * pulling the schema, then go back and do the original
+        * operation once we are done.
+        */
+       if (state->op->source_dsa_retry != NULL) {
+               state->op->source_dsa = state->op->source_dsa_retry;
+               state->op->extended_op = state->op->extended_op_retry;
+               state->op->source_dsa_retry = NULL;
+               dreplsrv_op_pull_source_get_changes_trigger(req);
+               return;
+       }
+
        if (state->op->extended_op != DRSUAPI_EXOP_NONE ||
            state->op->service->am_rodc) {
                /*
index edba4c4a49b5c47b9fb124d6fe67b4603e495142..317fa87ee62bb87f2b3391c52ad73d65f88e0f5f 100644 (file)
@@ -130,6 +130,13 @@ struct dreplsrv_out_operation {
        enum drsuapi_DsExtendedError extended_ret;
        dreplsrv_extended_callback_t callback;
        void *cb_data;
+
+       /*
+        * Used when we have to re-try with a different NC, eg for
+        * EXOP retry or to get a current schema first
+        */
+       struct dreplsrv_partition_source_dsa *source_dsa_retry;
+       enum drsuapi_DsExtendedOperation extended_op_retry;
 };
 
 struct dreplsrv_notify_operation {
index 2e3676c81bb072664a80fc1e8cb92076eb972333..44a766b03fe761af22509e4df29b1f71ea9b6380 100644 (file)
@@ -686,7 +686,7 @@ WERROR dsdb_replicated_objects_convert(struct ldb_context *ldb,
                 */
                status = dsdb_schema_info_cmp(schema, mapping_ctr);
                if (!W_ERROR_IS_OK(status)) {
-                       DEBUG(1,("Remote schema has changed while replicating %s\n",
+                       DEBUG(4,("Can't replicate %s because remote schema has changed since we last replicated the schema\n",
                                 ldb_dn_get_linearized(partition_dn)));
                        talloc_free(out);
                        return status;
index 23b2a398640595a4e9458ff7892dc8ab5874361c..73027010f8654c6290694086a1eb8612aaec1884 100644 (file)
@@ -38,6 +38,7 @@ from ldb import (
     FLAG_MOD_ADD,
     FLAG_MOD_REPLACE,
     )
+import ldb
 
 import drs_base
 
@@ -58,6 +59,8 @@ class DrsReplSchemaTestCase(drs_base.DrsBaseTestCase):
             DrsReplSchemaTestCase.obj_prefix = "DrsReplSchema-%s" % t
 
     def tearDown(self):
+        self._enable_inbound_repl(self.dnsname_dc1)
+        self._enable_inbound_repl(self.dnsname_dc2)
         super(DrsReplSchemaTestCase, self).tearDown()
 
     def _make_obj_names(self, base_name):
@@ -66,10 +69,12 @@ class DrsReplSchemaTestCase(drs_base.DrsBaseTestCase):
         self.obj_id += 1
         obj_name = "%s-%d-%s" % (self.obj_prefix, self.obj_id, base_name)
         obj_ldn = obj_name.replace("-", "")
-        obj_dn = "CN=%s,%s" % (obj_name, self.schema_dn)
+        obj_dn = ldb.Dn(self.ldb_dc1, "CN=X")
+        obj_dn.add_base(ldb.Dn(self.ldb_dc1, self.schema_dn))
+        obj_dn.set_component(0, "CN", obj_name)
         return (obj_dn, obj_name, obj_ldn)
 
-    def _schema_new_class(self, ldb_ctx, base_name, base_int, attrs=None):
+    def _schema_new_class(self, ldb_ctx, base_name, base_int, oc_cat=1, attrs=None):
         (class_dn, class_name, class_ldn) = self._make_obj_names(base_name)
         rec = {"dn": class_dn,
                "objectClass": ["top", "classSchema"],
@@ -78,7 +83,7 @@ class DrsReplSchemaTestCase(drs_base.DrsBaseTestCase):
                "governsId": "1.3.6.1.4.1.7165.4.6.2." \
                 + str((100000 * base_int) + random.randint(1,100000)) + ".1.5.13",
                "instanceType": "4",
-               "objectClassCategory": "1",
+               "objectClassCategory": "%d" % oc_cat,
                "subClassOf": "top",
                "systemOnly": "FALSE"}
         # allow overriding/adding attributes
@@ -176,10 +181,12 @@ class DrsReplSchemaTestCase(drs_base.DrsBaseTestCase):
         # add a base classSchema class so we can use our new
         # attribute in class definition in a sibling class
         (c_ldn, c_dn) = self._schema_new_class(self.ldb_dc1, "cls-A", 7,
+                                               1,
                                                {"systemMayContain": a_ldn,
                                                 "subClassOf": "classSchema"})
         # add new classSchema object with value for a_ldb attribute
         (c_ldn, c_dn) = self._schema_new_class(self.ldb_dc1, "cls-B", 8,
+                                               1,
                                                {"objectClass": ["top", "classSchema", c_ldn],
                                                 a_ldn: "test_classWithCustomAttribute"})
         # force replication from DC1 to DC2
@@ -197,6 +204,37 @@ class DrsReplSchemaTestCase(drs_base.DrsBaseTestCase):
         # check object is replicated
         self._check_object(a_dn)
 
+    def test_attribute_on_ou(self):
+        """Simple test having an OU with a custome attribute replicated correctly
+
+        This ensures that the server replicates the new schema attribute
+        and class before the objects that depend on them.
+        """
+        # disable automatic replication temporarily
+        self._disable_inbound_repl(self.dnsname_dc1)
+        self._disable_inbound_repl(self.dnsname_dc2)
+
+        # add new attributeSchema object
+        (a_ldn, a_dn) = self._schema_new_attr(self.ldb_dc1, "attr-OU-S", 3)
+        (c_ldn, c_dn) = self._schema_new_class(self.ldb_dc1, "cls-OU-A", 8,
+                                               3,
+                                               {"mayContain": a_ldn})
+        ou_dn = ldb.Dn(self.ldb_dc1, "ou=X")
+        ou_dn.add_base(self.ldb_dc1.get_default_basedn())
+        ou_dn.set_component(0, "OU", a_dn.get_component_value(0))
+        rec = {"dn": ou_dn,
+               "objectClass": ["top", "organizationalUnit", c_ldn],
+               "ou": ou_dn.get_component_value(0),
+               a_ldn: "test OU"}
+        self.ldb_dc1.add(rec)
+
+        # force replication from DC1 to DC2
+        self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, nc_dn=self.domain_dn)
+        # check objects are replicated
+        self._check_object(c_dn)
+        self._check_object(a_dn)
+        self._check_object(ou_dn)
+        self.ldb_dc1.delete(ou_dn)
+
     def test_all(self):
         """Basic plan is to create bunch of classSchema
            and attributeSchema objects, replicate Schema NC