static bool arm_smmu_entry_differs_in_used_bits(const __le64 *entry,
const __le64 *used_bits,
const __le64 *target,
+ const __le64 *safe,
unsigned int length)
{
bool differs = false;
unsigned int i;
for (i = 0; i < length; i++) {
- if ((entry[i] & used_bits[i]) != target[i])
+ __le64 used = used_bits[i] & ~safe[i];
+
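+ /*
+ * Update-safe bits are allowed to differ while an update is in
+ * flight, so they are excluded from the comparison.
+ */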
+ if ((entry[i] & used) != (target[i] & used))
differs = true;
}
return differs;
}
struct arm_smmu_test_writer *test_writer =
container_of(writer, struct arm_smmu_test_writer, writer);
__le64 *entry_used_bits;
+ __le64 *safe_target;
+ __le64 *safe_init;
entry_used_bits = kunit_kzalloc(
test_writer->test, sizeof(*entry_used_bits) * NUM_ENTRY_QWORDS,
GFP_KERNEL);
KUNIT_ASSERT_NOT_NULL(test_writer->test, entry_used_bits);
+ safe_target = kunit_kzalloc(test_writer->test,
+ sizeof(*safe_target) * NUM_ENTRY_QWORDS,
+ GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test_writer->test, safe_target);
+
+ safe_init = kunit_kzalloc(test_writer->test,
+ sizeof(*safe_init) * NUM_ENTRY_QWORDS,
+ GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test_writer->test, safe_init);
+
pr_debug("STE value is now set to: ");
print_hex_dump_debug(" ", DUMP_PREFIX_NONE, 16, 8,
test_writer->entry,
* configuration.
*/
writer->ops->get_used(test_writer->entry, entry_used_bits);
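+ /*
+ * Collect the bits that may legitimately differ from the initial and
+ * target values mid-update. The kzalloc'd masks stay zero when the
+ * op is not implemented.
+ */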
+ if (writer->ops->get_update_safe) {
+ writer->ops->get_update_safe(test_writer->entry,
+ test_writer->init_entry, safe_init);
+ writer->ops->get_update_safe(test_writer->entry,
+ test_writer->target_entry, safe_target);
+ }
KUNIT_EXPECT_FALSE(
test_writer->test,
arm_smmu_entry_differs_in_used_bits(
test_writer->entry, entry_used_bits,
- test_writer->init_entry, NUM_ENTRY_QWORDS) &&
+ test_writer->init_entry, safe_init,
+ NUM_ENTRY_QWORDS) &&
arm_smmu_entry_differs_in_used_bits(
test_writer->entry, entry_used_bits,
- test_writer->target_entry,
+ test_writer->target_entry, safe_target,
NUM_ENTRY_QWORDS));
}
}
static const struct arm_smmu_entry_writer_ops test_ste_ops = {
.sync = arm_smmu_test_writer_record_syncs,
.get_used = arm_smmu_get_ste_used,
+ .get_update_safe = arm_smmu_get_ste_update_safe,
};
static const struct arm_smmu_entry_writer_ops test_cd_ops = {
}
EXPORT_SYMBOL_IF_KUNIT(arm_smmu_get_ste_used);
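+
+/*
+ * Report in @safe_bits which bits of @cur may change to their @target value
+ * at any point while the STE is live, without requiring a sync in between.
+ * The caller zero-initializes @safe_bits; no STE bits are reported as
+ * update-safe yet.
+ */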
+VISIBLE_IF_KUNIT
+void arm_smmu_get_ste_update_safe(const __le64 *cur, const __le64 *target,
+ __le64 *safe_bits)
+{
+}
+EXPORT_SYMBOL_IF_KUNIT(arm_smmu_get_ste_update_safe);
+
/*
* Figure out if we can do a hitless update of entry to become target. Returns a
* bit mask where 1 indicates that qword needs to be set disruptively.
{
__le64 target_used[NUM_ENTRY_QWORDS] = {};
__le64 cur_used[NUM_ENTRY_QWORDS] = {};
+ __le64 safe[NUM_ENTRY_QWORDS] = {};
u8 used_qword_diff = 0;
unsigned int i;
writer->ops->get_used(entry, cur_used);
writer->ops->get_used(target, target_used);
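+ /* Optionally collect bits that may change without an intervening sync */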
+ if (writer->ops->get_update_safe)
+ writer->ops->get_update_safe(entry, target, safe);
for (i = 0; i != NUM_ENTRY_QWORDS; i++) {
+ /*
+ * Safe only applies to bits that are used by both entries; a bit
+ * that is unused by either entry is already sequenced by the
+ * unused-bits update path below.
+ */
+ safe[i] &= target_used[i] & cur_used[i];
+
/*
* Check that masks are up to date, the make functions are not
* allowed to set a bit to 1 if the used function doesn't say it
* is used.
*/
WARN_ON_ONCE(target[i] & ~target_used[i]);
- /* Bits can change because they are not currently being used */
+ /*
+ * Bits can change because they are not currently being used, or
+ * because they were flagged as update-safe above.
+ */
+ cur_used[i] &= ~safe[i];
unused_update[i] = (entry[i] & cur_used[i]) |
(target[i] & ~cur_used[i]);
/*
return used_qword_diff;
}
-static bool entry_set(struct arm_smmu_entry_writer *writer, __le64 *entry,
+static void entry_set(struct arm_smmu_entry_writer *writer, __le64 *entry,
const __le64 *target, unsigned int start,
unsigned int len)
{
if (changed)
writer->ops->sync(writer);
- return changed;
}
/*
entry_set(writer, entry, target, 0, 1);
} else {
/*
- * No inuse bit changed. Sanity check that all unused bits are 0
- * in the entry. The target was already sanity checked by
- * compute_qword_diff().
+ * No inuse bit changed. Update-safe bits may still differ, so the
+ * entry is written unconditionally instead of asserting that it
+ * already matches the target.
*/
- WARN_ON_ONCE(
- entry_set(writer, entry, target, 0, NUM_ENTRY_QWORDS));
+ entry_set(writer, entry, target, 0, NUM_ENTRY_QWORDS);
}
}
EXPORT_SYMBOL_IF_KUNIT(arm_smmu_write_entry);
static const struct arm_smmu_entry_writer_ops arm_smmu_ste_writer_ops = {
.sync = arm_smmu_ste_writer_sync_entry,
.get_used = arm_smmu_get_ste_used,
+ .get_update_safe = arm_smmu_get_ste_update_safe,
};
static void arm_smmu_write_ste(struct arm_smmu_master *master, u32 sid,
struct arm_smmu_entry_writer_ops {
void (*get_used)(const __le64 *entry, __le64 *used);
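+ /*
+ * Optional. Set the bits in @safe_bits that may change from @cur to
+ * @target at any point during the update without a sync in between.
+ */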
+ void (*get_update_safe)(const __le64 *cur, const __le64 *target,
+ __le64 *safe_bits);
void (*sync)(struct arm_smmu_entry_writer *writer);
};
#if IS_ENABLED(CONFIG_KUNIT)
void arm_smmu_get_ste_used(const __le64 *ent, __le64 *used_bits);
+void arm_smmu_get_ste_update_safe(const __le64 *cur, const __le64 *target,
+ __le64 *safe_bits);
void arm_smmu_write_entry(struct arm_smmu_entry_writer *writer, __le64 *cur,
const __le64 *target);
void arm_smmu_get_cd_used(const __le64 *ent, __le64 *used_bits);