return p - le32_to_cpu(e->size);
}
-/* Pointer to the one and only smem handle */
-static struct qcom_smem *__smem;
+/*
+ * Pointer to the one and only smem handle.
+ * Initialized to -EPROBE_DEFER to signal that SMEM has not been probed yet.
+ * Set to -ENODEV if SMEM turns out not to be initialized by SBL.
+ */
+static struct qcom_smem *__smem = INIT_ERR_PTR(-EPROBE_DEFER);
/* Timeout (ms) for the trylock of remote spinlocks */
#define HWSPINLOCK_TIMEOUT 1000
unsigned long flags;
int ret;
- if (!__smem)
- return -EPROBE_DEFER;
+ if (IS_ERR(__smem))
+ return PTR_ERR(__smem);
if (item < SMEM_ITEM_LAST_FIXED) {
dev_err(__smem->dev,
void *qcom_smem_get(unsigned host, unsigned item, size_t *size)
{
struct smem_partition *part;
- void *ptr = ERR_PTR(-EPROBE_DEFER);
+ void *ptr;
- if (!__smem)
- return ptr;
+ if (IS_ERR(__smem))
+ return __smem;
if (WARN_ON(item >= __smem->item_count))
return ERR_PTR(-EINVAL);
struct smem_header *header;
unsigned ret;
- if (!__smem)
- return -EPROBE_DEFER;
+ if (IS_ERR(__smem))
+ return PTR_ERR(__smem);
if (host < SMEM_HOST_COUNT && __smem->partitions[host].virt_base) {
part = &__smem->partitions[host];
header = smem->regions[0].virt_base;
if (le32_to_cpu(header->initialized) != 1 ||
le32_to_cpu(header->reserved)) {
- dev_err(&pdev->dev, "SMEM is not initialized by SBL\n");
- return -EINVAL;
+ __smem = ERR_PTR(-ENODEV);
+ return dev_err_probe(&pdev->dev, -ENODEV,
+ 		      "SMEM is not initialized by SBL\n");
}
hwlock_id = of_hwspin_lock_get_id(pdev->dev.of_node, 0);
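
For reference, a minimal caller-side sketch of how the new convention propagates (hypothetical client driver; the SMEM item number 421 is a placeholder, not a real ID). qcom_smem_get() now returns ERR_PTR(-EPROBE_DEFER) until SMEM has probed and ERR_PTR(-ENODEV) when SBL never initialized SMEM, so a client can forward the code through dev_err_probe() and either defer or fail cleanly:

#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/soc/qcom/smem.h>

/* Hypothetical consumer; SMEM item 421 is a placeholder. */
static int example_client_probe(struct platform_device *pdev)
{
	size_t size;
	void *ptr;

	/*
	 * ERR_PTR(-EPROBE_DEFER) while SMEM has not been probed yet,
	 * ERR_PTR(-ENODEV) if SBL never initialized SMEM.
	 */
	ptr = qcom_smem_get(QCOM_SMEM_HOST_ANY, 421, &size);
	if (IS_ERR(ptr))
		return dev_err_probe(&pdev->dev, PTR_ERR(ptr),
				     "failed to get SMEM item\n");

	/* ... interpret the item at ptr, size bytes long ... */
	return 0;
}

With the old NULL sentinel the client could not tell the two cases apart and would keep deferring even on a system where SBL never set SMEM up; the distinct error codes make that difference visible to callers.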