#define CQSPI_OP_WIDTH(part) ((part).nbytes ? ilog2((part).buswidth) : 0)
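+ /* Indices into the clk_bulk_data array of controller clocks. */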
enum {
- CLK_QSPI_APB = 0,
+ CLK_QSPI_REF = 0,
+ CLK_QSPI_APB,
CLK_QSPI_AHB,
CLK_QSPI_NUM,
};
struct cqspi_st {
struct platform_device *pdev;
struct spi_controller *host;
- struct clk *clk;
- struct clk *clks[CLK_QSPI_NUM];
+ struct clk_bulk_data clks[CLK_QSPI_NUM];
unsigned int sclk;
void __iomem *iobase;
int (*indirect_read_dma)(struct cqspi_flash_pdata *f_pdata,
u_char *rxbuf, loff_t from_addr, size_t n_rx);
u32 (*get_dma_status)(struct cqspi_st *cqspi);
- int (*jh7110_clk_init)(struct platform_device *pdev,
- struct cqspi_st *cqspi);
};
/* Operation timeout value */
return 0;
}
-static int cqspi_jh7110_clk_init(struct platform_device *pdev, struct cqspi_st *cqspi)
-{
- static struct clk_bulk_data qspiclk[] = {
- { .id = "apb" },
- { .id = "ahb" },
- };
-
- int ret = 0;
-
- ret = devm_clk_bulk_get(&pdev->dev, ARRAY_SIZE(qspiclk), qspiclk);
- if (ret) {
- dev_err(&pdev->dev, "%s: failed to get qspi clocks\n", __func__);
- return ret;
- }
-
- cqspi->clks[CLK_QSPI_APB] = qspiclk[0].clk;
- cqspi->clks[CLK_QSPI_AHB] = qspiclk[1].clk;
-
- ret = clk_prepare_enable(cqspi->clks[CLK_QSPI_APB]);
- if (ret) {
- dev_err(&pdev->dev, "%s: failed to enable CLK_QSPI_APB\n", __func__);
- return ret;
- }
-
- ret = clk_prepare_enable(cqspi->clks[CLK_QSPI_AHB]);
- if (ret) {
- dev_err(&pdev->dev, "%s: failed to enable CLK_QSPI_AHB\n", __func__);
- goto disable_apb_clk;
- }
-
- cqspi->is_jh7110 = true;
-
- return 0;
-
-disable_apb_clk:
- clk_disable_unprepare(cqspi->clks[CLK_QSPI_APB]);
-
- return ret;
-}
-
-static void cqspi_jh7110_disable_clk(struct platform_device *pdev, struct cqspi_st *cqspi)
-{
- clk_disable_unprepare(cqspi->clks[CLK_QSPI_AHB]);
- clk_disable_unprepare(cqspi->clks[CLK_QSPI_APB]);
-}
static int cqspi_probe(struct platform_device *pdev)
{
const struct cqspi_driver_platdata *ddata;
struct spi_controller *host;
struct resource *res_ahb;
struct cqspi_st *cqspi;
- int ret;
- int irq;
+ int ret, irq;
host = devm_spi_alloc_host(&pdev->dev, sizeof(*cqspi));
if (!host)
    return -ENOMEM;
host->mem_caps = &cqspi_mem_caps;
cqspi = spi_controller_get_devdata(host);
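+ /* Flag the StarFive JH7110 up front; the reset handling below keys off cqspi->is_jh7110. */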
+ if (of_device_is_compatible(pdev->dev.of_node, "starfive,jh7110-qspi"))
+ cqspi->is_jh7110 = true;
cqspi->pdev = pdev;
cqspi->host = host;
- cqspi->is_jh7110 = false;
cqspi->ddata = ddata = of_device_get_match_data(dev);
platform_set_drvdata(pdev, cqspi);
return ret;
}
- /* Obtain QSPI clock. */
- cqspi->clk = devm_clk_get(dev, NULL);
- if (IS_ERR(cqspi->clk)) {
- dev_err(dev, "Cannot claim QSPI clock.\n");
- ret = PTR_ERR(cqspi->clk);
- return ret;
+ /* Obtain QSPI clocks. */
+ ret = devm_clk_bulk_get_optional(dev, CLK_QSPI_NUM, cqspi->clks);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to get clocks\n");
+
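+ /* The optional bulk lookup leaves missing clocks as NULL, so check the mandatory ref clock explicitly. */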
+ if (!cqspi->clks[CLK_QSPI_REF].clk) {
+ dev_err(dev, "Cannot claim mandatory QSPI ref clock.\n");
+ return -ENODEV;
}
/* Obtain and remap controller address. */
if (ret)
return ret;
-
- ret = clk_prepare_enable(cqspi->clk);
+ ret = clk_bulk_prepare_enable(CLK_QSPI_NUM, cqspi->clks);
if (ret) {
- dev_err(dev, "Cannot enable QSPI clock.\n");
+ dev_err(dev, "Cannot enable QSPI clocks.\n");
goto disable_rpm;
}
if (IS_ERR(rstc)) {
ret = PTR_ERR(rstc);
dev_err(dev, "Cannot get QSPI reset.\n");
- goto disable_clk;
+ goto disable_clks;
}
rstc_ocp = devm_reset_control_get_optional_exclusive(dev, "qspi-ocp");
if (IS_ERR(rstc_ocp)) {
ret = PTR_ERR(rstc_ocp);
dev_err(dev, "Cannot get QSPI OCP reset.\n");
- goto disable_clk;
+ goto disable_clks;
}
- if (of_device_is_compatible(pdev->dev.of_node, "starfive,jh7110-qspi")) {
+ if (cqspi->is_jh7110) {
rstc_ref = devm_reset_control_get_optional_exclusive(dev, "rstc_ref");
if (IS_ERR(rstc_ref)) {
ret = PTR_ERR(rstc_ref);
dev_err(dev, "Cannot get QSPI REF reset.\n");
- goto disable_clk;
+ goto disable_clks;
}
reset_control_assert(rstc_ref);
reset_control_deassert(rstc_ref);
reset_control_assert(rstc_ocp);
reset_control_deassert(rstc_ocp);
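+ /* The ref clock rate determines the SPI baud divider and host->max_speed_hz. */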
- cqspi->master_ref_clk_hz = clk_get_rate(cqspi->clk);
+ cqspi->master_ref_clk_hz = clk_get_rate(cqspi->clks[CLK_QSPI_REF].clk);
host->max_speed_hz = cqspi->master_ref_clk_hz;
/* write completion is supported by default */
cqspi->slow_sram = true;
if (ddata->quirks & CQSPI_NEEDS_APB_AHB_HAZARD_WAR)
cqspi->apb_ahb_hazard = true;
-
- if (ddata->jh7110_clk_init) {
- ret = cqspi_jh7110_clk_init(pdev, cqspi);
- if (ret)
- goto disable_clk;
- }
if (ddata->quirks & CQSPI_DISABLE_STIG_MODE)
cqspi->disable_stig_mode = true;
disable_controller:
cqspi_controller_enable(cqspi, 0);
disable_clks:
- if (cqspi->is_jh7110)
- cqspi_jh7110_disable_clk(pdev, cqspi);
-disable_clk:
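+ /* Runtime PM also gates these clocks; only disable them after a successful resume. */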
if (pm_runtime_get_sync(&pdev->dev) >= 0)
- clk_disable_unprepare(cqspi->clk);
+ clk_bulk_disable_unprepare(CLK_QSPI_NUM, cqspi->clks);
disable_rpm:
if (!(ddata && (ddata->quirks & CQSPI_DISABLE_RUNTIME_PM)))
pm_runtime_disable(dev);
cqspi_controller_enable(cqspi, 0);
- if (cqspi->is_jh7110)
- cqspi_jh7110_disable_clk(pdev, cqspi);
if (!(ddata && (ddata->quirks & CQSPI_DISABLE_RUNTIME_PM)))
ret = pm_runtime_get_sync(&pdev->dev);
if (ret >= 0)
- clk_disable(cqspi->clk);
+ clk_bulk_disable_unprepare(CLK_QSPI_NUM, cqspi->clks);
if (!(ddata && (ddata->quirks & CQSPI_DISABLE_RUNTIME_PM))) {
pm_runtime_put_sync(&pdev->dev);
struct cqspi_st *cqspi = dev_get_drvdata(dev);
cqspi_controller_enable(cqspi, 0);
- clk_disable_unprepare(cqspi->clk);
+ clk_bulk_disable_unprepare(CLK_QSPI_NUM, cqspi->clks);
return 0;
}
static int cqspi_runtime_resume(struct device *dev)
{
struct cqspi_st *cqspi = dev_get_drvdata(dev);
- clk_prepare_enable(cqspi->clk);
+ int ret;
+
+ ret = clk_bulk_prepare_enable(CLK_QSPI_NUM, cqspi->clks);
+ if (ret)
+     return ret;
cqspi_wait_idle(cqspi);
cqspi_controller_enable(cqspi, 0);
cqspi_controller_init(cqspi);
static const struct cqspi_driver_platdata jh7110_qspi = {
.quirks = CQSPI_DISABLE_DAC_MODE,
- .jh7110_clk_init = cqspi_jh7110_clk_init,
};
static const struct cqspi_driver_platdata pensando_cdns_qspi = {