diff --git a/arch/arm/boot/dts/st/stm32mp157c-ed1.dts b/arch/arm/boot/dts/st/stm32mp157c-ed1.dts
index 66ed5f9921ba14..5075e0ae9bfc91 100644
--- a/arch/arm/boot/dts/st/stm32mp157c-ed1.dts
+++ b/arch/arm/boot/dts/st/stm32mp157c-ed1.dts
@@ -58,6 +58,18 @@
 		no-map;
 	};
 
+	mmio0: mmio0@10050000 {
+		compatible = "shared-dma-pool";
+		reg = <0x10050000 0x200>;
+		no-map;
+	};
+
+	virtio0mem: virtio0mem@10050200 {
+		compatible = "shared-dma-pool";
+		reg = <0x10050200 0x7e00>;
+		no-map;
+	};
+
 	mcuram: mcuram@30000000 {
 		compatible = "shared-dma-pool";
 		reg = <0x30000000 0x40000>;
@@ -304,6 +316,8 @@
 &ipcc {
 	status = "okay";
+	#interrupt-cells = <1>;
+	interrupt-controller;
 };
 
 &iwdg2 {
@@ -313,12 +327,25 @@
 &m4_rproc {
 	memory-region = <&retram>, <&mcuram>, <&mcuram2>, <&vdev0vring0>,
-			<&vdev0vring1>, <&vdev0buffer>;
+			<&vdev0vring1>, <&vdev0buffer>, <&mmio0>;
 	mboxes = <&ipcc 0>, <&ipcc 1>, <&ipcc 2>, <&ipcc 3>;
 	mbox-names = "vq0", "vq1", "shutdown", "detach";
 	interrupt-parent = <&exti>;
 	interrupts = <68 1>;
 	status = "okay";
+
+	ranges;
+	#address-cells = <1>;
+	#size-cells = <1>;
+
+	mmio@10050000 {
+		compatible = "virtio,mmio-remoteproc";
+		reg = <0x10050000 0x100>;
+		interrupts-extended = <&ipcc 4>;
+		memory-region = <&virtio0mem>;
+		mboxes = <&ipcc 5>;
+		mbox-names = "sync_mb";
+	};
 };
 
 &pwr_regulators {
diff --git a/arch/arm/boot/dts/st/stm32mp15xx-dkx.dtsi b/arch/arm/boot/dts/st/stm32mp15xx-dkx.dtsi
index 511113f2e39928..6ed579781b38f5 100644
--- a/arch/arm/boot/dts/st/stm32mp15xx-dkx.dtsi
+++ b/arch/arm/boot/dts/st/stm32mp15xx-dkx.dtsi
@@ -48,6 +48,18 @@
 		no-map;
 	};
 
+	mmio0: mmio0@10050000 {
+		compatible = "shared-dma-pool";
+		reg = <0x10050000 0x200>;
+		no-map;
+	};
+
+	virtio0mem: virtio0mem@10050200 {
+		compatible = "shared-dma-pool";
+		reg = <0x10050200 0x7e00>;
+		no-map;
+	};
+
 	mcuram: mcuram@30000000 {
 		compatible = "shared-dma-pool";
 		reg = <0x30000000 0x40000>;
@@ -454,6 +466,8 @@
 &ipcc {
 	status = "okay";
+	#interrupt-cells = <1>;
+	interrupt-controller;
 };
 
 &iwdg2 {
@@ -476,12 +490,25 @@
 &m4_rproc {
 	memory-region = <&retram>, <&mcuram>, <&mcuram2>, <&vdev0vring0>,
-			<&vdev0vring1>, <&vdev0buffer>;
+			<&vdev0vring1>, <&vdev0buffer>, <&mmio0>;
 	mboxes = <&ipcc 0>, <&ipcc 1>, <&ipcc 2>, <&ipcc 3>;
 	mbox-names = "vq0", "vq1", "shutdown", "detach";
 	interrupt-parent = <&exti>;
 	interrupts = <68 1>;
 	status = "okay";
+
+	ranges;
+	#address-cells = <1>;
+	#size-cells = <1>;
+
+	mmio@10050000 {
+		compatible = "virtio,mmio-remoteproc";
+		reg = <0x10050000 0x100>;
+		interrupts-extended = <&ipcc 4>;
+		memory-region = <&virtio0mem>;
+		mboxes = <&ipcc 5>;
+		mbox-names = "sync_mb";
+	};
 };
 
 &pwr_regulators {
diff --git a/drivers/i2c/busses/i2c-virtio.c b/drivers/i2c/busses/i2c-virtio.c
index 4b9536f508006d..f58535f31dd62c 100644
--- a/drivers/i2c/busses/i2c-virtio.c
+++ b/drivers/i2c/busses/i2c-virtio.c
@@ -18,6 +18,7 @@
 #include <linux/virtio_ids.h>
 #include <linux/virtio_config.h>
 #include <linux/virtio_i2c.h>
+#include <linux/dma-mapping.h>
 
 /**
  * struct virtio_i2c - virtio I2C data
@@ -39,7 +40,9 @@
  * @in_hdr: the IN header of the virtio I2C message
+ * @dma_handle: DMA address of @buf, as returned by dma_alloc_coherent()
  */
 struct virtio_i2c_req {
-	struct completion completion;
+	struct completion *completion;
+	dma_addr_t dma_handle;
 	struct virtio_i2c_out_hdr out_hdr	____cacheline_aligned;
 	uint8_t *buf				____cacheline_aligned;
 	struct virtio_i2c_in_hdr in_hdr		____cacheline_aligned;
@@ -51,7 +53,43 @@ static void virtio_i2c_msg_done(struct virtqueue *vq)
 	struct virtio_i2c_req *req;
 	unsigned int len;
 
 	while ((req = virtqueue_get_buf(vq, &len)))
-		complete(&req->completion);
+		complete(req->completion);
+}
+
+static void virtio_sg_init(struct scatterlist *sg, void *cpu_addr,
+			   unsigned int len)
+{
+	if (likely(is_vmalloc_addr(cpu_addr))) {
+		sg_init_table(sg, 1);
+		sg_set_page(sg, vmalloc_to_page(cpu_addr), len,
+			    offset_in_page(cpu_addr));
+	} else {
+		WARN_ON(!virt_addr_valid(cpu_addr));
+		sg_init_one(sg, cpu_addr, len);
+	}
+}
+
+static void get_dma_buffer(struct device *dma_dev, struct virtio_i2c_req *req,
+			   struct i2c_msg *msg)
+{
+	req->buf = dma_alloc_coherent(dma_dev, msg->len, &req->dma_handle,
+				      GFP_KERNEL);
+	if (req->buf)
+		memcpy(req->buf, msg->buf, msg->len);
+}
+
+static void put_dma_buffer(struct device *dma_dev, struct virtio_i2c_req *req,
+			   struct i2c_msg *msg, bool xferred)
+{
+	/* Zero-length messages never got a buffer allocated */
+	if (!req->buf)
+		return;
+
+	/* Only copy data back to the caller for successfully transferred reads */
+	if (xferred && (msg->flags & I2C_M_RD))
+		memcpy(msg->buf, req->buf, msg->len);
+
+	dma_free_coherent(dma_dev, msg->len, req->buf, req->dma_handle);
 }
 
 static int virtio_i2c_prepare_reqs(struct virtqueue *vq,
@@ -64,7 +94,12 @@ static int virtio_i2c_prepare_reqs(struct virtqueue *vq,
 	for (i = 0; i < num; i++) {
 		int outcnt = 0, incnt = 0;
 
-		init_completion(&reqs[i].completion);
+		reqs[i].completion = kzalloc(sizeof(*reqs[i].completion),
+					     GFP_KERNEL);
+		if (!reqs[i].completion)
+			break;
+
+		init_completion(reqs[i].completion);
 
 		/*
 		 * Only 7-bit mode supported for this moment. For the address
@@ -78,15 +112,17 @@ static int virtio_i2c_prepare_reqs(struct virtqueue *vq,
 		if (i != num - 1)
 			reqs[i].out_hdr.flags |= cpu_to_le32(VIRTIO_I2C_FLAGS_FAIL_NEXT);
 
-		sg_init_one(&out_hdr, &reqs[i].out_hdr, sizeof(reqs[i].out_hdr));
+		virtio_sg_init(&out_hdr, &reqs[i].out_hdr, sizeof(reqs[i].out_hdr));
 		sgs[outcnt++] = &out_hdr;
 
 		if (msgs[i].len) {
-			reqs[i].buf = i2c_get_dma_safe_msg_buf(&msgs[i], 1);
-			if (!reqs[i].buf)
+			get_dma_buffer(vq->vdev->dev.parent, &reqs[i], &msgs[i]);
+			if (!reqs[i].buf) {
+				kfree(reqs[i].completion);
 				break;
+			}
 
-			sg_init_one(&msg_buf, reqs[i].buf, msgs[i].len);
+			virtio_sg_init(&msg_buf, reqs[i].buf, msgs[i].len);
 
 			if (msgs[i].flags & I2C_M_RD)
 				sgs[outcnt + incnt++] = &msg_buf;
@@ -94,11 +131,12 @@ static int virtio_i2c_prepare_reqs(struct virtqueue *vq,
 				sgs[outcnt++] = &msg_buf;
 		}
 
-		sg_init_one(&in_hdr, &reqs[i].in_hdr, sizeof(reqs[i].in_hdr));
+		virtio_sg_init(&in_hdr, &reqs[i].in_hdr, sizeof(reqs[i].in_hdr));
 		sgs[outcnt + incnt++] = &in_hdr;
 
 		if (virtqueue_add_sgs(vq, sgs, outcnt, incnt, &reqs[i], GFP_KERNEL)) {
-			i2c_put_dma_safe_msg_buf(reqs[i].buf, &msgs[i], false);
+			put_dma_buffer(vq->vdev->dev.parent, &reqs[i], &msgs[i], false);
+			kfree(reqs[i].completion);
 			break;
 		}
 	}
@@ -116,12 +156,13 @@ static int virtio_i2c_complete_reqs(struct virtqueue *vq,
 	for (i = 0; i < num; i++) {
 		struct virtio_i2c_req *req = &reqs[i];
 
-		wait_for_completion(&req->completion);
+		wait_for_completion(req->completion);
 
 		if (!failed && req->in_hdr.status != VIRTIO_I2C_MSG_OK)
 			failed = true;
 
-		i2c_put_dma_safe_msg_buf(reqs[i].buf, &msgs[i], !failed);
+		put_dma_buffer(vq->vdev->dev.parent, req, &msgs[i], !failed);
+		kfree(req->completion);
 
 		if (!failed)
 			j++;
@@ -137,8 +179,12 @@ static int virtio_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
 	struct virtqueue *vq = vi->vq;
 	struct virtio_i2c_req *reqs;
 	int count;
+	dma_addr_t dma_handle;
 
-	reqs = kcalloc(num, sizeof(*reqs), GFP_KERNEL);
+	reqs = dma_alloc_coherent(vq->vdev->dev.parent,
+				  num * sizeof(*reqs),
+				  &dma_handle, GFP_KERNEL);
 	if (!reqs)
 		return -ENOMEM;
 
@@ -159,7 +205,9 @@ static int virtio_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
 	count = virtio_i2c_complete_reqs(vq, reqs, msgs, count);
 
 err_free:
-	kfree(reqs);
+	dma_free_coherent(vq->vdev->dev.parent,
+			  num * sizeof(*reqs),
+			  reqs, dma_handle);
 	return count;
 }
diff --git a/drivers/mailbox/stm32-ipcc.c b/drivers/mailbox/stm32-ipcc.c
index 15d538fe2113b0..0777f571dc0cef 100644
--- a/drivers/mailbox/stm32-ipcc.c
+++ b/drivers/mailbox/stm32-ipcc.c
@@ -9,10 +9,12 @@
 #include <linux/clk.h>
 #include <linux/interrupt.h>
 #include <linux/io.h>
+#include <linux/irqdomain.h>
 #include <linux/mailbox_controller.h>
 #include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/pm_wakeirq.h>
+#include <linux/workqueue.h>
 
 #define IPCC_XCR	0x000
 #define XCR_RXOIE	BIT(0)
@@ -45,11 +47,22 @@ enum {
 	IPCC_IRQ_NUM,
 };
 
+struct stm32_ipcc_ch {
+	struct mbox_chan *mbox;
+	struct work_struct rx_work;
+	unsigned long chan;
+	u32 irq_mask;	/* 1: channel IRQ masked, 0: unmasked */
+	bool irq_ctx;	/* true: client is called in interrupt context */
+};
+
 struct stm32_ipcc {
 	struct mbox_controller controller;
 	void __iomem *reg_base;
 	void __iomem *reg_proc;
 	struct clk *clk;
+	struct stm32_ipcc_ch *chnl;
+	struct irq_domain *irqd;
+	struct workqueue_struct *workqueue;
 	spinlock_t lock; /* protect access to IPCC registers */
 	int irqs[IPCC_IRQ_NUM];
 	u32 proc_id;
@@ -59,7 +72,7 @@ struct stm32_ipcc {
 };
 
 static inline void stm32_ipcc_set_bits(spinlock_t *lock, void __iomem *reg,
-				       u32 mask)
+					u32 mask)
 {
 	unsigned long flags;
 
@@ -69,7 +82,7 @@ static inline void stm32_ipcc_set_bits(spinlock_t *lock, void __iomem *reg,
 }
 
 static inline void stm32_ipcc_clr_bits(spinlock_t *lock, void __iomem *reg,
-				       u32 mask)
+					u32 mask)
 {
 	unsigned long flags;
 
@@ -78,10 +91,21 @@ static inline void stm32_ipcc_clr_bits(spinlock_t *lock, void __iomem *reg,
 	spin_unlock_irqrestore(lock, flags);
 }
 
+/*
+ * Message receiver (workqueue context)
+ */
+static void stm32_ipcc_rx_work(struct work_struct *work)
+{
+	struct stm32_ipcc_ch *chnl = container_of(work, struct stm32_ipcc_ch, rx_work);
+
+	mbox_chan_received_data(chnl->mbox, NULL);
+}
+
 static irqreturn_t stm32_ipcc_rx_irq(int irq, void *data)
 {
 	struct stm32_ipcc *ipcc = data;
 	struct device *dev = ipcc->controller.dev;
+	struct stm32_ipcc_ch *chnl;
 	u32 status, mr, tosr, chan;
 	irqreturn_t ret = IRQ_NONE;
 	int proc_offset;
@@ -100,10 +124,27 @@ static irqreturn_t stm32_ipcc_rx_irq(int irq, void *data)
 
 		dev_dbg(dev, "%s: chan:%d rx\n", __func__, chan);
 
-		mbox_chan_received_data(&ipcc->controller.chans[chan], NULL);
+		chnl = &ipcc->chnl[chan];
+
+		/*
+		 * Depending on the DT configuration, either call the client
+		 * directly in interrupt context or defer to the workqueue so
+		 * the callback runs in normal context.
+		 */
+		if (chnl->irq_ctx) {
+			if (!chnl->irq_mask) {
+				local_irq_disable();
+				handle_simple_irq(irq_to_desc(irq_find_mapping(ipcc->irqd,
+									       chnl->chan)));
+				local_irq_enable();
+			}
+		} else {
+			dev_dbg(dev, "%s: WK chan:%d rx\n", __func__, chan);
+			queue_work(ipcc->workqueue, &chnl->rx_work);
+		}
 
 		stm32_ipcc_set_bits(&ipcc->lock, ipcc->reg_proc + IPCC_XSCR,
-				    RX_BIT_CHAN(chan));
+				     RX_BIT_CHAN(chan));
 
 		ret = IRQ_HANDLED;
 	}
@@ -132,7 +173,7 @@ static irqreturn_t stm32_ipcc_tx_irq(int irq, void *data)
 
 		/* mask 'tx channel free' interrupt */
 		stm32_ipcc_set_bits(&ipcc->lock, ipcc->reg_proc + IPCC_XMR,
-				    TX_BIT_CHAN(chan));
+				     TX_BIT_CHAN(chan));
 
 		mbox_chan_txdone(&ipcc->controller.chans[chan], 0);
 
@@ -142,28 +183,83 @@ static irqreturn_t stm32_ipcc_tx_irq(int irq, void *data)
 	return ret;
 }
 
+static void stm32_ipcc_mask_irq(struct irq_data *d)
+{
+	struct stm32_ipcc_ch *chn = irq_data_get_irq_chip_data(d);
+
+	chn->irq_mask = 1;
+}
+
+static void stm32_ipcc_unmask_irq(struct irq_data *d)
+{
+	struct stm32_ipcc_ch *chn = irq_data_get_irq_chip_data(d);
+
+	chn->irq_mask = 0;
+}
+
+static struct irq_chip stm32_ipcc_chip = {
+	.name = "irq-rpmsg",
+	.irq_mask = stm32_ipcc_mask_irq,
+	.irq_unmask = stm32_ipcc_unmask_irq,
+};
+
+static int stm32_ipcc_map(struct irq_domain *d,
+			  unsigned int virq, irq_hw_number_t hw)
+{
+	struct stm32_ipcc *ipcc = d->host_data;
+	struct mbox_controller *mbox = &ipcc->controller;
+	struct stm32_ipcc_ch *chnl;
+
+	if (hw >= ipcc->n_chans)
+		return -EINVAL;
+
+	chnl = &ipcc->chnl[hw];
+
+	if (chnl->mbox) {
+		dev_err(mbox->dev, "chan:%lu already used\n", chnl->chan);
+		return -EINVAL;
+	}
+
+	chnl->mbox = &mbox->chans[hw];
+	chnl->chan = hw;
+	/* Clients mapped through the IRQ domain are called in IRQ context */
+	chnl->irq_ctx = true;
+
+	irq_set_status_flags(virq, IRQ_LEVEL);
+	irq_set_chip_and_handler(virq, &stm32_ipcc_chip, handle_level_irq);
+	irq_set_chip_data(virq, chnl);
+
+	/* unmask 'rx channel occupied' interrupt */
+	stm32_ipcc_clr_bits(&ipcc->lock, ipcc->reg_proc + IPCC_XMR,
+			    RX_BIT_CHAN(hw));
+
+	return 0;
+}
+
 static int stm32_ipcc_send_data(struct mbox_chan *link, void *data)
 {
-	unsigned long chan = (unsigned long)link->con_priv;
+	struct stm32_ipcc_ch *chnl = (struct stm32_ipcc_ch *)link->con_priv;
+	unsigned long chan = chnl->chan;
 	struct stm32_ipcc *ipcc = container_of(link->mbox, struct stm32_ipcc,
-					       controller);
+						controller);
 
 	dev_dbg(ipcc->controller.dev, "%s: chan:%lu\n", __func__, chan);
 
 	/* set channel n occupied */
 	stm32_ipcc_set_bits(&ipcc->lock, ipcc->reg_proc + IPCC_XSCR,
-			    TX_BIT_CHAN(chan));
+			     TX_BIT_CHAN(chan));
 
 	/* unmask 'tx channel free' interrupt */
 	stm32_ipcc_clr_bits(&ipcc->lock, ipcc->reg_proc + IPCC_XMR,
-			    TX_BIT_CHAN(chan));
+			     TX_BIT_CHAN(chan));
 
 	return 0;
 }
 
 static int stm32_ipcc_startup(struct mbox_chan *link)
 {
-	unsigned long chan = (unsigned long)link->con_priv;
+	struct stm32_ipcc_ch *chnl = (struct stm32_ipcc_ch *)link->con_priv;
+	unsigned long chan = chnl->chan;
 	struct stm32_ipcc *ipcc = container_of(link->mbox, struct stm32_ipcc,
 					       controller);
 	int ret;
@@ -183,7 +281,8 @@ static int stm32_ipcc_startup(struct mbox_chan *link)
 
 static void stm32_ipcc_shutdown(struct mbox_chan *link)
 {
-	unsigned long chan = (unsigned long)link->con_priv;
+	struct stm32_ipcc_ch *chnl = (struct stm32_ipcc_ch *)link->con_priv;
+	unsigned long chan = chnl->chan;
 	struct stm32_ipcc *ipcc = container_of(link->mbox, struct stm32_ipcc,
 					       controller);
 
@@ -191,6 +290,9 @@ static void stm32_ipcc_shutdown(struct mbox_chan *link)
 	stm32_ipcc_set_bits(&ipcc->lock, ipcc->reg_proc + IPCC_XMR,
 			    RX_BIT_CHAN(chan) | TX_BIT_CHAN(chan));
 
+	if (!chnl->irq_ctx)
+		flush_work(&chnl->rx_work);
+
 	clk_disable_unprepare(ipcc->clk);
 }
 
@@ -200,6 +302,31 @@ static const struct mbox_chan_ops stm32_ipcc_ops = {
 	.startup = stm32_ipcc_startup,
 	.shutdown = stm32_ipcc_shutdown,
 };
 
+static struct mbox_chan *stm32_ipcc_xlate(struct mbox_controller *mbox,
+					  const struct of_phandle_args *sp)
+{
+	int ind = sp->args[0];
+	struct stm32_ipcc_ch *chnl;
+
+	if (ind >= mbox->num_chans)
+		return ERR_PTR(-EINVAL);
+
+	chnl = (struct stm32_ipcc_ch *)mbox->chans[ind].con_priv;
+	chnl->mbox = &mbox->chans[ind];
+	chnl->chan = ind;
+
+	/* Channels requested through the mbox API are served by the workqueue */
+	if (!chnl->irq_ctx)
+		INIT_WORK(&chnl->rx_work, stm32_ipcc_rx_work);
+
+	return &mbox->chans[ind];
+}
+
+static const struct irq_domain_ops stm32_ipcc_domain_ops = {
+	.map = stm32_ipcc_map,
+	.xlate = irq_domain_xlate_onecell,
+};
+
 static int stm32_ipcc_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
@@ -292,34 +419,62 @@ static int stm32_ipcc_probe(struct platform_device *pdev)
 	ipcc->controller.dev = dev;
 	ipcc->controller.txdone_irq = true;
 	ipcc->controller.ops = &stm32_ipcc_ops;
+	ipcc->controller.of_xlate = &stm32_ipcc_xlate;
 	ipcc->controller.num_chans = ipcc->n_chans;
 	ipcc->controller.chans = devm_kcalloc(dev, ipcc->controller.num_chans,
-					      sizeof(*ipcc->controller.chans),
-					      GFP_KERNEL);
+					       sizeof(*ipcc->controller.chans),
+					       GFP_KERNEL);
 	if (!ipcc->controller.chans) {
 		ret = -ENOMEM;
 		goto err_irq_wkp;
 	}
 
+	ipcc->chnl = devm_kcalloc(dev, ipcc->controller.num_chans,
+				  sizeof(*ipcc->chnl), GFP_KERNEL);
+	if (!ipcc->chnl) {
+		ret = -ENOMEM;
+		goto err_irq_wkp;
+	}
+
 	for (i = 0; i < ipcc->controller.num_chans; i++)
-		ipcc->controller.chans[i].con_priv = (void *)i;
+		ipcc->controller.chans[i].con_priv = &ipcc->chnl[i];
+
+	ipcc->irqd = irq_domain_create_linear(dev->fwnode, ipcc->n_chans,
+					      &stm32_ipcc_domain_ops, ipcc);
+	if (!ipcc->irqd) {
+		dev_err(dev, "Failed to create IRQ domain\n");
+		ret = -ENOMEM;
+		goto err_irq_wkp;
+	}
+
+	/* The workqueue must exist before clients can request channels */
+	ipcc->workqueue = create_workqueue(dev_name(dev));
+	if (!ipcc->workqueue) {
+		dev_err(dev, "cannot create workqueue\n");
+		ret = -ENOMEM;
+		goto err_irq_domain;
+	}
 
 	ret = devm_mbox_controller_register(dev, &ipcc->controller);
 	if (ret)
-		goto err_irq_wkp;
+		goto err_wkq;
 
 	platform_set_drvdata(pdev, ipcc);
 
 	ip_ver = readl_relaxed(ipcc->reg_base + IPCC_VER);
 
 	dev_info(dev, "ipcc rev:%ld.%ld enabled, %d chans, proc %d\n",
-		 FIELD_GET(VER_MAJREV_MASK, ip_ver),
-		 FIELD_GET(VER_MINREV_MASK, ip_ver),
-		 ipcc->controller.num_chans, ipcc->proc_id);
+		  FIELD_GET(VER_MAJREV_MASK, ip_ver),
+		  FIELD_GET(VER_MINREV_MASK, ip_ver),
+		  ipcc->controller.num_chans, ipcc->proc_id);
 
 	clk_disable_unprepare(ipcc->clk);
 
 	return 0;
 
+err_wkq:
+	destroy_workqueue(ipcc->workqueue);
+err_irq_domain:
+	irq_domain_remove(ipcc->irqd);
 err_irq_wkp:
 	if (of_property_read_bool(np, "wakeup-source"))
 		dev_pm_clear_wake_irq(dev);
@@ -333,10 +484,15 @@ static int stm32_ipcc_probe(struct platform_device *pdev)
 static int stm32_ipcc_remove(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
+	struct stm32_ipcc *ipcc = dev_get_drvdata(dev);
+
+	irq_domain_remove(ipcc->irqd);
 
 	if (of_property_read_bool(dev->of_node, "wakeup-source"))
 		dev_pm_clear_wake_irq(&pdev->dev);
 
+	destroy_workqueue(ipcc->workqueue);
+
 	device_set_wakeup_capable(dev, false);
 
 	return 0;
diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c
index 695cce218e8c64..2fba93340c3bb2 100644
--- a/drivers/remoteproc/remoteproc_core.c
+++ b/drivers/remoteproc/remoteproc_core.c
@@ -38,6 +38,7 @@
 #include <linux/virtio_ring.h>
 #include <asm/byteorder.h>
 #include <linux/platform_device.h>
+#include <linux/of_platform.h>
 
 #include "remoteproc_internal.h"
 
@@ -1263,6 +1264,27 @@ void rproc_resource_cleanup(struct rproc *rproc)
 }
 EXPORT_SYMBOL(rproc_resource_cleanup);
 
+static const struct of_device_id rproc_child_match_table[] = {
+	{ .compatible = "virtio,mmio", },
+	{ .compatible = "virtio,mmio-remoteproc", },
+	{ }
+};
+
+static int rproc_platform_populate(struct rproc *rproc)
+{
+	struct device *dev = rproc->dev.parent;
+	int ret;
+
+	ret = of_platform_populate(dev->of_node, rproc_child_match_table, NULL, dev);
+	if (ret < 0) {
+		dev_err(dev, "failed to populate child devices (%d)\n", ret);
+		/* remove any children that were partially populated */
+		of_platform_depopulate(dev);
+		return ret;
+	}
+
+	return 0;
+}
+
 static int rproc_start(struct rproc *rproc, const struct firmware *fw)
 {
 	struct resource_table *loaded_table;
@@ -1304,6 +1330,11 @@ static int rproc_start(struct rproc *rproc, const struct firmware *fw)
 		goto unprepare_subdevices;
 	}
 
+	/* Probe the child devices (e.g. virtio-mmio) supplied by the rproc */
+	ret = rproc_platform_populate(rproc);
+	if (ret)
+		goto stop_rproc;
+
 	/* Start any subdevices for the remote processor */
 	ret = rproc_start_subdevices(rproc);
 	if (ret) {
@@ -1320,6 +1351,7 @@ static int rproc_start(struct rproc *rproc, const struct firmware *fw)
 
 stop_rproc:
 	rproc->ops->stop(rproc);
+	of_platform_depopulate(rproc->dev.parent);
 unprepare_subdevices:
 	rproc_unprepare_subdevices(rproc);
 reset_table_ptr:
@@ -1348,6 +1381,11 @@ static int __rproc_attach(struct rproc *rproc)
 		goto unprepare_subdevices;
 	}
 
+	/* Probe the child devices (e.g. virtio-mmio) supplied by the rproc */
+	ret = rproc_platform_populate(rproc);
+	if (ret)
+		goto stop_rproc;
+
 	/* Start any subdevices for the remote processor */
 	ret = rproc_start_subdevices(rproc);
 	if (ret) {
@@ -1364,6 +1402,7 @@ static int __rproc_attach(struct rproc *rproc)
 
 stop_rproc:
 	rproc->ops->stop(rproc);
+	of_platform_depopulate(rproc->dev.parent);
 unprepare_subdevices:
 	rproc_unprepare_subdevices(rproc);
 out:
@@ -1722,6 +1762,8 @@ static int rproc_stop(struct rproc *rproc, bool crashed)
 		return ret;
 	}
 
+	of_platform_depopulate(rproc->dev.parent);
+
 	/* power off the remote processor */
 	ret = rproc->ops->stop(rproc);
@@ -1761,6 +1802,8 @@ static int __rproc_detach(struct rproc *rproc)
 		return ret;
 	}
 
+	of_platform_depopulate(rproc->dev.parent);
+
 	/* Tell the remote processor the core isn't available anymore */
 	ret = rproc->ops->detach(rproc);
 	if (ret) {
@@ -2575,6 +2618,8 @@ int rproc_del(struct rproc *rproc)
 	/* Ensure that no readers of rproc_list are still active */
 	synchronize_rcu();
 
+	of_platform_depopulate(rproc->dev.parent);
+
 	device_del(&rproc->dev);
 	rproc_char_device_remove(rproc);
 
diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c
index 97760f61129594..5924b3c4af8488 100644
--- a/drivers/virtio/virtio_mmio.c
+++ b/drivers/virtio/virtio_mmio.c
@@ -70,8 +70,10 @@
 #include <linux/virtio_config.h>
 #include <uapi/linux/virtio_mmio.h>
 #include <linux/virtio_ring.h>
-
-
+#include <linux/dma-map-ops.h>
+#include <linux/mailbox_client.h>
+#include <linux/of.h>
+#include <linux/of_reserved_mem.h>
 
 /* The alignment to use between consumer and producer parts of vring.
  * Currently hardcoded to the page size. */
@@ -86,6 +88,11 @@ struct virtio_mmio_device {
 	struct virtio_device vdev;
 	struct platform_device *pdev;
 
+	/* Optional kick to a remote processor backing the MMIO registers */
+	void (*interrupt_cb)(struct virtio_mmio_device *vm_dev);
+	struct mbox_chan *chan;
+	struct mbox_client cl;
+
 	void __iomem *base;
 	unsigned long version;
@@ -102,7 +108,12 @@ struct virtio_mmio_vq_info {
 	struct list_head node;
 };
 
-
+/* Notify the remote side that an MMIO register has been written */
+static void vm_send_it(struct virtio_mmio_device *vm_dev)
+{
+	if (vm_dev->interrupt_cb)
+		vm_dev->interrupt_cb(vm_dev);
+}
 
 /* Configuration interface */
 
@@ -112,10 +122,12 @@ static u64 vm_get_features(struct virtio_device *vdev)
 	u64 features;
 
 	writel(1, vm_dev->base + VIRTIO_MMIO_DEVICE_FEATURES_SEL);
+	vm_send_it(vm_dev);
 	features = readl(vm_dev->base + VIRTIO_MMIO_DEVICE_FEATURES);
 	features <<= 32;
 
 	writel(0, vm_dev->base + VIRTIO_MMIO_DEVICE_FEATURES_SEL);
+	vm_send_it(vm_dev);
 	features |= readl(vm_dev->base + VIRTIO_MMIO_DEVICE_FEATURES);
 
 	return features;
@@ -139,10 +151,12 @@ static int vm_finalize_features(struct virtio_device *vdev)
 	writel((u32)(vdev->features >> 32),
 	       vm_dev->base + VIRTIO_MMIO_DRIVER_FEATURES);
+	vm_send_it(vm_dev);
 
 	writel(0, vm_dev->base + VIRTIO_MMIO_DRIVER_FEATURES_SEL);
 	writel((u32)vdev->features,
 	       vm_dev->base + VIRTIO_MMIO_DRIVER_FEATURES);
+	vm_send_it(vm_dev);
 
 	return 0;
 }
@@ -261,6 +275,7 @@ static void vm_set_status(struct virtio_device *vdev, u8 status)
 	 * before writing to the MMIO region.
 	 */
 	writel(status, vm_dev->base + VIRTIO_MMIO_STATUS);
+	vm_send_it(vm_dev);
 }
 
 static void vm_reset(struct virtio_device *vdev)
@@ -269,6 +284,7 @@ static void vm_reset(struct virtio_device *vdev)
 
 	/* 0 status means a reset. */
 	writel(0, vm_dev->base + VIRTIO_MMIO_STATUS);
+	vm_send_it(vm_dev);
 }
 
@@ -283,6 +299,7 @@ static bool vm_notify(struct virtqueue *vq)
 
 	/* We write the queue's selector into the notification register to
 	 * signal the other end */
 	writel(vq->index, vm_dev->base + VIRTIO_MMIO_QUEUE_NOTIFY);
+	vm_send_it(vm_dev);
 	return true;
 }
 
@@ -341,8 +358,10 @@ static void vm_del_vq(struct virtqueue *vq)
 	writel(index, vm_dev->base + VIRTIO_MMIO_QUEUE_SEL);
 	if (vm_dev->version == 1) {
 		writel(0, vm_dev->base + VIRTIO_MMIO_QUEUE_PFN);
+		vm_send_it(vm_dev);
 	} else {
 		writel(0, vm_dev->base + VIRTIO_MMIO_QUEUE_READY);
+		vm_send_it(vm_dev);
 		WARN_ON(readl(vm_dev->base + VIRTIO_MMIO_QUEUE_READY));
 	}
 
@@ -391,6 +410,7 @@ static struct virtqueue *vm_setup_vq(struct virtio_device *vdev, unsigned int in
 
 	/* Select the queue we're interested in */
 	writel(index, vm_dev->base + VIRTIO_MMIO_QUEUE_SEL);
+	vm_send_it(vm_dev);
 
 	/* Queue shouldn't already be set up. */
 	if (readl(vm_dev->base + (vm_dev->version == 1 ?
@@ -442,6 +462,7 @@ static struct virtqueue *vm_setup_vq(struct virtio_device *vdev, unsigned int in
 
 		writel(PAGE_SIZE, vm_dev->base + VIRTIO_MMIO_QUEUE_ALIGN);
 		writel(q_pfn, vm_dev->base + VIRTIO_MMIO_QUEUE_PFN);
+		vm_send_it(vm_dev);
 	} else {
 		u64 addr;
 
@@ -450,17 +471,24 @@ static struct virtqueue *vm_setup_vq(struct virtio_device *vdev, unsigned int in
 		writel((u32)(addr >> 32),
 		       vm_dev->base + VIRTIO_MMIO_QUEUE_DESC_HIGH);
+		dev_dbg(&vdev->dev, "desc addr: 0x%llx\n", (unsigned long long)addr);
+
 		addr = virtqueue_get_avail_addr(vq);
 		writel((u32)addr, vm_dev->base + VIRTIO_MMIO_QUEUE_AVAIL_LOW);
 		writel((u32)(addr >> 32),
 		       vm_dev->base + VIRTIO_MMIO_QUEUE_AVAIL_HIGH);
+		dev_dbg(&vdev->dev, "avail addr: 0x%llx\n", (unsigned long long)addr);
+
 		addr = virtqueue_get_used_addr(vq);
 		writel((u32)addr, vm_dev->base + VIRTIO_MMIO_QUEUE_USED_LOW);
 		writel((u32)(addr >> 32),
 		       vm_dev->base + VIRTIO_MMIO_QUEUE_USED_HIGH);
+		dev_dbg(&vdev->dev, "used addr: 0x%llx\n", (unsigned long long)addr);
+
 		writel(1, vm_dev->base + VIRTIO_MMIO_QUEUE_READY);
+		vm_send_it(vm_dev);
 	}
 
 	vq->priv = info;
@@ -542,6 +570,7 @@ static bool vm_get_shm_region(struct virtio_device *vdev,
 
 	/* Select the region we're interested in */
 	writel(id, vm_dev->base + VIRTIO_MMIO_SHM_SEL);
+	vm_send_it(vm_dev);
 	/* Read the region size */
 	len = (u64) readl(vm_dev->base + VIRTIO_MMIO_SHM_LEN_LOW);
 	len |= (u64) readl(vm_dev->base + VIRTIO_MMIO_SHM_LEN_HIGH) << 32;
@@ -611,7 +640,58 @@ static void virtio_mmio_release_dev(struct device *_d)
 	kfree(vm_dev);
 }
 
-/* Platform device */
+static int virtio_mmio_of_parse_mem(struct virtio_mmio_device *rvdev)
+{
+	struct device *dev = &rvdev->pdev->dev;
+	struct of_phandle_iterator it;
+	struct reserved_mem *rmem;
+
+	/* Register the first associated reserved memory region, if any */
+	of_phandle_iterator_init(&it, dev->of_node, "memory-region", NULL, 0);
+	while (of_phandle_iterator_next(&it) == 0) {
+		rmem = of_reserved_mem_lookup(it.node);
+		if (!rmem) {
+			dev_err(dev, "unable to acquire memory-region %s\n",
+				it.node->name);
+			continue;
+		}
+
+		return dma_declare_coherent_memory(dev, rmem->base, rmem->base,
+						   rmem->size);
+	}
+
+	return 0;
+}
+
+static void mmio_remoteproc_interrupt(struct virtio_mmio_device *vm_dev)
+{
+	if (mbox_send_message(vm_dev->chan, "interrupt") < 0)
+		dev_err(&vm_dev->pdev->dev, "failed to send mailbox message\n");
+}
+
+static int mmio_rproc_request_mbox(struct virtio_mmio_device *vm_dev)
+{
+	struct platform_device *pdev = vm_dev->pdev;
+	struct device *dev = &pdev->dev;
+	const char *name = "sync_mb";
+
+	vm_dev->cl.tx_block = true;
+	vm_dev->cl.tx_done = NULL;
+	vm_dev->cl.tx_tout = 500;
+	vm_dev->cl.dev = dev;
+	/*
+	 * The mailbox RX direction is handled by the remote processor in
+	 * interrupt context, no RX callback is needed on this side.
+	 */
+	vm_dev->cl.rx_callback = NULL;
+
+	vm_dev->chan = mbox_request_channel_byname(&vm_dev->cl, name);
+	if (IS_ERR(vm_dev->chan))
+		return dev_err_probe(dev, PTR_ERR(vm_dev->chan),
+				     "failed to request mailbox %s\n", name);
+
+	return 0;
+}
 
 static int virtio_mmio_probe(struct platform_device *pdev)
 {
@@ -634,6 +721,13 @@ static int virtio_mmio_probe(struct platform_device *pdev)
 	if (IS_ERR(vm_dev->base))
 		return PTR_ERR(vm_dev->base);
 
+	if (of_device_is_compatible(pdev->dev.of_node, "virtio,mmio-remoteproc")) {
+		vm_dev->interrupt_cb = mmio_remoteproc_interrupt;
+		rc = mmio_rproc_request_mbox(vm_dev);
+		if (rc)
+			return rc;
+	}
+
 	/* Check magic value */
 	magic = readl(vm_dev->base + VIRTIO_MMIO_MAGIC_VALUE);
 	if (magic != ('v' | 'i' << 8 | 'r' << 16 | 't' << 24)) {
@@ -659,6 +754,12 @@ static int virtio_mmio_probe(struct platform_device *pdev)
 	}
 	vm_dev->vdev.id.vendor = readl(vm_dev->base + VIRTIO_MMIO_VENDOR_ID);
 
+	rc = virtio_mmio_of_parse_mem(vm_dev);
+	if (rc) {
+		mbox_free_channel(vm_dev->chan);
+		return rc;
+	}
+
 	if (vm_dev->version == 1) {
 		writel(PAGE_SIZE, vm_dev->base + VIRTIO_MMIO_GUEST_PAGE_SIZE);
 
@@ -691,6 +791,8 @@ static int virtio_mmio_remove(struct platform_device *pdev)
 {
 	struct virtio_mmio_device *vm_dev = platform_get_drvdata(pdev);
 
 	unregister_virtio_device(&vm_dev->vdev);
+	mbox_free_channel(vm_dev->chan);
+	dma_release_coherent_memory(&vm_dev->pdev->dev);
 
 	return 0;
 }
@@ -824,7 +926,8 @@ static void vm_unregister_cmdline_devices(void)
 
 static const struct of_device_id virtio_mmio_match[] = {
 	{ .compatible = "virtio,mmio", },
+	{ .compatible = "virtio,mmio-remoteproc", },
 	{},
 };
 MODULE_DEVICE_TABLE(of, virtio_mmio_match);
diff --git a/include/linux/dma-direct.h b/include/linux/dma-direct.h
index 18aade195884d7..0e398d08d6a274 100644
--- a/include/linux/dma-direct.h
+++ b/include/linux/dma-direct.h
@@ -104,7 +104,8 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size,
 	if (addr == DMA_MAPPING_ERROR)
 		return false;
 	if (is_ram && !IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT) &&
-	    min(addr, end) < phys_to_dma(dev, PFN_PHYS(min_low_pfn)))
+	    phys_to_dma(dev, PFN_PHYS(min_low_pfn)) != DMA_MAPPING_ERROR &&
+	    min(addr, end) < phys_to_dma(dev, PFN_PHYS(min_low_pfn)))
 		return false;
 
 	return end <= min_not_zero(*dev->dma_mask, dev->bus_dma_limit);