From 158c39949029ac5bd6aaa4b59140f13d0d60594c Mon Sep 17 00:00:00 2001 From: Shreeya Patel Date: Mon, 15 Dec 2025 12:12:04 +0000 Subject: [PATCH 01/17] RDMA/mana_ib: Add device statistics support jira LE-4478 commit-author Shiraz Saleem commit baa640d924e55eee9210164ac068ad32dbd69c20 Add support for mana device level statistics. Co-developed-by: Solom Tamawy Signed-off-by: Solom Tamawy Signed-off-by: Shiraz Saleem Signed-off-by: Konstantin Taranov Link: https://patch.msgid.link/1749559717-3424-1-git-send-email-kotaranov@linux.microsoft.com Reviewed-by: Long Li Signed-off-by: Leon Romanovsky (cherry picked from commit baa640d924e55eee9210164ac068ad32dbd69c20) Signed-off-by: Shreeya Patel --- drivers/infiniband/hw/mana/counters.c | 60 ++++++++++++++++++++++++++- drivers/infiniband/hw/mana/counters.h | 10 +++++ drivers/infiniband/hw/mana/device.c | 6 +++ drivers/infiniband/hw/mana/mana_ib.h | 19 +++++++++ 4 files changed, 93 insertions(+), 2 deletions(-) diff --git a/drivers/infiniband/hw/mana/counters.c b/drivers/infiniband/hw/mana/counters.c index e533ce21013db..6a81365d3b951 100644 --- a/drivers/infiniband/hw/mana/counters.c +++ b/drivers/infiniband/hw/mana/counters.c @@ -34,6 +34,22 @@ static const struct rdma_stat_desc mana_ib_port_stats_desc[] = { [MANA_IB_CURRENT_RATE].name = "current_rate", }; +static const struct rdma_stat_desc mana_ib_device_stats_desc[] = { + [MANA_IB_SENT_CNPS].name = "sent_cnps", + [MANA_IB_RECEIVED_ECNS].name = "received_ecns", + [MANA_IB_RECEIVED_CNP_COUNT].name = "received_cnp_count", + [MANA_IB_QP_CONGESTED_EVENTS].name = "qp_congested_events", + [MANA_IB_QP_RECOVERED_EVENTS].name = "qp_recovered_events", + [MANA_IB_DEV_RATE_INC_EVENTS].name = "rate_inc_events", +}; + +struct rdma_hw_stats *mana_ib_alloc_hw_device_stats(struct ib_device *ibdev) +{ + return rdma_alloc_hw_stats_struct(mana_ib_device_stats_desc, + ARRAY_SIZE(mana_ib_device_stats_desc), + RDMA_HW_STATS_DEFAULT_LIFESPAN); +} + struct rdma_hw_stats *mana_ib_alloc_hw_port_stats(struct ib_device *ibdev, u32 port_num) { @@ -42,8 +58,39 @@ struct rdma_hw_stats *mana_ib_alloc_hw_port_stats(struct ib_device *ibdev, RDMA_HW_STATS_DEFAULT_LIFESPAN); } -int mana_ib_get_hw_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats, - u32 port_num, int index) +static int mana_ib_get_hw_device_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats) +{ + struct mana_ib_dev *mdev = container_of(ibdev, struct mana_ib_dev, + ib_dev); + struct mana_rnic_query_device_cntrs_resp resp = {}; + struct mana_rnic_query_device_cntrs_req req = {}; + int err; + + mana_gd_init_req_hdr(&req.hdr, MANA_IB_QUERY_DEVICE_COUNTERS, + sizeof(req), sizeof(resp)); + req.hdr.dev_id = mdev->gdma_dev->dev_id; + req.adapter = mdev->adapter_handle; + + err = mana_gd_send_request(mdev_to_gc(mdev), sizeof(req), &req, + sizeof(resp), &resp); + if (err) { + ibdev_err(&mdev->ib_dev, "Failed to query device counters err %d", + err); + return err; + } + + stats->value[MANA_IB_SENT_CNPS] = resp.sent_cnps; + stats->value[MANA_IB_RECEIVED_ECNS] = resp.received_ecns; + stats->value[MANA_IB_RECEIVED_CNP_COUNT] = resp.received_cnp_count; + stats->value[MANA_IB_QP_CONGESTED_EVENTS] = resp.qp_congested_events; + stats->value[MANA_IB_QP_RECOVERED_EVENTS] = resp.qp_recovered_events; + stats->value[MANA_IB_DEV_RATE_INC_EVENTS] = resp.rate_inc_events; + + return ARRAY_SIZE(mana_ib_device_stats_desc); +} + +static int mana_ib_get_hw_port_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats, + u32 port_num) { struct mana_ib_dev *mdev = 
container_of(ibdev, struct mana_ib_dev, ib_dev); @@ -103,3 +150,12 @@ int mana_ib_get_hw_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats, return ARRAY_SIZE(mana_ib_port_stats_desc); } + +int mana_ib_get_hw_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats, + u32 port_num, int index) +{ + if (!port_num) + return mana_ib_get_hw_device_stats(ibdev, stats); + else + return mana_ib_get_hw_port_stats(ibdev, stats, port_num); +} diff --git a/drivers/infiniband/hw/mana/counters.h b/drivers/infiniband/hw/mana/counters.h index 7ff92d27f6c3a..987a6fee83c94 100644 --- a/drivers/infiniband/hw/mana/counters.h +++ b/drivers/infiniband/hw/mana/counters.h @@ -37,8 +37,18 @@ enum mana_ib_port_counters { MANA_IB_CURRENT_RATE, }; +enum mana_ib_device_counters { + MANA_IB_SENT_CNPS, + MANA_IB_RECEIVED_ECNS, + MANA_IB_RECEIVED_CNP_COUNT, + MANA_IB_QP_CONGESTED_EVENTS, + MANA_IB_QP_RECOVERED_EVENTS, + MANA_IB_DEV_RATE_INC_EVENTS, +}; + struct rdma_hw_stats *mana_ib_alloc_hw_port_stats(struct ib_device *ibdev, u32 port_num); +struct rdma_hw_stats *mana_ib_alloc_hw_device_stats(struct ib_device *ibdev); int mana_ib_get_hw_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats, u32 port_num, int index); #endif /* _COUNTERS_H_ */ diff --git a/drivers/infiniband/hw/mana/device.c b/drivers/infiniband/hw/mana/device.c index 165c0a1e67d14..65d0af740e5a8 100644 --- a/drivers/infiniband/hw/mana/device.c +++ b/drivers/infiniband/hw/mana/device.c @@ -65,6 +65,10 @@ static const struct ib_device_ops mana_ib_stats_ops = { .get_hw_stats = mana_ib_get_hw_stats, }; +static const struct ib_device_ops mana_ib_device_stats_ops = { + .alloc_hw_device_stats = mana_ib_alloc_hw_device_stats, +}; + static int mana_ib_netdev_event(struct notifier_block *this, unsigned long event, void *ptr) { @@ -153,6 +157,8 @@ static int mana_ib_probe(struct auxiliary_device *adev, } ib_set_device_ops(&dev->ib_dev, &mana_ib_stats_ops); + if (dev->adapter_caps.feature_flags & MANA_IB_FEATURE_DEV_COUNTERS_SUPPORT) + ib_set_device_ops(&dev->ib_dev, &mana_ib_device_stats_ops); ret = mana_ib_create_eqs(dev); if (ret) { diff --git a/drivers/infiniband/hw/mana/mana_ib.h b/drivers/infiniband/hw/mana/mana_ib.h index 42bebd6cd4f7a..eddd0a83b97ee 100644 --- a/drivers/infiniband/hw/mana/mana_ib.h +++ b/drivers/infiniband/hw/mana/mana_ib.h @@ -210,6 +210,7 @@ enum mana_ib_command_code { MANA_IB_DESTROY_RC_QP = 0x3000b, MANA_IB_SET_QP_STATE = 0x3000d, MANA_IB_QUERY_VF_COUNTERS = 0x30022, + MANA_IB_QUERY_DEVICE_COUNTERS = 0x30023, }; struct mana_ib_query_adapter_caps_req { @@ -218,6 +219,7 @@ struct mana_ib_query_adapter_caps_req { enum mana_ib_adapter_features { MANA_IB_FEATURE_CLIENT_ERROR_CQE_SUPPORT = BIT(4), + MANA_IB_FEATURE_DEV_COUNTERS_SUPPORT = BIT(5), }; struct mana_ib_query_adapter_caps_resp { @@ -516,6 +518,23 @@ struct mana_rnic_query_vf_cntrs_resp { u64 current_rate; }; /* HW Data */ +struct mana_rnic_query_device_cntrs_req { + struct gdma_req_hdr hdr; + mana_handle_t adapter; +}; /* HW Data */ + +struct mana_rnic_query_device_cntrs_resp { + struct gdma_resp_hdr hdr; + u32 sent_cnps; + u32 received_ecns; + u32 reserved1; + u32 received_cnp_count; + u32 qp_congested_events; + u32 qp_recovered_events; + u32 rate_inc_events; + u32 reserved2; +}; /* HW Data */ + static inline struct gdma_context *mdev_to_gc(struct mana_ib_dev *mdev) { return mdev->gdma_dev->gdma_context; From b2fb715ad6db31f5c0f5781c9dd64d78f850b041 Mon Sep 17 00:00:00 2001 From: Shreeya Patel Date: Mon, 15 Dec 2025 13:01:57 +0000 Subject: [PATCH 02/17] PCI/MSI: 
Export pci_msix_prepare_desc() for dynamic MSI-X allocations jira LE-4467 commit-author Shradha Gupta commit 5da8a8b8090b5f79a816ba016af3a70a9d7287bf For supporting dynamic MSI-X vector allocation by PCI controllers, enabling the flag MSI_FLAG_PCI_MSIX_ALLOC_DYN is not enough, msix_prepare_msi_desc() to prepare the MSI descriptor is also needed. Export pci_msix_prepare_desc() to allow PCI controllers to support dynamic MSI-X vector allocation. Signed-off-by: Shradha Gupta Reviewed-by: Haiyang Zhang Reviewed-by: Thomas Gleixner Reviewed-by: Saurabh Sengar Acked-by: Bjorn Helgaas (cherry picked from commit 5da8a8b8090b5f79a816ba016af3a70a9d7287bf) Signed-off-by: Shreeya Patel --- drivers/pci/msi/irqdomain.c | 5 +++-- include/linux/msi.h | 2 ++ 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/drivers/pci/msi/irqdomain.c b/drivers/pci/msi/irqdomain.c index d7ba8795d60f8..43129aa6d6c75 100644 --- a/drivers/pci/msi/irqdomain.c +++ b/drivers/pci/msi/irqdomain.c @@ -222,13 +222,14 @@ static void pci_irq_unmask_msix(struct irq_data *data) pci_msix_unmask(irq_data_get_msi_desc(data)); } -static void pci_msix_prepare_desc(struct irq_domain *domain, msi_alloc_info_t *arg, - struct msi_desc *desc) +void pci_msix_prepare_desc(struct irq_domain *domain, msi_alloc_info_t *arg, + struct msi_desc *desc) { /* Don't fiddle with preallocated MSI descriptors */ if (!desc->pci.mask_base) msix_prepare_msi_desc(to_pci_dev(desc->dev), desc); } +EXPORT_SYMBOL_GPL(pci_msix_prepare_desc); static const struct msi_domain_template pci_msix_template = { .chip = { diff --git a/include/linux/msi.h b/include/linux/msi.h index 8d97b890faeca..b3d17b7ebdbec 100644 --- a/include/linux/msi.h +++ b/include/linux/msi.h @@ -679,6 +679,8 @@ struct irq_domain *pci_msi_create_irq_domain(struct fwnode_handle *fwnode, struct irq_domain *parent); u32 pci_msi_domain_get_msi_rid(struct irq_domain *domain, struct pci_dev *pdev); struct irq_domain *pci_msi_get_device_domain(struct pci_dev *pdev); +void pci_msix_prepare_desc(struct irq_domain *domain, msi_alloc_info_t *arg, + struct msi_desc *desc); #else /* CONFIG_PCI_MSI */ static inline struct irq_domain *pci_msi_get_device_domain(struct pci_dev *pdev) { From fffef290a9748d5ef37983079dcee6c02bf485a6 Mon Sep 17 00:00:00 2001 From: Shreeya Patel Date: Mon, 15 Dec 2025 13:02:20 +0000 Subject: [PATCH 03/17] PCI: hv: Allow dynamic MSI-X vector allocation jira LE-4467 commit-author Shradha Gupta commit ad518f2557b971976fc9d99a6a8cd2b453742bf9 Allow dynamic MSI-X vector allocation for pci_hyperv PCI controller by adding support for the flag MSI_FLAG_PCI_MSIX_ALLOC_DYN and using pci_msix_prepare_desc() to prepare the MSI-X descriptors. 
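As an illustration, a minimal sketch of how a driver can consume this (my_alloc_irqs() and nvec are hypothetical names; the static fallback path is omitted):

	#include <linux/pci.h>

	/* Hedged sketch: grab one MSI-X vector up front, then, if the
	 * platform supports dynamic allocation, add vectors one at a
	 * time once the required count is known.
	 */
	static int my_alloc_irqs(struct pci_dev *pdev, int nvec)
	{
		struct msi_map map;
		int i, ret;

		ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSIX);
		if (ret < 0)
			return ret;

		if (!pci_msix_can_alloc_dyn(pdev))
			return -EOPNOTSUPP;	/* static path omitted here */

		for (i = 1; i < nvec; i++) {
			map = pci_msix_alloc_irq_at(pdev, i, NULL);
			if (!map.virq)
				return map.index;	/* negative errno */
		}
		return 0;
	}
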
Feature support added for both x86 and ARM64 Signed-off-by: Shradha Gupta Reviewed-by: Haiyang Zhang Reviewed-by: Saurabh Sengar Acked-by: Bjorn Helgaas (cherry picked from commit ad518f2557b971976fc9d99a6a8cd2b453742bf9) Signed-off-by: Shreeya Patel --- drivers/pci/controller/pci-hyperv.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c index 37fac5a42587b..3312a9cb13889 100644 --- a/drivers/pci/controller/pci-hyperv.c +++ b/drivers/pci/controller/pci-hyperv.c @@ -2061,6 +2061,7 @@ static struct irq_chip hv_msi_irq_chip = { static struct msi_domain_ops hv_msi_ops = { .msi_prepare = hv_msi_prepare, .msi_free = hv_msi_free, + .prepare_desc = pci_msix_prepare_desc, }; /** @@ -2082,7 +2083,7 @@ static int hv_pcie_init_irq_domain(struct hv_pcibus_device *hbus) hbus->msi_info.ops = &hv_msi_ops; hbus->msi_info.flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | MSI_FLAG_MULTI_PCI_MSI | - MSI_FLAG_PCI_MSIX); + MSI_FLAG_PCI_MSIX | MSI_FLAG_PCI_MSIX_ALLOC_DYN); hbus->msi_info.handler = FLOW_HANDLER; hbus->msi_info.handler_name = FLOW_NAME; hbus->msi_info.data = hbus; From 23e1c96bfec5a8d87724ac9c9cd9c461b3728db3 Mon Sep 17 00:00:00 2001 From: Shreeya Patel Date: Mon, 15 Dec 2025 13:02:42 +0000 Subject: [PATCH 04/17] net: mana: explain irq_setup() algorithm jira LE-4467 commit-author Yury Norov commit 4607617af1b4747df0284ea8c1ddcecb21cae528 Commit 91bfe210e196 ("net: mana: add a function to spread IRQs per CPUs") added the irq_setup() function that distributes IRQs on CPUs according to a tricky heuristic. The corresponding commit message explains the heuristic. Duplicate it in the source code to make available for readers without digging git in history. Also, add more detailed explanation about how the heuristics is implemented. Signed-off-by: Yury Norov Signed-off-by: Shradha Gupta (cherry picked from commit 4607617af1b4747df0284ea8c1ddcecb21cae528) Signed-off-by: Shreeya Patel --- .../net/ethernet/microsoft/mana/gdma_main.c | 41 +++++++++++++++++++ 1 file changed, 41 insertions(+) diff --git a/drivers/net/ethernet/microsoft/mana/gdma_main.c b/drivers/net/ethernet/microsoft/mana/gdma_main.c index 7cb9360c05156..d072368127d6d 100644 --- a/drivers/net/ethernet/microsoft/mana/gdma_main.c +++ b/drivers/net/ethernet/microsoft/mana/gdma_main.c @@ -1445,6 +1445,47 @@ void mana_gd_free_res_map(struct gdma_resource *r) r->size = 0; } +/* + * Spread on CPUs with the following heuristics: + * + * 1. No more than one IRQ per CPU, if possible; + * 2. NUMA locality is the second priority; + * 3. Sibling dislocality is the last priority. + * + * Let's consider this topology: + * + * Node 0 1 + * Core 0 1 2 3 + * CPU 0 1 2 3 4 5 6 7 + * + * The most performant IRQ distribution based on the above topology + * and heuristics may look like this: + * + * IRQ Nodes Cores CPUs + * 0 1 0 0-1 + * 1 1 1 2-3 + * 2 1 0 0-1 + * 3 1 1 2-3 + * 4 2 2 4-5 + * 5 2 3 6-7 + * 6 2 2 4-5 + * 7 2 3 6-7 + * + * The heuristics is implemented as follows. + * + * The outer for_each() loop resets the 'weight' to the actual number + * of CPUs in the hop. Then inner for_each() loop decrements it by the + * number of sibling groups (cores) while assigning first set of IRQs + * to each group. IRQs 0 and 1 above are distributed this way. + * + * Now, because NUMA locality is more important, we should walk the + * same set of siblings and assign 2nd set of IRQs (2 and 3), and it's + * implemented by the medium while() loop. 
We do like this unless the + * number of IRQs assigned on this hop will not become equal to number + * of CPUs in the hop (weight == 0). Then we switch to the next hop and + * do the same thing. + */ + static int irq_setup(unsigned int *irqs, unsigned int len, int node) { const struct cpumask *next, *prev = cpu_none_mask; From 6109dd6ea2b5a17270d3009a93f6ef78dccc5ab8 Mon Sep 17 00:00:00 2001 From: Shreeya Patel Date: Mon, 15 Dec 2025 13:03:01 +0000 Subject: [PATCH 05/17] net: mana: Allow irq_setup() to skip cpus for affinity jira LE-4467 commit-author Shradha Gupta commit 845c62c543d6bd5d8b80f53835997789e4bb8e29 In order to prepare the MANA driver to allocate the MSI-X IRQs dynamically, we need to enhance irq_setup() to allow skipping affinitizing IRQs to the first CPU sibling group. This would be for cases when the number of IRQs is less than or equal to the number of online CPUs. In such cases for dynamically added IRQs the first CPU sibling group would already be affinitized with HWC IRQ. Signed-off-by: Shradha Gupta Reviewed-by: Haiyang Zhang Reviewed-by: Yury Norov [NVIDIA] (cherry picked from commit 845c62c543d6bd5d8b80f53835997789e4bb8e29) Signed-off-by: Shreeya Patel --- drivers/net/ethernet/microsoft/mana/gdma_main.c | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/microsoft/mana/gdma_main.c b/drivers/net/ethernet/microsoft/mana/gdma_main.c index d072368127d6d..e9c87ca27cc5c 100644 --- a/drivers/net/ethernet/microsoft/mana/gdma_main.c +++ b/drivers/net/ethernet/microsoft/mana/gdma_main.c @@ -1486,7 +1486,8 @@ void mana_gd_free_res_map(struct gdma_resource *r) * do the same thing. */ -static int irq_setup(unsigned int *irqs, unsigned int len, int node) +static int irq_setup(unsigned int *irqs, unsigned int len, int node, + bool skip_first_cpu) { const struct cpumask *next, *prev = cpu_none_mask; cpumask_var_t cpus __free(free_cpumask_var); @@ -1501,11 +1502,18 @@ static int irq_setup(unsigned int *irqs, unsigned int len, int node) while (weight > 0) { cpumask_andnot(cpus, next, prev); for_each_cpu(cpu, cpus) { + cpumask_andnot(cpus, cpus, topology_sibling_cpumask(cpu)); + --weight; + + if (unlikely(skip_first_cpu)) { + skip_first_cpu = false; + continue; + } + if (len-- == 0) goto done; + irq_set_affinity_and_hint(*irqs++, topology_sibling_cpumask(cpu)); - cpumask_andnot(cpus, cpus, topology_sibling_cpumask(cpu)); - --weight; } } prev = next; @@ -1601,7 +1609,7 @@ static int mana_gd_setup_irqs(struct pci_dev *pdev) } } - err = irq_setup(irqs, (nvec - start_irq_index), gc->numa_node); + err = irq_setup(irqs, nvec - start_irq_index, gc->numa_node, false); if (err) goto free_irq; From b25b07465a8f473faeb96e0bac16ffdf3d98e9bc Mon Sep 17 00:00:00 2001 From: Shreeya Patel Date: Mon, 15 Dec 2025 13:43:50 +0000 Subject: [PATCH 06/17] net: mana: Allocate MSI-X vectors dynamically jira LE-4467 commit-author Shradha Gupta commit 755391121038c06cb653241aa94dcabd87179f62 upstream-diff There were conflicts seen when applying this patch due to following commit present in our tree before this patch. 590bcf15ae4a ("net: mana: Add handler for hardware servicing events") Currently, the MANA driver allocates MSI-X vectors statically based on MANA_MAX_NUM_QUEUES and num_online_cpus() values and in some cases ends up allocating more vectors than it needs. This is because, by this time we do not have a HW channel and do not know how many IRQs should be allocated. 
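For scale, the sizing decision can be modeled standalone as below (a sketch; MANA_MAX_NUM_QUEUES == 64 is an assumption matching the driver):

	#include <stdio.h>

	#define MANA_MAX_NUM_QUEUES 64

	/* Userspace model: static allocation must guess before the
	 * hardware reports max_msix; dynamic allocation starts with a
	 * single vector for the HWC and grows later.
	 */
	static int initial_vectors(int online_cpus, int can_alloc_dyn)
	{
		int q;

		if (can_alloc_dyn)
			return 1;	/* HWC only; grow once max_msix is known */

		q = online_cpus < MANA_MAX_NUM_QUEUES ?
			online_cpus : MANA_MAX_NUM_QUEUES;
		return q + 1;	/* one per queue plus one for the HWC */
	}

	int main(void)
	{
		printf("static,  16 CPUs: %d vectors\n", initial_vectors(16, 0));
		printf("dynamic, 16 CPUs: %d vector(s)\n", initial_vectors(16, 1));
		return 0;
	}
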
To avoid this, we allocate 1 MSI-X vector during the creation of HWC and after getting the value supported by hardware, dynamically add the remaining MSI-X vectors. Signed-off-by: Shradha Gupta Reviewed-by: Haiyang Zhang (cherry picked from commit 755391121038c06cb653241aa94dcabd87179f62) Signed-off-by: Shreeya Patel Signed-off-by: Shreeya Patel --- .../net/ethernet/microsoft/mana/gdma_main.c | 311 +++++++++++++----- include/net/mana/gdma.h | 7 +- 2 files changed, 235 insertions(+), 83 deletions(-) diff --git a/drivers/net/ethernet/microsoft/mana/gdma_main.c b/drivers/net/ethernet/microsoft/mana/gdma_main.c index e9c87ca27cc5c..a468cd8e5f361 100644 --- a/drivers/net/ethernet/microsoft/mana/gdma_main.c +++ b/drivers/net/ethernet/microsoft/mana/gdma_main.c @@ -6,6 +6,8 @@ #include #include #include +#include +#include #include #include @@ -102,8 +104,15 @@ static int mana_gd_query_max_resources(struct pci_dev *pdev) return err ? err : -EPROTO; } - if (gc->num_msix_usable > resp.max_msix) - gc->num_msix_usable = resp.max_msix; + if (!pci_msix_can_alloc_dyn(pdev)) { + if (gc->num_msix_usable > resp.max_msix) + gc->num_msix_usable = resp.max_msix; + } else { + /* If dynamic allocation is enabled we have already allocated + * hwc msi + */ + gc->num_msix_usable = min(resp.max_msix, num_online_cpus() + 1); + } if (gc->num_msix_usable <= 1) return -ENOSPC; @@ -637,7 +646,9 @@ static int mana_gd_register_irq(struct gdma_queue *queue, } queue->eq.msix_index = msi_index; - gic = &gc->irq_contexts[msi_index]; + gic = xa_load(&gc->irq_contexts, msi_index); + if (WARN_ON(!gic)) + return -EINVAL; spin_lock_irqsave(&gic->lock, flags); list_add_rcu(&queue->entry, &gic->eq_list); @@ -662,7 +673,10 @@ static void mana_gd_deregiser_irq(struct gdma_queue *queue) if (WARN_ON(msix_index >= gc->num_msix_usable)) return; - gic = &gc->irq_contexts[msix_index]; + gic = xa_load(&gc->irq_contexts, msix_index); + if (WARN_ON(!gic)) + return; + spin_lock_irqsave(&gic->lock, flags); list_for_each_entry_rcu(eq, &gic->eq_list, entry) { if (queue == eq) { @@ -1523,47 +1537,108 @@ static int irq_setup(unsigned int *irqs, unsigned int len, int node, return 0; } -static int mana_gd_setup_irqs(struct pci_dev *pdev) +static int mana_gd_setup_dyn_irqs(struct pci_dev *pdev, int nvec) { struct gdma_context *gc = pci_get_drvdata(pdev); - unsigned int max_queues_per_port; struct gdma_irq_context *gic; - unsigned int max_irqs, cpu; - int start_irq_index = 1; - int nvec, *irqs, irq; - int err, i = 0, j; + bool skip_first_cpu = false; + int *irqs, irq, err, i; - cpus_read_lock(); - max_queues_per_port = num_online_cpus(); - if (max_queues_per_port > MANA_MAX_NUM_QUEUES) - max_queues_per_port = MANA_MAX_NUM_QUEUES; + irqs = kmalloc_array(nvec, sizeof(int), GFP_KERNEL); + if (!irqs) + return -ENOMEM; + + /* + * While processing the next pci irq vector, we start with index 1, + * as IRQ vector at index 0 is already processed for HWC. 
+ * However, the population of irqs array starts with index 0, to be + * further used in irq_setup() + */ + for (i = 1; i <= nvec; i++) { + gic = kzalloc(sizeof(*gic), GFP_KERNEL); + if (!gic) { + err = -ENOMEM; + goto free_irq; + } + gic->handler = mana_gd_process_eq_events; + INIT_LIST_HEAD(&gic->eq_list); + spin_lock_init(&gic->lock); - /* Need 1 interrupt for the Hardware communication Channel (HWC) */ - max_irqs = max_queues_per_port + 1; + snprintf(gic->name, MANA_IRQ_NAME_SZ, "mana_q%d@pci:%s", + i - 1, pci_name(pdev)); - nvec = pci_alloc_irq_vectors(pdev, 2, max_irqs, PCI_IRQ_MSIX); - if (nvec < 0) { - cpus_read_unlock(); - return nvec; + /* one pci vector is already allocated for HWC */ + irqs[i - 1] = pci_irq_vector(pdev, i); + if (irqs[i - 1] < 0) { + err = irqs[i - 1]; + goto free_current_gic; + } + + err = request_irq(irqs[i - 1], mana_gd_intr, 0, gic->name, gic); + if (err) + goto free_current_gic; + + xa_store(&gc->irq_contexts, i, gic, GFP_KERNEL); } - if (nvec <= num_online_cpus()) - start_irq_index = 0; - irqs = kmalloc_array((nvec - start_irq_index), sizeof(int), GFP_KERNEL); - if (!irqs) { - err = -ENOMEM; - goto free_irq_vector; + /* + * When calling irq_setup() for dynamically added IRQs, if number of + * CPUs is more than or equal to allocated MSI-X, we need to skip the + * first CPU sibling group since they are already affinitized to HWC IRQ + */ + cpus_read_lock(); + if (gc->num_msix_usable <= num_online_cpus()) + skip_first_cpu = true; + + err = irq_setup(irqs, nvec, gc->numa_node, skip_first_cpu); + if (err) { + cpus_read_unlock(); + goto free_irq; } - gc->irq_contexts = kcalloc(nvec, sizeof(struct gdma_irq_context), - GFP_KERNEL); - if (!gc->irq_contexts) { - err = -ENOMEM; - goto free_irq_array; + cpus_read_unlock(); + kfree(irqs); + return 0; + +free_current_gic: + kfree(gic); +free_irq: + for (i -= 1; i > 0; i--) { + irq = pci_irq_vector(pdev, i); + gic = xa_load(&gc->irq_contexts, i); + if (WARN_ON(!gic)) + continue; + + irq_update_affinity_hint(irq, NULL); + free_irq(irq, gic); + xa_erase(&gc->irq_contexts, i); + kfree(gic); } + kfree(irqs); + return err; +} + +static int mana_gd_setup_irqs(struct pci_dev *pdev, int nvec) +{ + struct gdma_context *gc = pci_get_drvdata(pdev); + struct gdma_irq_context *gic; + int *irqs, *start_irqs, irq; + unsigned int cpu; + int err, i; + + irqs = kmalloc_array(nvec, sizeof(int), GFP_KERNEL); + if (!irqs) + return -ENOMEM; + + start_irqs = irqs; for (i = 0; i < nvec; i++) { - gic = &gc->irq_contexts[i]; + gic = kzalloc(sizeof(*gic), GFP_KERNEL); + if (!gic) { + err = -ENOMEM; + goto free_irq; + } + gic->handler = mana_gd_process_eq_events; INIT_LIST_HEAD(&gic->eq_list); spin_lock_init(&gic->lock); @@ -1575,69 +1650,128 @@ static int mana_gd_setup_irqs(struct pci_dev *pdev) snprintf(gic->name, MANA_IRQ_NAME_SZ, "mana_q%d@pci:%s", i - 1, pci_name(pdev)); - irq = pci_irq_vector(pdev, i); - if (irq < 0) { - err = irq; - goto free_irq; + irqs[i] = pci_irq_vector(pdev, i); + if (irqs[i] < 0) { + err = irqs[i]; + goto free_current_gic; } - if (!i) { - err = request_irq(irq, mana_gd_intr, 0, gic->name, gic); - if (err) - goto free_irq; - - /* If number of IRQ is one extra than number of online CPUs, - * then we need to assign IRQ0 (hwc irq) and IRQ1 to - * same CPU. - * Else we will use different CPUs for IRQ0 and IRQ1. - * Also we are using cpumask_local_spread instead of - * cpumask_first for the node, because the node can be - * mem only. 
- */ - if (start_irq_index) { - cpu = cpumask_local_spread(i, gc->numa_node); - irq_set_affinity_and_hint(irq, cpumask_of(cpu)); - } else { - irqs[start_irq_index] = irq; - } - } else { - irqs[i - start_irq_index] = irq; - err = request_irq(irqs[i - start_irq_index], mana_gd_intr, 0, - gic->name, gic); - if (err) - goto free_irq; - } + err = request_irq(irqs[i], mana_gd_intr, 0, gic->name, gic); + if (err) + goto free_current_gic; + + xa_store(&gc->irq_contexts, i, gic, GFP_KERNEL); } - err = irq_setup(irqs, nvec - start_irq_index, gc->numa_node, false); - if (err) + /* If number of IRQ is one extra than number of online CPUs, + * then we need to assign IRQ0 (hwc irq) and IRQ1 to + * same CPU. + * Else we will use different CPUs for IRQ0 and IRQ1. + * Also we are using cpumask_local_spread instead of + * cpumask_first for the node, because the node can be + * mem only. + */ + cpus_read_lock(); + if (nvec > num_online_cpus()) { + cpu = cpumask_local_spread(0, gc->numa_node); + irq_set_affinity_and_hint(irqs[0], cpumask_of(cpu)); + irqs++; + nvec -= 1; + } + + err = irq_setup(irqs, nvec, gc->numa_node, false); + if (err) { + cpus_read_unlock(); goto free_irq; + } - gc->max_num_msix = nvec; - gc->num_msix_usable = nvec; cpus_read_unlock(); - kfree(irqs); + kfree(start_irqs); return 0; +free_current_gic: + kfree(gic); free_irq: - for (j = i - 1; j >= 0; j--) { - irq = pci_irq_vector(pdev, j); - gic = &gc->irq_contexts[j]; + for (i -= 1; i >= 0; i--) { + irq = pci_irq_vector(pdev, i); + gic = xa_load(&gc->irq_contexts, i); + if (WARN_ON(!gic)) + continue; irq_update_affinity_hint(irq, NULL); free_irq(irq, gic); + xa_erase(&gc->irq_contexts, i); + kfree(gic); } - kfree(gc->irq_contexts); - gc->irq_contexts = NULL; -free_irq_array: - kfree(irqs); -free_irq_vector: - cpus_read_unlock(); - pci_free_irq_vectors(pdev); + kfree(start_irqs); return err; } +static int mana_gd_setup_hwc_irqs(struct pci_dev *pdev) +{ + struct gdma_context *gc = pci_get_drvdata(pdev); + unsigned int max_irqs, min_irqs; + int nvec, err; + + if (pci_msix_can_alloc_dyn(pdev)) { + max_irqs = 1; + min_irqs = 1; + } else { + /* Need 1 interrupt for HWC */ + max_irqs = min(num_online_cpus(), MANA_MAX_NUM_QUEUES) + 1; + min_irqs = 2; + } + + nvec = pci_alloc_irq_vectors(pdev, min_irqs, max_irqs, PCI_IRQ_MSIX); + if (nvec < 0) + return nvec; + + err = mana_gd_setup_irqs(pdev, nvec); + if (err) { + pci_free_irq_vectors(pdev); + return err; + } + + gc->num_msix_usable = nvec; + gc->max_num_msix = nvec; + + return 0; +} + +static int mana_gd_setup_remaining_irqs(struct pci_dev *pdev) +{ + struct gdma_context *gc = pci_get_drvdata(pdev); + struct msi_map irq_map; + int max_irqs, i, err; + + if (!pci_msix_can_alloc_dyn(pdev)) + /* remain irqs are already allocated with HWC IRQ */ + return 0; + + /* allocate only remaining IRQs*/ + max_irqs = gc->num_msix_usable - 1; + + for (i = 1; i <= max_irqs; i++) { + irq_map = pci_msix_alloc_irq_at(pdev, i, NULL); + if (!irq_map.virq) { + err = irq_map.index; + /* caller will handle cleaning up all allocated + * irqs, after HWC is destroyed + */ + return err; + } + } + + err = mana_gd_setup_dyn_irqs(pdev, max_irqs); + if (err) + return err; + + gc->max_num_msix = gc->max_num_msix + max_irqs; + + return 0; +} + static void mana_gd_remove_irqs(struct pci_dev *pdev) { struct gdma_context *gc = pci_get_drvdata(pdev); @@ -1652,19 +1786,21 @@ static void mana_gd_remove_irqs(struct pci_dev *pdev) if (irq < 0) continue; - gic = &gc->irq_contexts[i]; + gic = xa_load(&gc->irq_contexts, i); + if 
(WARN_ON(!gic)) + continue; /* Need to clear the hint before free_irq */ irq_update_affinity_hint(irq, NULL); free_irq(irq, gic); + xa_erase(&gc->irq_contexts, i); + kfree(gic); } pci_free_irq_vectors(pdev); gc->max_num_msix = 0; gc->num_msix_usable = 0; - kfree(gc->irq_contexts); - gc->irq_contexts = NULL; } static int mana_gd_setup(struct pci_dev *pdev) @@ -1679,9 +1815,10 @@ static int mana_gd_setup(struct pci_dev *pdev) if (!gc->service_wq) return -ENOMEM; - err = mana_gd_setup_irqs(pdev); + err = mana_gd_setup_hwc_irqs(pdev); if (err) { - dev_err(gc->dev, "Failed to setup IRQs: %d\n", err); + dev_err(gc->dev, "Failed to setup IRQs for HWC creation: %d\n", + err); goto free_workqueue; } @@ -1697,6 +1834,12 @@ static int mana_gd_setup(struct pci_dev *pdev) if (err) goto destroy_hwc; + err = mana_gd_setup_remaining_irqs(pdev); + if (err) { + dev_err(gc->dev, "Failed to setup remaining IRQs: %d", err); + goto destroy_hwc; + } + err = mana_gd_detect_devices(pdev); if (err) goto destroy_hwc; @@ -1777,6 +1920,7 @@ static int mana_gd_probe(struct pci_dev *pdev, const struct pci_device_id *ent) gc->is_pf = mana_is_pf(pdev->device); gc->bar0_va = bar0_va; gc->dev = &pdev->dev; + xa_init(&gc->irq_contexts); if (gc->is_pf) gc->mana_pci_debugfs = debugfs_create_dir("0", mana_debugfs_root); @@ -1811,6 +1955,7 @@ static int mana_gd_probe(struct pci_dev *pdev, const struct pci_device_id *ent) */ debugfs_remove_recursive(gc->mana_pci_debugfs); gc->mana_pci_debugfs = NULL; + xa_destroy(&gc->irq_contexts); pci_iounmap(pdev, bar0_va); free_gc: pci_set_drvdata(pdev, NULL); @@ -1836,6 +1981,8 @@ static void mana_gd_remove(struct pci_dev *pdev) gc->mana_pci_debugfs = NULL; + xa_destroy(&gc->irq_contexts); + pci_iounmap(pdev, gc->bar0_va); vfree(gc); diff --git a/include/net/mana/gdma.h b/include/net/mana/gdma.h index 79516db61bcae..6c300fd325237 100644 --- a/include/net/mana/gdma.h +++ b/include/net/mana/gdma.h @@ -390,7 +390,7 @@ struct gdma_context { unsigned int max_num_queues; unsigned int max_num_msix; unsigned int num_msix_usable; - struct gdma_irq_context *irq_contexts; + struct xarray irq_contexts; /* L2 MTU */ u16 adapter_mtu; @@ -582,18 +582,23 @@ enum { /* Driver can handle holes (zeros) in the device list */ #define GDMA_DRV_CAP_FLAG_1_DEV_LIST_HOLES_SUP BIT(11) +/* Driver supports dynamic MSI-X vector allocation */ +#define GDMA_DRV_CAP_FLAG_1_DYNAMIC_IRQ_ALLOC_SUPPORT BIT(13) + /* Driver can self reset on EQE notification */ #define GDMA_DRV_CAP_FLAG_1_SELF_RESET_ON_EQE BIT(14) /* Driver can self reset on FPGA Reconfig EQE notification */ #define GDMA_DRV_CAP_FLAG_1_HANDLE_RECONFIG_EQE BIT(17) + #define GDMA_DRV_CAP_FLAGS1 \ (GDMA_DRV_CAP_FLAG_1_EQ_SHARING_MULTI_VPORT | \ GDMA_DRV_CAP_FLAG_1_NAPI_WKDONE_FIX | \ GDMA_DRV_CAP_FLAG_1_HWC_TIMEOUT_RECONFIG | \ GDMA_DRV_CAP_FLAG_1_VARIABLE_INDIRECTION_TABLE_SUPPORT | \ GDMA_DRV_CAP_FLAG_1_DEV_LIST_HOLES_SUP | \ + GDMA_DRV_CAP_FLAG_1_DYNAMIC_IRQ_ALLOC_SUPPORT | \ GDMA_DRV_CAP_FLAG_1_SELF_RESET_ON_EQE | \ GDMA_DRV_CAP_FLAG_1_HANDLE_RECONFIG_EQE) From a1e537563439a9b5bdfb3673a73f69a3be4f1c2e Mon Sep 17 00:00:00 2001 From: Shreeya Patel Date: Tue, 16 Dec 2025 13:07:11 +0000 Subject: [PATCH 07/17] net: mana: Add support for net_shaper_ops jira LE-4473 commit-author Erni Sri Satya Vennela commit 75cabb46935b6de8e2bdfde563e460ac41cfff12 Introduce support for net_shaper_ops in the MANA driver, enabling configuration of rate limiting on the MANA NIC. 
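Note on units: net-shaper expresses bw-max in bits per second, while the HWC command takes the rate in Mbps. A standalone sketch of the conversion and the multiple-of-100Mbps check applied below:

	#include <stdio.h>
	#include <stdint.h>

	/* Sketch of the validation/conversion this patch performs:
	 * bw_max (bps) must be a non-zero multiple of 100 Mbps, and is
	 * handed to the HWC in Mbps.
	 */
	int main(void)
	{
		uint64_t bw_max = 200000000ULL;	/* 200 Mbps, expressed in bps */

		if (!bw_max || bw_max % 100000000ULL) {
			fprintf(stderr, "bw-max must be a multiple of 100 Mbps\n");
			return 1;
		}
		printf("rate for the HWC: %llu Mbps\n",
		       (unsigned long long)(bw_max / 1000 / 1000));
		return 0;
	}
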
To apply rate limiting, the driver issues a HWC command via mana_set_bw_clamp() and updates the corresponding shaper object in the net_shaper cache. If an error occurs during this process, the driver restores the previous speed by querying the current link configuration using mana_query_link_cfg(). The minimum supported bandwidth is 100 Mbps, and only values that are exact multiples of 100 Mbps are allowed. Any other values are rejected. To remove a shaper, the driver resets the bandwidth to the maximum supported by the SKU using mana_set_bw_clamp() and clears the associated cache entry. If an error occurs during this process, the shaper details are retained. On the hardware that does not support these APIs, the net-shaper calls to set speed would fail. Set the speed: ./tools/net/ynl/pyynl/cli.py \ --spec Documentation/netlink/specs/net_shaper.yaml \ --do set --json '{"ifindex":'$IFINDEX', "handle":{"scope": "netdev", "id":'$ID' }, "bw-max": 200000000 }' Get the shaper details: ./tools/net/ynl/pyynl/cli.py \ --spec Documentation/netlink/specs/net_shaper.yaml \ --do get --json '{"ifindex":'$IFINDEX', "handle":{"scope": "netdev", "id":'$ID' }}' > {'bw-max': 200000000, > 'handle': {'scope': 'netdev'}, > 'ifindex': $IFINDEX, > 'metric': 'bps'} Delete the shaper object: ./tools/net/ynl/pyynl/cli.py \ --spec Documentation/netlink/specs/net_shaper.yaml \ --do delete --json '{"ifindex":'$IFINDEX', "handle":{"scope": "netdev","id":'$ID' }}' Signed-off-by: Erni Sri Satya Vennela Reviewed-by: Haiyang Zhang Reviewed-by: Shradha Gupta Reviewed-by: Saurabh Singh Sengar Reviewed-by: Long Li Link: https://patch.msgid.link/1750144656-2021-3-git-send-email-ernis@linux.microsoft.com Signed-off-by: Paolo Abeni (cherry picked from commit 75cabb46935b6de8e2bdfde563e460ac41cfff12) Signed-off-by: Shreeya Patel --- drivers/net/ethernet/microsoft/mana/mana_en.c | 155 ++++++++++++++++++ include/net/mana/mana.h | 40 +++++ 2 files changed, 195 insertions(+) diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c index 721d049c54de2..6c6e9e952a9be 100644 --- a/drivers/net/ethernet/microsoft/mana/mana_en.c +++ b/drivers/net/ethernet/microsoft/mana/mana_en.c @@ -731,6 +731,78 @@ static int mana_change_mtu(struct net_device *ndev, int new_mtu) return err; } +static int mana_shaper_set(struct net_shaper_binding *binding, + const struct net_shaper *shaper, + struct netlink_ext_ack *extack) +{ + struct mana_port_context *apc = netdev_priv(binding->netdev); + u32 old_speed, rate; + int err; + + if (shaper->handle.scope != NET_SHAPER_SCOPE_NETDEV) { + NL_SET_ERR_MSG_MOD(extack, "net shaper scope should be netdev"); + return -EINVAL; + } + + if (apc->handle.id && shaper->handle.id != apc->handle.id) { + NL_SET_ERR_MSG_MOD(extack, "Cannot create multiple shapers"); + return -EOPNOTSUPP; + } + + if (!shaper->bw_max || (shaper->bw_max % 100000000)) { + NL_SET_ERR_MSG_MOD(extack, "Please use multiples of 100Mbps for bandwidth"); + return -EINVAL; + } + + rate = div_u64(shaper->bw_max, 1000); /* Convert bps to Kbps */ + rate = div_u64(rate, 1000); /* Convert Kbps to Mbps */ + + /* Get current speed */ + err = mana_query_link_cfg(apc); + old_speed = (err) ? SPEED_UNKNOWN : apc->speed; + + if (!err) { + err = mana_set_bw_clamp(apc, rate, TRI_STATE_TRUE); + apc->speed = (err) ? old_speed : rate; + apc->handle = (err) ? 
apc->handle : shaper->handle; + } + + return err; +} + +static int mana_shaper_del(struct net_shaper_binding *binding, + const struct net_shaper_handle *handle, + struct netlink_ext_ack *extack) +{ + struct mana_port_context *apc = netdev_priv(binding->netdev); + int err; + + err = mana_set_bw_clamp(apc, 0, TRI_STATE_FALSE); + + if (!err) { + /* Reset mana port context parameters */ + apc->handle.id = 0; + apc->handle.scope = NET_SHAPER_SCOPE_UNSPEC; + apc->speed = 0; + } + + return err; +} + +static void mana_shaper_cap(struct net_shaper_binding *binding, + enum net_shaper_scope scope, + unsigned long *flags) +{ + *flags = BIT(NET_SHAPER_A_CAPS_SUPPORT_BW_MAX) | + BIT(NET_SHAPER_A_CAPS_SUPPORT_METRIC_BPS); +} + +static const struct net_shaper_ops mana_shaper_ops = { + .set = mana_shaper_set, + .delete = mana_shaper_del, + .capabilities = mana_shaper_cap, +}; + static const struct net_device_ops mana_devops = { .ndo_open = mana_open, .ndo_stop = mana_close, @@ -741,6 +813,7 @@ static const struct net_device_ops mana_devops = { .ndo_bpf = mana_bpf, .ndo_xdp_xmit = mana_xdp_xmit, .ndo_change_mtu = mana_change_mtu, + .net_shaper_ops = &mana_shaper_ops, }; static void mana_cleanup_port_context(struct mana_port_context *apc) @@ -1184,6 +1257,86 @@ static int mana_cfg_vport_steering(struct mana_port_context *apc, return err; } +int mana_query_link_cfg(struct mana_port_context *apc) +{ + struct net_device *ndev = apc->ndev; + struct mana_query_link_config_resp resp = {}; + struct mana_query_link_config_req req = {}; + int err; + + mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_LINK_CONFIG, + sizeof(req), sizeof(resp)); + + req.vport = apc->port_handle; + req.hdr.resp.msg_version = GDMA_MESSAGE_V2; + + err = mana_send_request(apc->ac, &req, sizeof(req), &resp, + sizeof(resp)); + + if (err) { + netdev_err(ndev, "Failed to query link config: %d\n", err); + return err; + } + + err = mana_verify_resp_hdr(&resp.hdr, MANA_QUERY_LINK_CONFIG, + sizeof(resp)); + + if (err || resp.hdr.status) { + netdev_err(ndev, "Failed to query link config: %d, 0x%x\n", err, + resp.hdr.status); + if (!err) + err = -EOPNOTSUPP; + return err; + } + + if (resp.qos_unconfigured) { + err = -EINVAL; + return err; + } + apc->speed = resp.link_speed_mbps; + return 0; +} + +int mana_set_bw_clamp(struct mana_port_context *apc, u32 speed, + int enable_clamping) +{ + struct mana_set_bw_clamp_resp resp = {}; + struct mana_set_bw_clamp_req req = {}; + struct net_device *ndev = apc->ndev; + int err; + + mana_gd_init_req_hdr(&req.hdr, MANA_SET_BW_CLAMP, + sizeof(req), sizeof(resp)); + req.vport = apc->port_handle; + req.link_speed_mbps = speed; + req.enable_clamping = enable_clamping; + + err = mana_send_request(apc->ac, &req, sizeof(req), &resp, + sizeof(resp)); + + if (err) { + netdev_err(ndev, "Failed to set bandwidth clamp for speed %u, err = %d", + speed, err); + return err; + } + + err = mana_verify_resp_hdr(&resp.hdr, MANA_SET_BW_CLAMP, + sizeof(resp)); + + if (err || resp.hdr.status) { + netdev_err(ndev, "Failed to set bandwidth clamp: %d, 0x%x\n", err, + resp.hdr.status); + if (!err) + err = -EOPNOTSUPP; + return err; + } + + if (resp.qos_unconfigured) + netdev_info(ndev, "QoS is unconfigured\n"); + + return 0; +} + int mana_create_wq_obj(struct mana_port_context *apc, mana_handle_t vport, u32 wq_type, struct mana_obj_spec *wq_spec, @@ -3024,6 +3177,8 @@ static int mana_probe_port(struct mana_context *ac, int port_idx, goto free_indir; } + debugfs_create_u32("current_speed", 0400, apc->mana_port_debugfs, &apc->speed); + return 0; 
free_indir: diff --git a/include/net/mana/mana.h b/include/net/mana/mana.h index 4176edf1be719..038b18340e513 100644 --- a/include/net/mana/mana.h +++ b/include/net/mana/mana.h @@ -5,6 +5,7 @@ #define _MANA_H #include +#include #include "gdma.h" #include "hw_channel.h" @@ -526,7 +527,12 @@ struct mana_port_context { struct mutex vport_mutex; int vport_use_count; + /* Net shaper handle*/ + struct net_shaper_handle handle; + u16 port_idx; + /* Currently configured speed (mbps) */ + u32 speed; bool port_is_up; bool port_st_save; /* Saved port state */ @@ -562,6 +568,9 @@ struct bpf_prog *mana_xdp_get(struct mana_port_context *apc); void mana_chn_setxdp(struct mana_port_context *apc, struct bpf_prog *prog); int mana_bpf(struct net_device *ndev, struct netdev_bpf *bpf); void mana_query_gf_stats(struct mana_port_context *apc); +int mana_query_link_cfg(struct mana_port_context *apc); +int mana_set_bw_clamp(struct mana_port_context *apc, u32 speed, + int enable_clamping); void mana_query_phy_stats(struct mana_port_context *apc); int mana_pre_alloc_rxbufs(struct mana_port_context *apc, int mtu, int num_queues); void mana_pre_dealloc_rxbufs(struct mana_port_context *apc); @@ -589,6 +598,8 @@ enum mana_command_code { MANA_FENCE_RQ = 0x20006, MANA_CONFIG_VPORT_RX = 0x20007, MANA_QUERY_VPORT_CONFIG = 0x20008, + MANA_QUERY_LINK_CONFIG = 0x2000A, + MANA_SET_BW_CLAMP = 0x2000B, MANA_QUERY_PHY_STAT = 0x2000c, /* Privileged commands for the PF mode */ @@ -598,6 +609,35 @@ enum mana_command_code { MANA_DEREGISTER_HW_PORT = 0x28004, }; +/* Query Link Configuration*/ +struct mana_query_link_config_req { + struct gdma_req_hdr hdr; + mana_handle_t vport; +}; /* HW DATA */ + +struct mana_query_link_config_resp { + struct gdma_resp_hdr hdr; + u32 qos_speed_mbps; + u8 qos_unconfigured; + u8 reserved1[3]; + u32 link_speed_mbps; + u8 reserved2[4]; +}; /* HW DATA */ + +/* Set Bandwidth Clamp*/ +struct mana_set_bw_clamp_req { + struct gdma_req_hdr hdr; + mana_handle_t vport; + enum TRI_STATE enable_clamping; + u32 link_speed_mbps; +}; /* HW DATA */ + +struct mana_set_bw_clamp_resp { + struct gdma_resp_hdr hdr; + u8 qos_unconfigured; + u8 reserved[7]; +}; /* HW DATA */ + /* Query Device Configuration */ struct mana_query_device_cfg_req { struct gdma_req_hdr hdr; From aa25a6ab57777c1207482252280ac172a14328fd Mon Sep 17 00:00:00 2001 From: Shreeya Patel Date: Tue, 16 Dec 2025 13:09:02 +0000 Subject: [PATCH 08/17] net: mana: Add speed support in mana_get_link_ksettings jira LE-4473 commit-author Erni Sri Satya Vennela commit a6d5edf11e0cf5a4650f1d353d20ec29de093813 Allow mana ethtool get_link_ksettings operation to report the maximum speed supported by the SKU in mbps. The driver retrieves this information by issuing a HWC command to the hardware via mana_query_link_cfg(), which retrieves the SKU's maximum supported speed. These APIs when invoked on hardware that are older/do not support these APIs, the speed would be reported as UNKNOWN. Before: $ethtool enP30832s1 > Settings for enP30832s1: Supported ports: [ ] Supported link modes: Not reported Supported pause frame use: No Supports auto-negotiation: No Supported FEC modes: Not reported Advertised link modes: Not reported Advertised pause frame use: No Advertised auto-negotiation: No Advertised FEC modes: Not reported Speed: Unknown! 
Duplex: Full Auto-negotiation: off Port: Other PHYAD: 0 Transceiver: internal Link detected: yes After: $ethtool enP30832s1 > Settings for enP30832s1: Supported ports: [ ] Supported link modes: Not reported Supported pause frame use: No Supports auto-negotiation: No Supported FEC modes: Not reported Advertised link modes: Not reported Advertised pause frame use: No Advertised auto-negotiation: No Advertised FEC modes: Not reported Speed: 16000Mb/s Duplex: Full Auto-negotiation: off Port: Other PHYAD: 0 Transceiver: internal Link detected: yes Signed-off-by: Erni Sri Satya Vennela Reviewed-by: Haiyang Zhang Reviewed-by: Shradha Gupta Reviewed-by: Saurabh Singh Sengar Reviewed-by: Long Li Link: https://patch.msgid.link/1750144656-2021-4-git-send-email-ernis@linux.microsoft.com Signed-off-by: Paolo Abeni (cherry picked from commit a6d5edf11e0cf5a4650f1d353d20ec29de093813) Signed-off-by: Shreeya Patel --- drivers/net/ethernet/microsoft/mana/mana_en.c | 1 + drivers/net/ethernet/microsoft/mana/mana_ethtool.c | 6 ++++++ include/net/mana/mana.h | 2 ++ 3 files changed, 9 insertions(+) diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c index 6c6e9e952a9be..f900ca01f5f23 100644 --- a/drivers/net/ethernet/microsoft/mana/mana_en.c +++ b/drivers/net/ethernet/microsoft/mana/mana_en.c @@ -1294,6 +1294,7 @@ int mana_query_link_cfg(struct mana_port_context *apc) return err; } apc->speed = resp.link_speed_mbps; + apc->max_speed = resp.qos_speed_mbps; return 0; } diff --git a/drivers/net/ethernet/microsoft/mana/mana_ethtool.c b/drivers/net/ethernet/microsoft/mana/mana_ethtool.c index 4fb3a04994a2d..a1afa75a94631 100644 --- a/drivers/net/ethernet/microsoft/mana/mana_ethtool.c +++ b/drivers/net/ethernet/microsoft/mana/mana_ethtool.c @@ -495,6 +495,12 @@ static int mana_set_ringparam(struct net_device *ndev, static int mana_get_link_ksettings(struct net_device *ndev, struct ethtool_link_ksettings *cmd) { + struct mana_port_context *apc = netdev_priv(ndev); + int err; + + err = mana_query_link_cfg(apc); + cmd->base.speed = (err) ? SPEED_UNKNOWN : apc->max_speed; + cmd->base.duplex = DUPLEX_FULL; cmd->base.port = PORT_OTHER; diff --git a/include/net/mana/mana.h b/include/net/mana/mana.h index 038b18340e513..e1030a7d2daab 100644 --- a/include/net/mana/mana.h +++ b/include/net/mana/mana.h @@ -533,6 +533,8 @@ struct mana_port_context { u16 port_idx; /* Currently configured speed (mbps) */ u32 speed; + /* Maximum speed supported by the SKU (mbps) */ + u32 max_speed; bool port_is_up; bool port_st_save; /* Saved port state */ From 00e04b74b4b322896dcf9b8186ad7fc6b76d9b8f Mon Sep 17 00:00:00 2001 From: Shreeya Patel Date: Tue, 16 Dec 2025 13:42:16 +0000 Subject: [PATCH 09/17] net: mana: Handle unsupported HWC commands jira LE-4473 commit-author Erni Sri Satya Vennela commit ca8ac489ca33c986ff02ee14c3e1c10b86355428 upstream-diff There were conflicts seen when applying this patch due to the following patch being in our tree before this one. 7a3c23599984 ("net: mana: Handle Reset Request from MANA NIC") If any of the HWC commands are not recognized by the underlying hardware, the hardware returns the response header status of -1. Log the information using netdev_info_once to avoid multiple error logs in dmesg. 
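A userspace analog of the resulting behavior (a sketch; the kernel side uses netdev_info_once(), modeled here with a static flag):

	#include <stdio.h>
	#include <stdbool.h>
	#include <errno.h>

	/* Unsupported command: report once, then tolerate silently. */
	static int query_link_cfg(void)
	{
		static bool warned;
		int err = -EOPNOTSUPP;	/* pretend: hardware returned status -1 */

		if (err == -EOPNOTSUPP && !warned) {
			warned = true;
			fprintf(stderr, "MANA_QUERY_LINK_CONFIG not supported\n");
		}
		return err;
	}

	int main(void)
	{
		for (int i = 0; i < 3; i++)
			query_link_cfg();	/* message printed exactly once */
		return 0;
	}
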
Signed-off-by: Erni Sri Satya Vennela Reviewed-by: Haiyang Zhang Reviewed-by: Shradha Gupta Reviewed-by: Saurabh Singh Sengar Reviewed-by: Dipayaan Roy Link: https://patch.msgid.link/1750144656-2021-5-git-send-email-ernis@linux.microsoft.com Signed-off-by: Paolo Abeni (cherry picked from commit ca8ac489ca33c986ff02ee14c3e1c10b86355428) Signed-off-by: Shreeya Patel --- drivers/net/ethernet/microsoft/mana/mana_en.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c index f900ca01f5f23..a1389d2e37a9a 100644 --- a/drivers/net/ethernet/microsoft/mana/mana_en.c +++ b/drivers/net/ethernet/microsoft/mana/mana_en.c @@ -1274,6 +1274,10 @@ int mana_query_link_cfg(struct mana_port_context *apc) sizeof(resp)); if (err) { + if (err == -EOPNOTSUPP) { + netdev_info_once(ndev, "MANA_QUERY_LINK_CONFIG not supported\n"); + return err; + } netdev_err(ndev, "Failed to query link config: %d\n", err); return err; } @@ -1316,6 +1320,10 @@ int mana_set_bw_clamp(struct mana_port_context *apc, u32 speed, sizeof(resp)); if (err) { + if (err == -EOPNOTSUPP) { + netdev_info_once(ndev, "MANA_SET_BW_CLAMP not supported\n"); + return err; + } netdev_err(ndev, "Failed to set bandwidth clamp for speed %u, err = %d", speed, err); return err; From 4ae0535bcf6b5e7f914886b1daba240e17ef2108 Mon Sep 17 00:00:00 2001 From: Shreeya Patel Date: Tue, 16 Dec 2025 13:49:04 +0000 Subject: [PATCH 10/17] net: mana: Fix build errors when CONFIG_NET_SHAPER is disabled jira LE-4473 commit-author Erni Sri Satya Vennela commit 11cd0206987205ee05b0abd70a8eafa400ba89e3 Fix build errors when CONFIG_NET_SHAPER is disabled, including: drivers/net/ethernet/microsoft/mana/mana_en.c:804:10: error: 'const struct net_device_ops' has no member named 'net_shaper_ops' 804 | .net_shaper_ops = &mana_shaper_ops, drivers/net/ethernet/microsoft/mana/mana_en.c:804:35: error: initialization of 'int (*)(struct net_device *, struct neigh_parms *)' from incompatible pointer type 'const struct net_shaper_ops *' [-Werror=incompatible-pointer-types] 804 | .net_shaper_ops = &mana_shaper_ops, Signed-off-by: Erni Sri Satya Vennela Fixes: 75cabb46935b ("net: mana: Add support for net_shaper_ops") Reported-by: kernel test robot Closes: https://lore.kernel.org/oe-kbuild-all/202506230625.bfUlqb8o-lkp@intel.com/ Reviewed-by: Simon Horman Link: https://patch.msgid.link/1750851355-8067-1-git-send-email-ernis@linux.microsoft.com Signed-off-by: Jakub Kicinski (cherry picked from commit 11cd0206987205ee05b0abd70a8eafa400ba89e3) Signed-off-by: Shreeya Patel --- drivers/net/ethernet/microsoft/Kconfig | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/ethernet/microsoft/Kconfig b/drivers/net/ethernet/microsoft/Kconfig index 901fbffbf718e..3f36ee6a8ecee 100644 --- a/drivers/net/ethernet/microsoft/Kconfig +++ b/drivers/net/ethernet/microsoft/Kconfig @@ -22,6 +22,7 @@ config MICROSOFT_MANA depends on PCI_HYPERV select AUXILIARY_BUS select PAGE_POOL + select NET_SHAPER help This driver supports Microsoft Azure Network Adapter (MANA). So far, the driver is only supported on X86_64. From f5813d0e4e51e90da306db104aae9d85cac722ee Mon Sep 17 00:00:00 2001 From: Shreeya Patel Date: Wed, 17 Dec 2025 11:39:27 +0000 Subject: [PATCH 11/17] RDMA/mana_ib: add additional port counters jira LE-4527 commit-author Zhiyue Qiu commit 084f35b84f57e059b542ea44240a51b294a096a1 Add packet and request port counters to mana_ib. 
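Once applied, the new counters appear alongside the existing per-port ones under /sys/class/infiniband/<device>/ports/<port>/hw_counters/ and can be read with the iproute2 rdma tool, for example (device name is illustrative):

  $rdma statistic show link mana_0/1
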
Signed-off-by: Zhiyue Qiu Signed-off-by: Konstantin Taranov Link: https://patch.msgid.link/1752143395-5324-1-git-send-email-kotaranov@linux.microsoft.com Reviewed-by: Long Li Signed-off-by: Leon Romanovsky (cherry picked from commit 084f35b84f57e059b542ea44240a51b294a096a1) Signed-off-by: Shreeya Patel --- drivers/infiniband/hw/mana/counters.c | 18 ++++++++++++++++++ drivers/infiniband/hw/mana/counters.h | 8 ++++++++ drivers/infiniband/hw/mana/mana_ib.h | 8 ++++++++ 3 files changed, 34 insertions(+) diff --git a/drivers/infiniband/hw/mana/counters.c b/drivers/infiniband/hw/mana/counters.c index 6a81365d3b951..e964e74be48da 100644 --- a/drivers/infiniband/hw/mana/counters.c +++ b/drivers/infiniband/hw/mana/counters.c @@ -32,6 +32,14 @@ static const struct rdma_stat_desc mana_ib_port_stats_desc[] = { [MANA_IB_RATE_INC_EVENTS].name = "rate_inc_events", [MANA_IB_NUM_QPS_RECOVERED].name = "num_qps_recovered", [MANA_IB_CURRENT_RATE].name = "current_rate", + [MANA_IB_DUP_RX_REQ].name = "dup_rx_requests", + [MANA_IB_TX_BYTES].name = "tx_bytes", + [MANA_IB_RX_BYTES].name = "rx_bytes", + [MANA_IB_RX_SEND_REQ].name = "rx_send_requests", + [MANA_IB_RX_WRITE_REQ].name = "rx_write_requests", + [MANA_IB_RX_READ_REQ].name = "rx_read_requests", + [MANA_IB_TX_PKT].name = "tx_packets", + [MANA_IB_RX_PKT].name = "rx_packets", }; static const struct rdma_stat_desc mana_ib_device_stats_desc[] = { @@ -100,6 +108,7 @@ static int mana_ib_get_hw_port_stats(struct ib_device *ibdev, struct rdma_hw_sta mana_gd_init_req_hdr(&req.hdr, MANA_IB_QUERY_VF_COUNTERS, sizeof(req), sizeof(resp)); + req.hdr.resp.msg_version = GDMA_MESSAGE_V2; req.hdr.dev_id = mdev->gdma_dev->dev_id; req.adapter = mdev->adapter_handle; @@ -148,6 +157,15 @@ static int mana_ib_get_hw_port_stats(struct ib_device *ibdev, struct rdma_hw_sta stats->value[MANA_IB_NUM_QPS_RECOVERED] = resp.num_qps_recovered; stats->value[MANA_IB_CURRENT_RATE] = resp.current_rate; + stats->value[MANA_IB_DUP_RX_REQ] = resp.dup_rx_req; + stats->value[MANA_IB_TX_BYTES] = resp.tx_bytes; + stats->value[MANA_IB_RX_BYTES] = resp.rx_bytes; + stats->value[MANA_IB_RX_SEND_REQ] = resp.rx_send_req; + stats->value[MANA_IB_RX_WRITE_REQ] = resp.rx_write_req; + stats->value[MANA_IB_RX_READ_REQ] = resp.rx_read_req; + stats->value[MANA_IB_TX_PKT] = resp.tx_pkt; + stats->value[MANA_IB_RX_PKT] = resp.rx_pkt; + return ARRAY_SIZE(mana_ib_port_stats_desc); } diff --git a/drivers/infiniband/hw/mana/counters.h b/drivers/infiniband/hw/mana/counters.h index 987a6fee83c94..f68e776bb41d7 100644 --- a/drivers/infiniband/hw/mana/counters.h +++ b/drivers/infiniband/hw/mana/counters.h @@ -35,6 +35,14 @@ enum mana_ib_port_counters { MANA_IB_RATE_INC_EVENTS, MANA_IB_NUM_QPS_RECOVERED, MANA_IB_CURRENT_RATE, + MANA_IB_DUP_RX_REQ, + MANA_IB_TX_BYTES, + MANA_IB_RX_BYTES, + MANA_IB_RX_SEND_REQ, + MANA_IB_RX_WRITE_REQ, + MANA_IB_RX_READ_REQ, + MANA_IB_TX_PKT, + MANA_IB_RX_PKT, }; enum mana_ib_device_counters { diff --git a/drivers/infiniband/hw/mana/mana_ib.h b/drivers/infiniband/hw/mana/mana_ib.h index eddd0a83b97ee..369825fdeff86 100644 --- a/drivers/infiniband/hw/mana/mana_ib.h +++ b/drivers/infiniband/hw/mana/mana_ib.h @@ -516,6 +516,14 @@ struct mana_rnic_query_vf_cntrs_resp { u64 rate_inc_events; u64 num_qps_recovered; u64 current_rate; + u64 dup_rx_req; + u64 tx_bytes; + u64 rx_bytes; + u64 rx_send_req; + u64 rx_write_req; + u64 rx_read_req; + u64 tx_pkt; + u64 rx_pkt; }; /* HW Data */ struct mana_rnic_query_device_cntrs_req { From 3bef1f11f43652410935f5b4db3ef11e24e161bf Mon Sep 17 00:00:00 2001 From: 
Shreeya Patel Date: Wed, 17 Dec 2025 11:54:39 +0000 Subject: [PATCH 12/17] RDMA/mana_ib: Drain send wrs of GSI QP jira LE-4524 commit-author Konstantin Taranov commit 44d69d3cf2e8047c279cbb9708f05e2c43e33234 Drain send WRs of the GSI QP on device removal. In rare servicing scenarios, the hardware may delete the state of the GSI QP, preventing it from generating CQEs for pending send WRs. Since WRs submitted to the GSI QP hold CM resources, the device cannot be removed until those WRs are completed. This patch marks all pending send WRs as failed, allowing the GSI QP to release the CM resources and enabling safe device removal. Signed-off-by: Konstantin Taranov Link: https://patch.msgid.link/1753779618-23629-1-git-send-email-kotaranov@linux.microsoft.com Signed-off-by: Leon Romanovsky (cherry picked from commit 44d69d3cf2e8047c279cbb9708f05e2c43e33234) Signed-off-by: Shreeya Patel --- drivers/infiniband/hw/mana/cq.c | 26 ++++++++++++++++++++++++++ drivers/infiniband/hw/mana/device.c | 3 +++ drivers/infiniband/hw/mana/mana_ib.h | 3 +++ 3 files changed, 32 insertions(+) diff --git a/drivers/infiniband/hw/mana/cq.c b/drivers/infiniband/hw/mana/cq.c index 28e154bbb50f8..1becc87791235 100644 --- a/drivers/infiniband/hw/mana/cq.c +++ b/drivers/infiniband/hw/mana/cq.c @@ -291,6 +291,32 @@ static int mana_process_completions(struct mana_ib_cq *cq, int nwc, struct ib_wc return wc_index; } +void mana_drain_gsi_sqs(struct mana_ib_dev *mdev) +{ + struct mana_ib_qp *qp = mana_get_qp_ref(mdev, MANA_GSI_QPN, false); + struct ud_sq_shadow_wqe *shadow_wqe; + struct mana_ib_cq *cq; + unsigned long flags; + + if (!qp) + return; + + cq = container_of(qp->ibqp.send_cq, struct mana_ib_cq, ibcq); + + spin_lock_irqsave(&cq->cq_lock, flags); + while ((shadow_wqe = shadow_queue_get_next_to_complete(&qp->shadow_sq)) + != NULL) { + shadow_wqe->header.error_code = IB_WC_GENERAL_ERR; + shadow_queue_advance_next_to_complete(&qp->shadow_sq); + } + spin_unlock_irqrestore(&cq->cq_lock, flags); + + if (cq->ibcq.comp_handler) + cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context); + + mana_put_qp_ref(qp); +} + int mana_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc) { struct mana_ib_cq *cq = container_of(ibcq, struct mana_ib_cq, ibcq); diff --git a/drivers/infiniband/hw/mana/device.c b/drivers/infiniband/hw/mana/device.c index 65d0af740e5a8..eceb6fca4c6aa 100644 --- a/drivers/infiniband/hw/mana/device.c +++ b/drivers/infiniband/hw/mana/device.c @@ -224,6 +224,9 @@ static void mana_ib_remove(struct auxiliary_device *adev) { struct mana_ib_dev *dev = dev_get_drvdata(&adev->dev); + if (mana_ib_is_rnic(dev)) + mana_drain_gsi_sqs(dev); + ib_unregister_device(&dev->ib_dev); dma_pool_destroy(dev->av_pool); if (mana_ib_is_rnic(dev)) { diff --git a/drivers/infiniband/hw/mana/mana_ib.h b/drivers/infiniband/hw/mana/mana_ib.h index 369825fdeff86..2c3400213b12f 100644 --- a/drivers/infiniband/hw/mana/mana_ib.h +++ b/drivers/infiniband/hw/mana/mana_ib.h @@ -43,6 +43,8 @@ */ #define MANA_AV_BUFFER_SIZE 64 +#define MANA_GSI_QPN (1) + struct mana_ib_adapter_caps { u32 max_sq_id; u32 max_rq_id; @@ -716,6 +718,7 @@ int mana_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr, int mana_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr, const struct ib_send_wr **bad_wr); +void mana_drain_gsi_sqs(struct mana_ib_dev *mdev); int mana_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc); int mana_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags); From 
22b9757a73bad4c7efbcb94e3bd1f0c8c4966f31 Mon Sep 17 00:00:00 2001 From: Shreeya Patel Date: Wed, 17 Dec 2025 12:08:39 +0000 Subject: [PATCH 13/17] net: hv_netvsc: fix loss of early receive events from host during channel open. jira LE-4494 commit-author Dipayaan Roy commit 9448ccd853368582efa9db05db344f8bb9dffe0f The hv_netvsc driver currently enables NAPI after opening the primary and subchannels. This ordering creates a race: if the Hyper-V host places data in the host -> guest ring buffer and signals the channel before napi_enable() has been called, the channel callback will run but napi_schedule_prep() will return false. As a result, the NAPI poller never gets scheduled, the data in the ring buffer is not consumed, and the receive queue may remain permanently stuck until another interrupt happens to arrive. Fix this by enabling NAPI and registering it with the RX/TX queues before vmbus channel is opened. This guarantees that any early host signal after open will correctly trigger NAPI scheduling and the ring buffer will be drained. Fixes: 76bb5db5c749d ("netvsc: fix use after free on module removal") Signed-off-by: Dipayaan Roy Link: https://patch.msgid.link/20250825115627.GA32189@linuxonhyperv3.guj3yctzbm1etfxqx2vob5hsef.xx.internal.cloudapp.net Signed-off-by: Jakub Kicinski (cherry picked from commit 9448ccd853368582efa9db05db344f8bb9dffe0f) Signed-off-by: Shreeya Patel --- drivers/net/hyperv/netvsc.c | 17 ++++++++--------- drivers/net/hyperv/rndis_filter.c | 23 ++++++++++++++++------- 2 files changed, 24 insertions(+), 16 deletions(-) diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c index 720104661d7f2..60a4629fe6ba7 100644 --- a/drivers/net/hyperv/netvsc.c +++ b/drivers/net/hyperv/netvsc.c @@ -1812,6 +1812,11 @@ struct netvsc_device *netvsc_device_add(struct hv_device *device, /* Enable NAPI handler before init callbacks */ netif_napi_add(ndev, &net_device->chan_table[0].napi, netvsc_poll); + napi_enable(&net_device->chan_table[0].napi); + netif_queue_set_napi(ndev, 0, NETDEV_QUEUE_TYPE_RX, + &net_device->chan_table[0].napi); + netif_queue_set_napi(ndev, 0, NETDEV_QUEUE_TYPE_TX, + &net_device->chan_table[0].napi); /* Open the channel */ device->channel->next_request_id_callback = vmbus_next_request_id; @@ -1831,12 +1836,6 @@ struct netvsc_device *netvsc_device_add(struct hv_device *device, /* Channel is opened */ netdev_dbg(ndev, "hv_netvsc channel opened successfully\n"); - napi_enable(&net_device->chan_table[0].napi); - netif_queue_set_napi(ndev, 0, NETDEV_QUEUE_TYPE_RX, - &net_device->chan_table[0].napi); - netif_queue_set_napi(ndev, 0, NETDEV_QUEUE_TYPE_TX, - &net_device->chan_table[0].napi); - /* Connect with the NetVsp */ ret = netvsc_connect_vsp(device, net_device, device_info); if (ret != 0) { @@ -1854,14 +1853,14 @@ struct netvsc_device *netvsc_device_add(struct hv_device *device, close: RCU_INIT_POINTER(net_device_ctx->nvdev, NULL); - netif_queue_set_napi(ndev, 0, NETDEV_QUEUE_TYPE_TX, NULL); - netif_queue_set_napi(ndev, 0, NETDEV_QUEUE_TYPE_RX, NULL); - napi_disable(&net_device->chan_table[0].napi); /* Now, we can close the channel safely */ vmbus_close(device->channel); cleanup: + netif_queue_set_napi(ndev, 0, NETDEV_QUEUE_TYPE_TX, NULL); + netif_queue_set_napi(ndev, 0, NETDEV_QUEUE_TYPE_RX, NULL); + napi_disable(&net_device->chan_table[0].napi); netif_napi_del(&net_device->chan_table[0].napi); cleanup2: diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c index 9e73959e61ee0..c35f9685b6bf0 100644 --- 
a/drivers/net/hyperv/rndis_filter.c +++ b/drivers/net/hyperv/rndis_filter.c @@ -1252,17 +1252,26 @@ static void netvsc_sc_open(struct vmbus_channel *new_sc) new_sc->rqstor_size = netvsc_rqstor_size(netvsc_ring_bytes); new_sc->max_pkt_size = NETVSC_MAX_PKT_SIZE; + /* Enable napi before opening the vmbus channel to avoid races + * as the host placing data on the host->guest ring may be left + * out if napi was not enabled. + */ + napi_enable(&nvchan->napi); + netif_queue_set_napi(ndev, chn_index, NETDEV_QUEUE_TYPE_RX, + &nvchan->napi); + netif_queue_set_napi(ndev, chn_index, NETDEV_QUEUE_TYPE_TX, + &nvchan->napi); + ret = vmbus_open(new_sc, netvsc_ring_bytes, netvsc_ring_bytes, NULL, 0, netvsc_channel_cb, nvchan); - if (ret == 0) { - napi_enable(&nvchan->napi); - netif_queue_set_napi(ndev, chn_index, NETDEV_QUEUE_TYPE_RX, - &nvchan->napi); - netif_queue_set_napi(ndev, chn_index, NETDEV_QUEUE_TYPE_TX, - &nvchan->napi); - } else { + if (ret != 0) { netdev_notice(ndev, "sub channel open failed: %d\n", ret); + netif_queue_set_napi(ndev, chn_index, NETDEV_QUEUE_TYPE_TX, + NULL); + netif_queue_set_napi(ndev, chn_index, NETDEV_QUEUE_TYPE_RX, + NULL); + napi_disable(&nvchan->napi); } if (atomic_inc_return(&nvscdev->open_chn) == nvscdev->num_chn) From c98abdead065eec79b6797a0b0b4c6cfb0a5fdcc Mon Sep 17 00:00:00 2001 From: Shreeya Patel Date: Wed, 17 Dec 2025 12:17:02 +0000 Subject: [PATCH 14/17] net: mana: Reduce waiting time if HWC not responding jira LE-4497 commit-author Haiyang Zhang commit c4deabbc1abe452ea230b86d53ed3711e5a8a062 If HW Channel (HWC) is not responding, reduce the waiting time, so further steps will fail quickly. This will prevent getting stuck for a long time (30 minutes or more), for example, during unloading while HWC is not responding. Signed-off-by: Haiyang Zhang Link: https://patch.msgid.link/1757537841-5063-1-git-send-email-haiyangz@linux.microsoft.com Signed-off-by: Jakub Kicinski (cherry picked from commit c4deabbc1abe452ea230b86d53ed3711e5a8a062) Signed-off-by: Shreeya Patel --- drivers/net/ethernet/microsoft/mana/hw_channel.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/microsoft/mana/hw_channel.c b/drivers/net/ethernet/microsoft/mana/hw_channel.c index ef072e24c46d0..ada6c78a2bef4 100644 --- a/drivers/net/ethernet/microsoft/mana/hw_channel.c +++ b/drivers/net/ethernet/microsoft/mana/hw_channel.c @@ -881,7 +881,12 @@ int mana_hwc_send_request(struct hw_channel_context *hwc, u32 req_len, if (!wait_for_completion_timeout(&ctx->comp_event, (msecs_to_jiffies(hwc->hwc_timeout)))) { if (hwc->hwc_timeout != 0) - dev_err(hwc->dev, "HWC: Request timed out!\n"); + dev_err(hwc->dev, "HWC: Request timed out: %u ms\n", + hwc->hwc_timeout); + + /* Reduce further waiting if HWC no response */ + if (hwc->hwc_timeout > 1) + hwc->hwc_timeout = 1; err = -ETIMEDOUT; goto out; From 8fdc30ac7da1ba8d16235fda3a9bd69970138d50 Mon Sep 17 00:00:00 2001 From: Shreeya Patel Date: Wed, 17 Dec 2025 12:19:53 +0000 Subject: [PATCH 15/17] RDMA/mana_ib: Extend modify QP jira LE-4521 commit-author Shiraz Saleem commit 2bd7dd383609f11330814ecc0d3c10b67073a6be Extend modify QP to support further attributes: local_ack_timeout, UD qkey, rate_limit, qp_access_flags, flow_label, max_rd_atomic. 
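For illustration, a kernel verbs consumer could exercise the newly wired-up attributes roughly as follows. This is a minimal sketch, not part of the patch: example_tune_rc_qp() and the chosen values are hypothetical, and a real caller must respect the QP state-transition rules for each mask bit.

/*
 * Hypothetical sketch: ib_qp_attr fields that the extended modify-QP
 * path in the qp.c hunk below now forwards to hardware.
 */
static int example_tune_rc_qp(struct ib_qp *qp)
{
	struct ib_qp_attr attr = {};

	attr.timeout = 14;		/* local ACK timeout, 4.096 us * 2^14 */
	attr.rate_limit = 100000;	/* forwarded as req.rate_limit */
	attr.qp_access_flags = IB_ACCESS_REMOTE_READ | IB_ACCESS_REMOTE_WRITE;
	attr.max_rd_atomic = 4;

	return ib_modify_qp(qp, &attr,
			    IB_QP_TIMEOUT | IB_QP_RATE_LIMIT |
			    IB_QP_ACCESS_FLAGS | IB_QP_MAX_QP_RD_ATOMIC);
}

The qkey and flow_label attributes travel the same route, via the IB_QP_QKEY mask bit and the grh.flow_label field under IB_QP_AV respectively.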
Signed-off-by: Shiraz Saleem Signed-off-by: Konstantin Taranov Link: https://patch.msgid.link/1757923172-4475-1-git-send-email-kotaranov@linux.microsoft.com Signed-off-by: Leon Romanovsky (cherry picked from commit 2bd7dd383609f11330814ecc0d3c10b67073a6be) Signed-off-by: Shreeya Patel --- drivers/infiniband/hw/mana/mana_ib.h | 11 +++++++++-- drivers/infiniband/hw/mana/qp.c | 9 +++++++++ 2 files changed, 18 insertions(+), 2 deletions(-) diff --git a/drivers/infiniband/hw/mana/mana_ib.h b/drivers/infiniband/hw/mana/mana_ib.h index 2c3400213b12f..38ea58fe411c7 100644 --- a/drivers/infiniband/hw/mana/mana_ib.h +++ b/drivers/infiniband/hw/mana/mana_ib.h @@ -411,7 +411,7 @@ struct mana_ib_ah_attr { u8 traffic_class; u16 src_port; u16 dest_port; - u32 reserved; + u32 flow_label; }; struct mana_rnic_set_qp_state_req { @@ -428,8 +428,15 @@ struct mana_rnic_set_qp_state_req { u32 retry_cnt; u32 rnr_retry; u32 min_rnr_timer; - u32 reserved; + u32 rate_limit; struct mana_ib_ah_attr ah_attr; + u64 reserved1; + u32 qkey; + u32 qp_access_flags; + u8 local_ack_timeout; + u8 max_rd_atomic; + u16 reserved2; + u32 reserved3; }; /* HW Data */ struct mana_rnic_set_qp_state_resp { diff --git a/drivers/infiniband/hw/mana/qp.c b/drivers/infiniband/hw/mana/qp.c index a6bf4d539e670..48c1f4977f218 100644 --- a/drivers/infiniband/hw/mana/qp.c +++ b/drivers/infiniband/hw/mana/qp.c @@ -735,6 +735,8 @@ static int mana_ib_gd_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int err; mana_gd_init_req_hdr(&req.hdr, MANA_IB_SET_QP_STATE, sizeof(req), sizeof(resp)); + + req.hdr.req.msg_version = GDMA_MESSAGE_V3; req.hdr.dev_id = mdev->gdma_dev->dev_id; req.adapter = mdev->adapter_handle; req.qp_handle = qp->qp_handle; @@ -748,6 +750,12 @@ static int mana_ib_gd_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, req.retry_cnt = attr->retry_cnt; req.rnr_retry = attr->rnr_retry; req.min_rnr_timer = attr->min_rnr_timer; + req.rate_limit = attr->rate_limit; + req.qkey = attr->qkey; + req.local_ack_timeout = attr->timeout; + req.qp_access_flags = attr->qp_access_flags; + req.max_rd_atomic = attr->max_rd_atomic; + if (attr_mask & IB_QP_AV) { ndev = mana_ib_get_netdev(&mdev->ib_dev, ibqp->port); if (!ndev) { @@ -774,6 +782,7 @@ static int mana_ib_gd_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, ibqp->qp_num, attr->dest_qp_num); req.ah_attr.traffic_class = attr->ah_attr.grh.traffic_class >> 2; req.ah_attr.hop_limit = attr->ah_attr.grh.hop_limit; + req.ah_attr.flow_label = attr->ah_attr.grh.flow_label; } err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp); From 9998f7d90ff7992d0ffc47c9f101ed3393d0dd35 Mon Sep 17 00:00:00 2001 From: Shreeya Patel Date: Wed, 17 Dec 2025 12:24:09 +0000 Subject: [PATCH 16/17] scsi: storvsc: Prefer returning channel with the same CPU as on the I/O issuing CPU jira LE-4537 commit-author Long Li commit b69ffeaa0ae43892683113b3f4ddf156398738b9 When selecting an outgoing channel for I/O, storvsc tries to select a channel with a returning CPU that is not the same as the issuing CPU. This worked well in the past; however, it doesn't work well when Hyper-V exposes a large number of channels (up to the number of all CPUs). Using a different CPU for the returning channel is not efficient on Hyper-V. Change this behavior by preferring the channel with the same CPU as the current I/O issuing CPU whenever possible. Tests have shown improvements in newer Hyper-V/Azure environments, and no regression with older Hyper-V/Azure environments.
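In outline, the selection order in storvsc_do_io() after this change is the following. This is a condensed restatement of the hunk below, not literal driver code: ring_has_room() is shorthand for the hv_get_avail_to_write_percent() > ring_avail_percent_lowater check, and on_same_node() is shorthand for the cpumask_of_node() test.

/* Condensed sketch of the new channel-selection order. */
chan = READ_ONCE(stor_device->stor_chns[q_num]);	/* 1. issuing CPU */
if (chan && ring_has_room(chan))
	return chan;
for_each_cpu_wrap(t, &stor_device->alloced_cpus, q_num + 1)	/* 2. same NUMA node */
	if (on_same_node(t, q_num) && stor_device->stor_chns[t] &&
	    ring_has_room(stor_device->stor_chns[t]))
		return stor_device->stor_chns[t];
for_each_cpu_wrap(t, &stor_device->alloced_cpus, q_num + 1)	/* 3. any node */
	if (stor_device->stor_chns[t] &&
	    ring_has_room(stor_device->stor_chns[t]))
		return stor_device->stor_chns[t];
return chan;	/* 4. all rings busy: keep the original channel */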
Tested-by: Raheel Abdul Faizy Signed-off-by: Long Li Message-Id: <1759381530-7414-1-git-send-email-longli@linux.microsoft.com> Signed-off-by: Martin K. Petersen (cherry picked from commit b69ffeaa0ae43892683113b3f4ddf156398738b9) Signed-off-by: Shreeya Patel --- drivers/scsi/storvsc_drv.c | 96 ++++++++++++++++++-------------------- 1 file changed, 45 insertions(+), 51 deletions(-) diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c index 3284f52ab1fec..82d73214c3b32 100644 --- a/drivers/scsi/storvsc_drv.c +++ b/drivers/scsi/storvsc_drv.c @@ -1406,14 +1406,19 @@ static struct vmbus_channel *get_og_chn(struct storvsc_device *stor_device, } /* - * Our channel array is sparsley populated and we + * Our channel array could be sparsley populated and we * initiated I/O on a processor/hw-q that does not * currently have a designated channel. Fix this. * The strategy is simple: - * I. Ensure NUMA locality - * II. Distribute evenly (best effort) + * I. Prefer the channel associated with the current CPU + * II. Ensure NUMA locality + * III. Distribute evenly (best effort) */ + /* Prefer the channel on the I/O issuing processor/hw-q */ + if (cpumask_test_cpu(q_num, &stor_device->alloced_cpus)) + return stor_device->stor_chns[q_num]; + node_mask = cpumask_of_node(cpu_to_node(q_num)); num_channels = 0; @@ -1469,59 +1474,48 @@ static int storvsc_do_io(struct hv_device *device, /* See storvsc_change_target_cpu(). */ outgoing_channel = READ_ONCE(stor_device->stor_chns[q_num]); if (outgoing_channel != NULL) { - if (outgoing_channel->target_cpu == q_num) { - /* - * Ideally, we want to pick a different channel if - * available on the same NUMA node. - */ - node_mask = cpumask_of_node(cpu_to_node(q_num)); - for_each_cpu_wrap(tgt_cpu, - &stor_device->alloced_cpus, q_num + 1) { - if (!cpumask_test_cpu(tgt_cpu, node_mask)) - continue; - if (tgt_cpu == q_num) - continue; - channel = READ_ONCE( - stor_device->stor_chns[tgt_cpu]); - if (channel == NULL) - continue; - if (hv_get_avail_to_write_percent( - &channel->outbound) - > ring_avail_percent_lowater) { - outgoing_channel = channel; - goto found_channel; - } - } + if (hv_get_avail_to_write_percent(&outgoing_channel->outbound) + > ring_avail_percent_lowater) + goto found_channel; - /* - * All the other channels on the same NUMA node are - * busy. Try to use the channel on the current CPU - */ - if (hv_get_avail_to_write_percent( - &outgoing_channel->outbound) - > ring_avail_percent_lowater) + /* + * Channel is busy, try to find a channel on the same NUMA node + */ + node_mask = cpumask_of_node(cpu_to_node(q_num)); + for_each_cpu_wrap(tgt_cpu, &stor_device->alloced_cpus, + q_num + 1) { + if (!cpumask_test_cpu(tgt_cpu, node_mask)) + continue; + channel = READ_ONCE(stor_device->stor_chns[tgt_cpu]); + if (!channel) + continue; + if (hv_get_avail_to_write_percent(&channel->outbound) + > ring_avail_percent_lowater) { + outgoing_channel = channel; goto found_channel; + } + } - /* - * If we reach here, all the channels on the current - * NUMA node are busy. Try to find a channel in - * other NUMA nodes - */ - for_each_cpu(tgt_cpu, &stor_device->alloced_cpus) { - if (cpumask_test_cpu(tgt_cpu, node_mask)) - continue; - channel = READ_ONCE( - stor_device->stor_chns[tgt_cpu]); - if (channel == NULL) - continue; - if (hv_get_avail_to_write_percent( - &channel->outbound) - > ring_avail_percent_lowater) { - outgoing_channel = channel; - goto found_channel; - } + /* + * If we reach here, all the channels on the current + * NUMA node are busy. 
Try to find a channel in + all NUMA nodes + */ + for_each_cpu_wrap(tgt_cpu, &stor_device->alloced_cpus, + q_num + 1) { + channel = READ_ONCE(stor_device->stor_chns[tgt_cpu]); + if (!channel) + continue; + if (hv_get_avail_to_write_percent(&channel->outbound) + > ring_avail_percent_lowater) { + outgoing_channel = channel; + goto found_channel; } } + /* + * If we reach here, all the channels are busy. Use the + * original channel found. + */ } else { spin_lock_irqsave(&stor_device->lock, flags); outgoing_channel = stor_device->stor_chns[q_num]; From 0682c53d63635992f595e4bf114473b9e668b787 Mon Sep 17 00:00:00 2001 From: Shreeya Patel Date: Wed, 17 Dec 2025 12:03:13 +0000 Subject: [PATCH 17/17] net: mana: Use page pool fragments for RX buffers instead of full pages to improve memory efficiency. jira LE-4490 commit-author Dipayaan Roy commit 730ff06d3f5cc2ce0348414b78c10528b767d4a3 upstream-diff This patch was causing build failures due to the missing commit 0f9214046893 ("memory-provider: dmabuf devmem memory provider"). To fix it, we have removed the pprm.queue_idx parameter, which seems redundant in this case. This patch enhances RX buffer handling in the mana driver by allocating pages from a page pool and slicing them into MTU-sized fragments, rather than dedicating a full page per packet. This approach is especially beneficial on systems with large base page sizes like 64KB. Key improvements: - Proper integration of page pool for RX buffer allocations. - MTU-sized buffer slicing to improve memory utilization. - Reduce overall per Rx queue memory footprint. - Automatic fallback to full-page buffers when: * Jumbo frames are enabled (MTU > PAGE_SIZE / 2). * The XDP path is active, to avoid complexities with fragment reuse. Testing on VMs with 64KB pages shows around 200% throughput improvement. Memory efficiency is significantly improved due to reduced wastage in page allocations. Example: We are now able to fit 35 rx buffers in a single 64KB page for an MTU of 1500, instead of 1 rx buffer per page previously. Tested: - iperf3, iperf2, and nttcp benchmarks. - Jumbo frames with MTU 9000. - Native XDP programs (XDP_PASS, XDP_DROP, XDP_TX, XDP_REDIRECT) for testing the XDP path in the driver. - Memory leak detection (kmemleak). - Driver load/unload, reboot, and stress scenarios.
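The 35-buffers-per-page figure can be sanity-checked against the new mana_get_rxbuf_cfg() math in the hunk below, assuming a typical 64-bit build where SKB_DATA_ALIGN() rounds to 64-byte cache lines and SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) comes to 320 bytes, so that MANA_RXBUF_PAD = 320 + ETH_HLEN = 334:

	len        = SKB_DATA_ALIGN(1500 + 334 + 0)	/* mtu + MANA_RXBUF_PAD + headroom */
	           = SKB_DATA_ALIGN(1834) = 1856
	buf_size   = ALIGN(1856, MANA_RX_FRAG_ALIGNMENT /* 64 */) = 1856
	frag_count = PAGE_SIZE / buf_size = 65536 / 1856 = 35	/* integer division */

That uses 35 * 1856 = 64960 of the 65536-byte page, versus a single RX buffer per 64KB page before this change.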
Reviewed-by: Jacob Keller Reviewed-by: Saurabh Sengar Reviewed-by: Haiyang Zhang Signed-off-by: Dipayaan Roy Link: https://patch.msgid.link/20250814140410.GA22089@linuxonhyperv3.guj3yctzbm1etfxqx2vob5hsef.xx.internal.cloudapp.net Signed-off-by: Paolo Abeni (cherry picked from commit 730ff06d3f5cc2ce0348414b78c10528b767d4a3) Signed-off-by: Shreeya Patel --- .../net/ethernet/microsoft/mana/mana_bpf.c | 46 +++++- drivers/net/ethernet/microsoft/mana/mana_en.c | 150 ++++++++++++------ include/net/mana/mana.h | 4 + 3 files changed, 149 insertions(+), 51 deletions(-) diff --git a/drivers/net/ethernet/microsoft/mana/mana_bpf.c b/drivers/net/ethernet/microsoft/mana/mana_bpf.c index d30721d4516fc..7697c9b52ed34 100644 --- a/drivers/net/ethernet/microsoft/mana/mana_bpf.c +++ b/drivers/net/ethernet/microsoft/mana/mana_bpf.c @@ -174,6 +174,7 @@ static int mana_xdp_set(struct net_device *ndev, struct bpf_prog *prog, struct mana_port_context *apc = netdev_priv(ndev); struct bpf_prog *old_prog; struct gdma_context *gc; + int err; gc = apc->ac->gdma_dev->gdma_context; @@ -195,11 +196,45 @@ static int mana_xdp_set(struct net_device *ndev, struct bpf_prog *prog, */ apc->bpf_prog = prog; - if (old_prog) - bpf_prog_put(old_prog); + if (apc->port_is_up) { + /* Re-create rxq's after xdp prog was loaded or unloaded. + * Ex: re create rxq's to switch from full pages to smaller + * size page fragments when xdp prog is unloaded and + * vice-versa. + */ + + /* Pre-allocate buffers to prevent failure in mana_attach */ + err = mana_pre_alloc_rxbufs(apc, ndev->mtu, apc->num_queues); + if (err) { + NL_SET_ERR_MSG_MOD(extack, + "XDP: Insufficient memory for tx/rx re-config"); + return err; + } + + err = mana_detach(ndev, false); + if (err) { + netdev_err(ndev, + "mana_detach failed at xdp set: %d\n", err); + NL_SET_ERR_MSG_MOD(extack, + "XDP: Re-config failed at detach"); + goto err_dealloc_rxbuffs; + } + + err = mana_attach(ndev); + if (err) { + netdev_err(ndev, + "mana_attach failed at xdp set: %d\n", err); + NL_SET_ERR_MSG_MOD(extack, + "XDP: Re-config failed at attach"); + goto err_dealloc_rxbuffs; + } - if (apc->port_is_up) mana_chn_setxdp(apc, prog); + mana_pre_dealloc_rxbufs(apc); + } + + if (old_prog) + bpf_prog_put(old_prog); if (prog) ndev->max_mtu = MANA_XDP_MTU_MAX; @@ -207,6 +242,11 @@ static int mana_xdp_set(struct net_device *ndev, struct bpf_prog *prog, ndev->max_mtu = gc->adapter_mtu - ETH_HLEN; return 0; + +err_dealloc_rxbuffs: + apc->bpf_prog = old_prog; + mana_pre_dealloc_rxbufs(apc); + return err; } int mana_bpf(struct net_device *ndev, struct netdev_bpf *bpf) diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c index a1389d2e37a9a..fdb32551833cd 100644 --- a/drivers/net/ethernet/microsoft/mana/mana_en.c +++ b/drivers/net/ethernet/microsoft/mana/mana_en.c @@ -55,6 +55,15 @@ static bool mana_en_need_log(struct mana_port_context *apc, int err) return true; } +static void mana_put_rx_page(struct mana_rxq *rxq, struct page *page, + bool from_pool) +{ + if (from_pool) + page_pool_put_full_page(rxq->page_pool, page, false); + else + put_page(page); +} + /* Microsoft Azure Network Adapter (MANA) functions */ static int mana_open(struct net_device *ndev) @@ -628,21 +637,40 @@ static void *mana_get_rxbuf_pre(struct mana_rxq *rxq, dma_addr_t *da) } /* Get RX buffer's data size, alloc size, XDP headroom based on MTU */ -static void mana_get_rxbuf_cfg(int mtu, u32 *datasize, u32 *alloc_size, - u32 *headroom) +static void mana_get_rxbuf_cfg(struct 
mana_port_context *apc, + int mtu, u32 *datasize, u32 *alloc_size, + u32 *headroom, u32 *frag_count) { - if (mtu > MANA_XDP_MTU_MAX) - *headroom = 0; /* no support for XDP */ - else - *headroom = XDP_PACKET_HEADROOM; + u32 len, buf_size; - *alloc_size = SKB_DATA_ALIGN(mtu + MANA_RXBUF_PAD + *headroom); + /* Calculate datasize first (consistent across all cases) */ + *datasize = mtu + ETH_HLEN; - /* Using page pool in this case, so alloc_size is PAGE_SIZE */ - if (*alloc_size < PAGE_SIZE) - *alloc_size = PAGE_SIZE; + /* For xdp and jumbo frames make sure only one packet fits per page */ + if (mtu + MANA_RXBUF_PAD > PAGE_SIZE / 2 || mana_xdp_get(apc)) { + if (mana_xdp_get(apc)) { + *headroom = XDP_PACKET_HEADROOM; + *alloc_size = PAGE_SIZE; + } else { + *headroom = 0; /* no support for XDP */ + *alloc_size = SKB_DATA_ALIGN(mtu + MANA_RXBUF_PAD + + *headroom); + } - *datasize = mtu + ETH_HLEN; + *frag_count = 1; + return; + } + + /* Standard MTU case - optimize for multiple packets per page */ + *headroom = 0; + + /* Calculate base buffer size needed */ + len = SKB_DATA_ALIGN(mtu + MANA_RXBUF_PAD + *headroom); + buf_size = ALIGN(len, MANA_RX_FRAG_ALIGNMENT); + + /* Calculate how many packets can fit in a page */ + *frag_count = PAGE_SIZE / buf_size; + *alloc_size = buf_size; } int mana_pre_alloc_rxbufs(struct mana_port_context *mpc, int new_mtu, int num_queues) @@ -654,8 +682,9 @@ int mana_pre_alloc_rxbufs(struct mana_port_context *mpc, int new_mtu, int num_qu void *va; int i; - mana_get_rxbuf_cfg(new_mtu, &mpc->rxbpre_datasize, - &mpc->rxbpre_alloc_size, &mpc->rxbpre_headroom); + mana_get_rxbuf_cfg(mpc, new_mtu, &mpc->rxbpre_datasize, + &mpc->rxbpre_alloc_size, &mpc->rxbpre_headroom, + &mpc->rxbpre_frag_count); dev = mpc->ac->gdma_dev->gdma_context->dev; @@ -1840,8 +1869,11 @@ static void mana_rx_skb(void *buf_va, bool from_pool, drop: if (from_pool) { - page_pool_recycle_direct(rxq->page_pool, - virt_to_head_page(buf_va)); + if (rxq->frag_count == 1) + page_pool_recycle_direct(rxq->page_pool, + virt_to_head_page(buf_va)); + else + page_pool_free_va(rxq->page_pool, buf_va, true); } else { WARN_ON_ONCE(rxq->xdp_save_va); /* Save for reuse */ @@ -1857,33 +1889,46 @@ static void *mana_get_rxfrag(struct mana_rxq *rxq, struct device *dev, dma_addr_t *da, bool *from_pool) { struct page *page; + u32 offset; void *va; - *from_pool = false; - /* Reuse XDP dropped page if available */ - if (rxq->xdp_save_va) { - va = rxq->xdp_save_va; - rxq->xdp_save_va = NULL; - } else { - page = page_pool_dev_alloc_pages(rxq->page_pool); - if (!page) + /* Don't use fragments for jumbo frames or XDP where it's 1 fragment + * per page. 
+ */ + if (rxq->frag_count == 1) { + /* Reuse XDP dropped page if available */ + if (rxq->xdp_save_va) { + va = rxq->xdp_save_va; + page = virt_to_head_page(va); + rxq->xdp_save_va = NULL; + } else { + page = page_pool_dev_alloc_pages(rxq->page_pool); + if (!page) + return NULL; + + *from_pool = true; + va = page_to_virt(page); + } + + *da = dma_map_single(dev, va + rxq->headroom, rxq->datasize, + DMA_FROM_DEVICE); + if (dma_mapping_error(dev, *da)) { + mana_put_rx_page(rxq, page, *from_pool); return NULL; + } - *from_pool = true; - va = page_to_virt(page); + return va; } - *da = dma_map_single(dev, va + rxq->headroom, rxq->datasize, - DMA_FROM_DEVICE); - if (dma_mapping_error(dev, *da)) { - if (*from_pool) - page_pool_put_full_page(rxq->page_pool, page, false); - else - put_page(virt_to_head_page(va)); - + page = page_pool_dev_alloc_frag(rxq->page_pool, &offset, + rxq->alloc_size); + if (!page) return NULL; - } + + va = page_to_virt(page) + offset; + *da = page_pool_get_dma_addr(page) + offset + rxq->headroom; + *from_pool = true; return va; } @@ -1900,9 +1945,9 @@ static void mana_refill_rx_oob(struct device *dev, struct mana_rxq *rxq, va = mana_get_rxfrag(rxq, dev, &da, &from_pool); if (!va) return; - - dma_unmap_single(dev, rxoob->sgl[0].address, rxq->datasize, - DMA_FROM_DEVICE); + if (!rxoob->from_pool || rxq->frag_count == 1) + dma_unmap_single(dev, rxoob->sgl[0].address, rxq->datasize, + DMA_FROM_DEVICE); *old_buf = rxoob->buf_va; *old_fp = rxoob->from_pool; @@ -2307,15 +2352,15 @@ static void mana_destroy_rxq(struct mana_port_context *apc, if (!rx_oob->buf_va) continue; - dma_unmap_single(dev, rx_oob->sgl[0].address, - rx_oob->sgl[0].size, DMA_FROM_DEVICE); - page = virt_to_head_page(rx_oob->buf_va); - if (rx_oob->from_pool) - page_pool_put_full_page(rxq->page_pool, page, false); - else - put_page(page); + if (rxq->frag_count == 1 || !rx_oob->from_pool) { + dma_unmap_single(dev, rx_oob->sgl[0].address, + rx_oob->sgl[0].size, DMA_FROM_DEVICE); + mana_put_rx_page(rxq, page, rx_oob->from_pool); + } else { + page_pool_free_va(rxq->page_pool, rx_oob->buf_va, true); + } rx_oob->buf_va = NULL; } @@ -2421,11 +2466,21 @@ static int mana_create_page_pool(struct mana_rxq *rxq, struct gdma_context *gc) struct page_pool_params pprm = {}; int ret; - pprm.pool_size = mpc->rx_queue_size; + pprm.pool_size = mpc->rx_queue_size / rxq->frag_count + 1; pprm.nid = gc->numa_node; pprm.napi = &rxq->rx_cq.napi; pprm.netdev = rxq->ndev; pprm.order = get_order(rxq->alloc_size); + pprm.dev = gc->dev; + + /* Let the page pool do the dma map when page sharing with multiple + * fragments enabled for rx buffers. 
+ */ + if (rxq->frag_count > 1) { + pprm.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV; + pprm.max_len = PAGE_SIZE; + pprm.dma_dir = DMA_FROM_DEVICE; + } rxq->page_pool = page_pool_create(&pprm); @@ -2464,9 +2519,8 @@ static struct mana_rxq *mana_create_rxq(struct mana_port_context *apc, rxq->rxq_idx = rxq_idx; rxq->rxobj = INVALID_MANA_HANDLE; - mana_get_rxbuf_cfg(ndev->mtu, &rxq->datasize, &rxq->alloc_size, - &rxq->headroom); - + mana_get_rxbuf_cfg(apc, ndev->mtu, &rxq->datasize, &rxq->alloc_size, + &rxq->headroom, &rxq->frag_count); /* Create page pool for RX queue */ err = mana_create_page_pool(rxq, gc); if (err) { diff --git a/include/net/mana/mana.h b/include/net/mana/mana.h index e1030a7d2daab..0921485565c05 100644 --- a/include/net/mana/mana.h +++ b/include/net/mana/mana.h @@ -65,6 +65,8 @@ enum TRI_STATE { #define MANA_STATS_RX_COUNT 5 #define MANA_STATS_TX_COUNT 11 +#define MANA_RX_FRAG_ALIGNMENT 64 + struct mana_stats_rx { u64 packets; u64 bytes; @@ -328,6 +330,7 @@ struct mana_rxq { u32 datasize; u32 alloc_size; u32 headroom; + u32 frag_count; mana_handle_t rxobj; @@ -510,6 +513,7 @@ struct mana_port_context { u32 rxbpre_datasize; u32 rxbpre_alloc_size; u32 rxbpre_headroom; + u32 rxbpre_frag_count; struct bpf_prog *bpf_prog;