[Intel-wired-lan] [PATCH RFC net-next 17/34] libie: support native XDP and register memory model

Alexander Lobakin <aleksander.lobakin@intel.com>
Sat Dec 23 02:55:37 UTC 2023


Expand libie's Page Pool functionality by adding native XDP support.
This means picking the appropriate headroom (XDP_PACKET_HEADROOM-based
instead of NET_SKB_PAD) and DMA direction (DMA_BIDIRECTIONAL, so that
XDP_TX works without remapping) when &libie_buf_queue::xdp is set.
Also, register each created &page_pool as an XDP memory model.
A driver can then call xdp_rxq_info_attach_page_pool() when registering
its RxQ info.
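
For reference, a minimal sketch of the intended driver-side flow (the
rxq/napi names below are hypothetical and the rest of the buffer queue
setup is omitted):

	struct libie_buf_queue bq = {
		.xdp	= true,	/* XDP headroom, DMA_BIDIRECTIONAL */
		/* rx_buf_len, type etc. configured as before */
	};
	int err;

	err = libie_rx_page_pool_create(&bq, napi);
	if (err)
		return err;

	err = xdp_rxq_info_reg(&rxq->xdp_rxq, napi->dev, rxq->idx,
			       napi->napi_id);
	if (err) {
		libie_rx_page_pool_destroy(&bq);
		return err;
	}

	/* Rx completions will now reference the registered pool */
	xdp_rxq_info_attach_page_pool(&rxq->xdp_rxq, bq.pp);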

Signed-off-by: Alexander Lobakin <aleksander.lobakin@intel.com>
---
 drivers/net/ethernet/intel/libie/rx.c | 32 ++++++++++++++++++++++-----
 include/linux/net/intel/libie/rx.h    |  6 ++++-
 2 files changed, 32 insertions(+), 6 deletions(-)

diff --git a/drivers/net/ethernet/intel/libie/rx.c b/drivers/net/ethernet/intel/libie/rx.c
index 3d3b19d2b40d..b4c404958f25 100644
--- a/drivers/net/ethernet/intel/libie/rx.c
+++ b/drivers/net/ethernet/intel/libie/rx.c
@@ -52,7 +52,7 @@ static u32 libie_rx_hw_len_truesize(const struct page_pool_params *pp,
 static void libie_rx_page_pool_params(struct libie_buf_queue *bq,
 				      struct page_pool_params *pp)
 {
-	pp->offset = LIBIE_SKB_HEADROOM;
+	pp->offset = bq->xdp ? LIBIE_XDP_HEADROOM : LIBIE_SKB_HEADROOM;
 	/* HW-writeable / syncable length per one page */
 	pp->max_len = LIBIE_RX_BUF_LEN(pp->offset);
 
@@ -132,17 +132,34 @@ int libie_rx_page_pool_create(struct libie_buf_queue *bq,
 		.dev		= napi->dev->dev.parent,
 		.netdev		= napi->dev,
 		.napi		= napi,
-		.dma_dir	= DMA_FROM_DEVICE,
 	};
+	struct xdp_mem_info mem;
+	struct page_pool *pool;
+	int ret;
+
+	pp.dma_dir = bq->xdp ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
 
 	if (!bq->hsplit)
 		libie_rx_page_pool_params(bq, &pp);
 	else if (!libie_rx_page_pool_params_zc(bq, &pp))
 		return -EINVAL;
 
-	bq->pp = page_pool_create(&pp);
+	pool = page_pool_create(&pp);
+	if (IS_ERR(pool))
+		return PTR_ERR(pool);
+
+	ret = xdp_reg_mem_model(&mem, MEM_TYPE_PAGE_POOL, pool);
+	if (ret)
+		goto err_mem;
+
+	bq->pp = pool;
+
+	return 0;
 
-	return PTR_ERR_OR_ZERO(bq->pp);
+err_mem:
+	page_pool_destroy(pool);
+
+	return ret;
 }
 EXPORT_SYMBOL_NS_GPL(libie_rx_page_pool_create, LIBIE);
 
@@ -152,7 +169,12 @@ EXPORT_SYMBOL_NS_GPL(libie_rx_page_pool_create, LIBIE);
  */
 void libie_rx_page_pool_destroy(struct libie_buf_queue *bq)
 {
-	page_pool_destroy(bq->pp);
+	struct xdp_mem_info mem = {
+		.type	= MEM_TYPE_PAGE_POOL,
+		.id	= bq->pp->xdp_mem_id,
+	};
+
+	xdp_unreg_mem_model(&mem);
 	bq->pp = NULL;
 }
 EXPORT_SYMBOL_NS_GPL(libie_rx_page_pool_destroy, LIBIE);
diff --git a/include/linux/net/intel/libie/rx.h b/include/linux/net/intel/libie/rx.h
index 87ad8f9e89c7..8eda4ac8028c 100644
--- a/include/linux/net/intel/libie/rx.h
+++ b/include/linux/net/intel/libie/rx.h
@@ -15,8 +15,10 @@
 
 /* Space reserved in front of each frame */
 #define LIBIE_SKB_HEADROOM	(NET_SKB_PAD + NET_IP_ALIGN)
+#define LIBIE_XDP_HEADROOM	(ALIGN(XDP_PACKET_HEADROOM, NET_SKB_PAD) + \
+				 NET_IP_ALIGN)
 /* Maximum headroom to calculate max MTU below */
-#define LIBIE_MAX_HEADROOM	LIBIE_SKB_HEADROOM
+#define LIBIE_MAX_HEADROOM	LIBIE_XDP_HEADROOM
 /* Link layer / L2 overhead: Ethernet, 2 VLAN tags (C + S), FCS */
 #define LIBIE_RX_LL_LEN		(ETH_HLEN + 2 * VLAN_HLEN + ETH_FCS_LEN)
 /* Maximum supported L2-L4 header length */
@@ -87,6 +89,7 @@ enum libie_rx_buf_type {
  * @rx_buf_len: HW-writeable length per each buffer
  * @type: type of the buffers this queue has
  * @hsplit: flag whether header split is enabled
+ * @xdp: flag indicating whether XDP is enabled
  */
 struct libie_buf_queue {
 	struct page_pool	*pp;
@@ -100,6 +103,7 @@ struct libie_buf_queue {
 	enum libie_rx_buf_type	type:2;
 
 	bool			hsplit:1;
+	bool			xdp:1;
 };
 
 int libie_rx_page_pool_create(struct libie_buf_queue *bq,
-- 
2.43.0


