[Intel-wired-lan] [PATCH iwl-next v1] ice: Add E830 checksum support
Alexander Lobakin
aleksander.lobakin at intel.com
Fri Aug 30 13:30:11 UTC 2024
From: Paul Greenwalt <paul.greenwalt at intel.com>
Date: Mon, 26 Aug 2024 13:39:16 -0400
> E830 supports generic receive and HW_CSUM transmit checksumming.
>
> Generic receive checksum support is provided by hardware calculating the
> checksum over the whole packet and providing it to the driver in the Rx
> flex descriptor. Then the driver assigns the checksum to skb->csum and
> sets skb->ip_summed to CHECKSUM_COMPLETE.
>
> E830 HW_CSUM transmit checksum does not support TCP Segmentation Offload
Why is it so?
> (TSO) inner packet modification, so TSO and HW_CSUM are mutually exclusive.
What exactly do you mean by HW_CSUM here?
> Therefore NETIF_F_HW_CSUM hardware feature support is indicated but is not
> enabled by default. Instead, IP checksum and TSO are the defaults.
> Enforcement of mutual exclusivity of TSO and HW_CSUM is done in
> ice_fix_features(). Mutual exclusivity of IP checksum and HW_CSUM is
> handled by netdev_fix_features().
>
> When NETIF_F_HW_CSUM is requested the provide skb->csum_start and
"the provided"?
> skb->csum_offset are passed to hardware in the Tx context descriptor
> generic checksum (GCS) parameters. Hardware calculates the 1's complement
> from skb->csum_start to the end of the packet, and inserts the result in
> the packet at skb->csum_offset.
>
> Co-developed-by: Alice Michael <alice.michael at intel.com>
> Signed-off-by: Alice Michael <alice.michael at intel.com>
> Co-developed-by: Eric Joyner <eric.joyner at intel.com>
> Signed-off-by: Eric Joyner <eric.joyner at intel.com>
> Signed-off-by: Paul Greenwalt <paul.greenwalt at intel.com>
[...]
> diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
> index f559e60992fa..118ac34f89e9 100644
> --- a/drivers/net/ethernet/intel/ice/ice_lib.c
> +++ b/drivers/net/ethernet/intel/ice/ice_lib.c
> @@ -1380,6 +1380,9 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
> ring->flags |= ICE_TX_FLAGS_RING_VLAN_L2TAG2;
> else
> ring->flags |= ICE_TX_FLAGS_RING_VLAN_L2TAG1;
> +
> + if (ice_is_feature_supported(pf, ICE_F_GCS))
> + ring->flags |= ICE_TXRX_FLAGS_GCS_ENA;
An empty line here, please, since the WRITE_ONCE() below is not related
to this block?
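I.e.:

	if (ice_is_feature_supported(pf, ICE_F_GCS))
		ring->flags |= ICE_TXRX_FLAGS_GCS_ENA;

	WRITE_ONCE(vsi->tx_rings[i], ring);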
> WRITE_ONCE(vsi->tx_rings[i], ring);
> }
>
> @@ -1399,6 +1402,9 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
> ring->dev = dev;
> ring->count = vsi->num_rx_desc;
> ring->cached_phctime = pf->ptp.cached_phc_time;
> +
> + if (ice_is_feature_supported(pf, ICE_F_GCS))
> + ring->flags |= ICE_TXRX_FLAGS_GCS_ENA;
Same here.
> WRITE_ONCE(vsi->rx_rings[i], ring);
> }
>
> @@ -3896,6 +3902,9 @@ void ice_init_feature_support(struct ice_pf *pf)
> default:
> break;
> }
> +
> + if (pf->hw.mac_type == ICE_MAC_E830)
> + ice_set_feature_support(pf, ICE_F_GCS);
> }
>
> /**
> diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
> index 6f97ed471fe9..67e32ac962df 100644
> --- a/drivers/net/ethernet/intel/ice/ice_main.c
> +++ b/drivers/net/ethernet/intel/ice/ice_main.c
> @@ -3678,6 +3678,12 @@ static void ice_set_netdev_features(struct net_device *netdev)
> */
> netdev->hw_features |= NETIF_F_RXFCS;
>
> + /* Mutual exclusivity for TSO and GCS is enforced by the
> + * ice_fix_features() ndo callback.
> + */
> + if (ice_is_feature_supported(pf, ICE_F_GCS))
> + netdev->hw_features |= NETIF_F_HW_CSUM;
> +
> netif_set_tso_max_size(netdev, ICE_MAX_TSO_SIZE);
> }
>
> @@ -6237,6 +6243,7 @@ ice_fix_features(struct net_device *netdev, netdev_features_t features)
> struct ice_netdev_priv *np = netdev_priv(netdev);
> netdev_features_t req_vlan_fltr, cur_vlan_fltr;
> bool cur_ctag, cur_stag, req_ctag, req_stag;
> + netdev_features_t changed;
>
> cur_vlan_fltr = netdev->features & NETIF_VLAN_FILTERING_FEATURES;
> cur_ctag = cur_vlan_fltr & NETIF_F_HW_VLAN_CTAG_FILTER;
> @@ -6285,6 +6292,29 @@ ice_fix_features(struct net_device *netdev, netdev_features_t features)
> features &= ~NETIF_VLAN_STRIPPING_FEATURES;
> }
>
> + if (!ice_is_feature_supported(np->vsi->back, ICE_F_GCS) ||
> + !(features & NETIF_F_HW_CSUM))
> + return features;
> +
> + changed = netdev->features ^ features;
> +
> + /* HW checksum feature is supported and set, so enforce mutual
> + * exclusivity of TSO and GCS.
> + */
> + if (features & NETIF_F_ALL_TSO) {
	if (!(features & NETIF_F_ALL_TSO))
		return features;

to reduce the indent level and avoid huge `if` blocks.
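The whole tail of ice_fix_features() could then look like this
(untested):

	if (!ice_is_feature_supported(np->vsi->back, ICE_F_GCS) ||
	    !(features & NETIF_F_HW_CSUM) ||
	    !(features & NETIF_F_ALL_TSO))
		return features;

	changed = netdev->features ^ features;

	/* HW checksum feature is supported and set, so enforce mutual
	 * exclusivity of TSO and GCS.
	 */
	if ((changed & NETIF_F_HW_CSUM) && (changed & NETIF_F_ALL_TSO)) {
		netdev_warn(netdev, "Dropping TSO and HW checksum. TSO and HW checksum are mutually exclusive.\n");
		features &= ~NETIF_F_HW_CSUM;
		features &= ~(features & changed & NETIF_F_ALL_TSO);
	} else if (changed & NETIF_F_HW_CSUM) {
		netdev_warn(netdev, "Dropping HW checksum. TSO and HW checksum are mutually exclusive.\n");
		features &= ~NETIF_F_HW_CSUM;
	} else {
		netdev_warn(netdev, "Dropping TSO. TSO and HW checksum are mutually exclusive.\n");
		features &= ~NETIF_F_ALL_TSO;
	}

	return features;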
> + if (changed & NETIF_F_HW_CSUM && changed & NETIF_F_ALL_TSO) {
> + netdev_warn(netdev, "Dropping TSO and HW checksum. TSO and HW checksum are mutually exclusive.\n");
> + features &= ~NETIF_F_HW_CSUM;
> + features &= ~((features & changed) & NETIF_F_ALL_TSO);
> + } else if (changed & NETIF_F_HW_CSUM) {
> + netdev_warn(netdev, "Dropping HW checksum. TSO and HW checksum are mutually exclusive.\n");
> + features &= ~NETIF_F_HW_CSUM;
> + } else {
> + netdev_warn(netdev, "Dropping TSO. TSO and HW checksum are mutually exclusive.\n");
> + features &= ~NETIF_F_ALL_TSO;
> + }
> + }
> +
> return features;
> }
>
> diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
> index 8d25b6981269..bfcce1eab243 100644
> --- a/drivers/net/ethernet/intel/ice/ice_txrx.c
> +++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
> @@ -1792,6 +1792,7 @@ ice_tx_map(struct ice_tx_ring *tx_ring, struct ice_tx_buf *first,
> static
> int ice_tx_csum(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
> {
> + const struct ice_tx_ring *tx_ring = off->tx_ring;
> u32 l4_len = 0, l3_len = 0, l2_len = 0;
> struct sk_buff *skb = first->skb;
> union {
> @@ -1941,6 +1942,29 @@ int ice_tx_csum(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
> l3_len = l4.hdr - ip.hdr;
> offset |= (l3_len / 4) << ICE_TX_DESC_LEN_IPLEN_S;
>
> +#define TX_GCS_ENABLED 1
This must be defined somewhere where the offload params or descriptor
values are described, i.e. next to the related ICE_TX_GCS_DESC_*
definitions.
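E.g. (the name and the comment are only suggestions, I don't know which
header the GCS descriptor fields live in):

	/* GCS type 1: 16-bit TCP/UDP checksum w/ pseudo-header */
	#define ICE_TX_GCS_DESC_CSUM_PSH	1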
> + if (tx_ring->netdev->features & NETIF_F_HW_CSUM &&
Please give bitops a separate set of parentheses:

	if ((features & HW_CSUM) &&

etc., below as well.
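Applied to this condition (untested):

	if ((tx_ring->netdev->features & NETIF_F_HW_CSUM) &&
	    (tx_ring->flags & ICE_TXRX_FLAGS_GCS_ENA) &&
	    !(first->tx_flags & ICE_TX_FLAGS_TSO) &&
	    !skb_csum_is_sctp(skb)) {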
> + tx_ring->flags & ICE_TXRX_FLAGS_GCS_ENA &&
> + !(first->tx_flags & ICE_TX_FLAGS_TSO) &&
> + !skb_csum_is_sctp(skb)) {
> + /* Set GCS */
> + u16 gcs_params = ((skb->csum_start - skb->mac_header) / 2) <<
> + ICE_TX_GCS_DESC_START;
This must be

	gcs_params = FIELD_PREP(GCS_DESC_MASK, (skb...) / 2);

in the two places below as well.
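For example (untested; the GENMASK() widths below are placeholders, the
real ones must come from the HW spec):

	/* next to the other Tx descriptor definitions */
	#define ICE_TX_GCS_DESC_START_M		GENMASK(7, 0)
	#define ICE_TX_GCS_DESC_OFFSET_M	GENMASK(11, 8)
	#define ICE_TX_GCS_DESC_TYPE_M		GENMASK(14, 12)

	/* then in ice_tx_csum() */
	u16 gcs_params;

	gcs_params = FIELD_PREP(ICE_TX_GCS_DESC_START_M,
				(skb->csum_start - skb->mac_header) / 2);
	gcs_params |= FIELD_PREP(ICE_TX_GCS_DESC_OFFSET_M,
				 skb->csum_offset / 2);
	/* type 1 == 16-bit TCP/UDP checksum w/ pseudo-header */
	gcs_params |= FIELD_PREP(ICE_TX_GCS_DESC_TYPE_M, 1);

Apart from readability, FIELD_PREP() also checks constant values against
the mask width at compile time.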
> + gcs_params |= (skb->csum_offset / 2) << ICE_TX_GCS_DESC_OFFSET;
> + /* Type is 1 for 16-bit TCP/UDP checksums w/ pseudo */
> + gcs_params |= TX_GCS_ENABLED << ICE_TX_GCS_DESC_TYPE;
> +
> + /* Unlike legacy HW checksums, GCS requires a context
> + * descriptor.
> + */
> + off->cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX);
Redundant cast.
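I.e.:

	off->cd_qw1 |= ICE_TX_DESC_DTYPE_CTX;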
> + off->cd_gcs_params = gcs_params;
> + /* Fill out CSO info in data descriptors */
> + off->td_offset |= offset;
> + off->td_cmd |= cmd;
> + return 1;
> + }
[...]
> @@ -366,6 +367,7 @@ struct ice_rx_ring {
> #define ICE_RX_FLAGS_RING_BUILD_SKB BIT(1)
> #define ICE_RX_FLAGS_CRC_STRIP_DIS BIT(2)
> #define ICE_RX_FLAGS_MULTIDEV BIT(3)
> +#define ICE_TXRX_FLAGS_GCS_ENA BIT(4)
As Tony said, Rx and Tx features are now separated, just like the Rx and
Tx ring structures. Please define them separately for Rx and Tx.
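E.g., keeping the existing naming conventions (the Tx bit below is a
placeholder, pick a free one in the Tx ring flags):

	/* in struct ice_rx_ring */
	#define ICE_RX_FLAGS_GCS_ENA		BIT(4)

	/* in struct ice_tx_ring */
	#define ICE_TX_FLAGS_RING_GCS_ENA	BIT(2)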
> u8 flags;
> /* CL5 - 5th cacheline starts here */
> struct xdp_rxq_info xdp_rxq;
> diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
> index 2719f0e20933..a344886d2129 100644
> --- a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
> +++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
> @@ -80,6 +80,24 @@ ice_rx_hash_to_skb(const struct ice_rx_ring *rx_ring,
> libeth_rx_pt_set_hash(skb, hash, decoded);
> }
>
> +/**
> + * ice_rx_gcs - Set generic checksum in skd
"skb"
> + * @skb: skb currently being received and modified
> + * @rx_desc: Receive descriptor
Lowercase for "Receive" I'd say.
> + * Returns hash, if present, 0 otherwise.
The function returns void.
> + */
> +static void ice_rx_gcs(struct sk_buff *skb, union ice_32b_rx_flex_desc *rx_desc)
const union.
> +{
> + struct ice_32b_rx_flex_desc_nic *nic_csum;
It's a descriptor, not a csum, so please rename it; also make it const.
> +
> + if (rx_desc->wb.rxdid != ICE_RXDID_FLEX_NIC)
> + return;
Redundant check since you're checking this below.
> +
> + nic_csum = (struct ice_32b_rx_flex_desc_nic *)rx_desc;
> + skb->ip_summed = CHECKSUM_COMPLETE;
> + skb->csum = (__force __wsum)htons(le16_to_cpu(nic_csum->raw_csum));
htons(le16_to_cpu(x)) is the same as swab16(x); have you tried it?
> +}
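With all of the above, the helper could shrink to something like
(untested):

	static void ice_rx_gcs(struct sk_buff *skb,
			       const union ice_32b_rx_flex_desc *rx_desc)
	{
		const struct ice_32b_rx_flex_desc_nic *desc;

		desc = (const struct ice_32b_rx_flex_desc_nic *)rx_desc;

		skb->ip_summed = CHECKSUM_COMPLETE;
		skb->csum = (__force __wsum)swab16((__force u16)desc->raw_csum);
	}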
> +
> /**
> * ice_rx_csum - Indicate in skb if checksum is good
> * @ring: the ring we care about
> @@ -107,6 +125,21 @@ ice_rx_csum(struct ice_rx_ring *ring, struct sk_buff *skb,
> rx_status0 = le16_to_cpu(rx_desc->wb.status_error0);
> rx_status1 = le16_to_cpu(rx_desc->wb.status_error1);
>
> + if (rx_desc->wb.rxdid == ICE_RXDID_FLEX_NIC &&
> + ring->flags & ICE_TXRX_FLAGS_GCS_ENA &&
I'd reorder these: check the ring flags first, the flex descriptor type
second.
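I.e., also with separate parentheses for the bitops and the Rx-only flag
suggested above:

	if ((ring->flags & ICE_RX_FLAGS_GCS_ENA) &&
	    rx_desc->wb.rxdid == ICE_RXDID_FLEX_NIC &&
	    (decoded.inner_prot == LIBETH_RX_PT_INNER_TCP ||
	     decoded.inner_prot == LIBETH_RX_PT_INNER_UDP ||
	     decoded.inner_prot == LIBETH_RX_PT_INNER_ICMP)) {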
> + (decoded.inner_prot == LIBETH_RX_PT_INNER_TCP ||
> + decoded.inner_prot == LIBETH_RX_PT_INNER_UDP ||
> + decoded.inner_prot == LIBETH_RX_PT_INNER_ICMP)) {
> + /* ICE_RX_FLEX_DESC_STATUS1_L2TAG2P_S is overloaded in the
> + * rx_status1 layout to indicate that the extracted data from
> + * the packet is valid.
> + */
> + if (rx_status1 & BIT(ICE_RX_FLEX_DESC_STATUS1_L2TAG2P_S))
> + return ice_rx_gcs(skb, rx_desc);
ice_rx_csum() is void. Please separate the call and the return.
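I.e.:

	if (rx_status1 & BIT(ICE_RX_FLEX_DESC_STATUS1_L2TAG2P_S)) {
		ice_rx_gcs(skb, rx_desc);
		return;
	}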
> +
> + goto checksum_fail;
Why fail? If there's no STATUS1_L2TAG2P_S bit, can't there be a usual
checksum status?
> + }
> +
> /* check if HW has decoded the packet and checksum */
> if (!(rx_status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_L3L4P_S)))
> return;
Thanks,
Olek