Skip to content

Commit eb9a36b

Browse files
cmclachl-xilinx authored and davem330 committed
sfc: perform XDP processing on received packets
Adds a field to hold an attached xdp_prog, but never populates it (see following patch). Also, XDP_TX support is deferred to a later patch in the series. Track failures of xdp_rxq_info_reg() via per-queue xdp_rxq_info_valid flags and a per-nic xdp_rxq_info_failed flag. The per-queue flags are needed to prevent attempts to xdp_rxq_info_unreg() structs that failed to register. Possibly the API could be changed in the future to avoid the need for these flags. Signed-off-by: Charles McLachlan <[email protected]> Acked-by: Jesper Dangaard Brouer <[email protected]> Signed-off-by: David S. Miller <[email protected]>
1 parent 8c42350 commit eb9a36b

File tree

3 files changed

+145
-2
lines changed

3 files changed

+145
-2
lines changed

drivers/net/ethernet/sfc/efx.c

+4-1
Original file line numberDiff line numberDiff line change
@@ -340,6 +340,8 @@ static int efx_poll(struct napi_struct *napi, int budget)
340340

341341
spent = efx_process_channel(channel, budget);
342342

343+
xdp_do_flush_map();
344+
343345
if (spent < budget) {
344346
if (efx_channel_has_rx_queue(channel) &&
345347
efx->irq_rx_adaptive &&
@@ -651,7 +653,7 @@ static void efx_start_datapath(struct efx_nic *efx)
651653
efx->rx_dma_len = (efx->rx_prefix_size +
652654
EFX_MAX_FRAME_LEN(efx->net_dev->mtu) +
653655
efx->type->rx_buffer_padding);
654-
rx_buf_len = (sizeof(struct efx_rx_page_state) +
656+
rx_buf_len = (sizeof(struct efx_rx_page_state) + XDP_PACKET_HEADROOM +
655657
efx->rx_ip_align + efx->rx_dma_len);
656658
if (rx_buf_len <= PAGE_SIZE) {
657659
efx->rx_scatter = efx->type->always_rx_scatter;
@@ -774,6 +776,7 @@ static void efx_stop_datapath(struct efx_nic *efx)
774776
efx_for_each_possible_channel_tx_queue(tx_queue, channel)
775777
efx_fini_tx_queue(tx_queue);
776778
}
779+
efx->xdp_rxq_info_failed = false;
777780
}
778781

779782
static void efx_remove_channel(struct efx_channel *channel)

drivers/net/ethernet/sfc/net_driver.h

+12
Original file line numberDiff line numberDiff line change
@@ -369,6 +369,8 @@ struct efx_rx_page_state {
369369
* refill was triggered.
370370
* @recycle_count: RX buffer recycle counter.
371371
* @slow_fill: Timer used to defer efx_nic_generate_fill_event().
372+
* @xdp_rxq_info: XDP specific RX queue information.
373+
* @xdp_rxq_info_valid: Is xdp_rxq_info valid data?.
372374
*/
373375
struct efx_rx_queue {
374376
struct efx_nic *efx;
@@ -400,6 +402,8 @@ struct efx_rx_queue {
400402
unsigned int slow_fill_count;
401403
/* Statistics to supplement MAC stats */
402404
unsigned long rx_packets;
405+
struct xdp_rxq_info xdp_rxq_info;
406+
bool xdp_rxq_info_valid;
403407
};
404408

405409
enum efx_sync_events_state {
@@ -900,6 +904,7 @@ struct efx_async_filter_insertion {
900904
* @loopback_mode: Loopback status
901905
* @loopback_modes: Supported loopback mode bitmask
902906
* @loopback_selftest: Offline self-test private state
907+
* @xdp_prog: Current XDP programme for this interface
903908
* @filter_sem: Filter table rw_semaphore, protects existence of @filter_state
904909
* @filter_state: Architecture-dependent filter table state
905910
* @rps_mutex: Protects RPS state of all channels
@@ -925,6 +930,8 @@ struct efx_async_filter_insertion {
925930
* @ptp_data: PTP state data
926931
* @ptp_warned: has this NIC seen and warned about unexpected PTP events?
927932
* @vpd_sn: Serial number read from VPD
933+
* @xdp_rxq_info_failed: Have any of the rx queues failed to initialise their
934+
* xdp_rxq_info structures?
928935
* @monitor_work: Hardware monitor workitem
929936
* @biu_lock: BIU (bus interface unit) lock
930937
* @last_irq_cpu: Last CPU to handle a possible test interrupt. This
@@ -1059,6 +1066,10 @@ struct efx_nic {
10591066
u64 loopback_modes;
10601067

10611068
void *loopback_selftest;
1069+
/* We access loopback_selftest immediately before running XDP,
1070+
* so we want them next to each other.
1071+
*/
1072+
struct bpf_prog __rcu *xdp_prog;
10621073

10631074
struct rw_semaphore filter_sem;
10641075
void *filter_state;
@@ -1088,6 +1099,7 @@ struct efx_nic {
10881099
bool ptp_warned;
10891100

10901101
char *vpd_sn;
1102+
bool xdp_rxq_info_failed;
10911103

10921104
/* The following fields may be written more often */
10931105

drivers/net/ethernet/sfc/rx.c

+129-1
Original file line numberDiff line numberDiff line change
@@ -17,6 +17,8 @@
1717
#include <linux/iommu.h>
1818
#include <net/ip.h>
1919
#include <net/checksum.h>
20+
#include <net/xdp.h>
21+
#include <linux/bpf_trace.h>
2022
#include "net_driver.h"
2123
#include "efx.h"
2224
#include "filter.h"
@@ -27,6 +29,9 @@
2729
/* Preferred number of descriptors to fill at once */
2830
#define EFX_RX_PREFERRED_BATCH 8U
2931

32+
/* Maximum rx prefix used by any architecture. */
33+
#define EFX_MAX_RX_PREFIX_SIZE 16
34+
3035
/* Number of RX buffers to recycle pages for. When creating the RX page recycle
3136
* ring, this number is divided by the number of buffers per page to calculate
3237
* the number of pages to store in the RX page recycle ring.
@@ -95,7 +100,7 @@ void efx_rx_config_page_split(struct efx_nic *efx)
95100
EFX_RX_BUF_ALIGNMENT);
96101
efx->rx_bufs_per_page = efx->rx_buffer_order ? 1 :
97102
((PAGE_SIZE - sizeof(struct efx_rx_page_state)) /
98-
efx->rx_page_buf_step);
103+
(efx->rx_page_buf_step + XDP_PACKET_HEADROOM));
99104
efx->rx_buffer_truesize = (PAGE_SIZE << efx->rx_buffer_order) /
100105
efx->rx_bufs_per_page;
101106
efx->rx_pages_per_batch = DIV_ROUND_UP(EFX_RX_PREFERRED_BATCH,
@@ -185,6 +190,9 @@ static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue, bool atomic)
185190
page_offset = sizeof(struct efx_rx_page_state);
186191

187192
do {
193+
page_offset += XDP_PACKET_HEADROOM;
194+
dma_addr += XDP_PACKET_HEADROOM;
195+
188196
index = rx_queue->added_count & rx_queue->ptr_mask;
189197
rx_buf = efx_rx_buffer(rx_queue, index);
190198
rx_buf->dma_addr = dma_addr + efx->rx_ip_align;
@@ -635,6 +643,104 @@ static void efx_rx_deliver(struct efx_channel *channel, u8 *eh,
635643
netif_receive_skb(skb);
636644
}
637645

646+
/** efx_do_xdp: perform XDP processing on a received packet
647+
*
648+
* Returns true if packet should still be delivered.
649+
*/
650+
static bool efx_do_xdp(struct efx_nic *efx, struct efx_channel *channel,
651+
struct efx_rx_buffer *rx_buf, u8 **ehp)
652+
{
653+
u8 rx_prefix[EFX_MAX_RX_PREFIX_SIZE];
654+
struct efx_rx_queue *rx_queue;
655+
struct bpf_prog *xdp_prog;
656+
struct xdp_buff xdp;
657+
u32 xdp_act;
658+
s16 offset;
659+
int err;
660+
661+
rcu_read_lock();
662+
xdp_prog = rcu_dereference(efx->xdp_prog);
663+
if (!xdp_prog) {
664+
rcu_read_unlock();
665+
return true;
666+
}
667+
668+
rx_queue = efx_channel_get_rx_queue(channel);
669+
670+
if (unlikely(channel->rx_pkt_n_frags > 1)) {
671+
/* We can't do XDP on fragmented packets - drop. */
672+
rcu_read_unlock();
673+
efx_free_rx_buffers(rx_queue, rx_buf,
674+
channel->rx_pkt_n_frags);
675+
if (net_ratelimit())
676+
netif_err(efx, rx_err, efx->net_dev,
677+
"XDP is not possible with multiple receive fragments (%d)\n",
678+
channel->rx_pkt_n_frags);
679+
return false;
680+
}
681+
682+
dma_sync_single_for_cpu(&efx->pci_dev->dev, rx_buf->dma_addr,
683+
rx_buf->len, DMA_FROM_DEVICE);
684+
685+
/* Save the rx prefix. */
686+
EFX_WARN_ON_PARANOID(efx->rx_prefix_size > EFX_MAX_RX_PREFIX_SIZE);
687+
memcpy(rx_prefix, *ehp - efx->rx_prefix_size,
688+
efx->rx_prefix_size);
689+
690+
xdp.data = *ehp;
691+
xdp.data_hard_start = xdp.data - XDP_PACKET_HEADROOM;
692+
693+
/* No support yet for XDP metadata */
694+
xdp_set_data_meta_invalid(&xdp);
695+
xdp.data_end = xdp.data + rx_buf->len;
696+
xdp.rxq = &rx_queue->xdp_rxq_info;
697+
698+
xdp_act = bpf_prog_run_xdp(xdp_prog, &xdp);
699+
rcu_read_unlock();
700+
701+
offset = (u8 *)xdp.data - *ehp;
702+
703+
switch (xdp_act) {
704+
case XDP_PASS:
705+
/* Fix up rx prefix. */
706+
if (offset) {
707+
*ehp += offset;
708+
rx_buf->page_offset += offset;
709+
rx_buf->len -= offset;
710+
memcpy(*ehp - efx->rx_prefix_size, rx_prefix,
711+
efx->rx_prefix_size);
712+
}
713+
break;
714+
715+
case XDP_TX:
716+
return -EOPNOTSUPP;
717+
718+
case XDP_REDIRECT:
719+
err = xdp_do_redirect(efx->net_dev, &xdp, xdp_prog);
720+
if (unlikely(err)) {
721+
efx_free_rx_buffers(rx_queue, rx_buf, 1);
722+
if (net_ratelimit())
723+
netif_err(efx, rx_err, efx->net_dev,
724+
"XDP redirect failed (%d)\n", err);
725+
}
726+
break;
727+
728+
default:
729+
bpf_warn_invalid_xdp_action(xdp_act);
730+
efx_free_rx_buffers(rx_queue, rx_buf, 1);
731+
break;
732+
733+
case XDP_ABORTED:
734+
trace_xdp_exception(efx->net_dev, xdp_prog, xdp_act);
735+
/* Fall through */
736+
case XDP_DROP:
737+
efx_free_rx_buffers(rx_queue, rx_buf, 1);
738+
break;
739+
}
740+
741+
return xdp_act == XDP_PASS;
742+
}
743+
638744
/* Handle a received packet. Second half: Touches packet payload. */
639745
void __efx_rx_packet(struct efx_channel *channel)
640746
{
@@ -663,6 +769,9 @@ void __efx_rx_packet(struct efx_channel *channel)
663769
goto out;
664770
}
665771

772+
if (!efx_do_xdp(efx, channel, rx_buf, &eh))
773+
goto out;
774+
666775
if (unlikely(!(efx->net_dev->features & NETIF_F_RXCSUM)))
667776
rx_buf->flags &= ~EFX_RX_PKT_CSUMMED;
668777

@@ -731,6 +840,7 @@ void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
731840
{
732841
struct efx_nic *efx = rx_queue->efx;
733842
unsigned int max_fill, trigger, max_trigger;
843+
int rc = 0;
734844

735845
netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
736846
"initialising RX queue %d\n", efx_rx_queue_index(rx_queue));
@@ -764,6 +874,19 @@ void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
764874
rx_queue->fast_fill_trigger = trigger;
765875
rx_queue->refill_enabled = true;
766876

877+
/* Initialise XDP queue information */
878+
rc = xdp_rxq_info_reg(&rx_queue->xdp_rxq_info, efx->net_dev,
879+
rx_queue->core_index);
880+
881+
if (rc) {
882+
netif_err(efx, rx_err, efx->net_dev,
883+
"Failure to initialise XDP queue information rc=%d\n",
884+
rc);
885+
efx->xdp_rxq_info_failed = true;
886+
} else {
887+
rx_queue->xdp_rxq_info_valid = true;
888+
}
889+
767890
/* Set up RX descriptor ring */
768891
efx_nic_init_rx(rx_queue);
769892
}
@@ -805,6 +928,11 @@ void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
805928
}
806929
kfree(rx_queue->page_ring);
807930
rx_queue->page_ring = NULL;
931+
932+
if (rx_queue->xdp_rxq_info_valid)
933+
xdp_rxq_info_unreg(&rx_queue->xdp_rxq_info);
934+
935+
rx_queue->xdp_rxq_info_valid = false;
808936
}
809937

810938
void efx_remove_rx_queue(struct efx_rx_queue *rx_queue)

0 commit comments

Comments
 (0)