17 | 17 | #include <linux/iommu.h>
18 | 18 | #include <net/ip.h>
19 | 19 | #include <net/checksum.h>
| 20 | +#include <net/xdp.h>
| 21 | +#include <linux/bpf_trace.h>
20 | 22 | #include "net_driver.h"
21 | 23 | #include "efx.h"
22 | 24 | #include "filter.h"

27 | 29 | /* Preferred number of descriptors to fill at once */
28 | 30 | #define EFX_RX_PREFERRED_BATCH 8U
29 | 31 |
| 32 | +/* Maximum rx prefix used by any architecture. */
| 33 | +#define EFX_MAX_RX_PREFIX_SIZE 16
| 34 | +
30 | 35 | /* Number of RX buffers to recycle pages for. When creating the RX page recycle
31 | 36 |  * ring, this number is divided by the number of buffers per page to calculate
32 | 37 |  * the number of pages to store in the RX page recycle ring.
@@ -95,7 +100,7 @@ void efx_rx_config_page_split(struct efx_nic *efx)
95 | 100 |                                       EFX_RX_BUF_ALIGNMENT);
96 | 101 |         efx->rx_bufs_per_page = efx->rx_buffer_order ? 1 :
97 | 102 |                 ((PAGE_SIZE - sizeof(struct efx_rx_page_state)) /
98 |     | -              efx->rx_page_buf_step);
| 103 | +                (efx->rx_page_buf_step + XDP_PACKET_HEADROOM));
99 | 104 |         efx->rx_buffer_truesize = (PAGE_SIZE << efx->rx_buffer_order) /
100 | 105 |                 efx->rx_bufs_per_page;
101 | 106 |         efx->rx_pages_per_batch = DIV_ROUND_UP(EFX_RX_PREFERRED_BATCH,
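For a rough sense of the new divisor's effect, here is a standalone sketch with assumed sizes (only XDP_PACKET_HEADROOM, 256 bytes, is a fixed kernel constant; the page, page-state and buffer-step sizes are illustrative): reserving per-buffer headroom reduces how many RX buffers fit in each page.

/* Illustrative only -- not part of the patch. All sizes except
 * XDP_PACKET_HEADROOM (256 bytes in the kernel) are assumed values. */
#include <stdio.h>

int main(void)
{
        const unsigned int page_size        = 4096; /* assumed PAGE_SIZE */
        const unsigned int page_state_size  = 16;   /* assumed sizeof(struct efx_rx_page_state) */
        const unsigned int rx_page_buf_step = 1792; /* assumed per-buffer step */
        const unsigned int xdp_headroom     = 256;  /* XDP_PACKET_HEADROOM */

        unsigned int before = (page_size - page_state_size) / rx_page_buf_step;
        unsigned int after  = (page_size - page_state_size) /
                              (rx_page_buf_step + xdp_headroom);

        /* Prints "bufs per page: before=2 after=1" for these values. */
        printf("bufs per page: before=%u after=%u\n", before, after);
        return 0;
}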
@@ -185,6 +190,9 @@ static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue, bool atomic)
185 | 190 |         page_offset = sizeof(struct efx_rx_page_state);
186 | 191 |
187 | 192 |         do {
| 193 | +                page_offset += XDP_PACKET_HEADROOM;
| 194 | +                dma_addr += XDP_PACKET_HEADROOM;
| 195 | +
188 | 196 |                 index = rx_queue->added_count & rx_queue->ptr_mask;
189 | 197 |                 rx_buf = efx_rx_buffer(rx_queue, index);
190 | 198 |                 rx_buf->dma_addr = dma_addr + efx->rx_ip_align;
@@ -635,6 +643,104 @@ static void efx_rx_deliver(struct efx_channel *channel, u8 *eh,
635 | 643 |         netif_receive_skb(skb);
636 | 644 | }
637 | 645 |
| 646 | +/** efx_do_xdp: perform XDP processing on a received packet
| 647 | + *
| 648 | + * Returns true if packet should still be delivered.
| 649 | + */
| 650 | +static bool efx_do_xdp(struct efx_nic *efx, struct efx_channel *channel,
| 651 | +                       struct efx_rx_buffer *rx_buf, u8 **ehp)
| 652 | +{
| 653 | +        u8 rx_prefix[EFX_MAX_RX_PREFIX_SIZE];
| 654 | +        struct efx_rx_queue *rx_queue;
| 655 | +        struct bpf_prog *xdp_prog;
| 656 | +        struct xdp_buff xdp;
| 657 | +        u32 xdp_act;
| 658 | +        s16 offset;
| 659 | +        int err;
| 660 | +
| 661 | +        rcu_read_lock();
| 662 | +        xdp_prog = rcu_dereference(efx->xdp_prog);
| 663 | +        if (!xdp_prog) {
| 664 | +                rcu_read_unlock();
| 665 | +                return true;
| 666 | +        }
| 667 | +
| 668 | +        rx_queue = efx_channel_get_rx_queue(channel);
| 669 | +
| 670 | +        if (unlikely(channel->rx_pkt_n_frags > 1)) {
| 671 | +                /* We can't do XDP on fragmented packets - drop. */
| 672 | +                rcu_read_unlock();
| 673 | +                efx_free_rx_buffers(rx_queue, rx_buf,
| 674 | +                                    channel->rx_pkt_n_frags);
| 675 | +                if (net_ratelimit())
| 676 | +                        netif_err(efx, rx_err, efx->net_dev,
| 677 | +                                  "XDP is not possible with multiple receive fragments (%d)\n",
| 678 | +                                  channel->rx_pkt_n_frags);
| 679 | +                return false;
| 680 | +        }
| 681 | +
| 682 | +        dma_sync_single_for_cpu(&efx->pci_dev->dev, rx_buf->dma_addr,
| 683 | +                                rx_buf->len, DMA_FROM_DEVICE);
| 684 | +
| 685 | +        /* Save the rx prefix. */
| 686 | +        EFX_WARN_ON_PARANOID(efx->rx_prefix_size > EFX_MAX_RX_PREFIX_SIZE);
| 687 | +        memcpy(rx_prefix, *ehp - efx->rx_prefix_size,
| 688 | +               efx->rx_prefix_size);
| 689 | +
| 690 | +        xdp.data = *ehp;
| 691 | +        xdp.data_hard_start = xdp.data - XDP_PACKET_HEADROOM;
| 692 | +
| 693 | +        /* No support yet for XDP metadata */
| 694 | +        xdp_set_data_meta_invalid(&xdp);
| 695 | +        xdp.data_end = xdp.data + rx_buf->len;
| 696 | +        xdp.rxq = &rx_queue->xdp_rxq_info;
| 697 | +
| 698 | +        xdp_act = bpf_prog_run_xdp(xdp_prog, &xdp);
| 699 | +        rcu_read_unlock();
| 700 | +
| 701 | +        offset = (u8 *)xdp.data - *ehp;
| 702 | +
| 703 | +        switch (xdp_act) {
| 704 | +        case XDP_PASS:
| 705 | +                /* Fix up rx prefix. */
| 706 | +                if (offset) {
| 707 | +                        *ehp += offset;
| 708 | +                        rx_buf->page_offset += offset;
| 709 | +                        rx_buf->len -= offset;
| 710 | +                        memcpy(*ehp - efx->rx_prefix_size, rx_prefix,
| 711 | +                               efx->rx_prefix_size);
| 712 | +                }
| 713 | +                break;
| 714 | +
| 715 | +        case XDP_TX:
| 716 | +                return -EOPNOTSUPP;     /* XDP_TX is not yet supported */
| 717 | +
| 718 | +        case XDP_REDIRECT:
| 719 | +                err = xdp_do_redirect(efx->net_dev, &xdp, xdp_prog);
| 720 | +                if (unlikely(err)) {
| 721 | +                        efx_free_rx_buffers(rx_queue, rx_buf, 1);
| 722 | +                        if (net_ratelimit())
| 723 | +                                netif_err(efx, rx_err, efx->net_dev,
| 724 | +                                          "XDP redirect failed (%d)\n", err);
| 725 | +                }
| 726 | +                break;
| 727 | +
| 728 | +        default:
| 729 | +                bpf_warn_invalid_xdp_action(xdp_act);
| 730 | +                efx_free_rx_buffers(rx_queue, rx_buf, 1);
| 731 | +                break;
| 732 | +
| 733 | +        case XDP_ABORTED:
| 734 | +                trace_xdp_exception(efx->net_dev, xdp_prog, xdp_act);
| 735 | +                /* Fall through */
| 736 | +        case XDP_DROP:
| 737 | +                efx_free_rx_buffers(rx_queue, rx_buf, 1);
| 738 | +                break;
| 739 | +        }
| 740 | +
| 741 | +        return xdp_act == XDP_PASS;
| 742 | +}
| 743 | +
638 | 744 | /* Handle a received packet. Second half: Touches packet payload. */
639 | 745 | void __efx_rx_packet(struct efx_channel *channel)
640 | 746 | {
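For reference, a minimal XDP program of the kind efx_do_xdp() above executes through bpf_prog_run_xdp(). This is an illustrative sketch, not part of this patch; the program name and the IPv4-only filter are invented, and it assumes the usual clang/libbpf BPF build flow.

// SPDX-License-Identifier: GPL-2.0
/* Illustrative XDP program, not part of this patch. Returning XDP_DROP makes
 * efx_do_xdp() free the RX buffer; XDP_PASS delivers the packet as before. */
#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>

SEC("xdp")
int xdp_pass_ipv4_only(struct xdp_md *ctx)
{
        void *data     = (void *)(long)ctx->data;
        void *data_end = (void *)(long)ctx->data_end;
        struct ethhdr *eth = data;

        /* Bounds check required by the verifier before reading the header. */
        if ((void *)(eth + 1) > data_end)
                return XDP_DROP;

        /* Purely as a demonstration: pass IPv4, drop everything else. */
        return eth->h_proto == bpf_htons(ETH_P_IP) ? XDP_PASS : XDP_DROP;
}

char _license[] SEC("license") = "GPL";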
@@ -663,6 +769,9 @@ void __efx_rx_packet(struct efx_channel *channel)
663 | 769 |                 goto out;
664 | 770 |         }
665 | 771 |
| 772 | +        if (!efx_do_xdp(efx, channel, rx_buf, &eh))
| 773 | +                goto out;
| 774 | +
666 | 775 |         if (unlikely(!(efx->net_dev->features & NETIF_F_RXCSUM)))
667 | 776 |                 rx_buf->flags &= ~EFX_RX_PKT_CSUMMED;
668 | 777 |
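And a hypothetical userspace attach sequence using libbpf, assuming the rest of the series provides the .ndo_bpf hook that populates efx->xdp_prog (dereferenced above); the object path, program name and interface name are placeholders.

/* Hypothetical attach sequence (error handling mostly trimmed). Uses the
 * libbpf helpers of this patch's era; newer libbpf spells the final call
 * bpf_xdp_attach(). */
#include <bpf/libbpf.h>
#include <net/if.h>
#include <stdio.h>

int main(void)
{
        struct bpf_object *obj;
        struct bpf_program *prog;
        int ifindex, prog_fd;

        obj = bpf_object__open_file("xdp_pass_ipv4_only.o", NULL); /* placeholder path */
        if (libbpf_get_error(obj) || bpf_object__load(obj))
                return 1;

        prog = bpf_object__find_program_by_name(obj, "xdp_pass_ipv4_only");
        if (!prog)
                return 1;
        prog_fd = bpf_program__fd(prog);

        ifindex = if_nametoindex("eth0");       /* placeholder interface name */

        /* Flags 0: let the kernel choose; the driver's native XDP path
         * (efx_do_xdp() above) is preferred when available. */
        if (bpf_set_link_xdp_fd(ifindex, prog_fd, 0))
                return 1;

        printf("XDP program attached\n");
        return 0;
}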
@@ -731,6 +840,7 @@ void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
731 | 840 | {
732 | 841 |         struct efx_nic *efx = rx_queue->efx;
733 | 842 |         unsigned int max_fill, trigger, max_trigger;
| 843 | +        int rc = 0;
734 | 844 |
735 | 845 |         netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
736 | 846 |                   "initialising RX queue %d\n", efx_rx_queue_index(rx_queue));

@@ -764,6 +874,19 @@ void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
764 | 874 |         rx_queue->fast_fill_trigger = trigger;
765 | 875 |         rx_queue->refill_enabled = true;
766 | 876 |
| 877 | +        /* Initialise XDP queue information */
| 878 | +        rc = xdp_rxq_info_reg(&rx_queue->xdp_rxq_info, efx->net_dev,
| 879 | +                              rx_queue->core_index);
| 880 | +
| 881 | +        if (rc) {
| 882 | +                netif_err(efx, rx_err, efx->net_dev,
| 883 | +                          "Failure to initialise XDP queue information rc=%d\n",
| 884 | +                          rc);
| 885 | +                efx->xdp_rxq_info_failed = true;
| 886 | +        } else {
| 887 | +                rx_queue->xdp_rxq_info_valid = true;
| 888 | +        }
| 889 | +
767 | 890 |         /* Set up RX descriptor ring */
768 | 891 |         efx_nic_init_rx(rx_queue);
769 | 892 | }

@@ -805,6 +928,11 @@ void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
805 | 928 |         }
806 | 929 |         kfree(rx_queue->page_ring);
807 | 930 |         rx_queue->page_ring = NULL;
| 931 | +
| 932 | +        if (rx_queue->xdp_rxq_info_valid)
| 933 | +                xdp_rxq_info_unreg(&rx_queue->xdp_rxq_info);
| 934 | +
| 935 | +        rx_queue->xdp_rxq_info_valid = false;
808 | 936 | }
809 | 937 |
810 | 938 | void efx_remove_rx_queue(struct efx_rx_queue *rx_queue)