netmem, devmem, tcp: access pp fields through @desc in net_iov
author Byungchul Park <byungchul@sk.com>
Wed, 26 Nov 2025 04:36:46 +0000 (13:36 +0900)
committer Jakub Kicinski <kuba@kernel.org>
Fri, 28 Nov 2025 01:41:51 +0000 (17:41 -0800)
Convert all the legacy code that directly accesses the pp fields in
net_iov so that it goes through @desc instead.

Signed-off-by: Byungchul Park <byungchul@sk.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
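
For context on the hunks below: the page_pool bookkeeping that net_iov
used to expose directly (pp_magic, pp, dma_addr, pp_ref_count) is now
reached through an embedded descriptor named @desc. The following is a
minimal sketch of that relationship, using only the field names visible
in this diff and assuming @desc is a struct named netmem_desc as the
subject line's netmem prefix suggests; the real kernel definitions may
carry additional members and layout constraints.

        /* Sketch only: field names taken from the hunks in this commit,
         * not the authoritative kernel definitions.
         */
        struct netmem_desc {
                unsigned long           pp_magic;
                struct page_pool        *pp;
                dma_addr_t              dma_addr;
                atomic_long_t           pp_ref_count;
        };

        struct net_iov {
                struct netmem_desc      desc;   /* pp state now lives here */
                /* ... remaining net_iov members ... */
        };

        /* Call sites therefore change from niov->pp, niov->dma_addr, ...
         * to niov->desc.pp, niov->desc.dma_addr, and so on.
         */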
include/linux/skbuff.h
net/core/devmem.c
net/ipv4/tcp.c

index ff90281ddf90ee3a2fa5d1b0c31d843ee4f90054..86737076101d4a8452e90fe78adcdcfdefb79169 100644 (file)
@@ -3778,8 +3778,8 @@ static inline dma_addr_t __skb_frag_dma_map(struct device *dev,
                                            enum dma_data_direction dir)
 {
        if (skb_frag_is_net_iov(frag)) {
-               return netmem_to_net_iov(frag->netmem)->dma_addr + offset +
-                      frag->offset;
+               return netmem_to_net_iov(frag->netmem)->desc.dma_addr +
+                      offset + frag->offset;
        }
        return dma_map_page(dev, skb_frag_page(frag),
                            skb_frag_off(frag) + offset, size, dir);
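
The net_iov branch above open-codes the address math. For illustration
only, the same access could be hidden behind a small accessor; the
helper below is hypothetical and is not added by this commit:

        /* Hypothetical accessor (not part of this commit): resolve the
         * DMA address of a net_iov-backed frag through @desc, mirroring
         * the open-coded branch in __skb_frag_dma_map() above.
         */
        static inline dma_addr_t skb_frag_net_iov_dma_addr(const skb_frag_t *frag,
                                                           size_t offset)
        {
                return netmem_to_net_iov(frag->netmem)->desc.dma_addr +
                       offset + frag->offset;
        }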
index 1d04754bc756d40a125b68abf2a0a0ceda464992..ec4217d6c0b4fdb819aa80277acb608a841cdc99 100644 (file)
@@ -97,9 +97,9 @@ net_devmem_alloc_dmabuf(struct net_devmem_dmabuf_binding *binding)
        index = offset / PAGE_SIZE;
        niov = &owner->area.niovs[index];
 
-       niov->pp_magic = 0;
-       niov->pp = NULL;
-       atomic_long_set(&niov->pp_ref_count, 0);
+       niov->desc.pp_magic = 0;
+       niov->desc.pp = NULL;
+       atomic_long_set(&niov->desc.pp_ref_count, 0);
 
        return niov;
 }
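
The three stores above reset the page_pool state of a freshly allocated
niov through @desc. If the same reset were needed elsewhere, it could be
factored out; the helper below is a sketch under that assumption, not
part of this commit:

        /* Hypothetical helper (not in this commit): clear the page_pool
         * state of a niov through @desc, as net_devmem_alloc_dmabuf()
         * now does above.
         */
        static inline void net_iov_clear_pp_state(struct net_iov *niov)
        {
                niov->desc.pp_magic = 0;
                niov->desc.pp = NULL;
                atomic_long_set(&niov->desc.pp_ref_count, 0);
        }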
index dee578aad690d7cf125c2a8697f0a52f3a69bff8..f035440c475a9784fe9703d5d03847740b8d11bb 100644 (file)
@@ -2587,7 +2587,7 @@ static int tcp_recvmsg_dmabuf(struct sock *sk, const struct sk_buff *skb,
                                if (err)
                                        goto out;
 
-                               atomic_long_inc(&niov->pp_ref_count);
+                               atomic_long_inc(&niov->desc.pp_ref_count);
                                tcp_xa_pool.netmems[tcp_xa_pool.idx++] = skb_frag_netmem(frag);
 
                                sent += copy;