qemu/hw/net/net_tx_pkt.h

/*
* QEMU TX packets abstraction
*
* Copyright (c) 2012 Ravello Systems LTD (http://ravellosystems.com)
*
* Developed by Daynix Computing LTD (http://www.daynix.com)
*
* Authors:
* Dmitry Fleytman <dmitry@daynix.com>
* Tamir Shomer <tamirs@daynix.com>
* Yan Vugenfirer <yan@daynix.com>
*
* This work is licensed under the terms of the GNU GPL, version 2 or later.
* See the COPYING file in the top-level directory.
*
*/
#ifndef NET_TX_PKT_H
#define NET_TX_PKT_H
#include "net/eth.h"
#include "exec/hwaddr.h"
/* define to enable packet dump functions */
/*#define NET_TX_PKT_DEBUG*/
struct NetTxPkt;
typedef void (*NetTxPktFreeFrag)(void *, void *, size_t);
typedef void (*NetTxPktSend)(void *, const struct iovec *, int, const struct iovec *, int);
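/*
 * A NetTxPktFreeFrag callback receives (context, fragment base, fragment
 * length) and is invoked by net_tx_pkt_reset() for each queued fragment;
 * net_tx_pkt_unmap_frag_pci() below matches this signature. A NetTxPktSend
 * callback is invoked by net_tx_pkt_send_custom() with the context and two
 * iovec/count pairs describing the packet with and without the virtio-net
 * header (see net_tx_pkt.c for the exact semantics).
 */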
/**
* Init function for tx packet functionality
*
* @pkt: packet pointer
* @max_frags: max tx ip fragments
*/
void net_tx_pkt_init(struct NetTxPkt **pkt, uint32_t max_frags);
/**
* Clean all tx packet resources.
*
* @pkt: packet.
*/
void net_tx_pkt_uninit(struct NetTxPkt *pkt);
/**
* get virtio header
*
* @pkt: packet
* @ret: virtio header
*/
struct virtio_net_hdr *net_tx_pkt_get_vhdr(struct NetTxPkt *pkt);
/**
* build virtio header (will be stored in module context)
*
* @pkt: packet
* @tso_enable: TSO enabled
* @csum_enable: CSO enabled
* @gso_size: MSS size for TSO
* @ret: operation result
*
*/
bool net_tx_pkt_build_vheader(struct NetTxPkt *pkt, bool tso_enable,
bool csum_enable, uint32_t gso_size);
/**
* updates vlan tag, and adds vlan header with custom ethernet type
* in case it is missing.
*
* @pkt: packet
* @vlan: VLAN tag
* @vlan_ethtype: VLAN header Ethernet type
*
*/
void net_tx_pkt_setup_vlan_header_ex(struct NetTxPkt *pkt,
uint16_t vlan, uint16_t vlan_ethtype);
/**
* updates vlan tag, and adds vlan header in case it is missing
*
* @pkt: packet
* @vlan: VLAN tag
*
*/
static inline void
net_tx_pkt_setup_vlan_header(struct NetTxPkt *pkt, uint16_t vlan)
{
net_tx_pkt_setup_vlan_header_ex(pkt, vlan, ETH_P_VLAN);
}
/**
* populate data fragment into pkt context.
*
* @pkt: packet
* @base: pointer to fragment
* @len: length of fragment
* @ret: operation result
*
*/
bool net_tx_pkt_add_raw_fragment(struct NetTxPkt *pkt, void *base, size_t len);
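/*
 * Note: net_tx_pkt_add_raw_fragment() records the caller-provided buffer
 * as-is, while net_tx_pkt_add_raw_fragment_pci() below maps guest memory
 * through the PCI DMA API first; fragments added that way are released via
 * the free callback given to net_tx_pkt_reset(), typically
 * net_tx_pkt_unmap_frag_pci().
 */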
/**
* Fix ip header fields and calculate IP header and pseudo header checksums.
*
* @pkt: packet
*
*/
void net_tx_pkt_update_ip_checksums(struct NetTxPkt *pkt);
/**
* Calculate the IP header checksum.
*
* @pkt: packet
*
*/
void net_tx_pkt_update_ip_hdr_checksum(struct NetTxPkt *pkt);
/**
* Calculate the SCTP checksum.
*
* @pkt: packet
* @ret: operation result
*
*/
bool net_tx_pkt_update_sctp_checksum(struct NetTxPkt *pkt);
/**
* get length of all populated data.
*
* @pkt: packet
* @ret: total data length
*
*/
size_t net_tx_pkt_get_total_len(struct NetTxPkt *pkt);
/**
* get packet type
*
* @pkt: packet
* @ret: packet type
*
*/
eth_pkt_types_e net_tx_pkt_get_packet_type(struct NetTxPkt *pkt);
/**
* prints packet data if debug is enabled
*
* @pkt: packet
*
*/
void net_tx_pkt_dump(struct NetTxPkt *pkt);
/**
* reset tx packet private context (needs to be called between packets)
*
* @pkt: packet
* @callback: function to free the fragments
* @context: pointer to be passed to the callback
*/
void net_tx_pkt_reset(struct NetTxPkt *pkt,
NetTxPktFreeFrag callback, void *context);
/**
* Unmap a fragment mapped from a PCI device.
*
* @context: PCI device owning fragment
* @base: pointer to fragment
* @len: length of fragment
*/
void net_tx_pkt_unmap_frag_pci(void *context, void *base, size_t len);
/**
* map data fragment from PCI device and populate it into pkt context.
*
* @pkt: packet
* @pci_dev: PCI device owning fragment
* @pa: physical address of fragment
* @len: length of fragment
* @ret: operation result
*/
bool net_tx_pkt_add_raw_fragment_pci(struct NetTxPkt *pkt, PCIDevice *pci_dev,
dma_addr_t pa, size_t len);
/**
* Send packet to qemu. Handles sw offloads if vhdr is not supported.
*
* @pkt: packet
* @nc: NetClientState
* @ret: operation result
*
*/
bool net_tx_pkt_send(struct NetTxPkt *pkt, NetClientState *nc);
/**
* Send packet with a custom function.
*
* @pkt: packet
* @offload: whether the callback implements offloading
* @callback: a function to be called back for each transformed packet
* @context: a pointer to be passed to the callback.
* @ret: operation result
*/
bool net_tx_pkt_send_custom(struct NetTxPkt *pkt, bool offload,
NetTxPktSend callback, void *context);
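/*
 * Typical per-packet usage by an emulated NIC (a minimal sketch, not part of
 * this API; "dev", "queue", "desc_addr"/"desc_len" and the offload flags are
 * hypothetical placeholders):
 *
 *     struct NetTxPkt *pkt;
 *
 *     net_tx_pkt_init(&pkt, max_frags);
 *
 *     // for each packet:
 *     net_tx_pkt_reset(pkt, net_tx_pkt_unmap_frag_pci, PCI_DEVICE(dev));
 *     while (more_descriptors) {
 *         net_tx_pkt_add_raw_fragment_pci(pkt, PCI_DEVICE(dev),
 *                                         desc_addr, desc_len);
 *     }
 *     if (net_tx_pkt_parse(pkt) &&
 *         net_tx_pkt_build_vheader(pkt, tso, csum, mss)) {
 *         net_tx_pkt_send(pkt, queue);    // or net_tx_pkt_send_custom()
 *     }
 *
 *     // on device teardown:
 *     net_tx_pkt_reset(pkt, net_tx_pkt_unmap_frag_pci, PCI_DEVICE(dev));
 *     net_tx_pkt_uninit(pkt);
 */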
/**
* parse raw packet data and analyze offload requirements.
*
* @pkt: packet
*
*/
bool net_tx_pkt_parse(struct NetTxPkt *pkt);
/**
* indicates if there are data fragments held by this packet object.
*
* @pkt: packet
*
*/
bool net_tx_pkt_has_fragments(struct NetTxPkt *pkt);
/**
* Fix the IPv6 'plen' field.
* If the IPv6 payload length field is 0, a Hop-by-Hop jumbo payload option
* is expected for packets larger than 65,535 bytes.
* For packets with a payload smaller than 65,535 bytes: fix the 'plen' field.
* For backends with a virtio-net header, one packet with the proper payload
* size is enough. For now, QEMU drops every packet larger than 64K
* (see net_tx_pkt_send()), so there is no reason to add the jumbo option to
* the IPv6 Hop-by-Hop extension if it is missing.
*
* @pkt: packet
*/
void net_tx_pkt_fix_ip6_payload_len(struct NetTxPkt *pkt);
#endif