@@ -424,42 +424,54 @@ EXPORT_SYMBOL(netif_rx);
*/
/* _VMKLNX_CODECHECK_: netif_receive_skb */
int
netif_receive_skb(struct sk_buff *skb)
{
struct net_device *dev = skb->dev;
-   vmk_NetPoll pollPriv;
+   vmk_NetPoll netPoll;
vmk_Worldlet wdt;
struct napi_struct *napi = NULL;
vmk_PktHandle *pkt;
int status;
VMK_ASSERT(dev);
   /*
    * When the system is not in panic/debug state, put arriving packets on
    * skb->napi->rxPktList.
    */
if (skb->napi == NULL) {
-      if (unlikely(vmk_WorldletGetCurrent(&wdt, (void **)&pollPriv) != VMK_OK)) {
+      if (unlikely(vmk_WorldletGetCurrent(&wdt, (void **)&netPoll) != VMK_OK)) {
VMK_ASSERT(VMK_FALSE);
dev_kfree_skb_any(skb);
dev->linnet_rx_dropped++;
status = NET_RX_DROP;
goto done;
} else {
      /*
       * When the system is in panic/debug state, the current worldlet is the
-      * debug worldlet rather than the napi_poll worldlet. In this case, put the
+      * debug worldlet rather than the NetPollWorldlet. In this case, put the
       * arriving packets into debugPktList. This list will be processed by
       * FlushRxBuffers, because netdump/netdebug will bypass the vswitch to read
       * the packets.
       */
-      if (vmk_NetPollGetCurrent(&pollPriv) == VMK_OK) {
-         napi = (struct napi_struct *)vmk_NetPollGetPrivate(pollPriv);
+      if (vmk_NetPollGetCurrent(&netPoll) == VMK_OK) {
+         void *priv = vmk_NetPollGetPrivate(netPoll);
+         if (pollpriv_type(priv) == NETPOLL_DEFAULT) {
+            napi = pollpriv_napi(priv);
spin_lock(&dev->napi_lock);
list_for_each_entry(napi, &dev->napi_list, dev_list) {
if (napi->dev_poll && test_bit(NAPI_STATE_SCHED, &napi->state)) {
break;
         }
      }
      spin_unlock(&dev->napi_lock);
if (!napi || vmk_SystemCheckState(VMK_SYSTEM_STATE_PANIC)) {
pkt = skb->pkt;
status = map_skb_to_pkt(skb);
if (likely(status == NET_RX_SUCCESS)) {
if (debugPktList == NULL) {
if (vmk_PktListAlloc(&debugPktList) != VMK_OK) {
@@ -472,13 +484,13 @@ netif_receive_skb(struct sk_buff *skb)
VMK_ASSERT(pkt);
vmk_PktListAppendPkt(debugPktList, pkt);
-      VMK_ASSERT(pollPriv != NULL);
+      VMK_ASSERT(netPoll != NULL);
skb->napi = napi;
VMK_ASSERT(skb->napi != NULL);
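
/*
 * Editor's sketch (not part of the patch): the pollpriv_embed()/
 * pollpriv_type()/pollpriv_napi()/pollpriv_net_device() helpers used in
 * these hunks are not shown here. One plausible implementation, assuming
 * the poll private pointers are at least 2-byte aligned and the NETPOLL_*
 * type values fit in the spare alignment bit, tags the low bit of the
 * pointer so a single void * carries both the pointer and the type.
 */
#define NETPOLL_TYPE_MASK   0x1UL

static inline void *pollpriv_embed(void *ptr, unsigned long type)
{
   /* Assumes 'type' fits within NETPOLL_TYPE_MASK. */
   return (void *)((unsigned long)ptr | (type & NETPOLL_TYPE_MASK));
}

static inline unsigned long pollpriv_type(void *priv)
{
   return (unsigned long)priv & NETPOLL_TYPE_MASK;
}

static inline struct napi_struct *pollpriv_napi(void *priv)
{
   return (struct napi_struct *)((unsigned long)priv & ~NETPOLL_TYPE_MASK);
}

static inline struct net_device *pollpriv_net_device(void *priv)
{
   return (struct net_device *)((unsigned long)priv & ~NETPOLL_TYPE_MASK);
}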
@@ -516,16 +528,16 @@ EXPORT_SYMBOL(netif_receive_skb);
* None.
*
 *----------------------------------------------------------------------------
 */
static vmk_Bool
napi_poll(void *ptr)
{
   VMK_ReturnStatus status = VMK_OK;
-   struct napi_struct *napi = (struct napi_struct *)ptr;
+   struct napi_struct *napi = pollpriv_napi(ptr);
   /*
    * napi_schedule_prep()/napi_schedule() depend on accurately seeing whether
* or not the worldlet is running and assume that the check for polling
* executes only after the worldlet has been dispatched. If the CPU
* aggressively prefetches the test_bit() load here so that it occurs
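
/*
 * Editor's sketch (assumption, not from the patch): the comment above is
 * truncated in this excerpt, but it describes a load-reordering hazard.
 * The usual cure is a full memory barrier so the NAPI_STATE_SCHED load
 * cannot be hoisted above the store that marks the worldlet as running,
 * for example:
 */
static inline vmk_Bool napi_sched_is_pending(struct napi_struct *napi)
{
   smp_mb(); /* order the state load after the worldlet-running store */
   return test_bit(NAPI_STATE_SCHED, &napi->state) ? VMK_TRUE : VMK_FALSE;
}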
@@ -571,15 +583,15 @@ napi_poll(void *ptr)
-netdev_poll(void *private)
+netdev_poll(void *ptr)
{
-   struct net_device *dev = private;
+   struct net_device *dev = pollpriv_net_device(ptr);
vmk_Bool needWork;
struct napi_struct *napi;
needWork = VMK_FALSE;
@@ -643,13 +655,13 @@ napi_poll_init(struct napi_struct *napi)
napi->napi_wdt_priv.dev = napi->dev;
napi->napi_wdt_priv.napi = napi;
napi->dev_poll = VMK_FALSE;
napi->vector = 0;
pollInit.poll = napi_poll;
-   pollInit.priv = napi;
+   pollInit.priv = pollpriv_embed(napi, NETPOLL_DEFAULT);
if (napi->dev->features & NETIF_F_CNA) {
pollInit.deliveryCallback = LinuxCNA_Poll;
pollInit.features = VMK_NETPOLL_CUSTOM_DELIVERY_CALLBACK;
   } else {
      pollInit.deliveryCallback = NULL;
@@ -727,15 +739,15 @@ netdev_poll_init(struct net_device *dev)
ret = vmk_ServiceGetID("netdev", &serviceID);
VMK_ASSERT(ret == VMK_OK);
dev->napi_wdt_priv.dev = dev;
dev->napi_wdt_priv.napi = NULL;
pollInit.poll = netdev_poll;
-   pollInit.priv = dev;
+   pollInit.priv = pollpriv_embed(dev, NETPOLL_BACKUP);
if (dev->features & NETIF_F_CNA) {
pollInit.deliveryCallback = LinuxCNADev_Poll;
@@ -1151,12 +1163,21 @@ skb_gen_pkt_frags(struct sk_buff *skb)
status = skb_append_frags_to_pkt(skb);
if (unlikely(status != VMK_OK)) {
      return status;
   }

+   /*
+    * PR 922606:
+    * Set skb frag ownership to vmkernel, such that vmklinux won't try
+    * to free those MPNs if the skb needs to be dropped later. Instead,
+    * the frags/SGs will be freed when the associated pkt is destroyed
+    * by vmkernel.
+    */
+   vmklnx_set_skb_frags_owner_vmkernel(skb);
   /*
    * Since we removed packet completion in vmklinux, we
    * cannot support skb chaining anymore.
    */
if (skb_shinfo(skb)->frag_list) {
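
/*
 * Editor's sketch (hypothetical, not the vmklinux implementation):
 * conceptually, transferring frag ownership means marking each frag so
 * the skb teardown path skips freeing its pages and the vmkernel pkt
 * destructor frees them instead. The 'owner' field and the
 * FRAG_OWNER_VMKERNEL constant below are invented for illustration only.
 */
static void example_set_frags_owner_vmkernel(struct sk_buff *skb)
{
   int i;

   for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
      /* hypothetical per-frag ownership flag */
      skb_shinfo(skb)->frags[i].owner = FRAG_OWNER_VMKERNEL;
   }
}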
@@ -1442,24 +1463,21 @@ skb_release_data(struct sk_buff *skb)
if (likely(atomic_dec_and_test(&(skb_shinfo(skb)->fragsref)))) {
if (skb->pkt) {
if ((in_irq() || irqs_disabled()) && !vmklnx_is_panic()) {
            vmk_PktReleaseIRQ(skb->pkt);
         } else {
            /*
             * Try to queue packets in NAPI's compPktList in order to
             * release them in batch, but first thoroughly check if we
             * got called from a napi context (PR #396873).
             */
-            if (vmk_NetPollGetCurrent(&pollPriv) == VMK_OK &&
-                (napi = (struct napi_struct *) vmk_NetPollGetPrivate(pollPriv)) != NULL &&
+            if (vmk_NetPollGetCurrent(&netPoll) == VMK_OK &&
+                (napi = (struct napi_struct *) vmk_NetPollGetPrivate(netPoll)) != NULL &&
                 napi->net_poll_type == NETPOLL_DEFAULT) {
-               vmk_NetPollQueueCompPkt(pollPriv, skb->pkt);
+               vmk_NetPollQueueCompPkt(netPoll, skb->pkt);
            } else {
               vmk_PktRelease(skb->pkt);
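
/*
 * Editor's sketch: the release policy of this hunk condensed, using only
 * calls visible above. IRQ context must use the IRQ-safe release, a
 * default netpoll context can defer into the completion list for batch
 * release, and any other context releases the packet immediately.
 */
static void example_release_pkt(struct sk_buff *skb)
{
   vmk_NetPoll netPoll;
   struct napi_struct *napi;

   if ((in_irq() || irqs_disabled()) && !vmklnx_is_panic()) {
      vmk_PktReleaseIRQ(skb->pkt);                  /* IRQ-safe path */
   } else if (vmk_NetPollGetCurrent(&netPoll) == VMK_OK &&
              (napi = (struct napi_struct *) vmk_NetPollGetPrivate(netPoll)) != NULL &&
              napi->net_poll_type == NETPOLL_DEFAULT) {
      vmk_NetPollQueueCompPkt(netPoll, skb->pkt);   /* batch in napi poll */
   } else {
      vmk_PktRelease(skb->pkt);                     /* immediate release */
   }
}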
@@ -2279,12 +2297,13 @@ vmklnx_netif_set_poll_cna(struct napi_struct *napi)
+   pollInit.priv = pollpriv_embed(pollInit.priv, napi->net_poll_type);
vmk_NetPollChangeCallback(napi->net_poll, &pollInit);
}
EXPORT_SYMBOL(vmklnx_netif_set_poll_cna);
@@ -6915,26 +6934,26 @@ GetNICPanicInfo(void *clientData,
static VMK_ReturnStatus
FlushRxBuffers(void* clientData)
{
   struct net_device* dev = (struct net_device*)clientData;
struct napi_struct* napi = NULL;
VMKLNX_DEBUG(1, "client data, now net_device:%p", dev);
if (napi != NULL) {
VMKLNX_DEBUG(1, "Calling Pkt List Rx Process on napi:%p", napi);
VMK_ASSERT(napi->dev != NULL);
      /*
       * Bypass the vswitch to receive the packets when the system is in
       * panic/debug mode.
       */
-      if (vmk_NetPollGetCurrent(&pollPriv) != VMK_OK) {
+      if (vmk_NetPollGetCurrent(&netPoll) != VMK_OK) {
         debugPktList = (vmk_PktList) vmk_HeapAlloc(vmklnxLowHeap,
                                                    vmk_PktListSizeInBytes);
         if (debugPktList == NULL) {
            return VMK_NO_MEMORY;
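
/*
 * Editor's sketch (assumed flow, helper name hypothetical): how the
 * panic-path pieces fit together. netif_receive_skb() parks packets on
 * debugPktList when no netpoll context exists; FlushRxBuffers() later
 * drains that list directly, bypassing the vswitch.
 */
static void example_drain_debug_pkts(struct napi_struct *napi)
{
   if (debugPktList == NULL) {
      return; /* nothing arrived while in panic/debug mode */
   }
   /* hypothetical stand-in for the "Pkt List Rx Process" step logged above */
   example_pkt_list_rx_process(napi, debugPktList);
}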