From 124288434149fd2e377cd8baf529263a0ae9e39d Mon Sep 17 00:00:00 2001 From: Ryan Stone Date: Thu, 11 Sep 2014 14:53:09 -0400 Subject: [PATCH 1/6] Explicitly track outstanding VF -> PF messages in a queue --- sys/dev/ixl/if_ixlv.c | 216 ++++++++++++------------------- sys/dev/ixl/ixl_vc_mgr.h | 75 +++++++++++ sys/dev/ixl/ixlv.h | 21 ++- sys/dev/ixl/ixlvc.c | 325 +++++++++++++++++++++++++++++++---------------- 4 files changed, 387 insertions(+), 250 deletions(-) create mode 100644 sys/dev/ixl/ixl_vc_mgr.h diff --git a/sys/dev/ixl/if_ixlv.c b/sys/dev/ixl/if_ixlv.c index a29d669..4c472a4 100644 --- a/sys/dev/ixl/if_ixlv.c +++ b/sys/dev/ixl/if_ixlv.c @@ -103,12 +103,14 @@ static void ixlv_free_filters(struct ixlv_sc *); static void ixlv_msix_que(void *); static void ixlv_msix_adminq(void *); static void ixlv_do_adminq(void *, int); -static void ixlv_sched_aq(void *); +static void ixlv_do_adminq_locked(struct ixlv_sc *sc); static void ixlv_handle_que(void *, int); static int ixlv_reset(struct ixlv_sc *); static int ixlv_reset_complete(struct i40e_hw *); static void ixlv_set_queue_rx_itr(struct ixl_queue *); static void ixlv_set_queue_tx_itr(struct ixl_queue *); +static void ixl_init_cmd_complete(struct ixl_vc_cmd *, void *, + enum i40e_status_code); static void ixlv_enable_adminq_irq(struct i40e_hw *); static void ixlv_disable_adminq_irq(struct i40e_hw *); @@ -288,12 +290,9 @@ ixlv_attach(device_t dev) /* Core Lock Init*/ mtx_init(&sc->mtx, device_get_nameunit(dev), "IXL SC Lock", MTX_DEF); - mtx_init(&sc->aq_task_mtx, device_get_nameunit(dev), - "IXL AQ Task Lock", MTX_DEF); /* Set up the timer & aq watchdog callouts */ callout_init_mtx(&sc->timer, &sc->mtx, 0); - callout_init_mtx(&sc->aq_task, &sc->aq_task_mtx, 0); /* Save off the information about this board */ hw->vendor_id = pci_get_vendor(dev); @@ -487,9 +486,6 @@ ixlv_attach(device_t dev) /* Start AdminQ taskqueue */ ixlv_init_taskqueue(sc); - /* Start the admin queue scheduler timer */ - callout_reset(&sc->aq_task, 2 * hz, ixlv_sched_aq, sc); - /* Initialize stats */ bzero(&sc->vsi.eth_stats, sizeof(struct i40e_eth_stats)); ixlv_add_stats_sysctls(sc); @@ -504,10 +500,10 @@ ixlv_attach(device_t dev) ixlv_enable_adminq_irq(hw); /* Set things up to run init */ - sc->aq_pending = 0; - sc->aq_required = 0; sc->init_state = IXLV_INIT_READY; + ixl_vc_init_mgr(sc, &sc->vc_mgr); + INIT_DBG_DEV(dev, "end"); return (error); @@ -521,7 +517,6 @@ err_pci_res: ixlv_free_pci_resources(sc); err_early: mtx_destroy(&sc->mtx); - mtx_destroy(&sc->aq_task_mtx); ixlv_free_filters(sc); INIT_DBG_DEV(dev, "end: error %d", error); return (error); @@ -579,21 +574,7 @@ ixlv_detach(device_t dev) if (vsi->vlan_detach != NULL) EVENTHANDLER_DEREGISTER(vlan_unconfig, vsi->vlan_detach); - /* Stop AQ callout */ - callout_drain(&sc->aq_task); - callout_stop(&sc->aq_task); - -#ifdef IXL_DEBUG - /* Report on possible AQ failures */ - if (sc->aq_required || sc->aq_pending) { - device_printf(dev, "AQ status on detach:\n"); - device_printf(dev, "required : 0x%4b\n", sc->aq_required, - IXLV_FLAGS); - device_printf(dev, "pending : 0x%4b\n", sc->aq_pending, - IXLV_FLAGS); - device_printf(dev, "current_op: %d\n", sc->current_op); - } -#endif + ixl_vc_flush(&sc->vc_mgr); i40e_shutdown_adminq(&sc->hw); while (taskqueue_cancel(sc->tq, &sc->aq_irq, NULL) != 0) @@ -609,7 +590,6 @@ ixlv_detach(device_t dev) ixlv_free_pci_resources(sc); ixlv_free_queues(vsi); mtx_destroy(&sc->mtx); - mtx_destroy(&sc->aq_task_mtx); ixlv_free_filters(sc); bus_generic_detach(dev); @@ -896,13 +876,32 @@ 
ixlv_reinit_locked(struct ixlv_sc *sc) } ixlv_enable_adminq_irq(hw); - sc->aq_pending = 0; - sc->aq_required = 0; + ixl_vc_flush(&sc->vc_mgr); INIT_DBG_IF(ifp, "end"); return (error); } +static void +ixl_init_cmd_complete(struct ixl_vc_cmd *cmd, void *arg, + enum i40e_status_code code) +{ + struct ixlv_sc *sc; + + sc = arg; + + /* + * Ignore "Adapter Stopped" message as that happens if an ifconfig down + * happens while a command is in progress, so we don't print an error + * in that case. + */ + if (code != I40E_SUCCESS && code != I40E_ERR_ADAPTER_STOPPED) { + if_printf(sc->vsi.ifp, + "Error %d waiting for PF to complete operation %d\n", + code, cmd->request); + } +} + static void ixlv_init_locked(struct ixlv_sc *sc) @@ -947,9 +946,9 @@ ixlv_init_locked(struct ixlv_sc *sc) // send message, then enqueue another task if (!error || error == EEXIST) { - sc->aq_required |= IXLV_FLAG_AQ_ADD_MAC_FILTER; - callout_reset(&sc->aq_task, IXLV_CALLOUT_TIMO, - ixlv_sched_aq, sc); + ixl_vc_enqueue(&sc->vc_mgr, &sc->add_mac_cmd, + IXLV_FLAG_AQ_ADD_MAC_FILTER, ixl_init_cmd_complete, + sc); } /* Setup vlan's if needed */ @@ -972,22 +971,19 @@ ixlv_init_locked(struct ixlv_sc *sc) } /* Configure queues */ - sc->aq_required |= IXLV_FLAG_AQ_CONFIGURE_QUEUES; - callout_reset(&sc->aq_task, IXLV_CALLOUT_TIMO, - ixlv_sched_aq, sc); + ixl_vc_enqueue(&sc->vc_mgr, &sc->config_queues_cmd, + IXLV_FLAG_AQ_CONFIGURE_QUEUES, ixl_init_cmd_complete, sc); /* Set up RSS */ ixlv_config_rss(sc); /* Map vectors */ - sc->aq_required |= IXLV_FLAG_AQ_MAP_VECTORS; - callout_reset(&sc->aq_task, IXLV_CALLOUT_TIMO, - ixlv_sched_aq, sc); + ixl_vc_enqueue(&sc->vc_mgr, &sc->map_vectors_cmd, + IXLV_FLAG_AQ_MAP_VECTORS, ixl_init_cmd_complete, sc); /* Enable queues */ - sc->aq_required |= IXLV_FLAG_AQ_ENABLE_QUEUES; - callout_reset(&sc->aq_task, IXLV_CALLOUT_TIMO, - ixlv_sched_aq, sc); + ixl_vc_enqueue(&sc->vc_mgr, &sc->enable_queues_cmd, + IXLV_FLAG_AQ_ENABLE_QUEUES, ixl_init_cmd_complete, sc); /* Start the local timer */ callout_reset(&sc->timer, hz, ixlv_local_timer, sc); @@ -1569,7 +1565,8 @@ ixlv_register_vlan(void *arg, struct ifnet *ifp, u16 vtag) SLIST_INSERT_HEAD(sc->vlan_filters, v, next); v->vlan = vtag; v->flags = IXL_FILTER_ADD; - sc->aq_required |= IXLV_FLAG_AQ_ADD_VLAN_FILTER; + ixl_vc_enqueue(&sc->vc_mgr, &sc->add_vlan_cmd, + IXLV_FLAG_AQ_ADD_VLAN_FILTER, ixl_init_cmd_complete, sc); mtx_unlock(&sc->mtx); return; } @@ -1602,7 +1599,8 @@ ixlv_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag) } } if (i) - sc->aq_required |= IXLV_FLAG_AQ_DEL_VLAN_FILTER; + ixl_vc_enqueue(&sc->vc_mgr, &sc->del_vlan_cmd, + IXLV_FLAG_AQ_DEL_VLAN_FILTER, ixl_init_cmd_complete, sc); mtx_unlock(&sc->mtx); return; } @@ -2045,7 +2043,9 @@ ixlv_init_multi(struct ixl_vsi *vsi) } } if (mcnt > 0) - sc->aq_required |= IXLV_FLAG_AQ_DEL_MAC_FILTER; + ixl_vc_enqueue(&sc->vc_mgr, &sc->del_multi_cmd, + IXLV_FLAG_AQ_DEL_MAC_FILTER, ixl_init_cmd_complete, + sc); IOCTL_DBG_IF(vsi->ifp, "end"); } @@ -2076,7 +2076,9 @@ ixlv_add_multi(struct ixl_vsi *vsi) /* delete all multicast filters */ ixlv_init_multi(vsi); sc->promiscuous_flags |= I40E_FLAG_VF_MULTICAST_PROMISC; - sc->aq_required |= IXLV_FLAG_AQ_CONFIGURE_PROMISC; + ixl_vc_enqueue(&sc->vc_mgr, &sc->add_multi_cmd, + IXLV_FLAG_AQ_CONFIGURE_PROMISC, ixl_init_cmd_complete, + sc); IOCTL_DEBUGOUT("%s: end: too many filters", __func__); return; } @@ -2097,7 +2099,9 @@ ixlv_add_multi(struct ixl_vsi *vsi) ** added to hw list */ if (mcnt > 0) - sc->aq_required |= IXLV_FLAG_AQ_ADD_MAC_FILTER; + 
ixl_vc_enqueue(&sc->vc_mgr, &sc->add_multi_cmd, + IXLV_FLAG_AQ_ADD_MAC_FILTER, ixl_init_cmd_complete, + sc); IOCTL_DBG_IF(ifp, "end"); } @@ -2146,7 +2150,9 @@ ixlv_del_multi(struct ixl_vsi *vsi) if_maddr_runlock(ifp); if (mcnt > 0) - sc->aq_required |= IXLV_FLAG_AQ_DEL_MAC_FILTER; + ixl_vc_enqueue(&sc->vc_mgr, &sc->del_multi_cmd, + IXLV_FLAG_AQ_DEL_MAC_FILTER, ixl_init_cmd_complete, + sc); IOCTL_DBG_IF(ifp, "end"); } @@ -2224,6 +2230,8 @@ ixlv_local_timer(void *arg) if (oldval != val) wr32(hw, hw->aq.asq.len, val); + ixlv_request_stats(sc); + /* clean and process any events */ taskqueue_enqueue(sc->tq, &sc->aq_irq); @@ -2313,13 +2321,20 @@ ixlv_update_link_status(struct ixlv_sc *sc) static void ixlv_stop(struct ixlv_sc *sc) { + struct ifnet *ifp; + int start; mtx_assert(&sc->mtx, MA_OWNED); - INIT_DBG_IF(&sc->vsi->ifp, "begin"); + ifp = sc->vsi.ifp; + INIT_DBG_IF(ifp, "begin"); + + ixl_vc_flush(&sc->vc_mgr); + ixlv_disable_queues(sc); - sc->aq_required |= IXLV_FLAG_AQ_DISABLE_QUEUES; - callout_reset(&sc->aq_task, IXLV_CALLOUT_TIMO, - ixlv_sched_aq, sc); + start = ticks; + while ((ifp->if_drv_flags & IFF_DRV_RUNNING) && + ((ticks - start) < hz/10)) + ixlv_do_adminq_locked(sc); /* Stop the local timer */ callout_stop(&sc->timer); @@ -2447,7 +2462,8 @@ ixlv_setup_vlan_filters(struct ixlv_sc *sc) if (cnt == 0) return; - sc->aq_required |= IXLV_FLAG_AQ_ADD_VLAN_FILTER; + ixl_vc_enqueue(&sc->vc_mgr, &sc->add_vlan_cmd, + IXLV_FLAG_AQ_ADD_VLAN_FILTER, ixl_init_cmd_complete, sc); return; } @@ -2496,24 +2512,28 @@ static void ixlv_do_adminq(void *context, int pending) { struct ixlv_sc *sc = context; + + mtx_lock(&sc->mtx); + ixlv_do_adminq_locked(sc); + mtx_unlock(&sc->mtx); + return; +} + +static void +ixlv_do_adminq_locked(struct ixlv_sc *sc) +{ struct i40e_hw *hw = &sc->hw; struct i40e_arq_event_info event; struct i40e_virtchnl_msg *v_msg; i40e_status ret; u16 result = 0; + IXL_CORE_LOCK_ASSERT(sc); event.buf_len = IXL_AQ_BUF_SZ; - event.msg_buf = malloc(event.buf_len, - M_DEVBUF, M_NOWAIT | M_ZERO); - if (!event.msg_buf) { - printf("Unable to allocate adminq memory\n"); - return; - } + event.msg_buf = sc->aq_buffer; v_msg = (struct i40e_virtchnl_msg *)&event.desc; - mtx_lock(&sc->mtx); - /* clean and process any events */ do { ret = i40e_clean_arq_element(hw, &event, &result); if (ret) @@ -2525,84 +2545,6 @@ ixlv_do_adminq(void *context, int pending) } while (result); ixlv_enable_adminq_irq(hw); - free(event.msg_buf, M_DEVBUF); - mtx_unlock(&sc->mtx); - return; -} - -/* -** ixlv_sched_aq - Periodic scheduling tasklet -** -*/ -static void -ixlv_sched_aq(void *context) -{ - struct ixlv_sc *sc = context; - struct ixl_vsi *vsi = &sc->vsi; - - /* This is driven by a callout, don't spin */ - if (!mtx_trylock(&sc->mtx)) - goto done_nolock; - - if (sc->init_state == IXLV_RESET_PENDING) - goto done; - - /* Process requested admin queue tasks */ - if (sc->aq_pending) - goto done; - - if (sc->aq_required & IXLV_FLAG_AQ_MAP_VECTORS) { - ixlv_map_queues(sc); - goto done; - } - - if (sc->aq_required & IXLV_FLAG_AQ_ADD_MAC_FILTER) { - ixlv_add_ether_filters(sc); - goto done; - } - - if (sc->aq_required & IXLV_FLAG_AQ_ADD_VLAN_FILTER) { - ixlv_add_vlans(sc); - goto done; - } - - if (sc->aq_required & IXLV_FLAG_AQ_DEL_MAC_FILTER) { - ixlv_del_ether_filters(sc); - goto done; - } - - if (sc->aq_required & IXLV_FLAG_AQ_DEL_VLAN_FILTER) { - ixlv_del_vlans(sc); - goto done; - } - - if (sc->aq_required & IXLV_FLAG_AQ_CONFIGURE_QUEUES) { - ixlv_configure_queues(sc); - goto done; - } - - if (sc->aq_required & 
IXLV_FLAG_AQ_DISABLE_QUEUES) { - ixlv_disable_queues(sc); - goto done; - } - - if (sc->aq_required & IXLV_FLAG_AQ_ENABLE_QUEUES) { - ixlv_enable_queues(sc); - goto done; - } - - /* Do stats request only if no other AQ operations requested */ - if (vsi->ifp->if_drv_flags & IFF_DRV_RUNNING) - ixlv_request_stats(sc); - -done: - mtx_unlock(&sc->mtx); -done_nolock: - if (sc->aq_required) /* Reschedule */ - callout_reset(&sc->aq_task, IXLV_CALLOUT_TIMO, - ixlv_sched_aq, sc); - else - callout_reset(&sc->aq_task, 2 * hz, ixlv_sched_aq, sc); } static void diff --git a/sys/dev/ixl/ixl_vc_mgr.h b/sys/dev/ixl/ixl_vc_mgr.h new file mode 100644 index 0000000..933717c --- /dev/null +++ b/sys/dev/ixl/ixl_vc_mgr.h @@ -0,0 +1,75 @@ +/****************************************************************************** + + Copyright (c) 2013-2014, Intel Corporation + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + POSSIBILITY OF SUCH DAMAGE. 
+ +******************************************************************************/ + +#ifndef _I40E_VC_MGR_H_ +#define _I40E_VC_MGR_H_ + +#include <sys/queue.h> + +struct ixl_vc_cmd; + +typedef void ixl_vc_callback_t(struct ixl_vc_cmd *, void *, + enum i40e_status_code); + + +#define IXLV_VC_CMD_FLAG_BUSY 0x0001 + +struct ixl_vc_cmd +{ + uint32_t request; + uint32_t flags; + + ixl_vc_callback_t *callback; + void *arg; + + TAILQ_ENTRY(ixl_vc_cmd) next; +}; + +struct ixl_vc_mgr +{ + struct ixlv_sc *sc; + struct ixl_vc_cmd *current; + struct callout callout; + + TAILQ_HEAD(, ixl_vc_cmd) pending; +}; + +#define IXL_VC_TIMEOUT (2 * hz) + +void ixl_vc_init_mgr(struct ixlv_sc *, struct ixl_vc_mgr *); +void ixl_vc_enqueue(struct ixl_vc_mgr *, struct ixl_vc_cmd *, + uint32_t, ixl_vc_callback_t *, void *); +void ixl_vc_flush(struct ixl_vc_mgr *mgr); + +#endif + diff --git a/sys/dev/ixl/ixlv.h b/sys/dev/ixl/ixlv.h index a5bfe13..f2b53c2 100644 --- a/sys/dev/ixl/ixlv.h +++ b/sys/dev/ixl/ixlv.h @@ -36,6 +36,8 @@ #ifndef _IXLV_H_ #define _IXLV_H_ +#include "ixl_vc_mgr.h" + #define IXLV_AQ_MAX_ERR 100 #define IXLV_MAX_FILTERS 128 #define IXLV_MAX_QUEUES 16 @@ -111,12 +113,10 @@ struct ixlv_sc { struct ifmedia media; struct callout timer; - struct callout aq_task; int msix; int if_flags; struct mtx mtx; - struct mtx aq_task_mtx; u32 qbase; u32 admvec; @@ -138,11 +138,19 @@ struct ixlv_sc { /* Admin queue task flags */ u32 aq_wait_count; - u32 aq_required; - u32 aq_pending; + + struct ixl_vc_mgr vc_mgr; + struct ixl_vc_cmd add_mac_cmd; + struct ixl_vc_cmd del_mac_cmd; + struct ixl_vc_cmd config_queues_cmd; + struct ixl_vc_cmd map_vectors_cmd; + struct ixl_vc_cmd enable_queues_cmd; + struct ixl_vc_cmd add_vlan_cmd; + struct ixl_vc_cmd del_vlan_cmd; + struct ixl_vc_cmd add_multi_cmd; + struct ixl_vc_cmd del_multi_cmd; /* Virtual comm channel */ - enum i40e_virtchnl_ops current_op; struct i40e_virtchnl_vf_resource *vf_res; struct i40e_virtchnl_vsi_resource *vsi_res; @@ -158,8 +166,11 @@ struct ixlv_sc { u8 disable_queues_done; u8 add_ether_done; u8 del_ether_done; + + u8 aq_buffer[IXL_AQ_BUF_SZ]; }; +#define IXL_CORE_LOCK_ASSERT(sc) mtx_assert(&(sc)->mtx, MA_OWNED) /* ** This checks for a zero mac addr, something that will be likely ** unless the Admin on the Host has created one.
diff --git a/sys/dev/ixl/ixlvc.c b/sys/dev/ixl/ixlvc.c index 1f912b5..87ebebe 100644 --- a/sys/dev/ixl/ixlvc.c +++ b/sys/dev/ixl/ixlvc.c @@ -47,6 +47,12 @@ #define IXLV_BUSY_WAIT_DELAY 10 #define IXLV_BUSY_WAIT_COUNT 50 +static void ixl_vc_process_resp(struct ixl_vc_mgr *, uint32_t, + enum i40e_status_code); +static void ixl_vc_process_next(struct ixl_vc_mgr *mgr); +static void ixl_vc_schedule_retry(struct ixl_vc_mgr *mgr); +static void ixl_vc_send_current(struct ixl_vc_mgr *mgr); + /* ** Validate VF messages */ @@ -349,24 +355,14 @@ ixlv_configure_queues(struct ixlv_sc *sc) struct i40e_virtchnl_vsi_queue_config_info *vqci; struct i40e_virtchnl_queue_pair_info *vqpi; - - - if (sc->current_op != I40E_VIRTCHNL_OP_UNKNOWN) { - /* bail because we already have a command pending */ -#ifdef IXL_DEBUG - device_printf(dev, "%s: command %d pending\n", - __func__, sc->current_op); -#endif - return; - } pairs = vsi->num_queues; - sc->current_op = I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES; len = sizeof(struct i40e_virtchnl_vsi_queue_config_info) + (sizeof(struct i40e_virtchnl_queue_pair_info) * pairs); vqci = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO); if (!vqci) { device_printf(dev, "%s: unable to allocate memory\n", __func__); + ixl_vc_schedule_retry(&sc->vc_mgr); return; } vqci->vsi_id = sc->vsi_res->vsi_id; @@ -399,8 +395,6 @@ ixlv_configure_queues(struct ixlv_sc *sc) ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, (u8 *)vqci, len); free(vqci, M_DEVBUF); - sc->aq_pending |= IXLV_FLAG_AQ_CONFIGURE_QUEUES; - sc->aq_required &= ~IXLV_FLAG_AQ_CONFIGURE_QUEUES; } /* @@ -413,22 +407,11 @@ ixlv_enable_queues(struct ixlv_sc *sc) { struct i40e_virtchnl_queue_select vqs; - if (sc->current_op != I40E_VIRTCHNL_OP_UNKNOWN) { - /* we already have a command pending */ -#ifdef IXL_DEBUG - device_printf(sc->dev, "%s: command %d pending\n", - __func__, sc->current_op); -#endif - return; - } - sc->current_op = I40E_VIRTCHNL_OP_ENABLE_QUEUES; vqs.vsi_id = sc->vsi_res->vsi_id; vqs.tx_queues = (1 << sc->vsi_res->num_queue_pairs) - 1; vqs.rx_queues = vqs.tx_queues; ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_ENABLE_QUEUES, (u8 *)&vqs, sizeof(vqs)); - sc->aq_pending |= IXLV_FLAG_AQ_ENABLE_QUEUES; - sc->aq_required &= ~IXLV_FLAG_AQ_ENABLE_QUEUES; } /* @@ -441,22 +424,11 @@ ixlv_disable_queues(struct ixlv_sc *sc) { struct i40e_virtchnl_queue_select vqs; - if (sc->current_op != I40E_VIRTCHNL_OP_UNKNOWN) { - /* we already have a command pending */ -#ifdef IXL_DEBUG - device_printf(sc->dev, "%s: command %d pending\n", - __func__, sc->current_op); -#endif - return; - } - sc->current_op = I40E_VIRTCHNL_OP_DISABLE_QUEUES; vqs.vsi_id = sc->vsi_res->vsi_id; vqs.tx_queues = (1 << sc->vsi_res->num_queue_pairs) - 1; vqs.rx_queues = vqs.tx_queues; ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_DISABLE_QUEUES, (u8 *)&vqs, sizeof(vqs)); - sc->aq_pending |= IXLV_FLAG_AQ_DISABLE_QUEUES; - sc->aq_required &= ~IXLV_FLAG_AQ_DISABLE_QUEUES; } /* @@ -473,16 +445,6 @@ ixlv_map_queues(struct ixlv_sc *sc) struct ixl_vsi *vsi = &sc->vsi; struct ixl_queue *que = vsi->queues; - if (sc->current_op != I40E_VIRTCHNL_OP_UNKNOWN) { - /* we already have a command pending */ -#ifdef IXL_DEBUG - device_printf(sc->dev, "%s: command %d pending\n", - __func__, sc->current_op); -#endif - return; - } - sc->current_op = I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP; - /* How many queue vectors, adminq uses one */ q = sc->msix - 1; @@ -491,6 +453,7 @@ ixlv_map_queues(struct ixlv_sc *sc) vm = malloc(len, M_DEVBUF, M_NOWAIT); if (!vm) { printf("%s: unable to allocate memory\n", __func__); + 
ixl_vc_schedule_retry(&sc->vc_mgr); return; } @@ -512,8 +475,6 @@ ixlv_map_queues(struct ixlv_sc *sc) ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP, (u8 *)vm, len); free(vm, M_DEVBUF); - sc->aq_pending |= IXLV_FLAG_AQ_MAP_VECTORS; - sc->aq_required &= ~IXLV_FLAG_AQ_MAP_VECTORS; } /* @@ -529,11 +490,6 @@ ixlv_add_vlans(struct ixlv_sc *sc) device_t dev = sc->dev; int len, i = 0, cnt = 0; - if (sc->current_op != I40E_VIRTCHNL_OP_UNKNOWN) - return; - - sc->current_op = I40E_VIRTCHNL_OP_ADD_VLAN; - /* Get count of VLAN filters to add */ SLIST_FOREACH(f, sc->vlan_filters, next) { if (f->flags & IXL_FILTER_ADD) @@ -541,8 +497,8 @@ ixlv_add_vlans(struct ixlv_sc *sc) } if (!cnt) { /* no work... */ - sc->aq_required &= ~IXLV_FLAG_AQ_ADD_VLAN_FILTER; - sc->current_op = I40E_VIRTCHNL_OP_UNKNOWN; + ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_ADD_VLAN_FILTER, + I40E_SUCCESS); return; } @@ -552,6 +508,7 @@ ixlv_add_vlans(struct ixlv_sc *sc) if (len > IXL_AQ_BUF_SZ) { device_printf(dev, "%s: Exceeded Max AQ Buf size\n", __func__); + ixl_vc_schedule_retry(&sc->vc_mgr); return; } @@ -559,6 +516,7 @@ ixlv_add_vlans(struct ixlv_sc *sc) if (!v) { device_printf(dev, "%s: unable to allocate memory\n", __func__); + ixl_vc_schedule_retry(&sc->vc_mgr); return; } @@ -577,14 +535,14 @@ ixlv_add_vlans(struct ixlv_sc *sc) } if (i == 0) { /* Should not happen... */ device_printf(dev, "%s: i == 0?\n", __func__); + ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_ADD_VLAN_FILTER, + I40E_SUCCESS); return; } ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_ADD_VLAN, (u8 *)v, len); free(v, M_DEVBUF); /* add stats? */ - sc->aq_pending |= IXLV_FLAG_AQ_ADD_VLAN_FILTER; - sc->aq_required &= ~IXLV_FLAG_AQ_ADD_VLAN_FILTER; } /* @@ -600,11 +558,6 @@ ixlv_del_vlans(struct ixlv_sc *sc) struct ixlv_vlan_filter *f, *ftmp; int len, i = 0, cnt = 0; - if (sc->current_op != I40E_VIRTCHNL_OP_UNKNOWN) - return; - - sc->current_op = I40E_VIRTCHNL_OP_DEL_VLAN; - /* Get count of VLAN filters to delete */ SLIST_FOREACH(f, sc->vlan_filters, next) { if (f->flags & IXL_FILTER_DEL) @@ -612,8 +565,8 @@ ixlv_del_vlans(struct ixlv_sc *sc) } if (!cnt) { /* no work... */ - sc->aq_required &= ~IXLV_FLAG_AQ_DEL_VLAN_FILTER; - sc->current_op = I40E_VIRTCHNL_OP_UNKNOWN; + ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_DEL_VLAN_FILTER, + I40E_SUCCESS); return; } @@ -623,6 +576,7 @@ ixlv_del_vlans(struct ixlv_sc *sc) if (len > IXL_AQ_BUF_SZ) { device_printf(dev, "%s: Exceeded Max AQ Buf size\n", __func__); + ixl_vc_schedule_retry(&sc->vc_mgr); return; } @@ -630,6 +584,7 @@ ixlv_del_vlans(struct ixlv_sc *sc) if (!v) { device_printf(dev, "%s: unable to allocate memory\n", __func__); + ixl_vc_schedule_retry(&sc->vc_mgr); return; } @@ -649,14 +604,14 @@ ixlv_del_vlans(struct ixlv_sc *sc) } if (i == 0) { /* Should not happen... */ device_printf(dev, "%s: i == 0?\n", __func__); + ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_DEL_VLAN_FILTER, + I40E_SUCCESS); return; } ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_DEL_VLAN, (u8 *)v, len); free(v, M_DEVBUF); /* add stats? 
*/ - sc->aq_pending |= IXLV_FLAG_AQ_DEL_VLAN_FILTER; - sc->aq_required &= ~IXLV_FLAG_AQ_DEL_VLAN_FILTER; } @@ -673,11 +628,6 @@ ixlv_add_ether_filters(struct ixlv_sc *sc) device_t dev = sc->dev; int len, j = 0, cnt = 0; - if (sc->current_op != I40E_VIRTCHNL_OP_UNKNOWN) - return; - - sc->current_op = I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS; - /* Get count of MAC addresses to add */ SLIST_FOREACH(f, sc->mac_filters, next) { if (f->flags & IXL_FILTER_ADD) @@ -685,8 +635,8 @@ ixlv_add_ether_filters(struct ixlv_sc *sc) } if (cnt == 0) { /* Should not happen... */ DDPRINTF(dev, "cnt == 0, exiting..."); - sc->current_op = I40E_VIRTCHNL_OP_UNKNOWN; - sc->aq_required &= ~IXLV_FLAG_AQ_ADD_MAC_FILTER; + ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_ADD_MAC_FILTER, + I40E_SUCCESS); wakeup(&sc->add_ether_done); return; } @@ -698,6 +648,7 @@ ixlv_add_ether_filters(struct ixlv_sc *sc) if (a == NULL) { device_printf(dev, "%s: Failed to get memory for " "virtchnl_ether_addr_list\n", __func__); + ixl_vc_schedule_retry(&sc->vc_mgr); return; } a->vsi_id = sc->vsi.id; @@ -722,8 +673,6 @@ ixlv_add_ether_filters(struct ixlv_sc *sc) I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS, (u8 *)a, len); /* add stats? */ free(a, M_DEVBUF); - sc->aq_pending |= IXLV_FLAG_AQ_ADD_MAC_FILTER; - sc->aq_required &= ~IXLV_FLAG_AQ_ADD_MAC_FILTER; return; } @@ -740,11 +689,6 @@ ixlv_del_ether_filters(struct ixlv_sc *sc) struct ixlv_mac_filter *f, *f_temp; int len, j = 0, cnt = 0; - if (sc->current_op != I40E_VIRTCHNL_OP_UNKNOWN) - return; - - sc->current_op = I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS; - /* Get count of MAC addresses to delete */ SLIST_FOREACH(f, sc->mac_filters, next) { if (f->flags & IXL_FILTER_DEL) @@ -752,8 +696,8 @@ ixlv_del_ether_filters(struct ixlv_sc *sc) } if (cnt == 0) { DDPRINTF(dev, "cnt == 0, exiting..."); - sc->aq_required &= ~IXLV_FLAG_AQ_DEL_MAC_FILTER; - sc->current_op = I40E_VIRTCHNL_OP_UNKNOWN; + ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_DEL_MAC_FILTER, + I40E_SUCCESS); wakeup(&sc->del_ether_done); return; } @@ -765,6 +709,7 @@ ixlv_del_ether_filters(struct ixlv_sc *sc) if (d == NULL) { device_printf(dev, "%s: Failed to get memory for " "virtchnl_ether_addr_list\n", __func__); + ixl_vc_schedule_retry(&sc->vc_mgr); return; } d->vsi_id = sc->vsi.id; @@ -787,8 +732,6 @@ ixlv_del_ether_filters(struct ixlv_sc *sc) I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS, (u8 *)d, len); /* add stats? 
*/ free(d, M_DEVBUF); - sc->aq_pending |= IXLV_FLAG_AQ_DEL_MAC_FILTER; - sc->aq_required &= ~IXLV_FLAG_AQ_DEL_MAC_FILTER; return; } @@ -806,7 +749,6 @@ ixlv_request_reset(struct ixlv_sc *sc) */ wr32(&sc->hw, I40E_VFGEN_RSTAT, I40E_VFR_INPROGRESS); ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_RESET_VF, NULL, 0); - sc->current_op = I40E_VIRTCHNL_OP_UNKNOWN; } /* @@ -819,16 +761,10 @@ ixlv_request_stats(struct ixlv_sc *sc) struct i40e_virtchnl_queue_select vqs; int error = 0; - if (sc->current_op != I40E_VIRTCHNL_OP_UNKNOWN) - return; - - sc->current_op = I40E_VIRTCHNL_OP_GET_STATS; vqs.vsi_id = sc->vsi_res->vsi_id; error = ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_GET_STATS, (u8 *)&vqs, sizeof(vqs)); /* Low priority, ok if it fails */ - if (error) - sc->current_op = I40E_VIRTCHNL_OP_UNKNOWN; } /* @@ -897,14 +833,6 @@ ixlv_vc_completion(struct ixlv_sc *sc, return; } - if (v_opcode != sc->current_op - && sc->current_op != I40E_VIRTCHNL_OP_GET_STATS) { - device_printf(dev, "%s: Pending op is %d, received %d.\n", - __func__, sc->current_op, v_opcode); - sc->current_op = I40E_VIRTCHNL_OP_UNKNOWN; - return; - } - /* Catch-all error response */ if (v_retval) { device_printf(dev, @@ -922,26 +850,32 @@ ixlv_vc_completion(struct ixlv_sc *sc, ixlv_update_stats_counters(sc, (struct i40e_eth_stats *)msg); break; case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS: - sc->aq_pending &= ~(IXLV_FLAG_AQ_ADD_MAC_FILTER); + ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_ADD_MAC_FILTER, + v_retval); if (v_retval) { device_printf(dev, "WARNING: Error adding VF mac filter!\n"); device_printf(dev, "WARNING: Device may not receive traffic!\n"); } break; case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS: - sc->aq_pending &= ~(IXLV_FLAG_AQ_DEL_MAC_FILTER); + ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_DEL_MAC_FILTER, + v_retval); break; case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE: - sc->aq_pending &= ~(IXLV_FLAG_AQ_CONFIGURE_PROMISC); + ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_CONFIGURE_PROMISC, + v_retval); break; case I40E_VIRTCHNL_OP_ADD_VLAN: - sc->aq_pending &= ~(IXLV_FLAG_AQ_ADD_VLAN_FILTER); + ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_ADD_VLAN_FILTER, + v_retval); break; case I40E_VIRTCHNL_OP_DEL_VLAN: - sc->aq_pending &= ~(IXLV_FLAG_AQ_DEL_VLAN_FILTER); + ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_DEL_VLAN_FILTER, + v_retval); break; case I40E_VIRTCHNL_OP_ENABLE_QUEUES: - sc->aq_pending &= ~(IXLV_FLAG_AQ_ENABLE_QUEUES); + ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_ENABLE_QUEUES, + v_retval); if (v_retval == 0) { /* Turn on all interrupts */ ixlv_enable_intr(vsi); @@ -951,7 +885,8 @@ ixlv_vc_completion(struct ixlv_sc *sc, } break; case I40E_VIRTCHNL_OP_DISABLE_QUEUES: - sc->aq_pending &= ~(IXLV_FLAG_AQ_DISABLE_QUEUES); + ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_DISABLE_QUEUES, + v_retval); if (v_retval == 0) { /* Turn off all interrupts */ ixlv_disable_intr(vsi); @@ -960,10 +895,12 @@ ixlv_vc_completion(struct ixlv_sc *sc, } break; case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES: - sc->aq_pending &= ~(IXLV_FLAG_AQ_CONFIGURE_QUEUES); + ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_CONFIGURE_QUEUES, + v_retval); break; case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP: - sc->aq_pending &= ~(IXLV_FLAG_AQ_MAP_VECTORS); + ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_MAP_VECTORS, + v_retval); break; default: device_printf(dev, @@ -971,6 +908,178 @@ ixlv_vc_completion(struct ixlv_sc *sc, __func__, v_opcode); break; } - sc->current_op = I40E_VIRTCHNL_OP_UNKNOWN; return; } + +static void +ixl_vc_send_cmd(struct ixlv_sc *sc, uint32_t request) +{ + + 
switch (request) { + case IXLV_FLAG_AQ_MAP_VECTORS: + ixlv_map_queues(sc); + break; + + case IXLV_FLAG_AQ_ADD_MAC_FILTER: + ixlv_add_ether_filters(sc); + break; + + case IXLV_FLAG_AQ_ADD_VLAN_FILTER: + ixlv_add_vlans(sc); + break; + + case IXLV_FLAG_AQ_DEL_MAC_FILTER: + ixlv_del_ether_filters(sc); + break; + + case IXLV_FLAG_AQ_DEL_VLAN_FILTER: + ixlv_del_vlans(sc); + break; + + case IXLV_FLAG_AQ_CONFIGURE_QUEUES: + ixlv_configure_queues(sc); + break; + + case IXLV_FLAG_AQ_DISABLE_QUEUES: + ixlv_disable_queues(sc); + break; + + case IXLV_FLAG_AQ_ENABLE_QUEUES: + ixlv_enable_queues(sc); + break; + } +} + +void +ixl_vc_init_mgr(struct ixlv_sc *sc, struct ixl_vc_mgr *mgr) +{ + + mgr->sc = sc; + mgr->current = NULL; + TAILQ_INIT(&mgr->pending); + callout_init_mtx(&mgr->callout, &sc->mtx, 0); +} + +static void +ixl_vc_process_completion(struct ixl_vc_mgr *mgr, enum i40e_status_code err) +{ + struct ixl_vc_cmd *cmd; + + cmd = mgr->current; + mgr->current = NULL; + cmd->flags &= ~IXLV_VC_CMD_FLAG_BUSY; + + cmd->callback(cmd, cmd->arg, err); + ixl_vc_process_next(mgr); +} + +static void +ixl_vc_process_resp(struct ixl_vc_mgr *mgr, uint32_t request, + enum i40e_status_code err) +{ + struct ixl_vc_cmd *cmd; + + cmd = mgr->current; + if (cmd == NULL || cmd->request != request) + return; + + callout_stop(&mgr->callout); + ixl_vc_process_completion(mgr, err); +} + +static void +ixl_vc_cmd_timeout(void *arg) +{ + + ixl_vc_process_completion(arg, I40E_ERR_TIMEOUT); +} + +static void +ixl_vc_cmd_retry(void *arg) +{ + + ixl_vc_send_current(arg); +} + +static void +ixl_vc_send_current(struct ixl_vc_mgr *mgr) +{ + struct ixl_vc_cmd *cmd; + + cmd = mgr->current; + ixl_vc_send_cmd(mgr->sc, cmd->request); + callout_reset(&mgr->callout, IXL_VC_TIMEOUT, ixl_vc_cmd_timeout, mgr); +} + +static void +ixl_vc_process_next(struct ixl_vc_mgr *mgr) +{ + struct ixl_vc_cmd *cmd; + + if (mgr->current != NULL) + return; + + if (TAILQ_EMPTY(&mgr->pending)) + return; + + cmd = TAILQ_FIRST(&mgr->pending); + TAILQ_REMOVE(&mgr->pending, cmd, next); + + mgr->current = cmd; + ixl_vc_send_current(mgr); +} + +static void +ixl_vc_schedule_retry(struct ixl_vc_mgr *mgr) +{ + + callout_reset(&mgr->callout, howmany(hz, 100), ixl_vc_cmd_retry, mgr); +} + +void +ixl_vc_enqueue(struct ixl_vc_mgr *mgr, struct ixl_vc_cmd *cmd, + uint32_t req, ixl_vc_callback_t *callback, void *arg) +{ + + IXL_CORE_LOCK_ASSERT(mgr->sc); + + if (cmd->flags & IXLV_VC_CMD_FLAG_BUSY) { + if (mgr->current == cmd) + mgr->current = NULL; + else + TAILQ_REMOVE(&mgr->pending, cmd, next); + } + + cmd->request = req; + cmd->callback = callback; + cmd->arg = arg; + cmd->flags |= IXLV_VC_CMD_FLAG_BUSY; + TAILQ_INSERT_TAIL(&mgr->pending, cmd, next); + + ixl_vc_process_next(mgr); +} + +void +ixl_vc_flush(struct ixl_vc_mgr *mgr) +{ + struct ixl_vc_cmd *cmd; + + IXL_CORE_LOCK_ASSERT(mgr->sc); + KASSERT(TAILQ_EMPTY(&mgr->pending) || mgr->current != NULL, + ("ixlv: pending commands waiting but no command in progress")); + + cmd = mgr->current; + if (cmd != NULL) { + mgr->current = NULL; + cmd->flags &= ~IXLV_VC_CMD_FLAG_BUSY; + cmd->callback(cmd, cmd->arg, I40E_ERR_ADAPTER_STOPPED); + } + + while ((cmd = TAILQ_FIRST(&mgr->pending)) != NULL) { + TAILQ_REMOVE(&mgr->pending, cmd, next); + cmd->flags &= ~IXLV_VC_CMD_FLAG_BUSY; + cmd->callback(cmd, cmd->arg, I40E_ERR_ADAPTER_STOPPED); + } + + callout_stop(&mgr->callout); +} -- 1.9.3
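
Usage note (illustrative only, not part of the diff above): a minimal sketch of how a caller drives the new virtchnl command manager, based on the declarations in ixl_vc_mgr.h and the call sites in if_ixlv.c in this patch. The callback name ixlv_example_cmd_done is invented for the example; all other types, fields, flags, and functions come from the patch itself.

static void
ixlv_example_cmd_done(struct ixl_vc_cmd *cmd, void *arg,
    enum i40e_status_code code)
{
        struct ixlv_sc *sc = arg;

        /*
         * ixl_vc_flush() completes cancelled commands with
         * I40E_ERR_ADAPTER_STOPPED (e.g. on ifconfig down), so that
         * case is not treated as an error.
         */
        if (code != I40E_SUCCESS && code != I40E_ERR_ADAPTER_STOPPED)
                if_printf(sc->vsi.ifp, "request %u failed: %d\n",
                    cmd->request, code);
}

        /* Once at attach, after sc->mtx has been initialized: */
        ixl_vc_init_mgr(sc, &sc->vc_mgr);

        /*
         * With sc->mtx held (IXL_CORE_LOCK_ASSERT), queue a request.  The
         * manager sends one command at a time, arms a callout of
         * IXL_VC_TIMEOUT, and runs the callback when ixlv_vc_completion()
         * sees the matching PF response, on timeout (I40E_ERR_TIMEOUT),
         * or when the command is flushed.
         */
        ixl_vc_enqueue(&sc->vc_mgr, &sc->enable_queues_cmd,
            IXLV_FLAG_AQ_ENABLE_QUEUES, ixlv_example_cmd_done, sc);

        /* On stop/detach, cancel the in-flight and pending commands: */
        ixl_vc_flush(&sc->vc_mgr);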