diff --git a/sys/dev/cxgbe/adapter.h b/sys/dev/cxgbe/adapter.h index 3eb39ef5e98..0d5b9ea6a0d 100644 --- a/sys/dev/cxgbe/adapter.h +++ b/sys/dev/cxgbe/adapter.h @@ -160,7 +160,7 @@ enum { CHK_MBOX_ACCESS = (1 << 2), MASTER_PF = (1 << 3), ADAP_SYSCTL_CTX = (1 << 4), - ADAP_ERR = (1 << 5), + /* 5 is unused, used to be ADAP_ERR. */ BUF_PACKING_OK = (1 << 6), IS_VF = (1 << 7), KERN_TLS_ON = (1 << 8), /* HW is configured for KERN_TLS */ @@ -184,6 +184,8 @@ enum { DF_DISABLE_TCB_CACHE = (1 << 2), /* Disable TCB cache (T6+) */ DF_DISABLE_CFG_RETRY = (1 << 3), /* Disable fallback config */ DF_VERBOSE_SLOWINTR = (1 << 4), /* Chatty slow intr handler */ + DF_USE_LDST = (1 << 5), /* Prefer LDST over backdoor */ + DF_VERBOSE_CUDBG = (1 << 6), /* Chatty cudbg dump */ }; #define IS_DOOMED(vi) ((vi)->flags & DOOMED) @@ -852,6 +854,12 @@ struct devnames { struct clip_entry; +struct saved_cudbg_dump { + STAILQ_ENTRY(saved_cudbg_dump) link; + uint32_t dump_len; + uint8_t dump_buf[]; +}; + struct adapter { SLIST_ENTRY(adapter) link; device_t dev; @@ -900,7 +908,6 @@ struct adapter { int nrawf; struct taskqueue *tq[MAX_NCHAN]; /* General purpose taskqueues */ - struct task async_event_task; struct port_info *port[MAX_NPORTS]; uint8_t chan_map[MAX_NCHAN]; /* channel -> port */ @@ -930,6 +937,7 @@ struct adapter { int active_ulds; /* ULDs activated on this adapter */ int flags; int debug_flags; + int ignore_err_intr; char ifp_lockname[16]; struct mtx ifp_lock; @@ -986,6 +994,7 @@ struct adapter { struct mtx tc_lock; struct task tc_task; + struct task fatal_err_task; struct task reset_task; const void *reset_thread; int num_resets; @@ -999,6 +1008,10 @@ struct adapter { int sensor_resets; struct callout ktls_tick; + + struct mtx dump_lock; + struct cv dump_cv; + STAILQ_HEAD(, saved_cudbg_dump) dump_list; }; #define ADAPTER_LOCK(sc) mtx_lock(&(sc)->sc_lock) @@ -1201,15 +1214,16 @@ tx_resume_threshold(struct sge_eq *eq) return (eq->sidx / 4); } -static inline int -t4_use_ldst(struct adapter *sc) +static inline bool +t4_fw_ok(struct adapter *sc) { + return (sc->flags & FW_OK); +} -#ifdef notyet - return (sc->flags & FW_OK || !sc->use_bd); -#else - return (0); -#endif +static inline bool +t4_use_ldst(struct adapter *sc) +{ + return (t4_fw_ok(sc) && sc->debug_flags & DF_USE_LDST); } static inline void @@ -1259,6 +1273,9 @@ int t4_os_pci_save_state(struct adapter *); int t4_os_pci_restore_state(struct adapter *); void t4_os_portmod_changed(struct port_info *); void t4_os_link_changed(struct port_info *); +void t4_os_reg_lock(struct adapter *); +void t4_os_reg_lock_assert(struct adapter *); +void t4_os_reg_unlock(struct adapter *); void t4_iterate(void (*)(struct adapter *, void *), void *); void t4_init_devnames(struct adapter *); void t4_add_adapter(struct adapter *); diff --git a/sys/dev/cxgbe/common/common.h b/sys/dev/cxgbe/common/common.h index c132cb77920..1d050293091 100644 --- a/sys/dev/cxgbe/common/common.h +++ b/sys/dev/cxgbe/common/common.h @@ -49,7 +49,7 @@ enum { T5_REGMAP_SIZE = (332 * 1024), }; -enum { MEM_EDC0, MEM_EDC1, MEM_MC, MEM_MC0 = MEM_MC, MEM_MC1 }; +enum { MEM_EDC0, MEM_EDC1, MEM_MC, MEM_MC0 = MEM_MC, MEM_MC1, MEM_HMA }; enum dev_master { MASTER_CANT, MASTER_MAY, MASTER_MUST }; @@ -654,7 +654,7 @@ int t4_init_sge_params(struct adapter *adapter); int t4_init_tp_params(struct adapter *adap); int t4_filter_field_shift(const struct adapter *adap, int filter_sel); int t4_port_init(struct adapter *adap, int mbox, int pf, int vf, int port_id); -void t4_fatal_err(struct adapter *adapter, bool 
fw_error); +void t4_fatal_err(struct adapter *adapter); int t4_set_trace_filter(struct adapter *adapter, const struct trace_params *tp, int filter_index, int enable); void t4_get_trace_filter(struct adapter *adapter, struct trace_params *tp, diff --git a/sys/dev/cxgbe/common/t4_hw.c b/sys/dev/cxgbe/common/t4_hw.c index f06b2112006..527934efaf9 100644 --- a/sys/dev/cxgbe/common/t4_hw.c +++ b/sys/dev/cxgbe/common/t4_hw.c @@ -122,6 +122,7 @@ void t4_read_indirect(struct adapter *adap, unsigned int addr_reg, unsigned int data_reg, u32 *vals, unsigned int nregs, unsigned int start_idx) { + t4_os_reg_lock_assert(adap); while (nregs--) { t4_write_reg(adap, addr_reg, start_idx); *vals++ = t4_read_reg(adap, data_reg); @@ -503,7 +504,7 @@ int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd, t4_os_dump_devlog(adap); } - t4_fatal_err(adap, true); + t4_fatal_err(adap); return ret; } @@ -5356,7 +5357,7 @@ int t4_slow_intr_handler(struct adapter *adap, bool verbose) } fatal = t4_handle_intr(adap, &pl_intr_info, perr, verbose); if (fatal) - t4_fatal_err(adap, false); + t4_fatal_err(adap); return (0); } @@ -5794,12 +5795,14 @@ static void t4_tp_indirect_rw(struct adapter *adap, u32 reg_addr, u32 reg_data, indirect_access: if (rc) { + t4_os_reg_lock(adap); if (rw) t4_read_indirect(adap, reg_addr, reg_data, buff, nregs, start_index); else t4_write_indirect(adap, reg_addr, reg_data, buff, nregs, start_index); + t4_os_reg_unlock(adap); } } diff --git a/sys/dev/cxgbe/cudbg/cudbg.h b/sys/dev/cxgbe/cudbg/cudbg.h index 5d64b6f4b8a..cb244de1773 100644 --- a/sys/dev/cxgbe/cudbg/cudbg.h +++ b/sys/dev/cxgbe/cudbg/cudbg.h @@ -53,7 +53,6 @@ #define INOUT /* Error codes */ - #define CUDBG_STATUS_SUCCESS 0 #define CUDBG_STATUS_NOSPACE -2 #define CUDBG_STATUS_FLASH_WRITE_FAIL -3 @@ -93,13 +92,20 @@ #define CUDBG_STATUS_FILE_READ_FAILED -37 #define CUDBG_STATUS_CORRUPTED -38 #define CUDBG_STATUS_INVALID_INDEX -39 +#define CUDBG_STATUS_NO_DATA -40 +#define CUDBG_STATUS_PARTIAL_DATA -41 +#define CUDBG_STATUS_NO_MBOX_PERM -42 +#define CUDBG_STATUS_NO_BAR_ACCESS -43 +#define CUDBG_STATUS_IOCTL_FAILED -44 #define CUDBG_MAJOR_VERSION 1 #define CUDBG_MINOR_VERSION 14 #define CUDBG_BUILD_VERSION 0 -#define CUDBG_FILE_NAME_LEN 256 -#define CUDBG_DIR_NAME_LEN 256 +#define CUDBG_MAX_PARAMS 16 + +#define CUDBG_NTHREADS 8 + #define CUDBG_MAX_BITMAP_LEN 16 static char ATTRIBUTE_UNUSED * err_msg[] = { @@ -111,7 +117,7 @@ static char ATTRIBUTE_UNUSED * err_msg[] = { "Undefined out buf", "Callback function undefined", "Print callback function undefined", - "ADAP invalid", + "ADAP invalid. May be Invalid Interface", "Flash empty", "No adapter", "No signature", @@ -142,7 +148,12 @@ static char ATTRIBUTE_UNUSED * err_msg[] = { "Not supported", "File read fail", "Corrupted", - "Invalid Index" + "Invalid Index", + "No data found", + "Partial data", + "No valid mbox found", + "No BAR access", + "IOCTL failed", }; enum CUDBG_DBG_ENTITY_TYPE { @@ -214,13 +225,18 @@ enum CUDBG_DBG_ENTITY_TYPE { CUDBG_PBT_TABLE = 65, CUDBG_MBOX_LOG = 66, CUDBG_HMA_INDIRECT = 67, - CUDBG_MAX_ENTITY = 68, + CUDBG_HMA = 68, + CUDBG_UPLOAD = 69, + CUDBG_QDESC = 70, + CUDBG_MOD_EEPROM = 71, + CUDBG_MAX_ENTITY, }; #define ENTITY_FLAG_NULL 0 #define ENTITY_FLAG_REGISTER 1 #define ENTITY_FLAG_BINARY 2 #define ENTITY_FLAG_FW_NO_ATTACH 3 +#define ENTITY_FLAG_NEED_MBOX 4 /* file_name matches Linux cxgb4 debugfs entry names. 
*/ struct el {char *name; char *file_name; int bit; u32 flag; }; @@ -228,83 +244,95 @@ static struct el ATTRIBUTE_UNUSED entity_list[] = { {"all", "all", CUDBG_ALL, ENTITY_FLAG_NULL}, {"regdump", "regdump", CUDBG_REG_DUMP, 1 << ENTITY_FLAG_REGISTER}, /* {"reg", CUDBG_REG_DUMP},*/ - {"devlog", "devlog", CUDBG_DEV_LOG, ENTITY_FLAG_NULL}, - {"cimla", "cim_la", CUDBG_CIM_LA, ENTITY_FLAG_NULL}, - {"cimmala", "cim_ma_la", CUDBG_CIM_MA_LA, ENTITY_FLAG_NULL}, - {"cimqcfg", "cim_qcfg", CUDBG_CIM_QCFG, ENTITY_FLAG_NULL}, - {"ibqtp0", "ibq_tp0", CUDBG_CIM_IBQ_TP0, ENTITY_FLAG_NULL}, - {"ibqtp1", "ibq_tp1", CUDBG_CIM_IBQ_TP1, ENTITY_FLAG_NULL}, - {"ibqulp", "ibq_ulp", CUDBG_CIM_IBQ_ULP, ENTITY_FLAG_NULL}, - {"ibqsge0", "ibq_sge0", CUDBG_CIM_IBQ_SGE0, ENTITY_FLAG_NULL}, - {"ibqsge1", "ibq_sge1", CUDBG_CIM_IBQ_SGE1, ENTITY_FLAG_NULL}, - {"ibqncsi", "ibq_ncsi", CUDBG_CIM_IBQ_NCSI, ENTITY_FLAG_NULL}, - {"obqulp0", "obq_ulp0", CUDBG_CIM_OBQ_ULP0, ENTITY_FLAG_NULL}, - /* {"cimobqulp1", CUDBG_CIM_OBQ_ULP1},*/ - {"obqulp1", "obq_ulp1", CUDBG_CIM_OBQ_ULP1, ENTITY_FLAG_NULL}, - {"obqulp2", "obq_ulp2", CUDBG_CIM_OBQ_ULP2, ENTITY_FLAG_NULL}, - {"obqulp3", "obq_ulp3", CUDBG_CIM_OBQ_ULP3, ENTITY_FLAG_NULL}, - {"obqsge", "obq_sge", CUDBG_CIM_OBQ_SGE, ENTITY_FLAG_NULL}, - {"obqncsi", "obq_ncsi", CUDBG_CIM_OBQ_NCSI, ENTITY_FLAG_NULL}, + {"devlog", "devlog", CUDBG_DEV_LOG, 1 << ENTITY_FLAG_NEED_MBOX}, + {"cimla", "cim_la", CUDBG_CIM_LA, 1 << ENTITY_FLAG_NEED_MBOX}, + {"cimmala", "cim_ma_la", CUDBG_CIM_MA_LA, 1 << ENTITY_FLAG_NEED_MBOX}, + {"cimqcfg", "cim_qcfg", CUDBG_CIM_QCFG, 1 << ENTITY_FLAG_NEED_MBOX}, + {"ibqtp0", "ibq_tp0", CUDBG_CIM_IBQ_TP0, 1 << ENTITY_FLAG_NEED_MBOX}, + {"ibqtp1", "ibq_tp1", CUDBG_CIM_IBQ_TP1, 1 << ENTITY_FLAG_NEED_MBOX}, + {"ibqulp", "ibq_ulp", CUDBG_CIM_IBQ_ULP, 1 << ENTITY_FLAG_NEED_MBOX}, + {"ibqsge0", "ibq_sge0", CUDBG_CIM_IBQ_SGE0, 1 << ENTITY_FLAG_NEED_MBOX}, + {"ibqsge1", "ibq_sge1", CUDBG_CIM_IBQ_SGE1, 1 << ENTITY_FLAG_NEED_MBOX}, + {"ibqncsi", "ibq_ncsi", CUDBG_CIM_IBQ_NCSI, 1 << ENTITY_FLAG_NEED_MBOX}, + {"obqulp0", "obq_ulp0", CUDBG_CIM_OBQ_ULP0, 1 << ENTITY_FLAG_NEED_MBOX}, + {"obqulp1", "obq_ulp1", CUDBG_CIM_OBQ_ULP1, 1 << ENTITY_FLAG_NEED_MBOX}, + {"obqulp2", "obq_ulp2", CUDBG_CIM_OBQ_ULP2, 1 << ENTITY_FLAG_NEED_MBOX}, + {"obqulp3", "obq_ulp3", CUDBG_CIM_OBQ_ULP3, 1 << ENTITY_FLAG_NEED_MBOX}, + {"obqsge", "obq_sge", CUDBG_CIM_OBQ_SGE, 1 << ENTITY_FLAG_NEED_MBOX}, + {"obqncsi", "obq_ncsi", CUDBG_CIM_OBQ_NCSI, 1 << ENTITY_FLAG_NEED_MBOX}, {"edc0", "edc0", CUDBG_EDC0, (1 << ENTITY_FLAG_BINARY)}, {"edc1", "edc1", CUDBG_EDC1, (1 << ENTITY_FLAG_BINARY)}, {"mc0", "mc0", CUDBG_MC0, (1 << ENTITY_FLAG_BINARY)}, {"mc1", "mc1", CUDBG_MC1, (1 << ENTITY_FLAG_BINARY)}, - {"rss", "rss", CUDBG_RSS, ENTITY_FLAG_NULL}, - {"rss_pf_config", "rss_pf_config", CUDBG_RSS_PF_CONF, ENTITY_FLAG_NULL}, - {"rss_key", "rss_key", CUDBG_RSS_KEY, ENTITY_FLAG_NULL}, - {"rss_vf_config", "rss_vf_config", CUDBG_RSS_VF_CONF, ENTITY_FLAG_NULL}, + {"rss", "rss", CUDBG_RSS, 1 << ENTITY_FLAG_NEED_MBOX}, + {"rss_pf_config", "rss_pf_config", CUDBG_RSS_PF_CONF, + 1 << ENTITY_FLAG_NEED_MBOX}, + {"rss_key", "rss_key", CUDBG_RSS_KEY, 1 << ENTITY_FLAG_NEED_MBOX}, + {"rss_vf_config", "rss_vf_config", CUDBG_RSS_VF_CONF, + 1 << ENTITY_FLAG_NEED_MBOX}, {"rss_config", "rss_config", CUDBG_RSS_CONF, ENTITY_FLAG_NULL}, - {"pathmtu", "path_mtus", CUDBG_PATH_MTU, ENTITY_FLAG_NULL}, + {"pathmtu", "path_mtus", CUDBG_PATH_MTU, 1 << ENTITY_FLAG_NEED_MBOX}, {"swstate", "sw_state", CUDBG_SW_STATE, ENTITY_FLAG_NULL}, - {"wtp", "wtp", CUDBG_WTP, 
ENTITY_FLAG_NULL}, - {"pmstats", "pm_stats", CUDBG_PM_STATS, ENTITY_FLAG_NULL}, - {"hwsched", "hw_sched", CUDBG_HW_SCHED, ENTITY_FLAG_NULL}, - {"tcpstats", "tcp_stats", CUDBG_TCP_STATS, ENTITY_FLAG_NULL}, - {"tperrstats", "tp_err_stats", CUDBG_TP_ERR_STATS, ENTITY_FLAG_NULL}, - {"fcoestats", "fcoe_stats", CUDBG_FCOE_STATS, ENTITY_FLAG_NULL}, - {"rdmastats", "rdma_stats", CUDBG_RDMA_STATS, ENTITY_FLAG_NULL}, + {"wtp", "wtp", CUDBG_WTP, 1 << ENTITY_FLAG_NEED_MBOX}, + {"pmstats", "pm_stats", CUDBG_PM_STATS, 1 << ENTITY_FLAG_NEED_MBOX}, + {"hwsched", "hw_sched", CUDBG_HW_SCHED, 1 << ENTITY_FLAG_NEED_MBOX}, + {"tcpstats", "tcp_stats", CUDBG_TCP_STATS, 1 << ENTITY_FLAG_NEED_MBOX}, + {"tperrstats", "tp_err_stats", CUDBG_TP_ERR_STATS, + 1 << ENTITY_FLAG_NEED_MBOX}, + {"fcoestats", "fcoe_stats", CUDBG_FCOE_STATS, + 1 << ENTITY_FLAG_NEED_MBOX}, + {"rdmastats", "rdma_stats", CUDBG_RDMA_STATS, + 1 << ENTITY_FLAG_NEED_MBOX}, {"tpindirect", "tp_indirect", CUDBG_TP_INDIRECT, - 1 << ENTITY_FLAG_REGISTER}, + (1 << ENTITY_FLAG_REGISTER) | (1 << ENTITY_FLAG_NEED_MBOX)}, {"sgeindirect", "sge_indirect", CUDBG_SGE_INDIRECT, - 1 << ENTITY_FLAG_REGISTER}, - {"cplstats", "cpl_stats", CUDBG_CPL_STATS, ENTITY_FLAG_NULL}, - {"ddpstats", "ddp_stats", CUDBG_DDP_STATS, ENTITY_FLAG_NULL}, + (1 << ENTITY_FLAG_REGISTER) | (1 << ENTITY_FLAG_NEED_MBOX)}, + {"cplstats", "cpl_stats", CUDBG_CPL_STATS, 1 << ENTITY_FLAG_NEED_MBOX}, + {"ddpstats", "ddp_stats", CUDBG_DDP_STATS, 1 << ENTITY_FLAG_NEED_MBOX}, {"wcstats", "wc_stats", CUDBG_WC_STATS, ENTITY_FLAG_NULL}, - {"ulprxla", "ulprx_la", CUDBG_ULPRX_LA, ENTITY_FLAG_NULL}, + {"ulprxla", "ulprx_la", CUDBG_ULPRX_LA, 1 << ENTITY_FLAG_NEED_MBOX}, {"lbstats", "lb_stats", CUDBG_LB_STATS, ENTITY_FLAG_NULL}, - {"tpla", "tp_la", CUDBG_TP_LA, ENTITY_FLAG_NULL}, + {"tpla", "tp_la", CUDBG_TP_LA, 1 << ENTITY_FLAG_NEED_MBOX}, {"meminfo", "meminfo", CUDBG_MEMINFO, ENTITY_FLAG_NULL}, - {"cimpifla", "cim_pif_la", CUDBG_CIM_PIF_LA, ENTITY_FLAG_NULL}, - {"clk", "clk", CUDBG_CLK, ENTITY_FLAG_NULL}, + {"cimpifla", "cim_pif_la", CUDBG_CIM_PIF_LA, + 1 << ENTITY_FLAG_NEED_MBOX}, + {"clk", "clk", CUDBG_CLK, 1 << ENTITY_FLAG_NEED_MBOX}, {"obq_sge_rx_q0", "obq_sge_rx_q0", CUDBG_CIM_OBQ_RXQ0, - ENTITY_FLAG_NULL}, + 1 << ENTITY_FLAG_NEED_MBOX}, {"obq_sge_rx_q1", "obq_sge_rx_q1", CUDBG_CIM_OBQ_RXQ1, - ENTITY_FLAG_NULL}, + 1 << ENTITY_FLAG_NEED_MBOX}, {"macstats", "mac_stats", CUDBG_MAC_STATS, ENTITY_FLAG_NULL}, {"pcieindirect", "pcie_indirect", CUDBG_PCIE_INDIRECT, - 1 << ENTITY_FLAG_REGISTER}, + (1 << ENTITY_FLAG_REGISTER) | (1 << ENTITY_FLAG_NEED_MBOX)}, {"pmindirect", "pm_indirect", CUDBG_PM_INDIRECT, - 1 << ENTITY_FLAG_REGISTER}, - {"full", "full", CUDBG_FULL, ENTITY_FLAG_NULL}, + (1 << ENTITY_FLAG_REGISTER) | (1 << ENTITY_FLAG_NEED_MBOX)}, + {"full", "full", CUDBG_FULL, 1 << ENTITY_FLAG_NEED_MBOX}, {"txrate", "tx_rate", CUDBG_TX_RATE, ENTITY_FLAG_NULL}, - {"tidinfo", "tids", CUDBG_TID_INFO, ENTITY_FLAG_NULL | - (1 << ENTITY_FLAG_FW_NO_ATTACH)}, - {"pcieconfig", "pcie_config", CUDBG_PCIE_CONFIG, ENTITY_FLAG_NULL}, - {"dumpcontext", "dump_context", CUDBG_DUMP_CONTEXT, ENTITY_FLAG_NULL}, - {"mpstcam", "mps_tcam", CUDBG_MPS_TCAM, ENTITY_FLAG_NULL}, + {"tidinfo", "tids", CUDBG_TID_INFO, + (1 << ENTITY_FLAG_FW_NO_ATTACH) | (1 << ENTITY_FLAG_NEED_MBOX)}, + {"pcieconfig", "pcie_config", CUDBG_PCIE_CONFIG, + 1 << ENTITY_FLAG_NEED_MBOX}, + {"dumpcontext", "dump_context", CUDBG_DUMP_CONTEXT, + 1 << ENTITY_FLAG_NEED_MBOX}, + {"mpstcam", "mps_tcam", CUDBG_MPS_TCAM, 1 << ENTITY_FLAG_NEED_MBOX}, {"vpddata", "vpd_data", 
CUDBG_VPD_DATA, ENTITY_FLAG_NULL}, - {"letcam", "le_tcam", CUDBG_LE_TCAM, ENTITY_FLAG_NULL}, - {"cctrl", "cctrl", CUDBG_CCTRL, ENTITY_FLAG_NULL}, + {"letcam", "le_tcam", CUDBG_LE_TCAM, 1 << ENTITY_FLAG_NEED_MBOX}, + {"cctrl", "cctrl", CUDBG_CCTRL, 1 << ENTITY_FLAG_NEED_MBOX}, {"maindirect", "ma_indirect", CUDBG_MA_INDIRECT, - 1 << ENTITY_FLAG_REGISTER}, + (1 << ENTITY_FLAG_REGISTER) | (1 << ENTITY_FLAG_NEED_MBOX)}, {"ulptxla", "ulptx_la", CUDBG_ULPTX_LA, ENTITY_FLAG_NULL}, {"extentity", "ext_entity", CUDBG_EXT_ENTITY, ENTITY_FLAG_NULL}, {"upcimindirect", "up_cim_indirect", CUDBG_UP_CIM_INDIRECT, - 1 << ENTITY_FLAG_REGISTER}, - {"pbttables", "pbt_tables", CUDBG_PBT_TABLE, ENTITY_FLAG_NULL}, + (1 << ENTITY_FLAG_REGISTER) | (1 << ENTITY_FLAG_NEED_MBOX)}, + {"pbttables", "pbt_tables", CUDBG_PBT_TABLE, + 1 << ENTITY_FLAG_NEED_MBOX}, {"mboxlog", "mboxlog", CUDBG_MBOX_LOG, ENTITY_FLAG_NULL}, {"hmaindirect", "hma_indirect", CUDBG_HMA_INDIRECT, - 1 << ENTITY_FLAG_REGISTER}, + (1 << ENTITY_FLAG_REGISTER) | (1 << ENTITY_FLAG_NEED_MBOX)}, + {"hma", "hma", CUDBG_HMA, (1 << ENTITY_FLAG_BINARY)}, + {"upload", "upload", CUDBG_UPLOAD, ENTITY_FLAG_NULL}, + {"qdesc", "qdesc", CUDBG_QDESC, ENTITY_FLAG_NULL}, + {"modeeprom", "modeeprom", CUDBG_MOD_EEPROM, 1 << ENTITY_FLAG_NEED_MBOX}, }; typedef int (*cudbg_print_cb) (char *str, ...); @@ -346,13 +374,15 @@ struct cudbg_param { u16 mbox_cmds; } mboxlog_param; struct { - u8 caller_string[100]; + const char *caller_string; u8 os_type; } sw_state_param; + struct { + u32 itr; + } yield_param; u64 time; u8 tcb_bit_param; void *adap; - void *access_lock; } u; }; @@ -360,6 +390,26 @@ struct cudbg_param { #define CUDBG_TCB_BRIEF_PARAM 0x1 #define CUDBG_TCB_FROM_CARD_PARAM 0x2 #define CUDBG_TCB_AS_SCB_PARAM 0x4 +#define CUDBG_TCB_AS_FCB_PARAM 0x8 + +enum { + CUDBG_FILE_WRITE_FLUSH = 0, + CUDBG_FILE_WRITE_HEADER = 1, + CUDBG_FILE_WRITE_DATA = 2, + CUDBG_FILE_WRITE_AT_OFFSET = 3, +}; + +#define CUDBG_YIELD_ITERATION 200 + +struct cudbg_init; +typedef int (*cudbg_mc_collect_t)(struct cudbg_init *pdbg_init, u8 mem_type, + u32 start, u32 size, u8 *buf); +typedef u32 (*cudbg_intrinsic_t)(struct cudbg_init *pdbg_init, u32 start, + u32 offset, u32 size, u32 max_size, u8 *buf); +typedef int (*cudbg_write_to_file_t)(u8 op, int off, u8 *data, u32 data_size); +typedef void (*cudbg_lock_t)(void *access_lock); +typedef void (*cudbg_unlock_t)(void *access_lock); +typedef void (*cudbg_yield_t)(struct cudbg_init *pdbg_init); /* * * What is OFFLINE_VIEW_ONLY mode? @@ -370,8 +420,6 @@ struct cudbg_param { struct cudbg_init { struct cudbg_init_hdr header; - struct adapter *adap; /* Pointer to adapter structure - with filled fields */ cudbg_print_cb print; /* Platform dependent print function */ u32 verbose:1; /* Turn on verbose print */ @@ -383,13 +431,72 @@ struct cudbg_init { the collected debug */ u32 info:1; /* Show just the info, Dont interpret */ - u32 reserved:27; u8 dbg_bitmap[CUDBG_MAX_BITMAP_LEN]; /* Bit map to select the dbg - data type to be collected + data type to be collect or viewed */ + void *sw_state_buf; /* */ + u32 sw_state_buflen; /* */ + + unsigned char *hash_table; /* hash table used in + * fastlz compression */ + /* Optional for OFFLINE_VIEW_ONLY mode. 
Set to NULL for + * OFFLINE_VIEW_ONLY mode */ + struct adapter *adap; /* Pointer to adapter structure + with filled fields */ + u16 dbg_params_cnt; + u16 dbg_reserved; + struct cudbg_param dbg_params[CUDBG_MAX_PARAMS]; + cudbg_mc_collect_t mc_collect_cb; + cudbg_write_to_file_t write_to_file_cb; + void *cur_entity_hdr; + void *access_lock; + cudbg_lock_t lock_cb; + cudbg_unlock_t unlock_cb; + cudbg_yield_t yield_cb; +}; + +enum { + CUDBG_DEVLOG_PARAM = 1, + CUDBG_TIMESTAMP_PARAM = 2, + CUDBG_FW_NO_ATTACH_PARAM = 3, + CUDBG_MBOX_LOG_PARAM = 4, + CUDBG_TCB_BIT_PARAM = 5, + CUDBG_ADAP_PARAM = 6, + CUDBG_GET_PAYLOAD_PARAM = 7, + CUDBG_SW_STATE_PARAM = 8, + CUDBG_FORCE_PARAM = 9, + CUDBG_YIELD_ITER_PARAM = 10, + CUDBG_SKIP_MBOX_PARAM = 11, + CUDBG_SECOLLECT_PARAM = 12, }; +enum { + /* params for os_type */ + CUDBG_OS_TYPE_WINDOWS = 1, + CUDBG_OS_TYPE_LINUX = 2, + CUDBG_OS_TYPE_ESX = 3, + CUDBG_OS_TYPE_UNKNOWN = 4, +}; + +#define CUDBG_IOCTL_VERSION 0x1 + +#ifndef __GNUC__ +#pragma warning(disable : 4200) +#endif + +struct cudbg_ioctl { + u32 cmd; + + u32 version; + u64 size; + + u8 dbg_bitmap[CUDBG_MAX_BITMAP_LEN]; + u16 dbg_params_cnt; + struct cudbg_param dbg_params[CUDBG_MAX_PARAMS]; + + u8 data[0]; /* Must be last */ +}; /********************************* Helper functions *************************/ static inline void set_dbg_bitmap(u8 *bitmap, enum CUDBG_DBG_ENTITY_TYPE type) @@ -408,29 +515,42 @@ static inline void reset_dbg_bitmap(u8 *bitmap, enum CUDBG_DBG_ENTITY_TYPE type) bitmap[index] &= ~(1 << bit); } -/********************************* End of Helper functions - * *************************/ +static inline void init_cudbg_hdr(struct cudbg_init_hdr *hdr) +{ + hdr->major_ver = CUDBG_MAJOR_VERSION; + hdr->minor_ver = CUDBG_MINOR_VERSION; + hdr->build_ver = CUDBG_BUILD_VERSION; + hdr->init_struct_size = sizeof(struct cudbg_init); +} + +/**************************** End of Helper functions *************************/ /* API Prototypes */ -/** - * cudbg_alloc_handle - Allocates and initializes a handle that represents - * cudbg state. Needs to called first before calling any other function. - * - * returns a pointer to memory that has a cudbg_init structure at the begining - * and enough space after that for internal book keeping. - */ +/* + * cudbg_hello2 - Extended cudbg_hello. Caller has provide required memory + * buffer for library initialization. + * ## Parameters ## + * @dbg_init : Pointer to cudbg_init structure. + * @handle : Pointer to the handle that will be returned by cudbglib. + * @buf : Pointer to the buffer, for the use of cudbglib. + * @buf_size : Pointer to the variable containing the size of buffer. + * Cudbglib sets the size of the required buffer if + * CUDBG_STATUS_SMALL_BUFF is returned. + * ## Return ## + * If the function succeeds, returns 0. + * -ve value represent error. -void *cudbg_alloc_handle(void); -static inline struct cudbg_init *cudbg_get_init(void *handle) -{ - return (handle); -} + * Caller can first pass buf_size as 0, to find the size of buffer required by cudbglib. Then + * call cudbg_hello2() with correct buf and buf_size, after buffer allocation. + */ +int cudbg_hello2(IN struct cudbg_init *dbg_init, OUT void **handle, IN u8 *buf, + INOUT u32 *buf_size); /** - * cudbg_collect - Collect and store debug information. + * cudbg_collect - To collect and store debug information. * ## Parameters ## - * @handle : A pointer returned by cudbg_alloc_handle. + * @handle : A pointer returned by cudbg_init. 
* @outbuf : pointer to output buffer, to store the collected information * or to use it as a scratch buffer in case HW flash is used to * store the debug information. @@ -440,35 +560,13 @@ static inline struct cudbg_init *cudbg_get_init(void *handle) * collected and stored. * -ve value represent error. */ -int cudbg_collect(void *handle, void *outbuf, u32 *outbuf_size); +int cudbg_collect(IN void *handle, OUT void *outbuf, INOUT u32 *outbuf_size); /** - * cudbg_free_handle - Release cudbg resources. - * ## Parameters ## - * @handle : A pointer returned by cudbg_alloc_handle. - */ - -void cudbg_free_handle(IN void *handle); - -/** - * cudbg_read_flash_data - Read cudbg “flash” header from adapter flash. - * This will be used by the consumer mainly to - * know the size of the data in flash. - * ## Parameters ## - * @handle : A pointer returned by cudbg_hello. - * @data : A pointer to data/header buffer - */ - -int cudbg_read_flash_details(void *handle, struct cudbg_flash_hdr *data); - -/** - * cudbg_read_flash_data - Read cudbg dump contents stored in flash. + * cudbg_bye - To exit cudbg framework. * ## Parameters ## * @handle : A pointer returned by cudbg_hello. - * @data_buf : A pointer to data buffer. - * @data_buf_size : Data buffer size. */ - -int cudbg_read_flash_data(void *handle, void *data_buf, u32 data_buf_size); +int cudbg_bye(IN void *handle); #endif /* _CUDBG_IF_H_ */ diff --git a/sys/dev/cxgbe/cudbg/cudbg_common.c b/sys/dev/cxgbe/cudbg/cudbg_common.c index f780e626da0..303e08eede4 100644 --- a/sys/dev/cxgbe/cudbg/cudbg_common.c +++ b/sys/dev/cxgbe/cudbg/cudbg_common.c @@ -33,6 +33,32 @@ __FBSDID("$FreeBSD$"); #include "common/common.h" #include "cudbg.h" #include "cudbg_lib_common.h" +#include "fastlz_common.h" + +int get_scratch_buff_aligned(struct cudbg_buffer *pdbg_buff, u32 size, + struct cudbg_buffer *pscratch_buff, u32 align) +{ + u64 off, mask = align - 1; + u32 scratch_offset; + int rc = 0; + + scratch_offset = pdbg_buff->size - size; + off = (uintptr_t)((u8 *)pdbg_buff->data + scratch_offset) & mask; + scratch_offset -= off; + size += off; + if (pdbg_buff->offset > (int)scratch_offset || + pdbg_buff->size < size) { + rc = CUDBG_STATUS_NO_SCRATCH_MEM; + goto err; + } else { + pscratch_buff->data = (char *)pdbg_buff->data + scratch_offset; + pscratch_buff->offset = 0; + pscratch_buff->size = size; + pdbg_buff->size -= size; + } +err: + return rc; +} int get_scratch_buff(struct cudbg_buffer *pdbg_buff, u32 size, struct cudbg_buffer *pscratch_buff) @@ -41,7 +67,6 @@ int get_scratch_buff(struct cudbg_buffer *pdbg_buff, u32 size, int rc = 0; scratch_offset = pdbg_buff->size - size; - if (pdbg_buff->offset > (int)scratch_offset || pdbg_buff->size < size) { rc = CUDBG_STATUS_NO_SCRATCH_MEM; goto err; @@ -51,7 +76,6 @@ int get_scratch_buff(struct cudbg_buffer *pdbg_buff, u32 size, pscratch_buff->size = size; pdbg_buff->size -= size; } - err: return rc; } @@ -69,28 +93,76 @@ void release_scratch_buff(struct cudbg_buffer *pscratch_buff, pscratch_buff->size = 0; } -static inline void init_cudbg_hdr(struct cudbg_init_hdr *hdr) +/* cudbg_hello2 : extended version of cudbg_hello + * calling method: + * 1. first call to cudbg_hello2 with buf_size == 0 will fill buf_size with + * required size + * 2. 
second call will be actual cudbg_hello2 with previous call buf_size*/ +int cudbg_hello2(struct cudbg_init *dbg_init, void **handle, u8 *buf, + u32 *buf_size) { - hdr->major_ver = CUDBG_MAJOR_VERSION; - hdr->minor_ver = CUDBG_MINOR_VERSION; - hdr->build_ver = CUDBG_BUILD_VERSION; - hdr->init_struct_size = sizeof(struct cudbg_init); + struct cudbg_private *context; + u32 total_size = sizeof(struct cudbg_private) + + sizeof(struct cudbg_flash_sec_info) + + sizeof(char *) * FASTLZ_HASH_SIZE; + + if (*buf_size < total_size) { + *buf_size = total_size; + return CUDBG_STATUS_SMALL_BUFF; + } + + if (buf == NULL) + return CUDBG_STATUS_INVALID_BUFF; + + context = (struct cudbg_private *)buf; + memset(context, 0, sizeof(struct cudbg_private)); + context->psec_info = (struct cudbg_flash_sec_info *)(buf + + sizeof(struct cudbg_private)); + dbg_init->hash_table = (unsigned char *)(context->psec_info) + + sizeof(struct cudbg_flash_sec_info); + memcpy(&(context->dbg_init), dbg_init, sizeof(struct cudbg_init)); + *handle = (void *)context; + + return 0; } -void * -cudbg_alloc_handle(void) +static void reset_sec_info(struct cudbg_flash_sec_info *psec_info) { - struct cudbg_private *handle; + memset(psec_info, 0, sizeof(struct cudbg_flash_sec_info)); +} + +int cudbg_bye(void *handle) +{ + struct cudbg_private *context = (struct cudbg_private *)handle; - handle = malloc(sizeof(*handle), M_CXGBE, M_ZERO | M_WAITOK); - init_cudbg_hdr(&handle->dbg_init.header); + reset_sec_info(context->psec_info); + return 0; +} - return (handle); +int cudbg_sge_ctxt_check_valid(u32 *buf, int type) +{ + int index, bit, bit_pos = 0; + + switch (type) { + case CTXT_EGRESS: + bit_pos = 176; + break; + case CTXT_INGRESS: + bit_pos = 141; + break; + case CTXT_FLM: + bit_pos = 89; + break; + } + index = bit_pos / 32; + bit = bit_pos % 32; + return buf[index] & (1U << bit); } -void -cudbg_free_handle(void *handle) +void cudbg_update_entity_hdr(struct cudbg_init *pdbg_init, u32 size) { + struct cudbg_entity_hdr *entity_hdr = + (struct cudbg_entity_hdr *)pdbg_init->cur_entity_hdr; - free(handle, M_CXGBE); + entity_hdr->size += size; } diff --git a/sys/dev/cxgbe/cudbg/cudbg_entity.h b/sys/dev/cxgbe/cudbg/cudbg_entity.h index 2bbe0db0e5c..98ebbb8d733 100644 --- a/sys/dev/cxgbe/cudbg/cudbg_entity.h +++ b/sys/dev/cxgbe/cudbg/cudbg_entity.h @@ -40,19 +40,26 @@ #define MC1_FLAG 2 #define EDC0_FLAG 3 #define EDC1_FLAG 4 +#define HMA_FLAG 5 + +#define CUDBG_MEM_ALIGN 32 +#define CUDBG_MEM_CHUNK_SIZE 65536 /* Must be a multiple of @CUDBG_MEM_ALIGN */ +#define CUDBG_MEM_TOT_READ_SIZE (CUDBG_MEM_CHUNK_SIZE * CUDBG_NTHREADS) #define NUM_PCIE_CONFIG_REGS 0x61 -#define CUDBG_CTXT_SIZE_BYTES 24 -#define CUDBG_MAX_INGRESS_QIDS 65536 -#define CUDBG_MAX_FL_QIDS 2048 -#define CUDBG_MAX_CNM_QIDS 1024 +#define CUDBG_MAX_FL_QIDS 1024 #define CUDBG_LOWMEM_MAX_CTXT_QIDS 256 #define ETH_ALEN 6 #define CUDBG_MAX_RPLC_SIZE 128 #define CUDBG_NUM_REQ_REGS 17 #define CUDBG_MAX_TCAM_TID 0x800 +#define CUDBG_T6_CLIP 1536 +#define CUDBG_MAX_TID_COMP_EN 6144 +#define CUDBG_MAX_TID_COMP_DIS 3072 #define CUDBG_NUM_ULPTX 11 #define CUDBG_NUM_ULPTX_READ 512 +#define CUDBG_NUM_ULPTX_ASIC 6 +#define CUDBG_NUM_ULPTX_ASIC_READ 128 #define SN_REG_ADDR 0x183f #define BN_REG_ADDR 0x1819 @@ -101,10 +108,29 @@ #define CUDBG_ENTITY_SIGNATURE 0xCCEDB001 #define CUDBG_TID_INFO_REV 1 #define CUDBG_MAC_STATS_REV 1 - -#ifndef ARRAY_SIZE -#define ARRAY_SIZE(_a) (sizeof((_a)) / sizeof((_a)[0])) -#endif +#define CUDBG_ULPTX_LA_REV 1 +#define CUDBG_MEMINFO_REV 1 +#define CUDBG_LETCAM_REV 1 
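/*
 * A minimal sketch of the handle lifecycle the new cudbg API implies, for a
 * hypothetical caller inside the cxgbe driver: cudbg_hello2()/cudbg_bye()
 * replace the removed cudbg_alloc_handle()/cudbg_free_handle(), using the
 * two-call sizing pattern documented above.  This is not part of the patch;
 * the function name is invented, error handling is abbreviated, and the
 * platform callbacks (print, write_to_file_cb, dbg_params, locks) that a
 * real caller would also fill in are left unset for brevity.
 */
static int
cudbg_lifecycle_sketch(struct adapter *sc, void *outbuf, u32 *outbuf_size)
{
	struct cudbg_init dbg_init;
	void *handle;
	u8 *buf;
	u32 buf_size = 0;
	int rc;

	memset(&dbg_init, 0, sizeof(dbg_init));
	init_cudbg_hdr(&dbg_init.header);
	dbg_init.adap = sc;
	set_dbg_bitmap(dbg_init.dbg_bitmap, CUDBG_DEV_LOG);	/* arbitrary entity */

	/* First call with *buf_size == 0 only reports the required size. */
	rc = cudbg_hello2(&dbg_init, &handle, NULL, &buf_size);
	if (rc != CUDBG_STATUS_SMALL_BUFF)
		return (rc);

	buf = malloc(buf_size, M_CXGBE, M_ZERO | M_WAITOK);

	/* Second call sets up the library state inside the caller's buffer. */
	rc = cudbg_hello2(&dbg_init, &handle, buf, &buf_size);
	if (rc == 0) {
		rc = cudbg_collect(handle, outbuf, outbuf_size);
		cudbg_bye(handle);
	}
	free(buf, M_CXGBE);
	return (rc);
}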
+#define CUDBG_MODEEPROM_REV 0 + +enum le_entry_types { + LE_ET_UNKNOWN = 0, + LE_ET_TCAM_HPFILTER = 1, + LE_ET_TCAM_CON = 2, + LE_ET_TCAM_SERVER = 3, + LE_ET_TCAM_FILTER = 4, + LE_ET_TCAM_CLIP = 5, + LE_ET_TCAM_ROUTING = 6, + LE_ET_HASH_CON = 7, + /* Reserve for future regions */ + LE_ET_TCAM_MAX = 16, +}; + +struct port_data { + u8 port_type; /* firmware port type */ + u8 mod_type; /* firmware module type */ + u8 tx_chan; +}; struct cudbg_pbt_tables { u32 pbt_dynamic[CUDBG_PBT_DYNAMIC_ENTRIES]; @@ -118,6 +144,7 @@ struct card_mem { u16 size_mc1; u16 size_edc0; u16 size_edc1; + u16 size_hma; u16 mem_flag; u16 res; }; @@ -143,13 +170,25 @@ struct cudbg_tcam { u32 max_tid; }; -#if 0 -struct cudbg_mbox_log { - struct mbox_cmd entry; - u32 hi[MBOX_LEN / 8]; - u32 lo[MBOX_LEN / 8]; +struct cudbg_letcam_region { + u8 type; + u32 start; + u32 nentries; + + u8 reserved[64]; +}; + +struct cudbg_letcam { + struct cudbg_ver_hdr ver_hdr; + + u8 nregions; + u32 region_hdr_size; + + u32 max_tid; + u32 tid_data_hdr_size; + + u8 reserved[64]; }; -#endif struct cudbg_tid_data { u32 tid; @@ -252,11 +291,13 @@ struct struct_wc_stats { u32 wr_cl_fail; }; -struct struct_ulptx_la { +struct cudbg_ulptx_la { u32 rdptr[CUDBG_NUM_ULPTX]; u32 wrptr[CUDBG_NUM_ULPTX]; u32 rddata[CUDBG_NUM_ULPTX]; u32 rd_data[CUDBG_NUM_ULPTX][CUDBG_NUM_ULPTX_READ]; + u32 rdptr_asic[CUDBG_NUM_ULPTX_ASIC_READ]; + u32 rddata_asic[CUDBG_NUM_ULPTX_ASIC_READ][CUDBG_NUM_ULPTX_ASIC]; }; struct struct_ulprx_la { @@ -327,23 +368,12 @@ struct struct_port_usage { u32 alloc; }; -struct struct_lpbk_usage { - u32 id; - u32 used; - u32 alloc; -}; - struct struct_mem_desc { u32 base; u32 limit; u32 idx; }; -enum string_size_units { - STRING_UNITS_10, /* use powers of 10^3 (standard SI) */ - STRING_UNITS_2, /* use binary powers of 2^10 */ -}; - struct struct_meminfo { struct struct_mem_desc avail[4]; struct struct_mem_desc mem[ARRAY_SIZE(region) + 3]; @@ -361,6 +391,9 @@ struct struct_meminfo { u32 port_alloc[4]; u32 loopback_used[NCHAN]; u32 loopback_alloc[NCHAN]; + u32 pstructs_free_cnt; + u32 free_rx_cnt; + u32 free_tx_cnt; }; #ifndef __GNUC__ @@ -386,10 +419,7 @@ struct struct_clk_info { u32 cclk_ps; u32 tre; u32 dack_re; - char core_clk_period[32]; - char tp_timer_tick[32]; - char tcp_tstamp_tick[32]; - char dack_tick[32]; + u8 reserved[128]; }; struct cim_pif_la { @@ -671,17 +701,6 @@ struct tp_mib_data { struct tp_mib_type TP_MIB_RQE_DFR_PKT; }; -struct cudbg_reg_info { - const char *name; - unsigned int addr; - unsigned int len; -}; - -struct tp1_reg_info { - char addr[10]; - char name[40]; -}; - struct ireg_field { u32 ireg_addr; u32 ireg_data; @@ -694,6 +713,14 @@ struct ireg_buf { u32 outbuf[32]; }; +struct sge_qbase_reg_field { + u32 reg_addr; + u32 reg_data[4]; + u32 pf_data_value[8][4]; /* [max pf][4 data reg SGE_QBASE_MAP[0-3] */ + u32 vf_data_value[256][4]; /* [max vf][4 data reg SGE_QBASE_MAP[0-3] */ + u32 vfcount; +}; + struct tx_rate { u64 nrate[NCHAN]; u64 orate[NCHAN]; @@ -734,7 +761,12 @@ struct tid_info_region_rev1 { struct cudbg_ver_hdr ver_hdr; struct tid_info_region tid; u32 tid_start; - u32 reserved[16]; + u32 nhash; + u32 clip_base; + u32 nclip; + u32 route_base; + u32 nroute; + u32 reserved[11]; }; struct struct_vpd_data { @@ -758,6 +790,90 @@ struct sw_state { u32 reserved1[16]; }; +enum cudbg_qdesc_qtype { + CUDBG_QTYPE_UNKNOWN = 0, + CUDBG_QTYPE_NIC_TXQ, + CUDBG_QTYPE_NIC_RXQ, + CUDBG_QTYPE_NIC_FLQ, + CUDBG_QTYPE_CTRLQ, + CUDBG_QTYPE_FWEVTQ, + CUDBG_QTYPE_INTRQ, + CUDBG_QTYPE_PTP_TXQ, + CUDBG_QTYPE_OFLD_TXQ, + 
CUDBG_QTYPE_RDMA_RXQ, + CUDBG_QTYPE_RDMA_FLQ, + CUDBG_QTYPE_RDMA_CIQ, + CUDBG_QTYPE_ISCSI_RXQ, + CUDBG_QTYPE_ISCSI_FLQ, + CUDBG_QTYPE_ISCSIT_RXQ, + CUDBG_QTYPE_ISCSIT_FLQ, + CUDBG_QTYPE_CRYPTO_TXQ, + CUDBG_QTYPE_CRYPTO_RXQ, + CUDBG_QTYPE_CRYPTO_FLQ, + CUDBG_QTYPE_TLS_RXQ, + CUDBG_QTYPE_TLS_FLQ, + CUDBG_QTYPE_ETHOFLD_TXQ, + CUDBG_QTYPE_ETHOFLD_RXQ, + CUDBG_QTYPE_ETHOFLD_FLQ, + CUDBG_QTYPE_MAX, +}; + +#define CUDBG_QDESC_REV 1 + +struct cudbg_qdesc_entry { + u32 data_size; + u32 qtype; + u32 qid; + u32 desc_size; + u32 num_desc; + u8 data[0]; /* Must be last */ +}; + +struct cudbg_qdesc_info { + u32 qdesc_entry_size; + u32 num_queues; + u8 data[0]; /* Must be last */ +}; + +/* EEPROM Standards for plug in modules */ +enum { + CUDBG_MODULE_SFF_8079 = 1, + CUDBG_MODULE_SFF_8472 = 2, + CUDBG_MODULE_SFF_8636 = 3, + CUDBG_MODULE_SFF_8436 = 4, + CUDBG_MODULE_SFF_MAX, +}; + +#define CUDBG_MODULE_SFF_8079_LEN 256 +#define CUDBG_MODULE_SFF_8472_LEN 512 +#define CUDBG_MODULE_SFF_8636_LEN 256 +#define CUDBG_MODULE_SFF_8436_LEN 256 + +static u32 ATTRIBUTE_UNUSED eth_module_sff_len_array[CUDBG_MODULE_SFF_MAX] = { + [CUDBG_MODULE_SFF_8079] = CUDBG_MODULE_SFF_8079_LEN, + [CUDBG_MODULE_SFF_8472] = CUDBG_MODULE_SFF_8472_LEN, + [CUDBG_MODULE_SFF_8636] = CUDBG_MODULE_SFF_8636_LEN, + [CUDBG_MODULE_SFF_8436] = CUDBG_MODULE_SFF_8436_LEN, +}; + +/** + * struct cudbg_modinfo - plugin module eeprom information + * @type: Standard the module information conforms to %ETH_MODULE_SFF_xxxx + * @eeprom_len: Length of the eeprom + * + */ +struct cudbg_modinfo { + __u32 type; + __u32 eeprom_len; +}; + +struct cudbg_module_eeprom { + struct cudbg_ver_hdr ver_hdr; + struct cudbg_modinfo modinfo[MAX_NPORTS]; + u8 nports; + u8 data[0]; /* Must be last */ +}; + static u32 ATTRIBUTE_UNUSED t6_tp_pio_array[][4] = { {0x7e40, 0x7e44, 0x020, 28}, /* t6_tp_pio_regs_20_to_3b */ {0x7e40, 0x7e44, 0x040, 10}, /* t6_tp_pio_regs_40_to_49 */ @@ -873,37 +989,144 @@ static u32 ATTRIBUTE_UNUSED t5_sge_dbg_index_array[9][4] = { {0x10cc, 0x10d4, 0x0, 16}, }; -static u32 ATTRIBUTE_UNUSED t6_up_cim_reg_array[][4] = { - {0x7b50, 0x7b54, 0x2000, 0x20}, /* up_cim_2000_to_207c */ - {0x7b50, 0x7b54, 0x2080, 0x1d}, /* up_cim_2080_to_20fc */ - {0x7b50, 0x7b54, 0x00, 0x20}, /* up_cim_00_to_7c */ - {0x7b50, 0x7b54, 0x80, 0x20}, /* up_cim_80_to_fc */ - {0x7b50, 0x7b54, 0x100, 0x11}, /* up_cim_100_to_14c */ - {0x7b50, 0x7b54, 0x200, 0x10}, /* up_cim_200_to_23c */ - {0x7b50, 0x7b54, 0x240, 0x2}, /* up_cim_240_to_244 */ - {0x7b50, 0x7b54, 0x250, 0x2}, /* up_cim_250_to_254 */ - {0x7b50, 0x7b54, 0x260, 0x2}, /* up_cim_260_to_264 */ - {0x7b50, 0x7b54, 0x270, 0x2}, /* up_cim_270_to_274 */ - {0x7b50, 0x7b54, 0x280, 0x20}, /* up_cim_280_to_2fc */ - {0x7b50, 0x7b54, 0x300, 0x20}, /* up_cim_300_to_37c */ - {0x7b50, 0x7b54, 0x380, 0x14}, /* up_cim_380_to_3cc */ - -}; - -static u32 ATTRIBUTE_UNUSED t5_up_cim_reg_array[][4] = { - {0x7b50, 0x7b54, 0x2000, 0x20}, /* up_cim_2000_to_207c */ - {0x7b50, 0x7b54, 0x2080, 0x19}, /* up_cim_2080_to_20ec */ - {0x7b50, 0x7b54, 0x00, 0x20}, /* up_cim_00_to_7c */ - {0x7b50, 0x7b54, 0x80, 0x20}, /* up_cim_80_to_fc */ - {0x7b50, 0x7b54, 0x100, 0x11}, /* up_cim_100_to_14c */ - {0x7b50, 0x7b54, 0x200, 0x10}, /* up_cim_200_to_23c */ - {0x7b50, 0x7b54, 0x240, 0x2}, /* up_cim_240_to_244 */ - {0x7b50, 0x7b54, 0x250, 0x2}, /* up_cim_250_to_254 */ - {0x7b50, 0x7b54, 0x260, 0x2}, /* up_cim_260_to_264 */ - {0x7b50, 0x7b54, 0x270, 0x2}, /* up_cim_270_to_274 */ - {0x7b50, 0x7b54, 0x280, 0x20}, /* up_cim_280_to_2fc */ - {0x7b50, 0x7b54, 0x300, 0x20}, 
/* up_cim_300_to_37c */ - {0x7b50, 0x7b54, 0x380, 0x14}, /* up_cim_380_to_3cc */ -}; +static u32 ATTRIBUTE_UNUSED t6_sge_qbase_index_array[5] = { + /* 1 addr reg SGE_QBASE_INDEX and 4 data reg SGE_QBASE_MAP[0-3] */ + 0x1250, 0x1240, 0x1244, 0x1248, 0x124c, +}; + +static u32 ATTRIBUTE_UNUSED t6_up_cim_reg_array[][5] = { + {0x7b50, 0x7b54, 0x2000, 0x20, 0}, /* up_cim_2000_to_207c */ + {0x7b50, 0x7b54, 0x2080, 0x1d, 0}, /* up_cim_2080_to_20fc */ + {0x7b50, 0x7b54, 0x00, 0x20, 0}, /* up_cim_00_to_7c */ + {0x7b50, 0x7b54, 0x80, 0x20, 0}, /* up_cim_80_to_fc */ + {0x7b50, 0x7b54, 0x100, 0x11, 0}, /* up_cim_100_to_14c */ + {0x7b50, 0x7b54, 0x200, 0x10, 0}, /* up_cim_200_to_23c */ + {0x7b50, 0x7b54, 0x240, 0x2, 0}, /* up_cim_240_to_244 */ + {0x7b50, 0x7b54, 0x250, 0x2, 0}, /* up_cim_250_to_254 */ + {0x7b50, 0x7b54, 0x260, 0x2, 0}, /* up_cim_260_to_264 */ + {0x7b50, 0x7b54, 0x270, 0x2, 0}, /* up_cim_270_to_274 */ + {0x7b50, 0x7b54, 0x280, 0x20, 0}, /* up_cim_280_to_2fc */ + {0x7b50, 0x7b54, 0x300, 0x20, 0}, /* up_cim_300_to_37c */ + {0x7b50, 0x7b54, 0x380, 0x14, 0}, /* up_cim_380_to_3cc */ + {0x7b50, 0x7b54, 0x4900, 0x4, 0x4}, /* up_cim_4900_to_4c60 */ + {0x7b50, 0x7b54, 0x4904, 0x4, 0x4}, /* up_cim_4904_to_4c64 */ + {0x7b50, 0x7b54, 0x4908, 0x4, 0x4}, /* up_cim_4908_to_4c68 */ + {0x7b50, 0x7b54, 0x4910, 0x4, 0x4}, /* up_cim_4910_to_4c70 */ + {0x7b50, 0x7b54, 0x4914, 0x4, 0x4}, /* up_cim_4914_to_4c74 */ + {0x7b50, 0x7b54, 0x4920, 0x10, 0x10},/* up_cim_4920_to_4a10 */ + {0x7b50, 0x7b54, 0x4924, 0x10, 0x10},/* up_cim_4924_to_4a14 */ + {0x7b50, 0x7b54, 0x4928, 0x10, 0x10},/* up_cim_4928_to_4a18 */ + {0x7b50, 0x7b54, 0x492c, 0x10, 0x10},/* up_cim_492c_to_4a1c */ +}; + +static u32 ATTRIBUTE_UNUSED t5_up_cim_reg_array[][5] = { + {0x7b50, 0x7b54, 0x2000, 0x20, 0}, /* up_cim_2000_to_207c */ + {0x7b50, 0x7b54, 0x2080, 0x19, 0}, /* up_cim_2080_to_20ec */ + {0x7b50, 0x7b54, 0x00, 0x20, 0}, /* up_cim_00_to_7c */ + {0x7b50, 0x7b54, 0x80, 0x20, 0}, /* up_cim_80_to_fc */ + {0x7b50, 0x7b54, 0x100, 0x11, 0}, /* up_cim_100_to_14c */ + {0x7b50, 0x7b54, 0x200, 0x10, 0}, /* up_cim_200_to_23c */ + {0x7b50, 0x7b54, 0x240, 0x2, 0}, /* up_cim_240_to_244 */ + {0x7b50, 0x7b54, 0x250, 0x2, 0}, /* up_cim_250_to_254 */ + {0x7b50, 0x7b54, 0x260, 0x2, 0}, /* up_cim_260_to_264 */ + {0x7b50, 0x7b54, 0x270, 0x2, 0}, /* up_cim_270_to_274 */ + {0x7b50, 0x7b54, 0x280, 0x20, 0}, /* up_cim_280_to_2fc */ + {0x7b50, 0x7b54, 0x300, 0x20, 0}, /* up_cim_300_to_37c */ + {0x7b50, 0x7b54, 0x380, 0x14, 0}, /* up_cim_380_to_3cc */ +}; + +static u32 ATTRIBUTE_UNUSED t5_letcam_region_reg_array[LE_ET_TCAM_MAX] = { + [LE_ET_TCAM_SERVER] = A_LE_DB_SERVER_INDEX, + [LE_ET_TCAM_FILTER] = A_LE_DB_FILTER_TABLE_INDEX, + [LE_ET_TCAM_CLIP] = A_LE_DB_CLIP_TABLE_INDEX, + [LE_ET_TCAM_ROUTING] = A_LE_DB_ROUTING_TABLE_INDEX, + [LE_ET_HASH_CON] = A_LE_DB_TID_HASHBASE, +}; + +static u32 ATTRIBUTE_UNUSED letcam_region_reg_array[LE_ET_TCAM_MAX] = { + [LE_ET_TCAM_HPFILTER] = A_LE_DB_HPRI_FILT_TABLE_START_INDEX, + [LE_ET_TCAM_CON] = A_LE_DB_ACTIVE_TABLE_START_INDEX, + [LE_ET_TCAM_SERVER] = A_LE_DB_SRVR_START_INDEX, + [LE_ET_TCAM_FILTER] = A_LE_DB_NORM_FILT_TABLE_START_INDEX, + [LE_ET_TCAM_CLIP] = A_LE_DB_CLCAM_TID_BASE, + [LE_ET_HASH_CON] = A_T6_LE_DB_HASH_TID_BASE, +}; + +static inline char *cudbg_letcam_type_to_string(u8 type) +{ + switch (type) { + case LE_ET_TCAM_HPFILTER: + return "HP Filter"; + case LE_ET_TCAM_CON: + return "Active"; + case LE_ET_TCAM_SERVER: + return "Server"; + case LE_ET_TCAM_FILTER: + return "Filter"; + case LE_ET_TCAM_CLIP: + return "Clip"; + 
case LE_ET_TCAM_ROUTING: + return "Route"; + case LE_ET_HASH_CON: + return "Hash"; + case LE_ET_TCAM_MAX: + return "Max"; + } + + return "Unknown"; +} + +#define CUDBG_LETCAM_RSPDATA_IPV6_INDEX 16 +#define CUDBG_LETCAM_RSPDATA_IPV6_MASK 0x8000 + +#define CUDBG_LETCAM_RSPDATA_IPV6_ACTIVE_INDEX 9 +#define CUDBG_LETCAM_RSPDATA_IPV6_ACTIVE_VALUE 0x00C00000 + +static inline u8 cudbg_letcam_get_type(u32 tid, struct cudbg_letcam *letcam, + struct cudbg_letcam_region *le_region) +{ + u32 i; + + for (i = 0; i < letcam->nregions; i++) { + if (tid >= le_region->start && + tid < le_region->start + le_region->nentries) + return le_region->type; + + le_region = (struct cudbg_letcam_region *) + (((u8 *)le_region) + + letcam->region_hdr_size); + } + + return LE_ET_UNKNOWN; +} + +static inline int cudbg_letcam_is_ipv6_entry(struct cudbg_tid_data *tid_data, + struct cudbg_letcam *letcam, + struct cudbg_letcam_region *le_region) +{ + int ipv6; + + /* IPv6 TIDs must at least be on a 2-slot boundary */ + if (tid_data->tid & 1) + return 0; + + /* Ensure IPv6 protocol MSB bit is set in the response data */ + if (!(tid_data->data[CUDBG_LETCAM_RSPDATA_IPV6_INDEX] & + CUDBG_LETCAM_RSPDATA_IPV6_MASK)) + return 0; + + ipv6 = 1; + + /* For active region, an additional check is needed + * to ensure IPv6 connection is really offloaded. + */ + if (cudbg_letcam_get_type(tid_data->tid, letcam, le_region) == + LE_ET_TCAM_CON && + tid_data->data[CUDBG_LETCAM_RSPDATA_IPV6_ACTIVE_INDEX] != + CUDBG_LETCAM_RSPDATA_IPV6_ACTIVE_VALUE) + ipv6 = 0; + + return ipv6; +} #endif diff --git a/sys/dev/cxgbe/cudbg/cudbg_flash_utils.c b/sys/dev/cxgbe/cudbg/cudbg_flash_utils.c index 6a39373d3e2..eefbde1cf19 100644 --- a/sys/dev/cxgbe/cudbg/cudbg_flash_utils.c +++ b/sys/dev/cxgbe/cudbg/cudbg_flash_utils.c @@ -1,5 +1,5 @@ /*- - * Copyright (c) 2017 Chelsio Communications, Inc. + * Copyright (c) 2017-2021 Chelsio Communications, Inc. * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -49,39 +49,38 @@ enum { }; int write_flash(struct adapter *adap, u32 start_sec, void *data, u32 size); -int read_flash(struct adapter *adap, u32 start_sec , void *data, u32 size, - u32 start_address); -void -update_skip_size(struct cudbg_flash_sec_info *sec_info, u32 size) +void update_skip_size(struct cudbg_flash_sec_info *psec_info, u32 size) { - sec_info->skip_size += size; + psec_info->skip_size += size; } -static -void set_sector_availability(struct cudbg_flash_sec_info *sec_info, - int sector_nu, int avail) +u32 get_skip_size(struct cudbg_flash_sec_info *psec_info) +{ + return psec_info->skip_size; +} + +static void set_sector_availability(struct cudbg_flash_sec_info *psec_info, + int sector_nu, int avail) { sector_nu -= CUDBG_START_SEC; if (avail) - set_dbg_bitmap(sec_info->sec_bitmap, sector_nu); + set_dbg_bitmap(psec_info->sec_bitmap, sector_nu); else - reset_dbg_bitmap(sec_info->sec_bitmap, sector_nu); + reset_dbg_bitmap(psec_info->sec_bitmap, sector_nu); } /* This function will return empty sector available for filling */ -static int -find_empty_sec(struct cudbg_flash_sec_info *sec_info) +static int find_empty_sec(struct cudbg_flash_sec_info *psec_info) { int i, index, bit; for (i = CUDBG_START_SEC; i < CUDBG_SF_MAX_SECTOR; i++) { index = (i - CUDBG_START_SEC) / 8; bit = (i - CUDBG_START_SEC) % 8; - if (!(sec_info->sec_bitmap[index] & (1 << bit))) + if (!(psec_info->sec_bitmap[index] & (1 << bit))) return i; } - return CUDBG_STATUS_FLASH_FULL; } @@ -91,22 +90,22 @@ static void update_headers(void *handle, struct cudbg_buffer *dbg_buff, u64 timestamp, u32 cur_entity_hdr_offset, u32 start_offset, u32 ext_size) { - struct cudbg_private *priv = handle; - struct cudbg_flash_sec_info *sec_info = &priv->sec_info; void *sec_hdr; struct cudbg_hdr *cudbg_hdr; struct cudbg_flash_hdr *flash_hdr; struct cudbg_entity_hdr *entity_hdr; + struct cudbg_flash_sec_info *psec_info; u32 hdr_offset; u32 data_hdr_size; u32 total_hdr_size; u32 sec_hdr_start_addr; + psec_info = ((struct cudbg_private *)handle)->psec_info; data_hdr_size = CUDBG_MAX_ENTITY * sizeof(struct cudbg_entity_hdr) + sizeof(struct cudbg_hdr); total_hdr_size = data_hdr_size + sizeof(struct cudbg_flash_hdr); sec_hdr_start_addr = CUDBG_SF_SECTOR_SIZE - total_hdr_size; - sec_hdr = sec_info->sec_data + sec_hdr_start_addr; + sec_hdr = psec_info->sec_data + sec_hdr_start_addr; flash_hdr = (struct cudbg_flash_hdr *)(sec_hdr); cudbg_hdr = (struct cudbg_hdr *)dbg_buff->data; @@ -114,7 +113,7 @@ static void update_headers(void *handle, struct cudbg_buffer *dbg_buff, /* initially initialize flash hdr and copy all data headers and * in next calling (else part) copy only current entity header */ - if ((start_offset - sec_info->skip_size) == data_hdr_size) { + if ((start_offset - psec_info->skip_size) == data_hdr_size) { flash_hdr->signature = CUDBG_FL_SIGNATURE; flash_hdr->major_ver = CUDBG_FL_MAJOR_VERSION; flash_hdr->minor_ver = CUDBG_FL_MINOR_VERSION; @@ -133,14 +132,14 @@ static void update_headers(void *handle, struct cudbg_buffer *dbg_buff, sizeof(struct cudbg_entity_hdr)); hdr_offset = data_hdr_size + sizeof(struct cudbg_flash_hdr); - flash_hdr->data_len = cudbg_hdr->data_len - sec_info->skip_size; + flash_hdr->data_len = cudbg_hdr->data_len - psec_info->skip_size; flash_hdr->timestamp = timestamp; entity_hdr = (struct cudbg_entity_hdr *)((char *)sec_hdr + sizeof(struct cudbg_flash_hdr) + cur_entity_hdr_offset); /* big entity like mc need to be skipped */ - 
entity_hdr->start_offset -= sec_info->skip_size; + entity_hdr->start_offset -= psec_info->skip_size; cudbg_hdr = (struct cudbg_hdr *)((char *)sec_hdr + sizeof(struct cudbg_flash_hdr)); @@ -154,12 +153,12 @@ int cudbg_write_flash(void *handle, u64 timestamp, void *data, u32 cur_entity_size, u32 ext_size) { - struct cudbg_private *priv = handle; - struct cudbg_init *cudbg_init = &priv->dbg_init; - struct cudbg_flash_sec_info *sec_info = &priv->sec_info; - struct adapter *adap = cudbg_init->adap; + struct cudbg_init *cudbg_init = NULL; + struct adapter *adap = NULL; struct cudbg_flash_hdr *flash_hdr = NULL; struct cudbg_buffer *dbg_buff = (struct cudbg_buffer *)data; + struct cudbg_flash_sec_info *psec_info; + struct cudbg_private *context; u32 data_hdr_size; u32 total_hdr_size; u32 tmp_size; @@ -170,15 +169,21 @@ int cudbg_write_flash(void *handle, u64 timestamp, void *data, int rc = 0; int sec; + context = (struct cudbg_private *)handle; + cudbg_init = &(context->dbg_init); + psec_info = context->psec_info; + adap = cudbg_init->adap; + data_hdr_size = CUDBG_MAX_ENTITY * sizeof(struct cudbg_entity_hdr) + sizeof(struct cudbg_hdr); total_hdr_size = data_hdr_size + sizeof(struct cudbg_flash_hdr); sec_hdr_start_addr = CUDBG_SF_SECTOR_SIZE - total_hdr_size; sec_data_size = sec_hdr_start_addr; - cudbg_init->print("\tWriting %u bytes to flash\n", cur_entity_size); + cudbg_init->print("\tWriting %u bytes to flash\n", + cur_entity_size); - /* this function will get header if sec_info->sec_data does not + /* this function will get header if psec_info->sec_data does not * have any header and * will update the header if it has header */ @@ -191,7 +196,7 @@ int cudbg_write_flash(void *handle, u64 timestamp, void *data, start_offset = dbg_buff->offset - cur_entity_size; } - flash_hdr = (struct cudbg_flash_hdr *)(sec_info->sec_data + + flash_hdr = (struct cudbg_flash_hdr *)(psec_info->sec_data + sec_hdr_start_addr); if (flash_hdr->data_len > CUDBG_FLASH_SIZE) { @@ -207,16 +212,16 @@ int cudbg_write_flash(void *handle, u64 timestamp, void *data, } while (cur_entity_size > 0) { - sec = find_empty_sec(sec_info); - if (sec_info->par_sec) { - sec_data_offset = sec_info->par_sec_offset; - set_sector_availability(sec_info, sec_info->par_sec, 0); - sec_info->par_sec = 0; - sec_info->par_sec_offset = 0; + sec = find_empty_sec(psec_info); + if (psec_info->par_sec) { + sec_data_offset = psec_info->par_sec_offset; + set_sector_availability(psec_info, psec_info->par_sec, 0); + psec_info->par_sec = 0; + psec_info->par_sec_offset = 0; } else { - sec_info->cur_seq_no++; - flash_hdr->sec_seq_no = sec_info->cur_seq_no; + psec_info->cur_seq_no++; + flash_hdr->sec_seq_no = psec_info->cur_seq_no; sec_data_offset = 0; } @@ -224,22 +229,22 @@ int cudbg_write_flash(void *handle, u64 timestamp, void *data, tmp_size = sec_data_size - sec_data_offset; } else { tmp_size = cur_entity_size; - sec_info->par_sec = sec; - sec_info->par_sec_offset = cur_entity_size + + psec_info->par_sec = sec; + psec_info->par_sec_offset = cur_entity_size + sec_data_offset; } - memcpy((void *)((char *)sec_info->sec_data + sec_data_offset), + memcpy((void *)((char *)psec_info->sec_data + sec_data_offset), (void *)((char *)dbg_buff->data + start_offset), tmp_size); - rc = write_flash(adap, sec, sec_info->sec_data, + rc = write_flash(adap, sec, psec_info->sec_data, CUDBG_SF_SECTOR_SIZE); if (rc) goto out; cur_entity_size -= tmp_size; - set_sector_availability(sec_info, sec, 1); + set_sector_availability(psec_info, sec, 1); start_offset += tmp_size; } out: @@ 
-252,11 +257,9 @@ int write_flash(struct adapter *adap, u32 start_sec, void *data, u32 size) unsigned int i, n; unsigned int sf_sec_size; int rc = 0; - u8 *ptr = (u8 *)data; sf_sec_size = adap->params.sf_size/adap->params.sf_nsec; - addr = start_sec * CUDBG_SF_SECTOR_SIZE; i = DIV_ROUND_UP(size,/* # of sectors spanned */ sf_sec_size); @@ -267,7 +270,6 @@ int write_flash(struct adapter *adap, u32 start_sec, void *data, u32 size) * If size == 0 then we're simply erasing the FLASH sectors associated * with the on-adapter OptionROM Configuration File. */ - if (rc || size == 0) goto out; @@ -289,204 +291,3 @@ int write_flash(struct adapter *adap, u32 start_sec, void *data, u32 size) out: return rc; } - -int cudbg_read_flash_details(void *handle, struct cudbg_flash_hdr *data) -{ - int rc; - rc = cudbg_read_flash(handle, (void *)data, - sizeof(struct cudbg_flash_hdr), 0); - - return rc; -} - -int cudbg_read_flash_data(void *handle, void *buf, u32 buf_size) -{ - int rc; - u32 total_hdr_size, data_header_size; - void *payload = NULL; - u32 payload_size = 0; - - data_header_size = CUDBG_MAX_ENTITY * sizeof(struct cudbg_entity_hdr) + - sizeof(struct cudbg_hdr); - total_hdr_size = data_header_size + sizeof(struct cudbg_flash_hdr); - - /* Copy flash header to buffer */ - rc = cudbg_read_flash(handle, buf, total_hdr_size, 0); - if (rc != 0) - goto out; - payload = (char *)buf + total_hdr_size; - payload_size = buf_size - total_hdr_size; - - /* Reading flash data to buf */ - rc = cudbg_read_flash(handle, payload, payload_size, 1); - if (rc != 0) - goto out; - -out: - return rc; -} - -int cudbg_read_flash(void *handle, void *data, u32 size, int data_flag) -{ - struct cudbg_private *priv = handle; - struct cudbg_init *cudbg_init = &priv->dbg_init; - struct cudbg_flash_sec_info *sec_info = &priv->sec_info; - struct adapter *adap = cudbg_init->adap; - struct cudbg_flash_hdr flash_hdr; - u32 total_hdr_size; - u32 data_hdr_size; - u32 sec_hdr_start_addr; - u32 tmp_size; - u32 data_offset = 0; - u32 i, j; - int rc; - - rc = t4_get_flash_params(adap); - if (rc) { - cudbg_init->print("\nGet flash params failed." 
- "Try Again...readflash\n\n"); - return rc; - } - - data_hdr_size = CUDBG_MAX_ENTITY * sizeof(struct cudbg_entity_hdr) + - sizeof(struct cudbg_hdr); - total_hdr_size = data_hdr_size + sizeof(struct cudbg_flash_hdr); - sec_hdr_start_addr = CUDBG_SF_SECTOR_SIZE - total_hdr_size; - - if (!data_flag) { - /* fill header */ - if (!sec_info->max_timestamp) { - /* finding max time stamp because it may - * have older filled sector also - */ - memset(&flash_hdr, 0, sizeof(struct cudbg_flash_hdr)); - rc = read_flash(adap, CUDBG_START_SEC, &flash_hdr, - sizeof(struct cudbg_flash_hdr), - sec_hdr_start_addr); - - if (flash_hdr.signature == CUDBG_FL_SIGNATURE) { - sec_info->max_timestamp = flash_hdr.timestamp; - } else { - rc = read_flash(adap, CUDBG_START_SEC + 1, - &flash_hdr, - sizeof(struct cudbg_flash_hdr), - sec_hdr_start_addr); - - if (flash_hdr.signature == CUDBG_FL_SIGNATURE) - sec_info->max_timestamp = - flash_hdr.timestamp; - else { - cudbg_init->print("\n\tNo cudbg dump "\ - "found in flash\n\n"); - return CUDBG_STATUS_NO_SIGNATURE; - } - - } - - /* finding max sequence number because max sequenced - * sector has updated header - */ - for (i = CUDBG_START_SEC; i < - CUDBG_SF_MAX_SECTOR; i++) { - memset(&flash_hdr, 0, - sizeof(struct cudbg_flash_hdr)); - rc = read_flash(adap, i, &flash_hdr, - sizeof(struct cudbg_flash_hdr), - sec_hdr_start_addr); - - if (flash_hdr.signature == CUDBG_FL_SIGNATURE && - sec_info->max_timestamp == - flash_hdr.timestamp && - sec_info->max_seq_no <= - flash_hdr.sec_seq_no) { - if (sec_info->max_seq_no == - flash_hdr.sec_seq_no) { - if (sec_info->hdr_data_len < - flash_hdr.data_len) - sec_info->max_seq_sec = i; - } else { - sec_info->max_seq_sec = i; - sec_info->hdr_data_len = - flash_hdr.data_len; - } - sec_info->max_seq_no = flash_hdr.sec_seq_no; - } - } - } - rc = read_flash(adap, sec_info->max_seq_sec, - (struct cudbg_flash_hdr *)data, - size, sec_hdr_start_addr); - - if (rc) - cudbg_init->print("Read flash header failed, rc %d\n", - rc); - - return rc; - } - - /* finding sector sequence sorted */ - for (i = 1; i <= sec_info->max_seq_no; i++) { - for (j = CUDBG_START_SEC; j < CUDBG_SF_MAX_SECTOR; j++) { - memset(&flash_hdr, 0, sizeof(struct cudbg_flash_hdr)); - rc = read_flash(adap, j, &flash_hdr, - sizeof(struct cudbg_flash_hdr), - sec_hdr_start_addr); - - if (flash_hdr.signature == - CUDBG_FL_SIGNATURE && - sec_info->max_timestamp == - flash_hdr.timestamp && - flash_hdr.sec_seq_no == i) { - if (size + total_hdr_size > - CUDBG_SF_SECTOR_SIZE) - tmp_size = CUDBG_SF_SECTOR_SIZE - - total_hdr_size; - else - tmp_size = size; - - if ((i != sec_info->max_seq_no) || - (i == sec_info->max_seq_no && - j == sec_info->max_seq_sec)){ - /* filling data buffer with sector data - * except sector header - */ - rc = read_flash(adap, j, - (void *)((char *)data + - data_offset), - tmp_size, 0); - data_offset += (tmp_size); - size -= (tmp_size); - break; - } - } - } - } - - return rc; -} - -int read_flash(struct adapter *adap, u32 start_sec , void *data, u32 size, - u32 start_address) -{ - unsigned int addr, i, n; - int rc; - u32 *ptr = (u32 *)data; - addr = start_sec * CUDBG_SF_SECTOR_SIZE + start_address; - size = size / 4; - for (i = 0; i < size; i += SF_PAGE_SIZE) { - if ((size - i) < SF_PAGE_SIZE) - n = size - i; - else - n = SF_PAGE_SIZE; - rc = t4_read_flash(adap, addr, n, ptr, 0); - if (rc) - goto out; - - addr = addr + (n*4); - ptr += n; - } - - return 0; -out: - return rc; -} diff --git a/sys/dev/cxgbe/cudbg/cudbg_lib.c b/sys/dev/cxgbe/cudbg/cudbg_lib.c index 
aeebd81e335..2dcfe5ba42d 100644 --- a/sys/dev/cxgbe/cudbg/cudbg_lib.c +++ b/sys/dev/cxgbe/cudbg/cudbg_lib.c @@ -1,5 +1,5 @@ /*- - * Copyright (c) 2017 Chelsio Communications, Inc. + * Copyright (c) 2017-2021 Chelsio Communications, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -34,26 +34,54 @@ __FBSDID("$FreeBSD$"); #include "common/t4_regs.h" #include "cudbg.h" #include "cudbg_lib_common.h" -#include "cudbg_lib.h" #include "cudbg_entity.h" +#include "cudbg_lib.h" +#include "fastlz.h" + #define BUFFER_WARN_LIMIT 10000000 -struct large_entity large_entity_list[] = { - {CUDBG_EDC0, 0, 0}, - {CUDBG_EDC1, 0 , 0}, - {CUDBG_MC0, 0, 0}, - {CUDBG_MC1, 0, 0} -}; +#define GET_SCRATCH_BUFF(dbg_buff, size, scratch_buff) \ +do { \ + rc = get_scratch_buff(dbg_buff, size, scratch_buff); \ + if (rc) \ + return rc; \ +} while (0) + +#define WRITE_AND_COMPRESS_SCRATCH_BUFF(scratch_buff, dbg_buff) \ +do { \ + struct cudbg_hdr *cudbg_hdr = (struct cudbg_hdr *)(dbg_buff->data); \ + if (cudbg_hdr->compress_type == CUDBG_COMPRESSION_NONE) { \ + rc = write_to_buf(pdbg_init, dbg_buff->data, dbg_buff->size, \ + &dbg_buff->offset, (scratch_buff)->data, \ + (scratch_buff)->size); \ + } else { \ + rc = write_compression_hdr(pdbg_init, scratch_buff, dbg_buff); \ + if (rc) \ + goto err1; \ + rc = compress_buff(pdbg_init, scratch_buff, dbg_buff); \ + } \ +} while (0) + +#define WRITE_AND_RELEASE_SCRATCH_BUFF(scratch_buff, dbg_buff) \ +do { \ + WRITE_AND_COMPRESS_SCRATCH_BUFF(scratch_buff, dbg_buff); \ +err1: \ + release_scratch_buff(scratch_buff, dbg_buff); \ +} while (0) + static int is_fw_attached(struct cudbg_init *pdbg_init) { + if (pdbg_init->dbg_params[CUDBG_FW_NO_ATTACH_PARAM].param_type == + CUDBG_FW_NO_ATTACH_PARAM) + return 0; return (pdbg_init->adap->flags & FW_OK); } /* This function will add additional padding bytes into debug_buffer to make it * 4 byte aligned.*/ -static void align_debug_buffer(struct cudbg_buffer *dbg_buff, +void align_debug_buffer(struct cudbg_buffer *dbg_buff, struct cudbg_entity_hdr *entity_hdr) { u8 zero_buf[4] = {0}; @@ -67,29 +95,157 @@ static void align_debug_buffer(struct cudbg_buffer *dbg_buff, dbg_buff->offset += padding; entity_hdr->num_pad = padding; } - entity_hdr->size = dbg_buff->offset - entity_hdr->start_offset; } +/* Same as align_debug_buffer() above, except, entity_hdr->size is not + * calculated here, but rather updated only. 
+ */ +static int align_and_update_debug_buffer(struct cudbg_init *pdbg_init, + struct cudbg_entity_hdr *entity_hdr) +{ + u8 zero_buf[4] = {0}; + u8 padding, remain; + int rc; + + remain = entity_hdr->size % 4; + padding = 4 - remain; + if (remain) { + rc = pdbg_init->write_to_file_cb(CUDBG_FILE_WRITE_DATA, 0, + zero_buf, padding); + if (rc) + return rc; + + entity_hdr->num_pad = padding; + entity_hdr->size += padding; + } + return 0; +} + +static void cudbg_pcie_cdbg_read(struct cudbg_init *pdbg_init, u32 *buff, u32 nregs, + u32 start_index) +{ + cudbg_access_lock_acquire(pdbg_init); + t4_read_indirect(pdbg_init->adap, A_PCIE_CDEBUG_INDEX, 0x5a18, + buff, nregs, start_index); + cudbg_access_lock_release(pdbg_init); +} + +static int cudbg_get_portinfo(struct cudbg_init *cudbg, u8 port, struct port_data *pi) +{ + struct adapter *adapter = cudbg->adap; + struct fw_port_cmd cmd; + u32 lstatus; + int ret; + + memset(&cmd, 0, sizeof(cmd)); + cmd.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) | + F_FW_CMD_REQUEST | F_FW_CMD_READ | + V_FW_PORT_CMD_PORTID(port)); + cmd.action_to_len16 = + cpu_to_be32(V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_GET_PORT_INFO) | + FW_LEN16(cmd)); + ret = t4_wr_mbox(adapter, adapter->mbox, &cmd, sizeof(cmd), &cmd); + if (ret) + return ret; + + lstatus = be32_to_cpu(cmd.u.info.lstatus_to_modtype); + + pi->tx_chan = port; + pi->port_type = G_FW_PORT_CMD_PTYPE(lstatus); + pi->mod_type = G_FW_PORT_CMD_MODTYPE(lstatus); + + return 0; +} + +static inline bool t4_is_inserted_mod_type(unsigned int fw_mod_type) +{ + return (fw_mod_type != FW_PORT_MOD_TYPE_NONE && + fw_mod_type != FW_PORT_MOD_TYPE_NOTSUPPORTED && + fw_mod_type != FW_PORT_MOD_TYPE_UNKNOWN && + fw_mod_type != FW_PORT_MOD_TYPE_ERROR); +} + +static int cudbg_get_module_info(struct cudbg_init *cudbg, struct port_data *pi, + struct cudbg_modinfo *modinfo) +{ + u8 sff8472_comp, sff_diag_type, sff_rev; + struct adapter *adapter = cudbg->adap; + int ret; + + if (!t4_is_inserted_mod_type(pi->mod_type)) + return -EINVAL; + + switch (pi->port_type) { + case FW_PORT_TYPE_SFP: + case FW_PORT_TYPE_QSA: + case FW_PORT_TYPE_SFP28: + ret = t4_i2c_rd(adapter, adapter->mbox, pi->tx_chan, + I2C_DEV_ADDR_A0, SFF_8472_COMP_ADDR, + SFF_8472_COMP_LEN, &sff8472_comp); + if (ret) + return ret; + ret = t4_i2c_rd(adapter, adapter->mbox, pi->tx_chan, + I2C_DEV_ADDR_A0, SFP_DIAG_TYPE_ADDR, + SFP_DIAG_TYPE_LEN, &sff_diag_type); + if (ret) + return ret; + + if (!sff8472_comp || (sff_diag_type & 4)) { + modinfo->type = CUDBG_MODULE_SFF_8079; + modinfo->eeprom_len = + eth_module_sff_len_array[CUDBG_MODULE_SFF_8079]; + } else { + modinfo->type = CUDBG_MODULE_SFF_8472; + modinfo->eeprom_len = + eth_module_sff_len_array[CUDBG_MODULE_SFF_8472]; + } + break; + + case FW_PORT_TYPE_QSFP: + case FW_PORT_TYPE_QSFP_10G: + case FW_PORT_TYPE_CR_QSFP: + case FW_PORT_TYPE_CR2_QSFP: + case FW_PORT_TYPE_CR4_QSFP: + ret = t4_i2c_rd(adapter, adapter->mbox, pi->tx_chan, + I2C_DEV_ADDR_A0, SFF_REV_ADDR, + SFF_REV_LEN, &sff_rev); + /* For QSFP type ports, revision value >= 3 + * means the SFP is 8636 compliant. 
+ */ + if (ret) + return ret; + if (sff_rev >= 0x3) { + modinfo->type = CUDBG_MODULE_SFF_8636; + modinfo->eeprom_len = + eth_module_sff_len_array[CUDBG_MODULE_SFF_8636]; + } else { + modinfo->type = CUDBG_MODULE_SFF_8436; + modinfo->eeprom_len = + eth_module_sff_len_array[CUDBG_MODULE_SFF_8436]; + } + break; + + default: + return -EINVAL; + } + + return 0; +} + static void read_sge_ctxt(struct cudbg_init *pdbg_init, u32 cid, enum ctxt_type ctype, u32 *data) { struct adapter *padap = pdbg_init->adap; int rc = -1; - if (is_fw_attached(pdbg_init)) { - rc = begin_synchronized_op(padap, NULL, SLEEP_OK | INTR_OK, - "t4cudf"); - if (rc != 0) - goto out; - rc = t4_sge_ctxt_rd(padap, padap->mbox, cid, ctype, - data); - end_synchronized_op(padap, 0); - } - -out: - if (rc) + if (is_fw_attached(pdbg_init)) + rc = t4_sge_ctxt_rd(padap, padap->mbox, cid, ctype, data); + if (rc) { + cudbg_access_lock_acquire(pdbg_init); t4_sge_ctxt_rd_bd(padap, cid, ctype, data); + cudbg_access_lock_release(pdbg_init); + } } static int get_next_ext_entity_hdr(void *outbuf, u32 *ext_size, @@ -97,10 +253,10 @@ static int get_next_ext_entity_hdr(void *outbuf, u32 *ext_size, struct cudbg_entity_hdr **entity_hdr) { struct cudbg_hdr *cudbg_hdr = (struct cudbg_hdr *)outbuf; - int rc = 0; u32 ext_offset = cudbg_hdr->data_len; - *ext_size = 0; + int rc = 0; + *ext_size = 0; if (dbg_buff->size - dbg_buff->offset <= sizeof(struct cudbg_entity_hdr)) { rc = CUDBG_STATUS_BUFFER_SHORT; @@ -109,35 +265,31 @@ static int get_next_ext_entity_hdr(void *outbuf, u32 *ext_size, *entity_hdr = (struct cudbg_entity_hdr *) ((char *)outbuf + cudbg_hdr->data_len); - /* Find the last extended entity header */ while ((*entity_hdr)->size) { ext_offset += sizeof(struct cudbg_entity_hdr) + (*entity_hdr)->size; - *ext_size += (*entity_hdr)->size + sizeof(struct cudbg_entity_hdr); - if (dbg_buff->size - dbg_buff->offset + *ext_size <= sizeof(struct cudbg_entity_hdr)) { rc = CUDBG_STATUS_BUFFER_SHORT; goto err; } - if (ext_offset != (*entity_hdr)->next_ext_offset) { + if (*ext_size != (*entity_hdr)->next_ext_offset) { ext_offset -= sizeof(struct cudbg_entity_hdr) + (*entity_hdr)->size; + *ext_size -= (*entity_hdr)->size + + sizeof(struct cudbg_entity_hdr); break; } - (*entity_hdr)->next_ext_offset = *ext_size; - *entity_hdr = (struct cudbg_entity_hdr *) ((char *)outbuf + ext_offset); } - /* update the data offset */ dbg_buff->offset = ext_offset; err: @@ -149,39 +301,42 @@ static int wr_entity_to_flash(void *handle, struct cudbg_buffer *dbg_buff, u32 cur_entity_size, int entity_nu, u32 ext_size) { - struct cudbg_private *priv = handle; - struct cudbg_init *cudbg_init = &priv->dbg_init; - struct cudbg_flash_sec_info *sec_info = &priv->sec_info; - u64 timestamp; u32 cur_entity_hdr_offset = sizeof(struct cudbg_hdr); + struct cudbg_init *cudbg_init = NULL; + struct cudbg_flash_sec_info *psec_info; + struct cudbg_private *context; u32 remain_flash_size; u32 flash_data_offset; u32 data_hdr_size; + u64 timestamp; int rc = -1; + context = (struct cudbg_private *)handle; + cudbg_init = &(context->dbg_init); + psec_info = context->psec_info; + data_hdr_size = CUDBG_MAX_ENTITY * sizeof(struct cudbg_entity_hdr) + sizeof(struct cudbg_hdr); - flash_data_offset = (FLASH_CUDBG_NSECS * (sizeof(struct cudbg_flash_hdr) + data_hdr_size)) + - (cur_entity_data_offset - data_hdr_size); + (cur_entity_data_offset - data_hdr_size) - + get_skip_size(psec_info); if (flash_data_offset > CUDBG_FLASH_SIZE) { - update_skip_size(sec_info, cur_entity_size); - if (cudbg_init->verbose) - 
cudbg_init->print("Large entity skipping...\n"); + update_skip_size(psec_info, cur_entity_size); + cudbg_init->print("FAIL - no space left in flash. Skipping...\n"); return rc; } remain_flash_size = CUDBG_FLASH_SIZE - flash_data_offset; - if (cur_entity_size > remain_flash_size) { - update_skip_size(sec_info, cur_entity_size); - if (cudbg_init->verbose) - cudbg_init->print("Large entity skipping...\n"); + update_skip_size(psec_info, cur_entity_size); + cudbg_init->print("FAIL - entity too large to write to flash. Skipping...\n"); } else { - timestamp = 0; + timestamp = + cudbg_init->dbg_params[CUDBG_TIMESTAMP_PARAM]. + u.time; cur_entity_hdr_offset += (sizeof(struct cudbg_entity_hdr) * @@ -192,34 +347,62 @@ static int wr_entity_to_flash(void *handle, struct cudbg_buffer *dbg_buff, cur_entity_hdr_offset, cur_entity_size, ext_size); - if (rc == CUDBG_STATUS_FLASH_FULL && cudbg_init->verbose) + if (rc == CUDBG_STATUS_FLASH_FULL) cudbg_init->print("\n\tFLASH is full... " "can not write in flash more\n\n"); } - return rc; } int cudbg_collect(void *handle, void *outbuf, u32 *outbuf_size) { - struct cudbg_entity_hdr *entity_hdr = NULL; + struct cudbg_private *context = (struct cudbg_private *)handle; + struct cudbg_init *cudbg_init = &(context->dbg_init); struct cudbg_entity_hdr *ext_entity_hdr = NULL; - struct cudbg_hdr *cudbg_hdr; - struct cudbg_buffer dbg_buff; - struct cudbg_error cudbg_err = {0}; - int large_entity_code; - - u8 *dbg_bitmap = ((struct cudbg_private *)handle)->dbg_init.dbg_bitmap; - struct cudbg_init *cudbg_init = - &(((struct cudbg_private *)handle)->dbg_init); + u8 *dbg_bitmap = context->dbg_init.dbg_bitmap; + int large_entity_code, large_entity_list_size; + struct cudbg_entity_hdr *entity_hdr = NULL; + struct large_entity large_entity_list[] = { + {CUDBG_EDC0, 0, 0}, + {CUDBG_EDC1, 0, 0}, + {CUDBG_MC0, 0, 0}, + {CUDBG_MC1, 0, 0} + }; struct adapter *padap = cudbg_init->adap; + struct cudbg_param *dbg_param = NULL; + struct cudbg_error cudbg_err = {0}; u32 total_size, remaining_buf_size; - u32 ext_size = 0; - int index, bit, i, rc = -1; - int all; + int j, entity_priority_list_size; + int index, bit, i, rc = -1, all; + u32 hdr_size = 0, ext_off = 0; + struct cudbg_buffer dbg_buff; + struct cudbg_hdr *cudbg_hdr; + bool do_file_write = 0; + bool skip_se = false; bool flag_ext = 0; + u32 ext_size = 0; + u32 fw_err_val; + + large_entity_list_size = ARRAY_SIZE(large_entity_list); + reset_skip_entity(large_entity_list, large_entity_list_size); + + dbg_param = &cudbg_init->dbg_params[CUDBG_FW_NO_ATTACH_PARAM]; + + do_file_write = cudbg_init->write_to_file_cb ? true : false; - reset_skip_entity(); + /* Don't talk to firmware if it's crashed */ + fw_err_val = t4_read_reg(padap, A_PCIE_FW); + if (dbg_param->param_type != CUDBG_FW_NO_ATTACH_PARAM && + (fw_err_val & F_PCIE_FW_ERR)) + dbg_param->param_type = CUDBG_FW_NO_ATTACH_PARAM; + + /* If no valid mbox is found and if firmware is still alive, + * skip all side effect entities that may conflict with + * firmware or driver. 
+ */ + if ((cudbg_init->dbg_params[CUDBG_SKIP_MBOX_PARAM].param_type == + CUDBG_SKIP_MBOX_PARAM) && !(fw_err_val & F_PCIE_FW_ERR)) + skip_se = true; dbg_buff.data = outbuf; dbg_buff.size = *outbuf_size; @@ -231,32 +414,31 @@ int cudbg_collect(void *handle, void *outbuf, u32 *outbuf_size) cudbg_hdr->major_ver = CUDBG_MAJOR_VERSION; cudbg_hdr->minor_ver = CUDBG_MINOR_VERSION; cudbg_hdr->max_entities = CUDBG_MAX_ENTITY; - cudbg_hdr->chip_ver = padap->params.chipid; - + cudbg_hdr->chip_ver = (padap->params.chipid << 4) | padap->params.rev; if (cudbg_hdr->data_len) flag_ext = 1; if (cudbg_init->use_flash) { -#ifndef notyet + /* We can't do write to file and write to flash at the + * same time. + */ + if (do_file_write) + return CUDBG_STATUS_NOT_SUPPORTED; + rc = t4_get_flash_params(padap); if (rc) { - if (cudbg_init->verbose) - cudbg_init->print("\nGet flash params failed.\n\n"); + cudbg_init->print("\nGet flash params failed.\n\n"); cudbg_init->use_flash = 0; } -#endif -#ifdef notyet /* Timestamp is mandatory. If it is not passed then disable * flash support */ if (!cudbg_init->dbg_params[CUDBG_TIMESTAMP_PARAM].u.time) { - if (cudbg_init->verbose) - cudbg_init->print("\nTimestamp param missing," + cudbg_init->print("\nTimestamp param missing," "so ignoring flash write request\n\n"); cudbg_init->use_flash = 0; } -#endif } if (sizeof(struct cudbg_entity_hdr) * CUDBG_MAX_ENTITY > @@ -284,16 +466,37 @@ int cudbg_collect(void *handle, void *outbuf, u32 *outbuf_size) sizeof(struct cudbg_entity_hdr); } + hdr_size = cudbg_hdr->hdr_len + CUDBG_MAX_ENTITY * + sizeof(struct cudbg_entity_hdr); + if (do_file_write) { + /* Write initial cudbg header to file. Do this to ensure + * data gets written after the cudbg header. + */ + rc = cudbg_init->write_to_file_cb(CUDBG_FILE_WRITE_HEADER, 0, + (u8 *)cudbg_hdr, hdr_size); + if (rc) + goto err; + } + total_size = dbg_buff.offset; all = dbg_bitmap[0] & (1 << CUDBG_ALL); - /*sort(large_entity_list);*/ - - for (i = 1; i < CUDBG_MAX_ENTITY; i++) { + entity_priority_list_size = sizeof(entity_priority_list) / sizeof(int); + /* entity_priority_list_size does not include CUDBG_ALL so + * entity_priority_list_size + 1 */ + if (entity_priority_list_size != (CUDBG_MAX_ENTITY - 1)) + cudbg_init->print("WARNING: CUDBG_MAX_ENTITY(%d) and "\ + "entity_priority_list size(%d) mismatch\n", + CUDBG_MAX_ENTITY, + entity_priority_list_size + 1); + + for( j = 0; j < entity_priority_list_size; j++) { + i = entity_priority_list[j]; index = i / 8; bit = i % 8; - if (entity_list[i].bit == CUDBG_EXT_ENTITY) + if (entity_list[i].bit == CUDBG_EXT_ENTITY || + entity_list[i].bit == CUDBG_QDESC) continue; if (all || (dbg_bitmap[index] & (1 << bit))) { @@ -313,51 +516,116 @@ int cudbg_collect(void *handle, void *outbuf, u32 *outbuf_size) /* move the offset after the ext header */ dbg_buff.offset += sizeof(struct cudbg_entity_hdr); + + ext_off = total_size + ext_size; + if (do_file_write) { + /* Write initial entity header to + * file. Do this to ensure ext entity + * data is written after the header. + */ + rc = cudbg_init->write_to_file_cb( + CUDBG_FILE_WRITE_AT_OFFSET, + ext_off, (u8 *)entity_hdr, + sizeof(struct cudbg_entity_hdr)); + if (rc) + goto err; + } } entity_hdr->entity_type = i; - entity_hdr->start_offset = dbg_buff.offset; - /* process each entity by calling process_entity fp */ - remaining_buf_size = dbg_buff.size - dbg_buff.offset; + if (do_file_write) { + /* If we're immediately writing to file, then + * the outbuf is reused, so update the entity + * header accordingly. 
+ */ + entity_hdr->start_offset = total_size; + if (flag_ext) + entity_hdr->start_offset += ext_size + + sizeof(struct cudbg_entity_hdr); - if ((remaining_buf_size <= BUFFER_WARN_LIMIT) && - is_large_entity(i)) { - if (cudbg_init->verbose) - cudbg_init->print("Skipping %s\n", - entity_list[i].name); - skip_entity(i); - continue; + cudbg_init->cur_entity_hdr = (void *)entity_hdr; } else { + entity_hdr->start_offset = dbg_buff.offset; + } - /* If fw_attach is 0, then skip entities which - * communicates with firmware - */ - - if (!is_fw_attached(cudbg_init) && - (entity_list[i].flag & - (1 << ENTITY_FLAG_FW_NO_ATTACH))) { - if (cudbg_init->verbose) - cudbg_init->print("Skipping %s entity,"\ - "because fw_attach "\ - "is 0\n", + if (!do_file_write) { + remaining_buf_size = dbg_buff.size - + dbg_buff.offset; + if ((remaining_buf_size <= BUFFER_WARN_LIMIT) && + is_large_entity(large_entity_list, + large_entity_list_size, + i)) { + cudbg_init->print("Skipping %s\n", entity_list[i].name); + skip_entity(large_entity_list, + large_entity_list_size, i); continue; } + } + + /* If fw_attach is 0, then skip entities which + * communicates with firmware + */ + + if (dbg_param->param_type == CUDBG_FW_NO_ATTACH_PARAM && + (entity_list[i].flag & + (1 << ENTITY_FLAG_FW_NO_ATTACH))) { + cudbg_init->print("Skipping %s entity, because fw_attach is 0\n", + entity_list[i].name); + continue; + } - if (cudbg_init->verbose) - cudbg_init->print("collecting debug entity: "\ - "%s\n", entity_list[i].name); - memset(&cudbg_err, 0, - sizeof(struct cudbg_error)); - rc = process_entity[i-1](cudbg_init, &dbg_buff, - &cudbg_err); + if (cudbg_init->verbose) { + cudbg_init->print("collecting debug entity[%d]: %s\n", + i, entity_list[i].name); + } + memset(&cudbg_err, 0, sizeof(struct cudbg_error)); + if (skip_se && + (entity_list[i].flag & + (1 << ENTITY_FLAG_NEED_MBOX))) { + cudbg_init->print("No Mbox available. Skipping %s entity\n", + entity_list[i].name); + rc = CUDBG_STATUS_NO_MBOX_PERM; + } else { + /* process each entity by calling process_entity + * fp + */ + rc = begin_synchronized_op(padap, NULL, + SLEEP_OK | INTR_OK, "t4dbg1"); + if (rc == 0) { + if (hw_off_limits(padap)) + rc = ENXIO; + else + rc = process_entity[i-1](cudbg_init, + &dbg_buff, &cudbg_err); + end_synchronized_op(padap, 0); + } } if (rc) { entity_hdr->size = 0; - dbg_buff.offset = entity_hdr->start_offset; - } else - align_debug_buffer(&dbg_buff, entity_hdr); + if (do_file_write) + /* Reuse outbuf for collecting next + * entity + */ + dbg_buff.offset = hdr_size; + else { + memset((char *)outbuf + entity_hdr->start_offset, 0, + dbg_buff.offset - entity_hdr->start_offset); + dbg_buff.offset = + entity_hdr->start_offset; + } + } else { + if (do_file_write) { + rc = align_and_update_debug_buffer(cudbg_init, + entity_hdr); + if (rc) + return rc; + } else { + align_debug_buffer(&dbg_buff, + entity_hdr); + } + } if (cudbg_err.sys_err) rc = CUDBG_SYSTEM_ERROR; @@ -385,6 +653,26 @@ int cudbg_collect(void *handle, void *outbuf, u32 *outbuf_size) ext_entity_hdr->size = ext_size; entity_hdr->next_ext_offset = ext_size; entity_hdr->flag |= CUDBG_EXT_DATA_VALID; + if (do_file_write) { + /* Flush all cached data to file. Do + * this to ensure that we can write + * the next extended entity's header + * appropriately after the data. 
+ */ + rc = cudbg_init->write_to_file_cb( + CUDBG_FILE_WRITE_FLUSH, + 0, NULL, 0); + if (rc) + goto err; + + /* Update entity header in file */ + rc = cudbg_init->write_to_file_cb( + CUDBG_FILE_WRITE_AT_OFFSET, + ext_off, (u8 *)entity_hdr, + sizeof(struct cudbg_entity_hdr)); + if (rc) + goto err; + } } if (cudbg_init->use_flash) { @@ -432,11 +720,12 @@ int cudbg_collect(void *handle, void *outbuf, u32 *outbuf_size) /* If fw_attach is 0, then skip entities which * communicates with firmware */ - if (!is_fw_attached(cudbg_init) && + + if (dbg_param->param_type == + CUDBG_FW_NO_ATTACH_PARAM && (entity_list[large_entity_code].flag & (1 << ENTITY_FLAG_FW_NO_ATTACH))) { - if (cudbg_init->verbose) - cudbg_init->print("Skipping %s entity,"\ + cudbg_init->print("Skipping %s entity,"\ "because fw_attach "\ "is 0\n", entity_list[large_entity_code] @@ -446,16 +735,35 @@ int cudbg_collect(void *handle, void *outbuf, u32 *outbuf_size) entity_hdr->entity_type = large_entity_code; entity_hdr->start_offset = dbg_buff.offset; - if (cudbg_init->verbose) + if (cudbg_init->verbose) { cudbg_init->print("Re-trying debug entity: %s\n", - entity_list[large_entity_code].name); + entity_list[large_entity_code].name); + } memset(&cudbg_err, 0, sizeof(struct cudbg_error)); - rc = process_entity[large_entity_code - 1](cudbg_init, - &dbg_buff, - &cudbg_err); + if (skip_se && + (entity_list[i].flag & + (1 << ENTITY_FLAG_NEED_MBOX))) { + cudbg_init->print("No Mbox available. Skipping %s entity\n", + entity_list[i].name); + rc = CUDBG_STATUS_NO_MBOX_PERM; + } else { + rc = begin_synchronized_op(padap, NULL, + SLEEP_OK | INTR_OK, "t4dbg2"); + if (rc == 0) { + if (hw_off_limits(padap)) + rc = ENXIO; + else + rc = process_entity[large_entity_code - 1](cudbg_init, + &dbg_buff, &cudbg_err); + end_synchronized_op(padap, 0); + } + } + if (rc) { entity_hdr->size = 0; + memset((char *)outbuf + entity_hdr->start_offset, 0, + dbg_buff.offset - entity_hdr->start_offset); dbg_buff.offset = entity_hdr->start_offset; } else align_debug_buffer(&dbg_buff, entity_hdr); @@ -511,52 +819,66 @@ int cudbg_collect(void *handle, void *outbuf, u32 *outbuf_size) } } - cudbg_hdr->data_len = total_size; + if (flag_ext) + total_size += ext_size; + *outbuf_size = total_size; + cudbg_hdr->data_len = total_size; - if (flag_ext) - *outbuf_size += ext_size; + if (do_file_write) { + /* Flush all cached data to file */ + rc = cudbg_init->write_to_file_cb(CUDBG_FILE_WRITE_FLUSH, 0, + NULL, 0); + if (rc) + return rc; + + /* Update cudbg header */ + rc = cudbg_init->write_to_file_cb(CUDBG_FILE_WRITE_HEADER, 0, + (void *)cudbg_hdr, hdr_size); + if (rc) + return rc; + } return 0; err: return rc; } -void reset_skip_entity(void) +void reset_skip_entity(struct large_entity *large_entity_list, + int large_entity_list_size) { int i; - for (i = 0; i < ARRAY_SIZE(large_entity_list); i++) + for (i = 0; i < large_entity_list_size; i++) large_entity_list[i].skip_flag = 0; } -void skip_entity(int entity_code) +void skip_entity(struct large_entity *large_entity_list, + int large_entity_list_size, int entity_code) { int i; - for (i = 0; i < sizeof(large_entity_list) / sizeof(struct large_entity); - i++) { + + for (i = 0; i < large_entity_list_size; i++) if (large_entity_list[i].entity_code == entity_code) large_entity_list[i].skip_flag = 1; - } } -int is_large_entity(int entity_code) +int is_large_entity(struct large_entity *large_entity_list, + int large_entity_list_size, int entity_code) { int i; - for (i = 0; i < sizeof(large_entity_list) / sizeof(struct large_entity); - 
i++) { + for (i = 0; i < large_entity_list_size; i++) if (large_entity_list[i].entity_code == entity_code) return 1; - } return 0; } int get_entity_hdr(void *outbuf, int i, u32 size, struct cudbg_entity_hdr **entity_hdr) { - int rc = 0; struct cudbg_hdr *cudbg_hdr = (struct cudbg_hdr *)outbuf; + int rc = 0; if (cudbg_hdr->hdr_len + (sizeof(struct cudbg_entity_hdr)*i) > size) return CUDBG_STATUS_SMALL_BUFF; @@ -567,127 +889,94 @@ int get_entity_hdr(void *outbuf, int i, u32 size, return rc; } -static int collect_rss(struct cudbg_init *pdbg_init, - struct cudbg_buffer *dbg_buff, - struct cudbg_error *cudbg_err) +int cudbg_collect_rss(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err) { struct adapter *padap = pdbg_init->adap; struct cudbg_buffer scratch_buff; - u32 size; + u32 nentries = padap->chip_params->rss_nentries; int rc = 0; - size = padap->chip_params->rss_nentries * sizeof(u16); - rc = get_scratch_buff(dbg_buff, size, &scratch_buff); - if (rc) - goto err; - + GET_SCRATCH_BUFF(dbg_buff, nentries * sizeof(u16), &scratch_buff); rc = t4_read_rss(padap, (u16 *)scratch_buff.data); if (rc) { - if (pdbg_init->verbose) - pdbg_init->print("%s(), t4_read_rss failed!, rc: %d\n", + pdbg_init->print("%s(), t4_read_rss failed!, rc: %d\n", __func__, rc); cudbg_err->sys_err = rc; goto err1; } - - rc = write_compression_hdr(&scratch_buff, dbg_buff); - if (rc) - goto err1; - - rc = compress_buff(&scratch_buff, dbg_buff); - + WRITE_AND_COMPRESS_SCRATCH_BUFF(&scratch_buff, dbg_buff); err1: release_scratch_buff(&scratch_buff, dbg_buff); -err: return rc; } -static int collect_sw_state(struct cudbg_init *pdbg_init, - struct cudbg_buffer *dbg_buff, - struct cudbg_error *cudbg_err) +int cudbg_collect_sw_state(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err) { struct adapter *padap = pdbg_init->adap; + struct cudbg_param *dbg_param = NULL; struct cudbg_buffer scratch_buff; struct sw_state *swstate; - u32 size; int rc = 0; - size = sizeof(struct sw_state); - - rc = get_scratch_buff(dbg_buff, size, &scratch_buff); - if (rc) - goto err; - + GET_SCRATCH_BUFF(dbg_buff, sizeof(*swstate), &scratch_buff); swstate = (struct sw_state *) scratch_buff.data; - swstate->fw_state = t4_read_reg(padap, A_PCIE_FW); - snprintf(swstate->caller_string, sizeof(swstate->caller_string), "%s", - "FreeBSD"); - swstate->os_type = 0; - - rc = write_compression_hdr(&scratch_buff, dbg_buff); - if (rc) - goto err1; - - rc = compress_buff(&scratch_buff, dbg_buff); - -err1: - release_scratch_buff(&scratch_buff, dbg_buff); -err: + dbg_param = &pdbg_init->dbg_params[CUDBG_SW_STATE_PARAM]; + if (dbg_param->param_type == CUDBG_SW_STATE_PARAM) { + strncpy((char *)swstate->caller_string, + (const char *)dbg_param->u.sw_state_param.caller_string, + sizeof(swstate->caller_string)); + swstate->os_type = dbg_param->u.sw_state_param.os_type; + } else { + strncpy((char *)swstate->caller_string, + "Unknown", sizeof(swstate->caller_string)); + swstate->os_type = CUDBG_OS_TYPE_UNKNOWN; + } + WRITE_AND_RELEASE_SCRATCH_BUFF(&scratch_buff, dbg_buff); return rc; } -static int collect_ddp_stats(struct cudbg_init *pdbg_init, - struct cudbg_buffer *dbg_buff, - struct cudbg_error *cudbg_err) +int cudbg_collect_ddp_stats(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err) { struct adapter *padap = pdbg_init->adap; - struct cudbg_buffer scratch_buff; struct tp_usm_stats *tp_usm_stats_buff; - u32 size; + struct cudbg_buffer 
scratch_buff; int rc = 0; - size = sizeof(struct tp_usm_stats); - - rc = get_scratch_buff(dbg_buff, size, &scratch_buff); - if (rc) - goto err; - + GET_SCRATCH_BUFF(dbg_buff, sizeof(*tp_usm_stats_buff), &scratch_buff); tp_usm_stats_buff = (struct tp_usm_stats *) scratch_buff.data; - - /* spin_lock(&padap->stats_lock); TODO*/ - t4_get_usm_stats(padap, tp_usm_stats_buff, 1); - /* spin_unlock(&padap->stats_lock); TODO*/ - - rc = write_compression_hdr(&scratch_buff, dbg_buff); - if (rc) - goto err1; - - rc = compress_buff(&scratch_buff, dbg_buff); - -err1: - release_scratch_buff(&scratch_buff, dbg_buff); -err: + t4_get_usm_stats(padap, tp_usm_stats_buff, true); + WRITE_AND_RELEASE_SCRATCH_BUFF(&scratch_buff, dbg_buff); return rc; } -static int collect_ulptx_la(struct cudbg_init *pdbg_init, - struct cudbg_buffer *dbg_buff, - struct cudbg_error *cudbg_err) +int cudbg_collect_ulptx_la(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err) { struct adapter *padap = pdbg_init->adap; + struct cudbg_ulptx_la *ulptx_la_buff; struct cudbg_buffer scratch_buff; - struct struct_ulptx_la *ulptx_la_buff; - u32 size, i, j; + struct cudbg_ver_hdr *ver_hdr; int rc = 0; + u32 i, j; - size = sizeof(struct struct_ulptx_la); - - rc = get_scratch_buff(dbg_buff, size, &scratch_buff); - if (rc) - goto err; + GET_SCRATCH_BUFF(dbg_buff, + sizeof(struct cudbg_ver_hdr) + sizeof(*ulptx_la_buff), + &scratch_buff); + ver_hdr = (struct cudbg_ver_hdr *)scratch_buff.data; + ver_hdr->signature = CUDBG_ENTITY_SIGNATURE; + ver_hdr->revision = CUDBG_ULPTX_LA_REV; + ver_hdr->size = sizeof(struct cudbg_ulptx_la); - ulptx_la_buff = (struct struct_ulptx_la *) scratch_buff.data; + ulptx_la_buff = (struct cudbg_ulptx_la *) (scratch_buff.data + + sizeof(struct cudbg_ver_hdr)); for (i = 0; i < CUDBG_NUM_ULPTX; i++) { ulptx_la_buff->rdptr[i] = t4_read_reg(padap, @@ -706,107 +995,75 @@ static int collect_ulptx_la(struct cudbg_init *pdbg_init, } } - rc = write_compression_hdr(&scratch_buff, dbg_buff); - if (rc) - goto err1; - - rc = compress_buff(&scratch_buff, dbg_buff); + /* dumping ULP_TX_ASIC_DEBUG */ + for (i = 0; i < CUDBG_NUM_ULPTX_ASIC_READ; i++) { + t4_write_reg(padap, A_ULP_TX_ASIC_DEBUG_CTRL, 0x1); + ulptx_la_buff->rdptr_asic[i] = + t4_read_reg(padap, A_ULP_TX_ASIC_DEBUG_CTRL); + ulptx_la_buff->rddata_asic[i][0] = + t4_read_reg(padap, A_ULP_TX_ASIC_DEBUG_0); + ulptx_la_buff->rddata_asic[i][1] = + t4_read_reg(padap, A_ULP_TX_ASIC_DEBUG_1); + ulptx_la_buff->rddata_asic[i][2] = + t4_read_reg(padap, A_ULP_TX_ASIC_DEBUG_2); + ulptx_la_buff->rddata_asic[i][3] = + t4_read_reg(padap, A_ULP_TX_ASIC_DEBUG_3); + ulptx_la_buff->rddata_asic[i][4] = + t4_read_reg(padap, A_ULP_TX_ASIC_DEBUG_4); + ulptx_la_buff->rddata_asic[i][5] = + t4_read_reg(padap, PM_RX_BASE_ADDR); + } -err1: - release_scratch_buff(&scratch_buff, dbg_buff); -err: + WRITE_AND_RELEASE_SCRATCH_BUFF(&scratch_buff, dbg_buff); return rc; - } -static int collect_ulprx_la(struct cudbg_init *pdbg_init, - struct cudbg_buffer *dbg_buff, - struct cudbg_error *cudbg_err) +int cudbg_collect_ulprx_la(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err) { struct adapter *padap = pdbg_init->adap; - struct cudbg_buffer scratch_buff; struct struct_ulprx_la *ulprx_la_buff; - u32 size; + struct cudbg_buffer scratch_buff; int rc = 0; - size = sizeof(struct struct_ulprx_la); - - rc = get_scratch_buff(dbg_buff, size, &scratch_buff); - if (rc) - goto err; - + GET_SCRATCH_BUFF(dbg_buff, sizeof(*ulprx_la_buff), 
&scratch_buff); ulprx_la_buff = (struct struct_ulprx_la *) scratch_buff.data; t4_ulprx_read_la(padap, (u32 *)ulprx_la_buff->data); ulprx_la_buff->size = ULPRX_LA_SIZE; - - rc = write_compression_hdr(&scratch_buff, dbg_buff); - if (rc) - goto err1; - - rc = compress_buff(&scratch_buff, dbg_buff); - -err1: - release_scratch_buff(&scratch_buff, dbg_buff); -err: + WRITE_AND_RELEASE_SCRATCH_BUFF(&scratch_buff, dbg_buff); return rc; } -static int collect_cpl_stats(struct cudbg_init *pdbg_init, - struct cudbg_buffer *dbg_buff, - struct cudbg_error *cudbg_err) +int cudbg_collect_cpl_stats(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err) { + struct struct_tp_cpl_stats *tp_cpl_stats_buff; struct adapter *padap = pdbg_init->adap; struct cudbg_buffer scratch_buff; - struct struct_tp_cpl_stats *tp_cpl_stats_buff; - u32 size; int rc = 0; - size = sizeof(struct struct_tp_cpl_stats); - - rc = get_scratch_buff(dbg_buff, size, &scratch_buff); - if (rc) - goto err; - + GET_SCRATCH_BUFF(dbg_buff, sizeof(*tp_cpl_stats_buff), &scratch_buff); tp_cpl_stats_buff = (struct struct_tp_cpl_stats *) scratch_buff.data; tp_cpl_stats_buff->nchan = padap->chip_params->nchan; - - /* spin_lock(&padap->stats_lock); TODO*/ - t4_tp_get_cpl_stats(padap, &tp_cpl_stats_buff->stats, 1); - /* spin_unlock(&padap->stats_lock); TODO*/ - - rc = write_compression_hdr(&scratch_buff, dbg_buff); - if (rc) - goto err1; - - rc = compress_buff(&scratch_buff, dbg_buff); - -err1: - release_scratch_buff(&scratch_buff, dbg_buff); -err: + t4_tp_get_cpl_stats(padap, &tp_cpl_stats_buff->stats, true); + WRITE_AND_RELEASE_SCRATCH_BUFF(&scratch_buff, dbg_buff); return rc; } -static int collect_wc_stats(struct cudbg_init *pdbg_init, - struct cudbg_buffer *dbg_buff, - struct cudbg_error *cudbg_err) +int cudbg_collect_wc_stats(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err) { struct adapter *padap = pdbg_init->adap; - struct cudbg_buffer scratch_buff; struct struct_wc_stats *wc_stats_buff; - u32 val1; - u32 val2; - u32 size; - + struct cudbg_buffer scratch_buff; + u32 val1, val2; int rc = 0; - size = sizeof(struct struct_wc_stats); - - rc = get_scratch_buff(dbg_buff, size, &scratch_buff); - if (rc) - goto err; - + GET_SCRATCH_BUFF(dbg_buff, sizeof(*wc_stats_buff), &scratch_buff); wc_stats_buff = (struct struct_wc_stats *) scratch_buff.data; - if (!is_t4(padap)) { val1 = t4_read_reg(padap, A_SGE_STAT_TOTAL); val2 = t4_read_reg(padap, A_SGE_STAT_MATCH); @@ -816,15 +1073,7 @@ static int collect_wc_stats(struct cudbg_init *pdbg_init, wc_stats_buff->wr_cl_success = 0; wc_stats_buff->wr_cl_fail = 0; } - - rc = write_compression_hdr(&scratch_buff, dbg_buff); - if (rc) - goto err1; - - rc = compress_buff(&scratch_buff, dbg_buff); -err1: - release_scratch_buff(&scratch_buff, dbg_buff); -err: + WRITE_AND_RELEASE_SCRATCH_BUFF(&scratch_buff, dbg_buff); return rc; } @@ -839,8 +1088,8 @@ static int fill_meminfo(struct adapter *padap, { struct struct_mem_desc *md; u32 size, lo, hi; - u32 used, alloc; int n, i, rc = 0; + u32 used, alloc; size = sizeof(struct struct_meminfo); @@ -857,9 +1106,7 @@ static int fill_meminfo(struct adapter *padap, } i = 0; - lo = t4_read_reg(padap, A_MA_TARGET_MEM_ENABLE); - if (lo & F_EDRAM0_ENABLE) { hi = t4_read_reg(padap, A_MA_EDRAM0_BAR); meminfo_buff->avail[i].base = G_EDRAM0_BASE(hi) << 20; @@ -908,6 +1155,16 @@ static int fill_meminfo(struct adapter *padap, meminfo_buff->avail[i].idx = 2; i++; } + + if (lo & F_HMA_MUX) { + hi = 
t4_read_reg(padap, A_MA_EXT_MEMORY1_BAR); + meminfo_buff->avail[i].base = G_EXT_MEM1_BASE(hi) << 20; + meminfo_buff->avail[i].limit = + meminfo_buff->avail[i].base + + (G_EXT_MEM1_SIZE(hi) << 20); + meminfo_buff->avail[i].idx = 5; + i++; + } } if (!i) { /* no memory available */ @@ -1003,10 +1260,6 @@ static int fill_meminfo(struct adapter *padap, md->limit = 0; md++; #ifndef __NO_DRIVER_OCQ_SUPPORT__ - /*md->base = padap->vres.ocq.start;*/ - /*if (adap->vres.ocq.size)*/ - /* md->limit = md->base + adap->vres.ocq.size - 1;*/ - /*else*/ md->idx = ARRAY_SIZE(region); /* hide it */ md++; #endif @@ -1037,6 +1290,10 @@ static int fill_meminfo(struct adapter *padap, meminfo_buff->up_extmem2_hi = hi; lo = t4_read_reg(padap, A_TP_PMM_RX_MAX_PAGE); + for (i = 0, meminfo_buff->free_rx_cnt = 0; i < 2; i++) + meminfo_buff->free_rx_cnt += + G_FREERXPAGECOUNT(t4_read_reg(padap, + A_TP_FLM_FREE_RX_CNT)); meminfo_buff->rx_pages_data[0] = G_PMRXMAXPAGE(lo); meminfo_buff->rx_pages_data[1] = t4_read_reg(padap, A_TP_PMM_RX_PAGE_SIZE) >> 10; @@ -1044,6 +1301,10 @@ static int fill_meminfo(struct adapter *padap, lo = t4_read_reg(padap, A_TP_PMM_TX_MAX_PAGE); hi = t4_read_reg(padap, A_TP_PMM_TX_PAGE_SIZE); + for (i = 0, meminfo_buff->free_tx_cnt = 0; i < 4; i++) + meminfo_buff->free_tx_cnt += + G_FREETXPAGECOUNT(t4_read_reg(padap, + A_TP_FLM_FREE_TX_CNT)); meminfo_buff->tx_pages_data[0] = G_PMTXMAXPAGE(lo); meminfo_buff->tx_pages_data[1] = hi >= (1 << 20) ? (hi >> 20) : (hi >> 10); @@ -1051,6 +1312,10 @@ static int fill_meminfo(struct adapter *padap, hi >= (1 << 20) ? 'M' : 'K'; meminfo_buff->tx_pages_data[3] = 1 << G_PMTXNUMCHN(lo); + meminfo_buff->p_structs = t4_read_reg(padap, A_TP_CMM_MM_MAX_PSTRUCT); + meminfo_buff->pstructs_free_cnt = + G_FREEPSTRUCTCOUNT(t4_read_reg(padap, A_TP_FLM_FREE_PS_CNT)); + for (i = 0; i < 4; i++) { if (chip_id(padap) > CHELSIO_T5) lo = t4_read_reg(padap, @@ -1071,7 +1336,7 @@ static int fill_meminfo(struct adapter *padap, for (i = 0; i < padap->chip_params->nchan; i++) { if (chip_id(padap) > CHELSIO_T5) lo = t4_read_reg(padap, - A_MPS_RX_LPBK_BG_PG_CNT0 + i * 4); + A_MPS_RX_LPBK_BG_PG_CNT0 + i * 4); else lo = t4_read_reg(padap, A_MPS_RX_PG_RSV4 + i * 4); if (is_t5(padap)) { @@ -1088,139 +1353,181 @@ static int fill_meminfo(struct adapter *padap, return rc; } -static int collect_meminfo(struct cudbg_init *pdbg_init, - struct cudbg_buffer *dbg_buff, - struct cudbg_error *cudbg_err) +int cudbg_collect_meminfo(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err) { struct adapter *padap = pdbg_init->adap; - struct cudbg_buffer scratch_buff; struct struct_meminfo *meminfo_buff; + struct cudbg_buffer scratch_buff; + struct cudbg_ver_hdr *ver_hdr; int rc = 0; - u32 size; - - size = sizeof(struct struct_meminfo); - - rc = get_scratch_buff(dbg_buff, size, &scratch_buff); - if (rc) - goto err; - meminfo_buff = (struct struct_meminfo *)scratch_buff.data; + GET_SCRATCH_BUFF(dbg_buff, + sizeof(struct cudbg_ver_hdr) + sizeof(*meminfo_buff), + &scratch_buff); + ver_hdr = (struct cudbg_ver_hdr *)scratch_buff.data; + ver_hdr->signature = CUDBG_ENTITY_SIGNATURE; + ver_hdr->revision = CUDBG_MEMINFO_REV; + ver_hdr->size = sizeof(struct struct_meminfo); + meminfo_buff = (struct struct_meminfo *) (scratch_buff.data + + sizeof(struct cudbg_ver_hdr)); rc = fill_meminfo(padap, meminfo_buff); - if (rc) - goto err; - - rc = write_compression_hdr(&scratch_buff, dbg_buff); if (rc) goto err1; - - rc = compress_buff(&scratch_buff, dbg_buff); + 
WRITE_AND_COMPRESS_SCRATCH_BUFF(&scratch_buff, dbg_buff); err1: release_scratch_buff(&scratch_buff, dbg_buff); -err: return rc; } -static int collect_lb_stats(struct cudbg_init *pdbg_init, - struct cudbg_buffer *dbg_buff, - struct cudbg_error *cudbg_err) +static int get_port_count(struct cudbg_init *pdbg_init) +{ + return (pdbg_init->adap->params.nports); +} + +int cudbg_collect_lb_stats(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err) { struct adapter *padap = pdbg_init->adap; + struct struct_lb_stats *lb_stats_buff; struct cudbg_buffer scratch_buff; struct lb_port_stats *tmp_stats; - struct struct_lb_stats *lb_stats_buff; u32 i, n, size; int rc = 0; - rc = padap->params.nports; - if (rc < 0) - goto err; - - n = rc; + n = get_port_count(pdbg_init); size = sizeof(struct struct_lb_stats) + n * sizeof(struct lb_port_stats); - - rc = get_scratch_buff(dbg_buff, size, &scratch_buff); - if (rc) - goto err; - + GET_SCRATCH_BUFF(dbg_buff, size, &scratch_buff); lb_stats_buff = (struct struct_lb_stats *) scratch_buff.data; - lb_stats_buff->nchan = n; tmp_stats = lb_stats_buff->s; - for (i = 0; i < n; i += 2, tmp_stats += 2) { t4_get_lb_stats(padap, i, tmp_stats); t4_get_lb_stats(padap, i + 1, tmp_stats+1); } - - rc = write_compression_hdr(&scratch_buff, dbg_buff); - if (rc) - goto err1; - - rc = compress_buff(&scratch_buff, dbg_buff); -err1: - release_scratch_buff(&scratch_buff, dbg_buff); -err: + WRITE_AND_RELEASE_SCRATCH_BUFF(&scratch_buff, dbg_buff); return rc; } -static int collect_rdma_stats(struct cudbg_init *pdbg_init, - struct cudbg_buffer *dbg_buff, - struct cudbg_error *cudbg_er) +int cudbg_collect_rdma_stats(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_er) { struct adapter *padap = pdbg_init->adap; - struct cudbg_buffer scratch_buff; struct tp_rdma_stats *rdma_stats_buff; - u32 size; + struct cudbg_buffer scratch_buff; int rc = 0; - size = sizeof(struct tp_rdma_stats); + GET_SCRATCH_BUFF(dbg_buff, sizeof(*rdma_stats_buff), &scratch_buff); + rdma_stats_buff = (struct tp_rdma_stats *) scratch_buff.data; + t4_tp_get_rdma_stats(padap, rdma_stats_buff, true); + WRITE_AND_RELEASE_SCRATCH_BUFF(&scratch_buff, dbg_buff); + return rc; +} - rc = get_scratch_buff(dbg_buff, size, &scratch_buff); - if (rc) - goto err; +static int cudbg_get_module_eeprom(struct cudbg_init *pdbg_init, + struct port_data *pi, u8 *data, u32 data_len) +{ + struct adapter *adapter = pdbg_init->adap; + int ret = 0, offset = 0; + u32 len = data_len; + + memset(data, 0, data_len); + if (offset + len <= I2C_PAGE_SIZE) + return t4_i2c_rd(adapter, adapter->mbox, pi->tx_chan, + I2C_DEV_ADDR_A0, offset, len, data); + + /* offset + len spans 0xa0 and 0xa1 pages */ + if (offset <= I2C_PAGE_SIZE) { + /* read 0xa0 page */ + len = I2C_PAGE_SIZE - offset; + ret = t4_i2c_rd(adapter, adapter->mbox, pi->tx_chan, + I2C_DEV_ADDR_A0, offset, len, data); + if (ret) + return ret; + offset = I2C_PAGE_SIZE; + /* Remaining bytes to be read from second page = + * Total length - bytes read from first page + */ + len = data_len - len; + } + /* Read additional optical diagnostics from page 0xa2 if supported */ + return t4_i2c_rd(adapter, adapter->mbox, pi->tx_chan, I2C_DEV_ADDR_A2, + offset, len, &data[data_len - len]); +} - rdma_stats_buff = (struct tp_rdma_stats *) scratch_buff.data; +int cudbg_collect_module_eeprom(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err) +{ + struct cudbg_module_eeprom *mod_eeprom; + 
struct cudbg_modinfo modinfo[MAX_NPORTS]; + struct cudbg_buffer scratch_buff; + struct port_data pi[MAX_NPORTS]; + u32 size, offset; + u8 nports, i; + int rc = 0; - /* spin_lock(&padap->stats_lock); TODO*/ - t4_tp_get_rdma_stats(padap, rdma_stats_buff, 1); - /* spin_unlock(&padap->stats_lock); TODO*/ + nports = get_port_count(pdbg_init); + size = sizeof(*mod_eeprom); + for (i = 0; i < nports; i++) { + rc = cudbg_get_portinfo(pdbg_init, i, &pi[i]); + if (rc) { + cudbg_err->sys_err = rc; + return rc; + } - rc = write_compression_hdr(&scratch_buff, dbg_buff); - if (rc) - goto err1; + rc = cudbg_get_module_info(pdbg_init, &pi[i], &modinfo[i]); + if (rc) { + modinfo[i].eeprom_len = 0; + rc = 0; + } + size += modinfo[i].eeprom_len; + } - rc = compress_buff(&scratch_buff, dbg_buff); -err1: - release_scratch_buff(&scratch_buff, dbg_buff); -err: + GET_SCRATCH_BUFF(dbg_buff, size, &scratch_buff); + memset(scratch_buff.data, 0, size); + mod_eeprom = (struct cudbg_module_eeprom *)scratch_buff.data; + mod_eeprom->ver_hdr.signature = CUDBG_ENTITY_SIGNATURE; + mod_eeprom->ver_hdr.revision = CUDBG_MODEEPROM_REV; + /* size of structure of version CUDBG_MODEEPROM_REV */ + mod_eeprom->ver_hdr.size = sizeof(struct cudbg_module_eeprom) - + sizeof(struct cudbg_ver_hdr); + + mod_eeprom->nports = nports; + memcpy(mod_eeprom->modinfo, modinfo, + nports * sizeof(struct cudbg_modinfo)); + + offset = 0; + for (i = 0; i < nports; i++) { + cudbg_get_module_eeprom(pdbg_init, &pi[i], + mod_eeprom->data + offset, + modinfo[i].eeprom_len); + offset += modinfo[i].eeprom_len; + } + WRITE_AND_RELEASE_SCRATCH_BUFF(&scratch_buff, dbg_buff); return rc; } -static int collect_clk_info(struct cudbg_init *pdbg_init, - struct cudbg_buffer *dbg_buff, - struct cudbg_error *cudbg_err) +int cudbg_collect_clk_info(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err) { - struct cudbg_buffer scratch_buff; struct adapter *padap = pdbg_init->adap; struct struct_clk_info *clk_info_buff; + struct cudbg_buffer scratch_buff; u64 tp_tick_us; - int size; int rc = 0; - if (!padap->params.vpd.cclk) { - rc = CUDBG_STATUS_CCLK_NOT_DEFINED; - goto err; - } - - size = sizeof(struct struct_clk_info); - rc = get_scratch_buff(dbg_buff, size, &scratch_buff); - if (rc) - goto err; + if (!padap->params.vpd.cclk) + return CUDBG_STATUS_CCLK_NOT_DEFINED; + GET_SCRATCH_BUFF(dbg_buff, sizeof(*clk_info_buff), &scratch_buff); clk_info_buff = (struct struct_clk_info *) scratch_buff.data; - clk_info_buff->cclk_ps = 1000000000 / padap->params.vpd.cclk; /* in ps */ clk_info_buff->res = t4_read_reg(padap, A_TP_TIMER_RESOLUTION); @@ -1231,707 +1538,577 @@ static int collect_clk_info(struct cudbg_init *pdbg_init, clk_info_buff->dack_timer = ((clk_info_buff->cclk_ps << clk_info_buff->dack_re) / 1000000) * t4_read_reg(padap, A_TP_DACK_TIMER); - clk_info_buff->retransmit_min = tp_tick_us * t4_read_reg(padap, A_TP_RXT_MIN); clk_info_buff->retransmit_max = tp_tick_us * t4_read_reg(padap, A_TP_RXT_MAX); - clk_info_buff->persist_timer_min = tp_tick_us * t4_read_reg(padap, A_TP_PERS_MIN); clk_info_buff->persist_timer_max = tp_tick_us * t4_read_reg(padap, A_TP_PERS_MAX); - clk_info_buff->keepalive_idle_timer = tp_tick_us * t4_read_reg(padap, A_TP_KEEP_IDLE); clk_info_buff->keepalive_interval = tp_tick_us * t4_read_reg(padap, A_TP_KEEP_INTVL); - clk_info_buff->initial_srtt = tp_tick_us * G_INITSRTT(t4_read_reg(padap, A_TP_INIT_SRTT)); clk_info_buff->finwait2_timer = tp_tick_us * t4_read_reg(padap, A_TP_FINWAIT2_TIMER); - - rc = 
write_compression_hdr(&scratch_buff, dbg_buff); - - if (rc) - goto err1; - - rc = compress_buff(&scratch_buff, dbg_buff); -err1: - release_scratch_buff(&scratch_buff, dbg_buff); -err: + WRITE_AND_RELEASE_SCRATCH_BUFF(&scratch_buff, dbg_buff); return rc; - } -static int collect_macstats(struct cudbg_init *pdbg_init, - struct cudbg_buffer *dbg_buff, - struct cudbg_error *cudbg_err) +int cudbg_collect_macstats(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err) { + struct struct_mac_stats_rev1 *mac_stats_buff; struct adapter *padap = pdbg_init->adap; struct cudbg_buffer scratch_buff; - struct struct_mac_stats_rev1 *mac_stats_buff; - u32 i, n, size; int rc = 0; + u32 i, n = get_port_count(pdbg_init); - rc = padap->params.nports; - if (rc < 0) - goto err; - - n = rc; - size = sizeof(struct struct_mac_stats_rev1); - - rc = get_scratch_buff(dbg_buff, size, &scratch_buff); - if (rc) - goto err; - + GET_SCRATCH_BUFF(dbg_buff, sizeof(*mac_stats_buff), &scratch_buff); mac_stats_buff = (struct struct_mac_stats_rev1 *) scratch_buff.data; - mac_stats_buff->ver_hdr.signature = CUDBG_ENTITY_SIGNATURE; mac_stats_buff->ver_hdr.revision = CUDBG_MAC_STATS_REV; mac_stats_buff->ver_hdr.size = sizeof(struct struct_mac_stats_rev1) - sizeof(struct cudbg_ver_hdr); - mac_stats_buff->port_count = n; - for (i = 0; i < mac_stats_buff->port_count; i++) + for (i = 0; i < mac_stats_buff->port_count; i++) { t4_get_port_stats(padap, i, &mac_stats_buff->stats[i]); - - rc = write_compression_hdr(&scratch_buff, dbg_buff); - if (rc) - goto err1; - - rc = compress_buff(&scratch_buff, dbg_buff); -err1: - release_scratch_buff(&scratch_buff, dbg_buff); -err: + } + WRITE_AND_RELEASE_SCRATCH_BUFF(&scratch_buff, dbg_buff); return rc; } -static int collect_cim_pif_la(struct cudbg_init *pdbg_init, - struct cudbg_buffer *dbg_buff, - struct cudbg_error *cudbg_err) +int cudbg_collect_cim_pif_la(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err) { struct adapter *padap = pdbg_init->adap; - struct cudbg_buffer scratch_buff; struct cim_pif_la *cim_pif_la_buff; - u32 size; + struct cudbg_buffer scratch_buff; int rc = 0; + u32 size; - size = sizeof(struct cim_pif_la) + - 2 * CIM_PIFLA_SIZE * 6 * sizeof(u32); - - rc = get_scratch_buff(dbg_buff, size, &scratch_buff); - if (rc) - goto err; - + size = sizeof(*cim_pif_la_buff) + 2 * CIM_PIFLA_SIZE * 6 * sizeof(u32); + GET_SCRATCH_BUFF(dbg_buff, size, &scratch_buff); cim_pif_la_buff = (struct cim_pif_la *) scratch_buff.data; cim_pif_la_buff->size = CIM_PIFLA_SIZE; - t4_cim_read_pif_la(padap, (u32 *)cim_pif_la_buff->data, (u32 *)cim_pif_la_buff->data + 6 * CIM_PIFLA_SIZE, NULL, NULL); - - rc = write_compression_hdr(&scratch_buff, dbg_buff); - if (rc) - goto err1; - - rc = compress_buff(&scratch_buff, dbg_buff); -err1: - release_scratch_buff(&scratch_buff, dbg_buff); -err: + WRITE_AND_RELEASE_SCRATCH_BUFF(&scratch_buff, dbg_buff); return rc; } -static int collect_tp_la(struct cudbg_init *pdbg_init, - struct cudbg_buffer *dbg_buff, - struct cudbg_error *cudbg_err) +int cudbg_collect_tp_la(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err) { struct adapter *padap = pdbg_init->adap; struct cudbg_buffer scratch_buff; struct struct_tp_la *tp_la_buff; - u32 size; int rc = 0; + u32 size; size = sizeof(struct struct_tp_la) + TPLA_SIZE * sizeof(u64); - - rc = get_scratch_buff(dbg_buff, size, &scratch_buff); - if (rc) - goto err; - + GET_SCRATCH_BUFF(dbg_buff, size, 
&scratch_buff); tp_la_buff = (struct struct_tp_la *) scratch_buff.data; - tp_la_buff->mode = G_DBGLAMODE(t4_read_reg(padap, A_TP_DBG_LA_CONFIG)); t4_tp_read_la(padap, (u64 *)tp_la_buff->data, NULL); - - rc = write_compression_hdr(&scratch_buff, dbg_buff); - if (rc) - goto err1; - - rc = compress_buff(&scratch_buff, dbg_buff); -err1: - release_scratch_buff(&scratch_buff, dbg_buff); -err: + WRITE_AND_RELEASE_SCRATCH_BUFF(&scratch_buff, dbg_buff); return rc; } -static int collect_fcoe_stats(struct cudbg_init *pdbg_init, - struct cudbg_buffer *dbg_buff, - struct cudbg_error *cudbg_err) +int cudbg_collect_fcoe_stats(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err) { + struct struct_tp_fcoe_stats *tp_fcoe_stats_buff; struct adapter *padap = pdbg_init->adap; struct cudbg_buffer scratch_buff; - struct struct_tp_fcoe_stats *tp_fcoe_stats_buff; - u32 size; - int rc = 0; - - size = sizeof(struct struct_tp_fcoe_stats); - - rc = get_scratch_buff(dbg_buff, size, &scratch_buff); - if (rc) - goto err; + int i, rc = 0; + GET_SCRATCH_BUFF(dbg_buff, sizeof(*tp_fcoe_stats_buff), &scratch_buff); tp_fcoe_stats_buff = (struct struct_tp_fcoe_stats *) scratch_buff.data; - - /* spin_lock(&padap->stats_lock); TODO*/ - t4_get_fcoe_stats(padap, 0, &tp_fcoe_stats_buff->stats[0], 1); - t4_get_fcoe_stats(padap, 1, &tp_fcoe_stats_buff->stats[1], 1); - if (padap->chip_params->nchan == NCHAN) { - t4_get_fcoe_stats(padap, 2, &tp_fcoe_stats_buff->stats[2], 1); - t4_get_fcoe_stats(padap, 3, &tp_fcoe_stats_buff->stats[3], 1); + for (i = 0; i < padap->chip_params->nchan; i++) { + t4_get_fcoe_stats(padap, i, &tp_fcoe_stats_buff->stats[i], + true); } - /* spin_unlock(&padap->stats_lock); TODO*/ - - rc = write_compression_hdr(&scratch_buff, dbg_buff); - if (rc) - goto err1; - - rc = compress_buff(&scratch_buff, dbg_buff); -err1: - release_scratch_buff(&scratch_buff, dbg_buff); -err: + tp_fcoe_stats_buff->nchan = padap->chip_params->nchan; + WRITE_AND_RELEASE_SCRATCH_BUFF(&scratch_buff, dbg_buff); return rc; } -static int collect_tp_err_stats(struct cudbg_init *pdbg_init, - struct cudbg_buffer *dbg_buff, - struct cudbg_error *cudbg_err) +int cudbg_collect_tp_err_stats(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err) { + struct struct_tp_err_stats *tp_err_stats_buff; struct adapter *padap = pdbg_init->adap; struct cudbg_buffer scratch_buff; - struct struct_tp_err_stats *tp_err_stats_buff; - u32 size; int rc = 0; - size = sizeof(struct struct_tp_err_stats); - - rc = get_scratch_buff(dbg_buff, size, &scratch_buff); - if (rc) - goto err; - + GET_SCRATCH_BUFF(dbg_buff, sizeof(*tp_err_stats_buff), &scratch_buff); tp_err_stats_buff = (struct struct_tp_err_stats *) scratch_buff.data; - - /* spin_lock(&padap->stats_lock); TODO*/ - t4_tp_get_err_stats(padap, &tp_err_stats_buff->stats, 1); - /* spin_unlock(&padap->stats_lock); TODO*/ + t4_tp_get_err_stats(padap, &tp_err_stats_buff->stats, true); tp_err_stats_buff->nchan = padap->chip_params->nchan; - - rc = write_compression_hdr(&scratch_buff, dbg_buff); - if (rc) - goto err1; - - rc = compress_buff(&scratch_buff, dbg_buff); -err1: - release_scratch_buff(&scratch_buff, dbg_buff); -err: + WRITE_AND_RELEASE_SCRATCH_BUFF(&scratch_buff, dbg_buff); return rc; } -static int collect_tcp_stats(struct cudbg_init *pdbg_init, - struct cudbg_buffer *dbg_buff, - struct cudbg_error *cudbg_err) +int cudbg_collect_tcp_stats(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error 
*cudbg_err) { struct adapter *padap = pdbg_init->adap; - struct cudbg_buffer scratch_buff; struct struct_tcp_stats *tcp_stats_buff; - u32 size; + struct cudbg_buffer scratch_buff; int rc = 0; - size = sizeof(struct struct_tcp_stats); - - rc = get_scratch_buff(dbg_buff, size, &scratch_buff); - if (rc) - goto err; - + GET_SCRATCH_BUFF(dbg_buff, sizeof(*tcp_stats_buff), &scratch_buff); tcp_stats_buff = (struct struct_tcp_stats *) scratch_buff.data; - - /* spin_lock(&padap->stats_lock); TODO*/ - t4_tp_get_tcp_stats(padap, &tcp_stats_buff->v4, &tcp_stats_buff->v6, 1); - /* spin_unlock(&padap->stats_lock); TODO*/ - - rc = write_compression_hdr(&scratch_buff, dbg_buff); - if (rc) - goto err1; - - rc = compress_buff(&scratch_buff, dbg_buff); -err1: - release_scratch_buff(&scratch_buff, dbg_buff); -err: + t4_tp_get_tcp_stats(padap, &tcp_stats_buff->v4, &tcp_stats_buff->v6, true); + WRITE_AND_RELEASE_SCRATCH_BUFF(&scratch_buff, dbg_buff); return rc; } -static int collect_hw_sched(struct cudbg_init *pdbg_init, - struct cudbg_buffer *dbg_buff, - struct cudbg_error *cudbg_err) +int cudbg_collect_hw_sched(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err) { struct adapter *padap = pdbg_init->adap; - struct cudbg_buffer scratch_buff; struct struct_hw_sched *hw_sched_buff; - u32 size; + struct cudbg_buffer scratch_buff; int i, rc = 0; - if (!padap->params.vpd.cclk) { - rc = CUDBG_STATUS_CCLK_NOT_DEFINED; - goto err; - } - - size = sizeof(struct struct_hw_sched); - rc = get_scratch_buff(dbg_buff, size, &scratch_buff); - if (rc) - goto err; + if (!padap->params.vpd.cclk) + return CUDBG_STATUS_CCLK_NOT_DEFINED; + GET_SCRATCH_BUFF(dbg_buff, sizeof(*hw_sched_buff), &scratch_buff); hw_sched_buff = (struct struct_hw_sched *) scratch_buff.data; - hw_sched_buff->map = t4_read_reg(padap, A_TP_TX_MOD_QUEUE_REQ_MAP); hw_sched_buff->mode = G_TIMERMODE(t4_read_reg(padap, A_TP_MOD_CONFIG)); t4_read_pace_tbl(padap, hw_sched_buff->pace_tab); - for (i = 0; i < NTX_SCHED; ++i) { - t4_get_tx_sched(padap, i, &hw_sched_buff->kbps[i], - &hw_sched_buff->ipg[i], 1); + t4_get_tx_sched(padap, i, &(hw_sched_buff->kbps[i]), + &(hw_sched_buff->ipg[i]), true); } - - rc = write_compression_hdr(&scratch_buff, dbg_buff); - if (rc) - goto err1; - - rc = compress_buff(&scratch_buff, dbg_buff); -err1: - release_scratch_buff(&scratch_buff, dbg_buff); -err: + WRITE_AND_RELEASE_SCRATCH_BUFF(&scratch_buff, dbg_buff); return rc; } -static int collect_pm_stats(struct cudbg_init *pdbg_init, - struct cudbg_buffer *dbg_buff, - struct cudbg_error *cudbg_err) +int cudbg_collect_pm_stats(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err) { struct adapter *padap = pdbg_init->adap; - struct cudbg_buffer scratch_buff; struct struct_pm_stats *pm_stats_buff; - u32 size; + struct cudbg_buffer scratch_buff; int rc = 0; - size = sizeof(struct struct_pm_stats); - - rc = get_scratch_buff(dbg_buff, size, &scratch_buff); - if (rc) - goto err; - + GET_SCRATCH_BUFF(dbg_buff, sizeof(*pm_stats_buff), &scratch_buff); pm_stats_buff = (struct struct_pm_stats *) scratch_buff.data; - + cudbg_access_lock_acquire(pdbg_init); t4_pmtx_get_stats(padap, pm_stats_buff->tx_cnt, pm_stats_buff->tx_cyc); t4_pmrx_get_stats(padap, pm_stats_buff->rx_cnt, pm_stats_buff->rx_cyc); - - rc = write_compression_hdr(&scratch_buff, dbg_buff); - if (rc) - goto err1; - - rc = compress_buff(&scratch_buff, dbg_buff); -err1: - release_scratch_buff(&scratch_buff, dbg_buff); -err: + 
cudbg_access_lock_release(pdbg_init); + WRITE_AND_RELEASE_SCRATCH_BUFF(&scratch_buff, dbg_buff); return rc; } -static int collect_path_mtu(struct cudbg_init *pdbg_init, - struct cudbg_buffer *dbg_buff, - struct cudbg_error *cudbg_err) +int cudbg_collect_path_mtu(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err) { struct adapter *padap = pdbg_init->adap; struct cudbg_buffer scratch_buff; - u32 size; int rc = 0; - size = NMTUS * sizeof(u16); - - rc = get_scratch_buff(dbg_buff, size, &scratch_buff); - if (rc) - goto err; - + GET_SCRATCH_BUFF(dbg_buff, NMTUS * sizeof(u16), &scratch_buff); t4_read_mtu_tbl(padap, (u16 *)scratch_buff.data, NULL); - - rc = write_compression_hdr(&scratch_buff, dbg_buff); - if (rc) - goto err1; - - rc = compress_buff(&scratch_buff, dbg_buff); -err1: - release_scratch_buff(&scratch_buff, dbg_buff); -err: + WRITE_AND_RELEASE_SCRATCH_BUFF(&scratch_buff, dbg_buff); return rc; } -static int collect_rss_key(struct cudbg_init *pdbg_init, - struct cudbg_buffer *dbg_buff, - struct cudbg_error *cudbg_err) +int cudbg_collect_rss_key(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err) { struct adapter *padap = pdbg_init->adap; struct cudbg_buffer scratch_buff; - u32 size; - int rc = 0; - size = 10 * sizeof(u32); - rc = get_scratch_buff(dbg_buff, size, &scratch_buff); - if (rc) - goto err; - - t4_read_rss_key(padap, (u32 *)scratch_buff.data, 1); - - rc = write_compression_hdr(&scratch_buff, dbg_buff); - if (rc) - goto err1; - - rc = compress_buff(&scratch_buff, dbg_buff); -err1: - release_scratch_buff(&scratch_buff, dbg_buff); -err: + GET_SCRATCH_BUFF(dbg_buff, 10 * sizeof(u32), &scratch_buff); + t4_read_rss_key(padap, (u32 *)scratch_buff.data, true); + WRITE_AND_RELEASE_SCRATCH_BUFF(&scratch_buff, dbg_buff); return rc; } -static int collect_rss_config(struct cudbg_init *pdbg_init, - struct cudbg_buffer *dbg_buff, - struct cudbg_error *cudbg_err) +int cudbg_collect_rss_config(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err) { struct adapter *padap = pdbg_init->adap; struct cudbg_buffer scratch_buff; struct rss_config *rss_conf; int rc; - u32 size; - - size = sizeof(struct rss_config); - - rc = get_scratch_buff(dbg_buff, size, &scratch_buff); - if (rc) - goto err; + GET_SCRATCH_BUFF(dbg_buff, sizeof(struct rss_config), &scratch_buff); rss_conf = (struct rss_config *)scratch_buff.data; - rss_conf->tp_rssconf = t4_read_reg(padap, A_TP_RSS_CONFIG); rss_conf->tp_rssconf_tnl = t4_read_reg(padap, A_TP_RSS_CONFIG_TNL); rss_conf->tp_rssconf_ofd = t4_read_reg(padap, A_TP_RSS_CONFIG_OFD); rss_conf->tp_rssconf_syn = t4_read_reg(padap, A_TP_RSS_CONFIG_SYN); rss_conf->tp_rssconf_vrt = t4_read_reg(padap, A_TP_RSS_CONFIG_VRT); rss_conf->tp_rssconf_cng = t4_read_reg(padap, A_TP_RSS_CONFIG_CNG); - rss_conf->chip = padap->params.chipid; - - rc = write_compression_hdr(&scratch_buff, dbg_buff); - if (rc) - goto err1; - - rc = compress_buff(&scratch_buff, dbg_buff); - -err1: - release_scratch_buff(&scratch_buff, dbg_buff); -err: + rss_conf->chip = chip_id(padap); + WRITE_AND_RELEASE_SCRATCH_BUFF(&scratch_buff, dbg_buff); return rc; } -static int collect_rss_vf_config(struct cudbg_init *pdbg_init, - struct cudbg_buffer *dbg_buff, - struct cudbg_error *cudbg_err) +int cudbg_collect_rss_vf_config(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err) { struct adapter *padap = pdbg_init->adap; struct cudbg_buffer scratch_buff; 
struct rss_vf_conf *vfconf; int vf, rc, vf_count; - u32 size; vf_count = padap->chip_params->vfcount; - size = vf_count * sizeof(*vfconf); - - rc = get_scratch_buff(dbg_buff, size, &scratch_buff); - if (rc) - goto err; - - vfconf = (struct rss_vf_conf *)scratch_buff.data; - + GET_SCRATCH_BUFF(dbg_buff, vf_count * sizeof(*vfconf), &scratch_buff); + vfconf = (struct rss_vf_conf *)scratch_buff.data; for (vf = 0; vf < vf_count; vf++) { t4_read_rss_vf_config(padap, vf, &vfconf[vf].rss_vf_vfl, - &vfconf[vf].rss_vf_vfh, 1); + &vfconf[vf].rss_vf_vfh, true); } - - rc = write_compression_hdr(&scratch_buff, dbg_buff); - if (rc) - goto err1; - - rc = compress_buff(&scratch_buff, dbg_buff); - -err1: - release_scratch_buff(&scratch_buff, dbg_buff); -err: + WRITE_AND_RELEASE_SCRATCH_BUFF(&scratch_buff, dbg_buff); return rc; } -static int collect_rss_pf_config(struct cudbg_init *pdbg_init, - struct cudbg_buffer *dbg_buff, - struct cudbg_error *cudbg_err) +int cudbg_collect_rss_pf_config(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err) { + struct adapter *padap = pdbg_init->adap; + u32 rss_pf_map = 0, rss_pf_mask = 0; struct cudbg_buffer scratch_buff; struct rss_pf_conf *pfconf; - struct adapter *padap = pdbg_init->adap; - u32 rss_pf_map, rss_pf_mask, size; - int pf, rc; - - size = 8 * sizeof(*pfconf); - - rc = get_scratch_buff(dbg_buff, size, &scratch_buff); - if (rc) - goto err; - - pfconf = (struct rss_pf_conf *)scratch_buff.data; - - rss_pf_map = t4_read_rss_pf_map(padap, 1); - rss_pf_mask = t4_read_rss_pf_mask(padap, 1); + int pf, rc = 0; + GET_SCRATCH_BUFF(dbg_buff, 8 * sizeof(*pfconf), &scratch_buff); + pfconf = (struct rss_pf_conf *)scratch_buff.data; + rss_pf_map = t4_read_rss_pf_map(padap, true); + rss_pf_mask = t4_read_rss_pf_mask(padap, true); for (pf = 0; pf < 8; pf++) { pfconf[pf].rss_pf_map = rss_pf_map; pfconf[pf].rss_pf_mask = rss_pf_mask; - /* no return val */ - t4_read_rss_pf_config(padap, pf, &pfconf[pf].rss_pf_config, 1); + t4_read_rss_pf_config(padap, pf, &pfconf[pf].rss_pf_config, true); } + WRITE_AND_RELEASE_SCRATCH_BUFF(&scratch_buff, dbg_buff); + return rc; +} - rc = write_compression_hdr(&scratch_buff, dbg_buff); - if (rc) - goto err1; +/* Fetch the @region_name's start and end from @meminfo. */ +int cudbg_get_mem_region(struct struct_meminfo *meminfo, + const char *region_name, + struct struct_mem_desc *mem_desc) +{ + u32 i, idx = 0, found = 0; - rc = compress_buff(&scratch_buff, dbg_buff); -err1: - release_scratch_buff(&scratch_buff, dbg_buff); -err: - return rc; + for (i = 0; i < ARRAY_SIZE(region); i++) { + if (!strcmp(region[i], region_name)) { + found = 1; + idx = i; + break; + } + } + if (!found) + return -EINVAL; + + found = 0; + for (i = 0; i < meminfo->mem_c; i++) { + if (meminfo->mem[i].idx >= ARRAY_SIZE(region)) + continue; /* Skip holes */ + + if (!(meminfo->mem[i].limit)) + meminfo->mem[i].limit = + i < meminfo->mem_c - 1 ? + meminfo->mem[i + 1].base - 1 : ~0; + + if (meminfo->mem[i].idx == idx) { + memcpy(mem_desc, &meminfo->mem[i], + sizeof(struct struct_mem_desc)); + found = 1; + break; + } + } + if (!found) + return -EINVAL; + + return 0; } -static int check_valid(u32 *buf, int type) +/* Fetch and update the start and end of the requested memory region w.r.t 0 + * in the corresponding EDC/MC/HMA. 
+ */ +void cudbg_get_mem_relative(struct struct_meminfo *meminfo, + u32 *out_base, u32 *out_end, + u8 *mem_type) { - int index; - int bit; - int bit_pos = 0; + u32 base = 0, end = 0; + u8 i; - switch (type) { - case CTXT_EGRESS: - bit_pos = 176; - break; - case CTXT_INGRESS: - bit_pos = 141; - break; - case CTXT_FLM: - bit_pos = 89; + for (i = 0; i < 4; i++) { + if (i && !meminfo->avail[i].base) + continue; + + if (*out_base > meminfo->avail[i].limit) + continue; + + base = *out_base - meminfo->avail[i].base; + end = *out_end - meminfo->avail[i].base; break; } - index = bit_pos / 32; - bit = bit_pos % 32; - return buf[index] & (1U << bit); + *out_base = base; + *out_end = end; + /* Check if both MC0 and MC1 exist. If we stopped at MC0, then see + * if the actual index corresponds to MC1 (4) or above. if yes, + * then MC0 is not present and hence update the real index + * appropriately. See fill_meminfo for more information. + */ + if (i == MEM_MC0) + if (meminfo->avail[i].idx > 3) + i += meminfo->avail[i].idx - 3; + *mem_type = i; } -/** - * Get EGRESS, INGRESS, FLM, and CNM max qid. - * - * For EGRESS and INGRESS, do the following calculation. - * max_qid = (DBQ/IMSG context region size in bytes) / - * (size of context in bytes). - * - * For FLM, do the following calculation. - * max_qid = (FLM cache region size in bytes) / - * ((number of pointers cached in EDRAM) * 8 (bytes per pointer)). - * - * There's a 1-to-1 mapping between FLM and CNM if there's no header splitting - * enabled; i.e., max CNM qid is equal to max FLM qid. However, if header - * splitting is enabled, then max CNM qid is half of max FLM qid. - */ -static int get_max_ctxt_qid(struct adapter *padap, - struct struct_meminfo *meminfo, - u32 *max_ctx_qid, u8 nelem) +static int cudbg_get_ctxt_region_info(struct adapter *padap, + struct struct_meminfo *meminfo, + struct struct_region_info *ctx_info) { - u32 i, idx, found = 0; + struct struct_mem_desc mem_desc = { 0 }; + u32 i, value; + u8 flq; + int rc; - if (nelem != (CTXT_CNM + 1)) - return -EINVAL; + /* Get EGRESS and INGRESS context region size */ + for (i = CTXT_EGRESS; i <= CTXT_INGRESS; i++) { + memset(&mem_desc, 0, sizeof(struct struct_mem_desc)); + rc = cudbg_get_mem_region(meminfo, region[i], &mem_desc); + if (rc) { + ctx_info[i].exist = false; + } else { + ctx_info[i].exist = true; + ctx_info[i].start = mem_desc.base; + ctx_info[i].end = mem_desc.limit; + } + } - for (i = 0; i < meminfo->mem_c; i++) { - if (meminfo->mem[i].idx >= ARRAY_SIZE(region)) - continue; /* skip holes */ + /* Get FLM and CNM max qid. */ + value = t4_read_reg(padap, A_SGE_FLM_CFG); - idx = meminfo->mem[i].idx; - /* Get DBQ, IMSG, and FLM context region size */ - if (idx <= CTXT_FLM) { - if (!(meminfo->mem[i].limit)) - meminfo->mem[i].limit = - i < meminfo->mem_c - 1 ? - meminfo->mem[i + 1].base - 1 : ~0; - - if (idx < CTXT_FLM) { - /* Get EGRESS and INGRESS max qid. */ - max_ctx_qid[idx] = (meminfo->mem[i].limit - - meminfo->mem[i].base + 1) / - CUDBG_CTXT_SIZE_BYTES; - found++; - } else { - /* Get FLM and CNM max qid. */ - u32 value, edram_ptr_count; - u8 bytes_per_ptr = 8; - u8 nohdr; + /* Get number of data freelist queues */ + flq = G_HDRSTARTFLQ(value); + ctx_info[CTXT_FLM].exist = true; + ctx_info[CTXT_FLM].end = (CUDBG_MAX_FL_QIDS >> flq) * SGE_CTXT_SIZE; - value = t4_read_reg(padap, A_SGE_FLM_CFG); + /* The number of CONM contexts are same as number of freelist + * queues. 
+ */ + ctx_info[CTXT_CNM].exist = true; + ctx_info[CTXT_CNM].end = ctx_info[CTXT_FLM].end; - /* Check if header splitting is enabled. */ - nohdr = (value >> S_NOHDR) & 1U; + return 0; +} - /* Get the number of pointers in EDRAM per - * qid in units of 32. - */ - edram_ptr_count = 32 * - (1U << G_EDRAMPTRCNT(value)); +int cudbg_dump_context_size(struct adapter *padap) +{ + struct struct_region_info region_info[CTXT_CNM + 1] = {{ 0 }}; + struct struct_meminfo meminfo = {{{ 0 }}}; + u32 i, size = 0; + int rc; - /* EDRAMPTRCNT value of 3 is reserved. - * So don't exceed 128. - */ - if (edram_ptr_count > 128) - edram_ptr_count = 128; - - max_ctx_qid[idx] = (meminfo->mem[i].limit - - meminfo->mem[i].base + 1) / - (edram_ptr_count * - bytes_per_ptr); - found++; - - /* CNM has 1-to-1 mapping with FLM. - * However, if header splitting is enabled, - * then max CNM qid is half of max FLM qid. - */ - max_ctx_qid[CTXT_CNM] = nohdr ? - max_ctx_qid[idx] : - max_ctx_qid[idx] >> 1; + rc = fill_meminfo(padap, &meminfo); + if (rc) + return rc; - /* One more increment for CNM */ - found++; - } + /* Get max valid qid for each type of queue */ + rc = cudbg_get_ctxt_region_info(padap, &meminfo, region_info); + if (rc) + return rc; + + for (i = 0; i < CTXT_CNM; i++) { + if (!region_info[i].exist) { + if (i == CTXT_EGRESS || i == CTXT_INGRESS) + size += CUDBG_LOWMEM_MAX_CTXT_QIDS * + SGE_CTXT_SIZE; + continue; } - if (found == nelem) - break; + + size += (region_info[i].end - region_info[i].start + 1) / + SGE_CTXT_SIZE; } + return size * sizeof(struct cudbg_ch_cntxt); +} - /* Sanity check. Ensure the values are within known max. */ - max_ctx_qid[CTXT_EGRESS] = min_t(u32, max_ctx_qid[CTXT_EGRESS], - M_CTXTQID); - max_ctx_qid[CTXT_INGRESS] = min_t(u32, max_ctx_qid[CTXT_INGRESS], - CUDBG_MAX_INGRESS_QIDS); - max_ctx_qid[CTXT_FLM] = min_t(u32, max_ctx_qid[CTXT_FLM], - CUDBG_MAX_FL_QIDS); - max_ctx_qid[CTXT_CNM] = min_t(u32, max_ctx_qid[CTXT_CNM], - CUDBG_MAX_CNM_QIDS); - return 0; +static u32 cudbg_get_sge_ctxt_fw(struct cudbg_init *pdbg_init, u32 max_qid, + u8 ctxt_type, struct cudbg_ch_cntxt **out_buff) +{ + struct cudbg_ch_cntxt *buff = *out_buff; + u32 j, total_size = 0; + int rc; + + for (j = 0; j < max_qid; j++) { + read_sge_ctxt(pdbg_init, j, ctxt_type, buff->data); + rc = cudbg_sge_ctxt_check_valid(buff->data, ctxt_type); + if (!rc) + continue; + + buff->cntxt_type = ctxt_type; + buff->cntxt_id = j; + buff++; + total_size += sizeof(struct cudbg_ch_cntxt); + if (ctxt_type == CTXT_FLM) { + read_sge_ctxt(pdbg_init, j, CTXT_CNM, buff->data); + buff->cntxt_type = CTXT_CNM; + buff->cntxt_id = j; + buff++; + total_size += sizeof(struct cudbg_ch_cntxt); + } + } + + *out_buff = buff; + return total_size; } -static int collect_dump_context(struct cudbg_init *pdbg_init, - struct cudbg_buffer *dbg_buff, - struct cudbg_error *cudbg_err) +int cudbg_collect_dump_context(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err) { - struct cudbg_buffer scratch_buff; - struct cudbg_buffer temp_buff; - struct adapter *padap = pdbg_init->adap; + struct struct_region_info region_info[CTXT_CNM + 1] = {{ 0 }}; u32 size = 0, next_offset = 0, total_size = 0; + struct cudbg_buffer scratch_buff, temp_buff; + struct adapter *padap = pdbg_init->adap; + u8 mem_type[CTXT_INGRESS + 1] = { 0 }; struct cudbg_ch_cntxt *buff = NULL; struct struct_meminfo meminfo; - int bytes = 0; - int rc = 0; - u32 i, j; - u32 max_ctx_qid[CTXT_CNM + 1]; - bool limit_qid = false; - u32 qid_count = 0; + u32 max_ctx_size, 
max_ctx_qid; + u64 *dst_off, *src_off; + int bytes = 0, rc = 0; + u8 i, k; + u32 j; rc = fill_meminfo(padap, &meminfo); if (rc) goto err; /* Get max valid qid for each type of queue */ - rc = get_max_ctxt_qid(padap, &meminfo, max_ctx_qid, CTXT_CNM + 1); + rc = cudbg_get_ctxt_region_info(padap, &meminfo, region_info); if (rc) goto err; - /* There are four types of queues. Collect context upto max - * qid of each type of queue. + rc = cudbg_dump_context_size(padap); + if (rc < 0) + goto err; + + size = rc; + GET_SCRATCH_BUFF(dbg_buff, size, &scratch_buff); + + /* Get the relative start and end of context regions w.r.t 0; + * in the corresponding memory. */ - for (i = CTXT_EGRESS; i <= CTXT_CNM; i++) - size += sizeof(struct cudbg_ch_cntxt) * max_ctx_qid[i]; - - rc = get_scratch_buff(dbg_buff, size, &scratch_buff); - if (rc == CUDBG_STATUS_NO_SCRATCH_MEM) { - /* Not enough scratch Memory available. - * Collect context of at least CUDBG_LOWMEM_MAX_CTXT_QIDS - * for each queue type. - */ - size = 0; - for (i = CTXT_EGRESS; i <= CTXT_CNM; i++) - size += sizeof(struct cudbg_ch_cntxt) * - CUDBG_LOWMEM_MAX_CTXT_QIDS; + for (i = CTXT_EGRESS; i <= CTXT_INGRESS; i++) { + if (!region_info[i].exist) + continue; - limit_qid = true; - rc = get_scratch_buff(dbg_buff, size, &scratch_buff); - if (rc) - goto err; + cudbg_get_mem_relative(&meminfo, &region_info[i].start, + &region_info[i].end, &mem_type[i]); } + /* Get buffer with enough space to read the biggest context + * region in memory. + */ + max_ctx_size = max(region_info[CTXT_EGRESS].end - + region_info[CTXT_EGRESS].start + 1, + region_info[CTXT_INGRESS].end - + region_info[CTXT_INGRESS].start + 1); + rc = get_scratch_buff(dbg_buff, max_ctx_size, &temp_buff); + if (rc) + goto err1; + buff = (struct cudbg_ch_cntxt *)scratch_buff.data; - /* Collect context data */ - for (i = CTXT_EGRESS; i <= CTXT_FLM; i++) { - qid_count = 0; - for (j = 0; j < max_ctx_qid[i]; j++) { - read_sge_ctxt(pdbg_init, j, i, buff->data); + /* Collect EGRESS and INGRESS context data. + * In case of failures, fallback to collecting via FW or + * backdoor access. + */ + for (i = CTXT_EGRESS; i <= CTXT_INGRESS; i++) { + if (!region_info[i].exist) { + max_ctx_qid = CUDBG_LOWMEM_MAX_CTXT_QIDS; + total_size += cudbg_get_sge_ctxt_fw(pdbg_init, + max_ctx_qid, i, + &buff); + continue; + } - rc = check_valid(buff->data, i); - if (rc) { - buff->cntxt_type = i; - buff->cntxt_id = j; - buff++; - total_size += sizeof(struct cudbg_ch_cntxt); - - if (i == CTXT_FLM) { - read_sge_ctxt(pdbg_init, j, CTXT_CNM, - buff->data); - buff->cntxt_type = CTXT_CNM; - buff->cntxt_id = j; - buff++; - total_size += - sizeof(struct cudbg_ch_cntxt); - } - qid_count++; - } + max_ctx_size = region_info[i].end - region_info[i].start + 1; + max_ctx_qid = max_ctx_size / SGE_CTXT_SIZE; + + if (is_fw_attached(pdbg_init)) { + t4_sge_ctxt_flush(padap, padap->mbox, i); + rc = pdbg_init->mc_collect_cb(pdbg_init, mem_type[i], + region_info[i].start, max_ctx_size, + (void *)temp_buff.data); + } + + if (rc || !is_fw_attached(pdbg_init)) { + max_ctx_qid = CUDBG_LOWMEM_MAX_CTXT_QIDS; + total_size += cudbg_get_sge_ctxt_fw(pdbg_init, + max_ctx_qid, i, + &buff); + continue; + } - /* If there's not enough space to collect more qids, - * then bail and move on to next queue type. + for (j = 0; j < max_ctx_qid; j++) { + src_off = (u64 *)(temp_buff.data + j * SGE_CTXT_SIZE); + dst_off = (u64 *)buff->data; + + /* The data is stored in 64-bit cpu order. Convert it to + * big endian before parsing.
*/ - if (limit_qid && - qid_count >= CUDBG_LOWMEM_MAX_CTXT_QIDS) - break; + for (k = 0; k < SGE_CTXT_SIZE / sizeof(u64); k++) + dst_off[k] = cpu_to_be64(src_off[k]); + + rc = cudbg_sge_ctxt_check_valid(buff->data, i); + if (!rc) + continue; + + buff->cntxt_type = i; + buff->cntxt_id = j; + buff++; + total_size += sizeof(struct cudbg_ch_cntxt); } } + release_scratch_buff(&temp_buff, dbg_buff); + + /* Collect FREELIST and CONGESTION MANAGER contexts */ + max_ctx_size = region_info[CTXT_FLM].end - + region_info[CTXT_FLM].start + 1; + max_ctx_qid = max_ctx_size / SGE_CTXT_SIZE; + /* Since FLM and CONM are 1-to-1 mapped, the below function + * will fetch both FLM and CONM contexts. + */ + total_size += cudbg_get_sge_ctxt_fw(pdbg_init, max_ctx_qid, + CTXT_FLM, &buff); + scratch_buff.size = total_size; - rc = write_compression_hdr(&scratch_buff, dbg_buff); + rc = write_compression_hdr(pdbg_init, &scratch_buff, dbg_buff); if (rc) goto err1; @@ -1943,7 +2120,7 @@ static int collect_dump_context(struct cudbg_init *pdbg_init, temp_buff.data = (void *)((char *)scratch_buff.data + next_offset); - rc = compress_buff(&temp_buff, dbg_buff); + rc = compress_buff(pdbg_init, &temp_buff, dbg_buff); if (rc) goto err1; @@ -1958,54 +2135,22 @@ static int collect_dump_context(struct cudbg_init *pdbg_init, return rc; } -static int collect_fw_devlog(struct cudbg_init *pdbg_init, - struct cudbg_buffer *dbg_buff, - struct cudbg_error *cudbg_err) +int cudbg_collect_fw_devlog(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err) { -#ifdef notyet struct adapter *padap = pdbg_init->adap; struct devlog_params *dparams = &padap->params.devlog; - struct cudbg_param *params = NULL; struct cudbg_buffer scratch_buff; u32 offset; - int rc = 0, i; - - rc = t4_init_devlog_params(padap, 1); - - if (rc < 0) { - pdbg_init->print("%s(), t4_init_devlog_params failed!, rc: "\ - "%d\n", __func__, rc); - for (i = 0; i < pdbg_init->dbg_params_cnt; i++) { - if (pdbg_init->dbg_params[i].param_type == - CUDBG_DEVLOG_PARAM) { - params = &pdbg_init->dbg_params[i]; - break; - } - } - - if (params) { - dparams->memtype = params->u.devlog_param.memtype; - dparams->start = params->u.devlog_param.start; - dparams->size = params->u.devlog_param.size; - } else { - cudbg_err->sys_err = rc; - goto err; - } - } - - rc = get_scratch_buff(dbg_buff, dparams->size, &scratch_buff); - - if (rc) - goto err; + int rc = 0; + GET_SCRATCH_BUFF(dbg_buff, dparams->size, &scratch_buff); /* Collect FW devlog */ - if (dparams->start != 0) { + if (dparams->addr != 0) { offset = scratch_buff.offset; - rc = t4_memory_rw(padap, padap->params.drv_memwin, - dparams->memtype, dparams->start, - dparams->size, - (__be32 *)((char *)scratch_buff.data + - offset), 1); + rc = read_via_memwin(padap, 1, dparams->addr, + (void *)(scratch_buff.data + offset), dparams->size); if (rc) { pdbg_init->print("%s(), t4_memory_rw failed!, rc: "\ @@ -2014,242 +2159,116 @@ static int collect_fw_devlog(struct cudbg_init *pdbg_init, goto err1; } } - - rc = write_compression_hdr(&scratch_buff, dbg_buff); - - if (rc) - goto err1; - - rc = compress_buff(&scratch_buff, dbg_buff); - + WRITE_AND_COMPRESS_SCRATCH_BUFF(&scratch_buff, dbg_buff); err1: release_scratch_buff(&scratch_buff, dbg_buff); -err: - return rc; -#endif - return (CUDBG_STATUS_NOT_IMPLEMENTED); -} -/* CIM OBQ */ - -static int collect_cim_obq_ulp0(struct cudbg_init *pdbg_init, - struct cudbg_buffer *dbg_buff, - struct cudbg_error *cudbg_err) -{ - int rc = 0, qid = 0; - - rc = 
read_cim_obq(pdbg_init, dbg_buff, cudbg_err, qid); - - return rc; -} - -static int collect_cim_obq_ulp1(struct cudbg_init *pdbg_init, - struct cudbg_buffer *dbg_buff, - struct cudbg_error *cudbg_err) -{ - int rc = 0, qid = 1; - - rc = read_cim_obq(pdbg_init, dbg_buff, cudbg_err, qid); - - return rc; -} - -static int collect_cim_obq_ulp2(struct cudbg_init *pdbg_init, - struct cudbg_buffer *dbg_buff, - struct cudbg_error *cudbg_err) -{ - int rc = 0, qid = 2; - - rc = read_cim_obq(pdbg_init, dbg_buff, cudbg_err, qid); - - return rc; -} - -static int collect_cim_obq_ulp3(struct cudbg_init *pdbg_init, - struct cudbg_buffer *dbg_buff, - struct cudbg_error *cudbg_err) -{ - int rc = 0, qid = 3; - - rc = read_cim_obq(pdbg_init, dbg_buff, cudbg_err, qid); - - return rc; -} - -static int collect_cim_obq_sge(struct cudbg_init *pdbg_init, - struct cudbg_buffer *dbg_buff, - struct cudbg_error *cudbg_err) -{ - int rc = 0, qid = 4; - - rc = read_cim_obq(pdbg_init, dbg_buff, cudbg_err, qid); - - return rc; -} - -static int collect_cim_obq_ncsi(struct cudbg_init *pdbg_init, - struct cudbg_buffer *dbg_buff, - struct cudbg_error *cudbg_err) -{ - int rc = 0, qid = 5; - - rc = read_cim_obq(pdbg_init, dbg_buff, cudbg_err, qid); - - return rc; -} - -static int collect_obq_sge_rx_q0(struct cudbg_init *pdbg_init, - struct cudbg_buffer *dbg_buff, - struct cudbg_error *cudbg_err) -{ - int rc = 0, qid = 6; - - rc = read_cim_obq(pdbg_init, dbg_buff, cudbg_err, qid); - - return rc; -} - -static int collect_obq_sge_rx_q1(struct cudbg_init *pdbg_init, - struct cudbg_buffer *dbg_buff, - struct cudbg_error *cudbg_err) -{ - int rc = 0, qid = 7; - - rc = read_cim_obq(pdbg_init, dbg_buff, cudbg_err, qid); - return rc; } +/* CIM OBQ */ static int read_cim_obq(struct cudbg_init *pdbg_init, struct cudbg_buffer *dbg_buff, struct cudbg_error *cudbg_err, int qid) { - struct cudbg_buffer scratch_buff; struct adapter *padap = pdbg_init->adap; + struct cudbg_buffer scratch_buff; + int no_of_read_words, rc = 0; u32 qsize; - int rc; - int no_of_read_words; /* collect CIM OBQ */ qsize = 6 * CIM_OBQ_SIZE * 4 * sizeof(u32); - rc = get_scratch_buff(dbg_buff, qsize, &scratch_buff); - if (rc) - goto err; - + GET_SCRATCH_BUFF(dbg_buff, qsize, &scratch_buff); /* t4_read_cim_obq will return no. 
of read words or error */ no_of_read_words = t4_read_cim_obq(padap, qid, (u32 *)((u32 *)scratch_buff.data + scratch_buff.offset), qsize); - /* no_of_read_words is less than or equal to 0 means error */ if (no_of_read_words <= 0) { if (no_of_read_words == 0) rc = CUDBG_SYSTEM_ERROR; else rc = no_of_read_words; - if (pdbg_init->verbose) - pdbg_init->print("%s: t4_read_cim_obq failed (%d)\n", + pdbg_init->print("%s(), t4_read_cim_obq failed!, rc: %d\n", __func__, rc); cudbg_err->sys_err = rc; goto err1; } - scratch_buff.size = no_of_read_words * 4; - - rc = write_compression_hdr(&scratch_buff, dbg_buff); - - if (rc) - goto err1; - - rc = compress_buff(&scratch_buff, dbg_buff); - - if (rc) - goto err1; - + WRITE_AND_COMPRESS_SCRATCH_BUFF(&scratch_buff, dbg_buff); err1: release_scratch_buff(&scratch_buff, dbg_buff); -err: return rc; } -/* CIM IBQ */ - -static int collect_cim_ibq_tp0(struct cudbg_init *pdbg_init, +int cudbg_collect_cim_obq_ulp0(struct cudbg_init *pdbg_init, struct cudbg_buffer *dbg_buff, struct cudbg_error *cudbg_err) { - int rc = 0, qid = 0; - - rc = read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, qid); - return rc; + return read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 0); } -static int collect_cim_ibq_tp1(struct cudbg_init *pdbg_init, +int cudbg_collect_cim_obq_ulp1(struct cudbg_init *pdbg_init, struct cudbg_buffer *dbg_buff, struct cudbg_error *cudbg_err) { - int rc = 0, qid = 1; - - rc = read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, qid); - return rc; + return read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 1); } -static int collect_cim_ibq_ulp(struct cudbg_init *pdbg_init, +int cudbg_collect_cim_obq_ulp2(struct cudbg_init *pdbg_init, struct cudbg_buffer *dbg_buff, struct cudbg_error *cudbg_err) { - int rc = 0, qid = 2; + return read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 2); +} - rc = read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, qid); - return rc; +int cudbg_collect_cim_obq_ulp3(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err) +{ + return read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 3); } -static int collect_cim_ibq_sge0(struct cudbg_init *pdbg_init, - struct cudbg_buffer *dbg_buff, - struct cudbg_error *cudbg_err) +int cudbg_collect_cim_obq_sge(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err) { - int rc = 0, qid = 3; + return read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 4); +} - rc = read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, qid); - return rc; +int cudbg_collect_cim_obq_ncsi(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err) +{ + return read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 5); } -static int collect_cim_ibq_sge1(struct cudbg_init *pdbg_init, +int cudbg_collect_obq_sge_rx_q0(struct cudbg_init *pdbg_init, struct cudbg_buffer *dbg_buff, struct cudbg_error *cudbg_err) { - int rc = 0, qid = 4; - - rc = read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, qid); - return rc; + return read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 6); } -static int collect_cim_ibq_ncsi(struct cudbg_init *pdbg_init, +int cudbg_collect_obq_sge_rx_q1(struct cudbg_init *pdbg_init, struct cudbg_buffer *dbg_buff, struct cudbg_error *cudbg_err) { - int rc, qid = 5; - - rc = read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, qid); - return rc; + return read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 7); } +/* CIM IBQ */ static int read_cim_ibq(struct cudbg_init *pdbg_init, struct cudbg_buffer *dbg_buff, struct cudbg_error *cudbg_err, int qid) { struct adapter *padap = 
pdbg_init->adap; struct cudbg_buffer scratch_buff; + int no_of_read_words, rc = 0; u32 qsize; - int rc; - int no_of_read_words; /* collect CIM IBQ */ qsize = CIM_IBQ_SIZE * 4 * sizeof(u32); - rc = get_scratch_buff(dbg_buff, qsize, &scratch_buff); - - if (rc) - goto err; - + GET_SCRATCH_BUFF(dbg_buff, qsize, &scratch_buff); /* t4_read_cim_ibq will return no. of read words or error */ no_of_read_words = t4_read_cim_ibq(padap, qid, (u32 *)((u32 *)scratch_buff.data + @@ -2260,92 +2279,102 @@ static int read_cim_ibq(struct cudbg_init *pdbg_init, rc = CUDBG_SYSTEM_ERROR; else rc = no_of_read_words; - if (pdbg_init->verbose) - pdbg_init->print("%s: t4_read_cim_ibq failed (%d)\n", + pdbg_init->print("%s(), t4_read_cim_ibq failed!, rc: %d\n", __func__, rc); cudbg_err->sys_err = rc; goto err1; } + WRITE_AND_COMPRESS_SCRATCH_BUFF(&scratch_buff, dbg_buff); +err1: + release_scratch_buff(&scratch_buff, dbg_buff); + return rc; +} - rc = write_compression_hdr(&scratch_buff, dbg_buff); - if (rc) - goto err1; +int cudbg_collect_cim_ibq_tp0(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err) +{ + return read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 0); +} + +int cudbg_collect_cim_ibq_tp1(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err) +{ + return read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 1); +} - rc = compress_buff(&scratch_buff, dbg_buff); - if (rc) - goto err1; +int cudbg_collect_cim_ibq_ulp(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err) +{ + return read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 2); +} -err1: - release_scratch_buff(&scratch_buff, dbg_buff); +int cudbg_collect_cim_ibq_sge0(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err) +{ + return read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 3); +} -err: - return rc; +int cudbg_collect_cim_ibq_sge1(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err) +{ + return read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 4); } -static int collect_cim_ma_la(struct cudbg_init *pdbg_init, - struct cudbg_buffer *dbg_buff, - struct cudbg_error *cudbg_err) +int cudbg_collect_cim_ibq_ncsi(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err) +{ + return read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 5); +} + +int cudbg_collect_cim_ma_la(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err) { - struct cudbg_buffer scratch_buff; struct adapter *padap = pdbg_init->adap; - u32 rc = 0; + struct cudbg_buffer scratch_buff; + int rc = 0; /* collect CIM MA LA */ scratch_buff.size = 2 * CIM_MALA_SIZE * 5 * sizeof(u32); - rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff); - if (rc) - goto err; - - /* no return */ + GET_SCRATCH_BUFF(dbg_buff, scratch_buff.size, &scratch_buff); t4_cim_read_ma_la(padap, (u32 *) ((char *)scratch_buff.data + scratch_buff.offset), (u32 *) ((char *)scratch_buff.data + scratch_buff.offset + 5 * CIM_MALA_SIZE)); - - rc = write_compression_hdr(&scratch_buff, dbg_buff); - if (rc) - goto err1; - - rc = compress_buff(&scratch_buff, dbg_buff); - -err1: - release_scratch_buff(&scratch_buff, dbg_buff); -err: + WRITE_AND_RELEASE_SCRATCH_BUFF(&scratch_buff, dbg_buff); return rc; } -static int collect_cim_la(struct cudbg_init *pdbg_init, - struct cudbg_buffer *dbg_buff, - struct cudbg_error *cudbg_err) +int cudbg_collect_cim_la(struct cudbg_init 
*pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err) { - struct cudbg_buffer scratch_buff; struct adapter *padap = pdbg_init->adap; - - int rc; + struct cudbg_buffer scratch_buff; + int size, rc = 0; u32 cfg = 0; - int size; /* collect CIM LA */ if (is_t6(padap)) { size = padap->params.cim_la_size / 10 + 1; - size *= 11 * sizeof(u32); + size *= 10 * sizeof(u32); } else { size = padap->params.cim_la_size / 8; size *= 8 * sizeof(u32); } size += sizeof(cfg); - - rc = get_scratch_buff(dbg_buff, size, &scratch_buff); - if (rc) - goto err; - + GET_SCRATCH_BUFF(dbg_buff, size, &scratch_buff); rc = t4_cim_read(padap, A_UP_UP_DBG_LA_CFG, 1, &cfg); - if (rc) { - if (pdbg_init->verbose) - pdbg_init->print("%s: t4_cim_read failed (%d)\n", + pdbg_init->print("%s(), t4_cim_read failed!, rc: %d\n", __func__, rc); cudbg_err->sys_err = rc; goto err1; @@ -2353,64 +2382,41 @@ static int collect_cim_la(struct cudbg_init *pdbg_init, memcpy((char *)scratch_buff.data + scratch_buff.offset, &cfg, sizeof(cfg)); - rc = t4_cim_read_la(padap, (u32 *) ((char *)scratch_buff.data + scratch_buff.offset + sizeof(cfg)), NULL); if (rc < 0) { - if (pdbg_init->verbose) - pdbg_init->print("%s: t4_cim_read_la failed (%d)\n", + pdbg_init->print("%s(), t4_cim_read_la failed!, rc: %d\n", __func__, rc); cudbg_err->sys_err = rc; goto err1; } - - rc = write_compression_hdr(&scratch_buff, dbg_buff); - if (rc) - goto err1; - - rc = compress_buff(&scratch_buff, dbg_buff); - if (rc) - goto err1; - + WRITE_AND_COMPRESS_SCRATCH_BUFF(&scratch_buff, dbg_buff); err1: release_scratch_buff(&scratch_buff, dbg_buff); -err: return rc; } -static int collect_cim_qcfg(struct cudbg_init *pdbg_init, - struct cudbg_buffer *dbg_buff, - struct cudbg_error *cudbg_err) +int cudbg_collect_cim_qcfg(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err) { - struct cudbg_buffer scratch_buff; + struct struct_cim_qcfg *cim_qcfg_data = NULL; struct adapter *padap = pdbg_init->adap; + struct cudbg_buffer scratch_buff; + int rc = 0; u32 offset; - int cim_num_obq, rc = 0; - - struct struct_cim_qcfg *cim_qcfg_data = NULL; - - rc = get_scratch_buff(dbg_buff, sizeof(struct struct_cim_qcfg), - &scratch_buff); - - if (rc) - goto err; + GET_SCRATCH_BUFF(dbg_buff, sizeof(*cim_qcfg_data), &scratch_buff); offset = scratch_buff.offset; - - cim_num_obq = is_t4(padap) ? 
CIM_NUM_OBQ : CIM_NUM_OBQ_T5; - cim_qcfg_data = (struct struct_cim_qcfg *)((u8 *)((char *)scratch_buff.data + offset)); - rc = t4_cim_read(padap, A_UP_IBQ_0_RDADDR, ARRAY_SIZE(cim_qcfg_data->stat), cim_qcfg_data->stat); - if (rc) { - if (pdbg_init->verbose) - pdbg_init->print("%s: t4_cim_read IBQ_0_RDADDR failed (%d)\n", - __func__, rc); + pdbg_init->print("%s(), t4_cim_read IBQ_0_RDADDR failed!, rc: "\ + "%d\n", __func__, rc); cudbg_err->sys_err = rc; goto err1; } @@ -2418,32 +2424,20 @@ static int collect_cim_qcfg(struct cudbg_init *pdbg_init, rc = t4_cim_read(padap, A_UP_OBQ_0_REALADDR, ARRAY_SIZE(cim_qcfg_data->obq_wr), cim_qcfg_data->obq_wr); - if (rc) { - if (pdbg_init->verbose) - pdbg_init->print("%s: t4_cim_read OBQ_0_REALADDR failed (%d)\n", - __func__, rc); + pdbg_init->print("%s(), t4_cim_read OBQ_0_REALADDR failed!, "\ + "rc: %d\n", __func__, rc); cudbg_err->sys_err = rc; goto err1; } - /* no return val */ t4_read_cimq_cfg(padap, cim_qcfg_data->base, cim_qcfg_data->size, cim_qcfg_data->thres); - - rc = write_compression_hdr(&scratch_buff, dbg_buff); - if (rc) - goto err1; - - rc = compress_buff(&scratch_buff, dbg_buff); - if (rc) - goto err1; - + WRITE_AND_COMPRESS_SCRATCH_BUFF(&scratch_buff, dbg_buff); err1: release_scratch_buff(&scratch_buff, dbg_buff); -err: return rc; } @@ -2463,14 +2457,13 @@ static int collect_cim_qcfg(struct cudbg_init *pdbg_init, * start and end wrt 0 and @mem_tot_len, respectively, and set * @reg_info->exist to true. Otherwise, set @reg_info->exist to false. */ -#ifdef notyet static int get_payload_range(struct adapter *padap, u8 mem_type, unsigned long mem_tot_len, u8 payload_type, struct struct_region_info *reg_info) { - struct struct_meminfo meminfo; struct struct_mem_desc mem_region; struct struct_mem_desc payload; + struct struct_meminfo meminfo; u32 i, idx, found = 0; u8 mc_type; int rc; @@ -2545,32 +2538,36 @@ static int get_payload_range(struct adapter *padap, u8 mem_type, return 0; } -#endif static int read_fw_mem(struct cudbg_init *pdbg_init, struct cudbg_buffer *dbg_buff, u8 mem_type, unsigned long tot_len, struct cudbg_error *cudbg_err) { -#ifdef notyet - struct cudbg_buffer scratch_buff; - struct adapter *padap = pdbg_init->adap; - unsigned long bytes_read = 0; - unsigned long bytes_left; - unsigned long bytes; - int rc; + unsigned long compress_bytes, compress_bytes_left, compress_bytes_read; struct struct_region_info payload[2]; /* TX and RX Payload Region */ + unsigned long bytes, bytes_left, bytes_read = 0; + struct adapter *padap = pdbg_init->adap; + struct cudbg_buffer scratch_buff = { 0 }; + u32 yield_itr = CUDBG_YIELD_ITERATION; + struct cudbg_buffer temp_buff = { 0 }; u16 get_payload_flag; + u32 yield_count = 0; + u16 yield_flag; + int rc = 0; u8 i; get_payload_flag = pdbg_init->dbg_params[CUDBG_GET_PAYLOAD_PARAM].param_type; + yield_flag = + pdbg_init->dbg_params[CUDBG_YIELD_ITER_PARAM].param_type; + /* If explicitly asked to get TX/RX Payload data, * then don't zero out the payload data. Otherwise, * zero out the payload data. 
*/ if (!get_payload_flag) { - u8 region_index[2]; + u8 region_index[2] = {0}; u8 j = 0; /* Find the index of TX and RX Payload regions in meminfo */ @@ -2595,26 +2592,38 @@ static int read_fw_mem(struct cudbg_init *pdbg_init, if (payload[i].exist) { /* Align start and end to avoid wrap around */ - payload[i].start = - roundup(payload[i].start, - CUDBG_CHUNK_SIZE); - payload[i].end = - rounddown(payload[i].end, - CUDBG_CHUNK_SIZE); + payload[i].start = roundup(payload[i].start, + CUDBG_CHUNK_SIZE); + payload[i].end = rounddown(payload[i].end, + CUDBG_CHUNK_SIZE); } } } bytes_left = tot_len; scratch_buff.size = tot_len; - rc = write_compression_hdr(&scratch_buff, dbg_buff); + rc = write_compression_hdr(pdbg_init, &scratch_buff, dbg_buff); if (rc) goto err; - while (bytes_left > 0) { - bytes = min_t(unsigned long, bytes_left, (unsigned long)CUDBG_CHUNK_SIZE); - rc = get_scratch_buff(dbg_buff, bytes, &scratch_buff); + if (yield_flag) + yield_itr = pdbg_init->dbg_params[CUDBG_YIELD_ITER_PARAM].u.yield_param.itr; + while (bytes_left > 0) { + /* As mc size is huge, this loop will hold cpu for a longer time. + * OS may think that the process is hanged and will generate + * deadlock trace. + * So yield the cpu regularly, after some iterations. + */ + yield_count++; + if (yield_count % yield_itr == 0) + if (pdbg_init->yield_cb) + pdbg_init->yield_cb(pdbg_init); + + bytes = min_t(unsigned long, bytes_left, + (unsigned long)(CUDBG_MEM_TOT_READ_SIZE)); + rc = get_scratch_buff_aligned(dbg_buff, bytes, &scratch_buff, + CUDBG_MEM_ALIGN); if (rc) { rc = CUDBG_STATUS_NO_SCRATCH_MEM; goto err; @@ -2634,24 +2643,34 @@ static int read_fw_mem(struct cudbg_init *pdbg_init, } } - /* Read from file */ - /*fread(scratch_buff.data, 1, Bytes, in);*/ - rc = t4_memory_rw(padap, MEMWIN_NIC, mem_type, bytes_read, - bytes, (__be32 *)(scratch_buff.data), 1); - + rc = pdbg_init->mc_collect_cb(pdbg_init, mem_type, + bytes_read, bytes, + (u8 *)scratch_buff.data); if (rc) { - if (pdbg_init->verbose) - pdbg_init->print("%s: t4_memory_rw failed (%d)", - __func__, rc); + pdbg_init->print("%s(), t4_memory_rw failed!, rc: %d\n", + __func__, rc); cudbg_err->sys_err = rc; goto err1; } skip_read: - rc = compress_buff(&scratch_buff, dbg_buff); - if (rc) - goto err1; - + /* Compress collected data */ + compress_bytes_left = bytes; + compress_bytes_read = 0; + while (compress_bytes_left > 0) { + compress_bytes = min_t(unsigned long, + compress_bytes_left, + (unsigned long)CUDBG_CHUNK_SIZE); + temp_buff.data = + (char *)scratch_buff.data + compress_bytes_read; + temp_buff.offset = 0; + temp_buff.size = compress_bytes; + rc = compress_buff(pdbg_init, &temp_buff, dbg_buff); + if (rc) + goto err1; + compress_bytes_left -= compress_bytes; + compress_bytes_read += compress_bytes; + } bytes_left -= bytes; bytes_read += bytes; release_scratch_buff(&scratch_buff, dbg_buff); @@ -2660,11 +2679,8 @@ static int read_fw_mem(struct cudbg_init *pdbg_init, err1: if (rc) release_scratch_buff(&scratch_buff, dbg_buff); - err: return rc; -#endif - return (CUDBG_STATUS_NOT_IMPLEMENTED); } static void collect_mem_info(struct cudbg_init *pdbg_init, @@ -2672,12 +2688,8 @@ static void collect_mem_info(struct cudbg_init *pdbg_init, { struct adapter *padap = pdbg_init->adap; u32 value; - int t4 = 0; - if (is_t4(padap)) - t4 = 1; - - if (t4) { + if (is_t4(padap)) { value = t4_read_reg(padap, A_MA_EXT_MEMORY_BAR); value = G_EXT_MEM_SIZE(value); mem_info->size_mc0 = (u16)value; /* size in MB */ @@ -2694,11 +2706,15 @@ static void collect_mem_info(struct cudbg_init 
*pdbg_init, value = t4_read_reg(padap, A_MA_EXT_MEMORY1_BAR); value = G_EXT_MEM1_SIZE(value); mem_info->size_mc1 = (u16)value; + /*in t6 no mc1 so HMA shares mc1 address space */ + mem_info->size_hma = (u16)value; value = t4_read_reg(padap, A_MA_TARGET_MEM_ENABLE); if (value & F_EXT_MEM0_ENABLE) mem_info->mem_flag |= (1 << MC0_FLAG); - if (value & F_EXT_MEM1_ENABLE) + if (value & F_HMA_MUX) + mem_info->mem_flag |= (1 << HMA_FLAG); + else if (value & F_EXT_MEM1_ENABLE) mem_info->mem_flag |= (1 << MC1_FLAG); } @@ -2715,7 +2731,6 @@ static void collect_mem_info(struct cudbg_init *pdbg_init, mem_info->mem_flag |= (1 << EDC0_FLAG); if (value & F_EDRAM1_ENABLE) mem_info->mem_flag |= (1 << EDC1_FLAG); - } static void cudbg_t4_fwcache(struct cudbg_init *pdbg_init, @@ -2725,157 +2740,125 @@ static void cudbg_t4_fwcache(struct cudbg_init *pdbg_init, int rc; if (is_fw_attached(pdbg_init)) { - /* Flush uP dcache before reading edcX/mcX */ - rc = begin_synchronized_op(padap, NULL, SLEEP_OK | INTR_OK, - "t4cudl"); - if (rc == 0) { - rc = t4_fwcache(padap, FW_PARAM_DEV_FWCACHE_FLUSH); - end_synchronized_op(padap, 0); - } - + rc = t4_fwcache(padap, FW_PARAM_DEV_FWCACHE_FLUSH); if (rc) { - if (pdbg_init->verbose) - pdbg_init->print("%s: t4_fwcache failed (%d)\n", + pdbg_init->print("%s(), Warning: t4_fwcache failed!, rc: %d\n", __func__, rc); cudbg_err->sys_warn = rc; } } } -static int collect_edc0_meminfo(struct cudbg_init *pdbg_init, - struct cudbg_buffer *dbg_buff, - struct cudbg_error *cudbg_err) +static int collect_mem_region(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err, + u8 mem_type) { struct card_mem mem_info = {0}; - unsigned long edc0_size; + unsigned long flag, size; int rc; cudbg_t4_fwcache(pdbg_init, cudbg_err); - collect_mem_info(pdbg_init, &mem_info); - - if (mem_info.mem_flag & (1 << EDC0_FLAG)) { - edc0_size = (((unsigned long)mem_info.size_edc0) * 1024 * 1024); - rc = read_fw_mem(pdbg_init, dbg_buff, MEM_EDC0, - edc0_size, cudbg_err); - if (rc) - goto err; - - } else { + switch (mem_type) { + case MEM_EDC0: + flag = (1 << EDC0_FLAG); + size = (((unsigned long)mem_info.size_edc0) * 1024 * 1024); + break; + case MEM_EDC1: + flag = (1 << EDC1_FLAG); + size = (((unsigned long)mem_info.size_edc1) * 1024 * 1024); + break; + case MEM_MC0: + flag = (1 << MC0_FLAG); + size = (((unsigned long)mem_info.size_mc0) * 1024 * 1024); + break; + case MEM_MC1: + flag = (1 << MC1_FLAG); + size = (((unsigned long)mem_info.size_mc1) * 1024 * 1024); + break; + default: rc = CUDBG_STATUS_ENTITY_NOT_FOUND; - if (pdbg_init->verbose) - pdbg_init->print("%s(), collect_mem_info failed!, %s\n", - __func__, err_msg[-rc]); goto err; - } -err: - return rc; -} - -static int collect_edc1_meminfo(struct cudbg_init *pdbg_init, - struct cudbg_buffer *dbg_buff, - struct cudbg_error *cudbg_err) -{ - struct card_mem mem_info = {0}; - unsigned long edc1_size; - int rc; - - cudbg_t4_fwcache(pdbg_init, cudbg_err); - - collect_mem_info(pdbg_init, &mem_info); - if (mem_info.mem_flag & (1 << EDC1_FLAG)) { - edc1_size = (((unsigned long)mem_info.size_edc1) * 1024 * 1024); - rc = read_fw_mem(pdbg_init, dbg_buff, MEM_EDC1, - edc1_size, cudbg_err); + if (mem_info.mem_flag & flag) { + rc = read_fw_mem(pdbg_init, dbg_buff, mem_type, + size, cudbg_err); if (rc) goto err; } else { rc = CUDBG_STATUS_ENTITY_NOT_FOUND; - if (pdbg_init->verbose) - pdbg_init->print("%s(), collect_mem_info failed!, %s\n", + pdbg_init->print("%s(), collect_mem_info failed!, %s\n", __func__, err_msg[-rc]); goto err; } 
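/*
 * Editor's note: the GET_SCRATCH_BUFF(), WRITE_AND_COMPRESS_SCRATCH_BUFF()
 * and WRITE_AND_RELEASE_SCRATCH_BUFF() macros that the new cudbg_collect_*()
 * routines lean on are defined outside the hunks shown in this patch.  The
 * sketch below is only an assumed expansion, inferred from the open-coded
 * get_scratch_buff() / write_compression_hdr() / compress_buff() /
 * release_scratch_buff() sequences they replace; it is not the authoritative
 * definition.  The expansions assume 'rc' (and, for the compression helpers,
 * 'pdbg_init') is in scope at the call site, which holds for every collector
 * in this file.  The point of the macros is simply to collapse the repeated
 * allocate/compress/release boilerplate the old collect_*() versions open-coded.
 */

/* Grab scratch space for the entity or bail out of the collector early. */
#define GET_SCRATCH_BUFF(dbg_buff, size, scratch_buff) do {		\
	rc = get_scratch_buff((dbg_buff), (size), (scratch_buff));	\
	if (rc)								\
		return (rc);						\
} while (0)

/* Write the compression header, then compress the collected data. */
#define WRITE_AND_COMPRESS_SCRATCH_BUFF(scratch_buff, dbg_buff) do {	\
	rc = write_compression_hdr(pdbg_init, (scratch_buff), (dbg_buff)); \
	if (rc == 0)							\
		rc = compress_buff(pdbg_init, (scratch_buff), (dbg_buff)); \
} while (0)

/* Same, but also return the scratch buffer so the caller can just return rc. */
#define WRITE_AND_RELEASE_SCRATCH_BUFF(scratch_buff, dbg_buff) do {	\
	WRITE_AND_COMPRESS_SCRATCH_BUFF((scratch_buff), (dbg_buff));	\
	release_scratch_buff((scratch_buff), (dbg_buff));		\
} while (0)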
- err: - return rc; } -static int collect_mc0_meminfo(struct cudbg_init *pdbg_init, +int cudbg_collect_edc0_meminfo(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err) +{ + return collect_mem_region(pdbg_init, dbg_buff, cudbg_err, MEM_EDC0); +} + +int cudbg_collect_edc1_meminfo(struct cudbg_init *pdbg_init, struct cudbg_buffer *dbg_buff, struct cudbg_error *cudbg_err) { - struct card_mem mem_info = {0}; - unsigned long mc0_size; - int rc; - - cudbg_t4_fwcache(pdbg_init, cudbg_err); - - collect_mem_info(pdbg_init, &mem_info); + return collect_mem_region(pdbg_init, dbg_buff, cudbg_err, MEM_EDC1); +} - if (mem_info.mem_flag & (1 << MC0_FLAG)) { - mc0_size = (((unsigned long)mem_info.size_mc0) * 1024 * 1024); - rc = read_fw_mem(pdbg_init, dbg_buff, MEM_MC0, - mc0_size, cudbg_err); - if (rc) - goto err; - } else { - rc = CUDBG_STATUS_ENTITY_NOT_FOUND; - if (pdbg_init->verbose) - pdbg_init->print("%s(), collect_mem_info failed!, %s\n", - __func__, err_msg[-rc]); - goto err; - } +int cudbg_collect_mc0_meminfo(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err) +{ + return collect_mem_region(pdbg_init, dbg_buff, cudbg_err, MEM_MC0); +} -err: - return rc; +int cudbg_collect_mc1_meminfo(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err) +{ + return collect_mem_region(pdbg_init, dbg_buff, cudbg_err, MEM_MC1); } -static int collect_mc1_meminfo(struct cudbg_init *pdbg_init, - struct cudbg_buffer *dbg_buff, - struct cudbg_error *cudbg_err) +int cudbg_collect_hma_meminfo(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err) { struct card_mem mem_info = {0}; - unsigned long mc1_size; + unsigned long hma_size; int rc; cudbg_t4_fwcache(pdbg_init, cudbg_err); - collect_mem_info(pdbg_init, &mem_info); - - if (mem_info.mem_flag & (1 << MC1_FLAG)) { - mc1_size = (((unsigned long)mem_info.size_mc1) * 1024 * 1024); - rc = read_fw_mem(pdbg_init, dbg_buff, MEM_MC1, - mc1_size, cudbg_err); - if (rc) - goto err; + if (mem_info.mem_flag & (1 << HMA_FLAG)) { + hma_size = (((unsigned long)mem_info.size_hma) * 1024 * 1024); + rc = read_fw_mem(pdbg_init, dbg_buff, MEM_HMA, hma_size, cudbg_err); } else { rc = CUDBG_STATUS_ENTITY_NOT_FOUND; - - if (pdbg_init->verbose) + if (pdbg_init->verbose) { pdbg_init->print("%s(), collect_mem_info failed!, %s\n", - __func__, err_msg[-rc]); - goto err; + __func__, err_msg[-rc]); + } } -err: + return rc; } -static int collect_reg_dump(struct cudbg_init *pdbg_init, - struct cudbg_buffer *dbg_buff, - struct cudbg_error *cudbg_err) +int cudbg_collect_reg_dump(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err) { - struct cudbg_buffer scratch_buff; - struct cudbg_buffer tmp_scratch_buff; + struct cudbg_buffer tmp_scratch_buff, scratch_buff; + unsigned long bytes, bytes_left, bytes_read = 0; struct adapter *padap = pdbg_init->adap; - unsigned long bytes_read = 0; - unsigned long bytes_left; - u32 buf_size = 0, bytes = 0; - int rc = 0; + u32 buf_size = 0; + int rc = 0; if (is_t4(padap)) buf_size = T4_REGMAP_SIZE ;/*+ sizeof(unsigned int);*/ @@ -2883,18 +2866,11 @@ static int collect_reg_dump(struct cudbg_init *pdbg_init, buf_size = T5_REGMAP_SIZE; scratch_buff.size = buf_size; - tmp_scratch_buff = scratch_buff; - - rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff); - if (rc) - goto err; - - /* no return */ + GET_SCRATCH_BUFF(dbg_buff, scratch_buff.size, 
&scratch_buff); t4_get_regs(padap, (void *)scratch_buff.data, scratch_buff.size); - bytes_left = scratch_buff.size; - - rc = write_compression_hdr(&scratch_buff, dbg_buff); + bytes_left = scratch_buff.size; + rc = write_compression_hdr(pdbg_init, &scratch_buff, dbg_buff); if (rc) goto err1; @@ -2903,64 +2879,43 @@ static int collect_reg_dump(struct cudbg_init *pdbg_init, ((char *)scratch_buff.data) + bytes_read; bytes = min_t(unsigned long, bytes_left, (unsigned long)CUDBG_CHUNK_SIZE); tmp_scratch_buff.size = bytes; - compress_buff(&tmp_scratch_buff, dbg_buff); + compress_buff(pdbg_init, &tmp_scratch_buff, dbg_buff); bytes_left -= bytes; bytes_read += bytes; } - err1: release_scratch_buff(&scratch_buff, dbg_buff); -err: return rc; } -static int collect_cctrl(struct cudbg_init *pdbg_init, - struct cudbg_buffer *dbg_buff, - struct cudbg_error *cudbg_err) +int cudbg_collect_cctrl(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err) { - struct cudbg_buffer scratch_buff; struct adapter *padap = pdbg_init->adap; + struct cudbg_buffer scratch_buff; u32 size; int rc; size = sizeof(u16) * NMTUS * NCCTRL_WIN; - scratch_buff.size = size; - - rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff); - if (rc) - goto err; - + GET_SCRATCH_BUFF(dbg_buff, size, &scratch_buff); t4_read_cong_tbl(padap, (void *)scratch_buff.data); - - rc = write_compression_hdr(&scratch_buff, dbg_buff); - if (rc) - goto err1; - - rc = compress_buff(&scratch_buff, dbg_buff); - -err1: - release_scratch_buff(&scratch_buff, dbg_buff); -err: + WRITE_AND_RELEASE_SCRATCH_BUFF(&scratch_buff, dbg_buff); return rc; } static int check_busy_bit(struct adapter *padap) { - u32 val; - u32 busy = 1; - int i = 0; - int retry = 10; - int status = 0; + int status = 0, retry = 10, i = 0; + u32 val, busy = 1; while (busy && (i < retry)) { val = t4_read_reg(padap, A_CIM_HOST_ACC_CTRL); busy = (0 != (val & CUDBG_CIM_BUSY_BIT)); i++; } - if (busy) status = -1; - return status; } @@ -2970,36 +2925,43 @@ static int cim_ha_rreg(struct adapter *padap, u32 addr, u32 *val) /* write register address into the A_CIM_HOST_ACC_CTRL */ t4_write_reg(padap, A_CIM_HOST_ACC_CTRL, addr); - /* Poll HOSTBUSY */ rc = check_busy_bit(padap); if (rc) goto err; - /* Read value from A_CIM_HOST_ACC_DATA */ *val = t4_read_reg(padap, A_CIM_HOST_ACC_DATA); - err: return rc; } static int dump_up_cim(struct adapter *padap, struct cudbg_init *pdbg_init, - struct ireg_field *up_cim_reg, u32 *buff) + struct ireg_field *up_cim_reg, u32 *buff, u32 insta) { - u32 i; + u32 local_offset; int rc = 0; + u32 i; for (i = 0; i < up_cim_reg->ireg_offset_range; i++) { - rc = cim_ha_rreg(padap, - up_cim_reg->ireg_local_offset + (i * 4), - buff); + switch (insta) { + /* From table-334 of t5_reg_ext */ + case NUM_CIM_CTL_TSCH_CHANNEL_INSTANCES: + local_offset = (i * 0x120); + break; + case NUM_CIM_CTL_TSCH_CHANNEL_TSCH_CLASS_INSTANCES: + local_offset = (i * 0x10); + break; + default: + local_offset = i * 4; + } + + rc = cim_ha_rreg(padap, up_cim_reg->ireg_local_offset + + local_offset, buff); if (rc) { - if (pdbg_init->verbose) - pdbg_init->print("BUSY timeout reading" + pdbg_init->print("BUSY timeout reading" "CIM_HOST_ACC_CTRL\n"); goto err; } - buff++; } @@ -3007,26 +2969,27 @@ static int dump_up_cim(struct adapter *padap, struct cudbg_init *pdbg_init, -static int collect_up_cim_indirect(struct cudbg_init *pdbg_init, - struct cudbg_buffer *dbg_buff, - struct cudbg_error *cudbg_err) +int cudbg_collect_up_cim_indirect(struct
cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err) { - struct cudbg_buffer scratch_buff; struct adapter *padap = pdbg_init->adap; + struct cudbg_buffer scratch_buff; struct ireg_buf *up_cim; - u32 size; int i, rc, n; + u32 insta = 0; + u32 size; - n = sizeof(t5_up_cim_reg_array) / (4 * sizeof(u32)); - size = sizeof(struct ireg_buf) * n; - scratch_buff.size = size; - - rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff); - if (rc) - goto err; + if (is_t5(padap)) + n = sizeof(t5_up_cim_reg_array) / (5 * sizeof(u32)); + else if (is_t6(padap)) + n = sizeof(t6_up_cim_reg_array) / (5 * sizeof(u32)); + else + return CUDBG_STATUS_NOT_IMPLEMENTED; + size = sizeof(struct ireg_buf) * n; + GET_SCRATCH_BUFF(dbg_buff, size, &scratch_buff); up_cim = (struct ireg_buf *)scratch_buff.data; - for (i = 0; i < n; i++) { struct ireg_field *up_cim_reg = &up_cim->tp_pio; u32 *buff = up_cim->outbuf; @@ -3038,6 +3001,7 @@ static int collect_up_cim_indirect(struct cudbg_init *pdbg_init, t5_up_cim_reg_array[i][2]; up_cim_reg->ireg_offset_range = t5_up_cim_reg_array[i][3]; + insta = t5_up_cim_reg_array[i][4]; } else if (is_t6(padap)) { up_cim_reg->ireg_addr = t6_up_cim_reg_array[i][0]; up_cim_reg->ireg_data = t6_up_cim_reg_array[i][1]; @@ -3045,135 +3009,53 @@ static int collect_up_cim_indirect(struct cudbg_init *pdbg_init, t6_up_cim_reg_array[i][2]; up_cim_reg->ireg_offset_range = t6_up_cim_reg_array[i][3]; + insta = t6_up_cim_reg_array[i][4]; } - rc = dump_up_cim(padap, pdbg_init, up_cim_reg, buff); - + rc = dump_up_cim(padap, pdbg_init, up_cim_reg, buff, insta); up_cim++; } - - rc = write_compression_hdr(&scratch_buff, dbg_buff); - if (rc) - goto err1; - - rc = compress_buff(&scratch_buff, dbg_buff); - -err1: - release_scratch_buff(&scratch_buff, dbg_buff); -err: + WRITE_AND_RELEASE_SCRATCH_BUFF(&scratch_buff, dbg_buff); return rc; } -static int collect_mbox_log(struct cudbg_init *pdbg_init, - struct cudbg_buffer *dbg_buff, - struct cudbg_error *cudbg_err) +int cudbg_collect_mbox_log(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err) { -#ifdef notyet - struct cudbg_buffer scratch_buff; - struct cudbg_mbox_log *mboxlog = NULL; - struct mbox_cmd_log *log = NULL; - struct mbox_cmd *entry; - u64 flit; - u32 size; - unsigned int entry_idx; - int i, k, rc; - u16 mbox_cmds; - - if (pdbg_init->dbg_params[CUDBG_MBOX_LOG_PARAM].u.mboxlog_param.log) { - log = pdbg_init->dbg_params[CUDBG_MBOX_LOG_PARAM].u. - mboxlog_param.log; - mbox_cmds = pdbg_init->dbg_params[CUDBG_MBOX_LOG_PARAM].u. 
- mboxlog_param.mbox_cmds; - } else { - if (pdbg_init->verbose) - pdbg_init->print("Mbox log is not requested\n"); - return CUDBG_STATUS_ENTITY_NOT_REQUESTED; - } - - size = sizeof(struct cudbg_mbox_log) * mbox_cmds; - scratch_buff.size = size; - rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff); - if (rc) - goto err; - - mboxlog = (struct cudbg_mbox_log *)scratch_buff.data; - - for (k = 0; k < mbox_cmds; k++) { - entry_idx = log->cursor + k; - if (entry_idx >= log->size) - entry_idx -= log->size; - entry = mbox_cmd_log_entry(log, entry_idx); - - /* skip over unused entries */ - if (entry->timestamp == 0) - continue; - - memcpy(&mboxlog->entry, entry, sizeof(struct mbox_cmd)); - - for (i = 0; i < MBOX_LEN / 8; i++) { - flit = entry->cmd[i]; - mboxlog->hi[i] = (u32)(flit >> 32); - mboxlog->lo[i] = (u32)flit; - } - - mboxlog++; - } - - rc = write_compression_hdr(&scratch_buff, dbg_buff); - if (rc) - goto err1; - - rc = compress_buff(&scratch_buff, dbg_buff); - -err1: - release_scratch_buff(&scratch_buff, dbg_buff); -err: - return rc; -#endif return (CUDBG_STATUS_NOT_IMPLEMENTED); } -static int collect_pbt_tables(struct cudbg_init *pdbg_init, - struct cudbg_buffer *dbg_buff, - struct cudbg_error *cudbg_err) +int cudbg_collect_pbt_tables(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err) { - struct cudbg_buffer scratch_buff; struct adapter *padap = pdbg_init->adap; struct cudbg_pbt_tables *pbt = NULL; - u32 size; - u32 addr; + struct cudbg_buffer scratch_buff; int i, rc; + u32 addr; - size = sizeof(struct cudbg_pbt_tables); - scratch_buff.size = size; - - rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff); - if (rc) - goto err; - + GET_SCRATCH_BUFF(dbg_buff, sizeof(*pbt), &scratch_buff); pbt = (struct cudbg_pbt_tables *)scratch_buff.data; - /* PBT dynamic entries */ addr = CUDBG_CHAC_PBT_ADDR; for (i = 0; i < CUDBG_PBT_DYNAMIC_ENTRIES; i++) { rc = cim_ha_rreg(padap, addr + (i * 4), &pbt->pbt_dynamic[i]); if (rc) { - if (pdbg_init->verbose) - pdbg_init->print("BUSY timeout reading" + pdbg_init->print("BUSY timeout reading" "CIM_HOST_ACC_CTRL\n"); goto err1; } } /* PBT static entries */ - /* static entries start when bit 6 is set */ addr = CUDBG_CHAC_PBT_ADDR + (1 << 6); for (i = 0; i < CUDBG_PBT_STATIC_ENTRIES; i++) { rc = cim_ha_rreg(padap, addr + (i * 4), &pbt->pbt_static[i]); if (rc) { - if (pdbg_init->verbose) - pdbg_init->print("BUSY timeout reading" + pdbg_init->print("BUSY timeout reading" "CIM_HOST_ACC_CTRL\n"); goto err1; } @@ -3184,8 +3066,7 @@ static int collect_pbt_tables(struct cudbg_init *pdbg_init, for (i = 0; i < CUDBG_LRF_ENTRIES; i++) { rc = cim_ha_rreg(padap, addr + (i * 4), &pbt->lrf_table[i]); if (rc) { - if (pdbg_init->verbose) - pdbg_init->print("BUSY timeout reading" + pdbg_init->print("BUSY timeout reading" "CIM_HOST_ACC_CTRL\n"); goto err1; } @@ -3196,45 +3077,32 @@ static int collect_pbt_tables(struct cudbg_init *pdbg_init, for (i = 0; i < CUDBG_PBT_DATA_ENTRIES; i++) { rc = cim_ha_rreg(padap, addr + (i * 4), &pbt->pbt_data[i]); if (rc) { - if (pdbg_init->verbose) - pdbg_init->print("BUSY timeout reading" + pdbg_init->print("BUSY timeout reading" "CIM_HOST_ACC_CTRL\n"); goto err1; } } - - rc = write_compression_hdr(&scratch_buff, dbg_buff); - if (rc) - goto err1; - - rc = compress_buff(&scratch_buff, dbg_buff); - + WRITE_AND_COMPRESS_SCRATCH_BUFF(&scratch_buff, dbg_buff); err1: release_scratch_buff(&scratch_buff, dbg_buff); -err: return rc; } -static int collect_pm_indirect(struct cudbg_init 
*pdbg_init, - struct cudbg_buffer *dbg_buff, - struct cudbg_error *cudbg_err) +int cudbg_collect_pm_indirect(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err) { - struct cudbg_buffer scratch_buff; struct adapter *padap = pdbg_init->adap; + struct cudbg_buffer scratch_buff; struct ireg_buf *ch_pm; - u32 size; int i, rc, n; + u32 size; n = sizeof(t5_pm_rx_array) / (4 * sizeof(u32)); size = sizeof(struct ireg_buf) * n * 2; - scratch_buff.size = size; - - rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff); - if (rc) - goto err; - + GET_SCRATCH_BUFF(dbg_buff, size, &scratch_buff); ch_pm = (struct ireg_buf *)scratch_buff.data; - + cudbg_access_lock_acquire(pdbg_init); /*PM_RX*/ for (i = 0; i < n; i++) { struct ireg_field *pm_pio = &ch_pm->tp_pio; @@ -3244,14 +3112,12 @@ static int collect_pm_indirect(struct cudbg_init *pdbg_init, pm_pio->ireg_data = t5_pm_rx_array[i][1]; pm_pio->ireg_local_offset = t5_pm_rx_array[i][2]; pm_pio->ireg_offset_range = t5_pm_rx_array[i][3]; - t4_read_indirect(padap, pm_pio->ireg_addr, pm_pio->ireg_data, buff, pm_pio->ireg_offset_range, pm_pio->ireg_local_offset); - ch_pm++; } @@ -3265,55 +3131,80 @@ static int collect_pm_indirect(struct cudbg_init *pdbg_init, pm_pio->ireg_data = t5_pm_tx_array[i][1]; pm_pio->ireg_local_offset = t5_pm_tx_array[i][2]; pm_pio->ireg_offset_range = t5_pm_tx_array[i][3]; - t4_read_indirect(padap, pm_pio->ireg_addr, pm_pio->ireg_data, buff, pm_pio->ireg_offset_range, pm_pio->ireg_local_offset); - ch_pm++; } + cudbg_access_lock_release(pdbg_init); + WRITE_AND_RELEASE_SCRATCH_BUFF(&scratch_buff, dbg_buff); + return rc; +} - rc = write_compression_hdr(&scratch_buff, dbg_buff); - if (rc) - goto err1; +static int calculate_max_tids(struct cudbg_init *pdbg_init) +{ + struct adapter *padap = pdbg_init->adap; + u32 max_tids, value, hash_base; - rc = compress_buff(&scratch_buff, dbg_buff); + /* Check whether hash is enabled and calculate the max tids */ + value = t4_read_reg(padap, A_LE_DB_CONFIG); + if ((value >> S_HASHEN) & 1) { + value = t4_read_reg(padap, A_LE_DB_HASH_CONFIG); + if (chip_id(padap) > CHELSIO_T5) { + hash_base = t4_read_reg(padap, + A_T6_LE_DB_HASH_TID_BASE); + max_tids = (value & 0xFFFFF) + hash_base; + } else { + hash_base = t4_read_reg(padap, A_LE_DB_TID_HASHBASE); + max_tids = (1 << G_HASHTIDSIZE(value)) + + (hash_base >> 2); + } + } else { + if (chip_id(padap) > CHELSIO_T5) { + value = t4_read_reg(padap, A_LE_DB_CONFIG); + max_tids = (value & F_ASLIPCOMPEN) ? 
+ CUDBG_MAX_TID_COMP_EN : + CUDBG_MAX_TID_COMP_DIS; + } else { + max_tids = CUDBG_MAX_TCAM_TID; + } + } -err1: - release_scratch_buff(&scratch_buff, dbg_buff); -err: - return rc; + if (chip_id(padap) > CHELSIO_T5) + max_tids += CUDBG_T6_CLIP; + return max_tids; } -static int collect_tid(struct cudbg_init *pdbg_init, - struct cudbg_buffer *dbg_buff, - struct cudbg_error *cudbg_err) -{ +static u8 cudbg_letcam_get_regions(struct cudbg_init *pdbg_init, + struct cudbg_letcam *letcam, + struct cudbg_letcam_region *le_region); - struct cudbg_buffer scratch_buff; +int cudbg_collect_tid(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err) +{ + struct cudbg_letcam_region *le_region = NULL, *tmp_region; + struct cudbg_buffer scratch_buff, region_buff; struct adapter *padap = pdbg_init->adap; - struct tid_info_region *tid; + struct cudbg_letcam letcam = {{ 0 }}; struct tid_info_region_rev1 *tid1; - u32 para[7], val[7]; - u32 mbox, pf; + struct tid_info_region *tid; + u32 para[2], val[2], pf; int rc; + u8 i; - scratch_buff.size = sizeof(struct tid_info_region_rev1); - - rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff); - if (rc) - goto err; + GET_SCRATCH_BUFF(dbg_buff, sizeof(*tid1), &scratch_buff); #define FW_PARAM_DEV_A(param) \ (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \ V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param)) #define FW_PARAM_PFVF_A(param) \ (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \ - V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param)| \ + V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param) | \ V_FW_PARAMS_PARAM_Y(0) | \ V_FW_PARAMS_PARAM_Z(0)) #define MAX_ATIDS_A 8192U @@ -3325,100 +3216,88 @@ static int collect_tid(struct cudbg_init *pdbg_init, tid1->ver_hdr.size = sizeof(struct tid_info_region_rev1) - sizeof(struct cudbg_ver_hdr); - if (is_t5(padap)) { - tid->hash_base = t4_read_reg(padap, A_LE_DB_TID_HASHBASE); - tid1->tid_start = 0; - } else if (is_t6(padap)) { - tid->hash_base = t4_read_reg(padap, A_T6_LE_DB_HASH_TID_BASE); - tid1->tid_start = t4_read_reg(padap, A_LE_DB_ACTIVE_TABLE_START_INDEX); - } - tid->le_db_conf = t4_read_reg(padap, A_LE_DB_CONFIG); - para[0] = FW_PARAM_PFVF_A(FILTER_START); - para[1] = FW_PARAM_PFVF_A(FILTER_END); - para[2] = FW_PARAM_PFVF_A(ACTIVE_FILTER_START); - para[3] = FW_PARAM_PFVF_A(ACTIVE_FILTER_END); - para[4] = FW_PARAM_DEV_A(NTID); - para[5] = FW_PARAM_PFVF_A(SERVER_START); - para[6] = FW_PARAM_PFVF_A(SERVER_END); + letcam.max_tid = calculate_max_tids(pdbg_init); + tid->ntids = letcam.max_tid; + if (chip_id(padap) > CHELSIO_T5) + tid->ntids -= CUDBG_T6_CLIP; + + /* Fill ATIDS */ + tid->natids = min(tid->ntids / 2, MAX_ATIDS_A); + letcam.region_hdr_size = sizeof(struct cudbg_letcam_region); + letcam.tid_data_hdr_size = sizeof(struct cudbg_tid_data); + + region_buff.size = LE_ET_TCAM_MAX * letcam.region_hdr_size; + GET_SCRATCH_BUFF(dbg_buff, CUDBG_CHUNK_SIZE, ®ion_buff); + le_region = (struct cudbg_letcam_region *)(region_buff.data); + letcam.nregions = cudbg_letcam_get_regions(pdbg_init, &letcam, + le_region); + + /* Update tid regions range */ + tmp_region = le_region; + for (i = 0; i < LE_ET_TCAM_MAX; i++) { + switch (tmp_region->type) { + case LE_ET_TCAM_HPFILTER: + tid->hpftid_base = tmp_region->start; + tid->nhpftids = tmp_region->nentries; + break; - rc = begin_synchronized_op(padap, NULL, SLEEP_OK | INTR_OK, "t4cudq"); - if (rc) - goto err; - mbox = padap->mbox; - pf = padap->pf; - rc = t4_query_params(padap, mbox, pf, 0, 7, para, val); - if (rc < 0) { - if (rc == -FW_EPERM) { - /* It 
looks like we don't have permission to use - * padap->mbox. - * - * Try mbox 4. If it works, we'll continue to - * collect the rest of tid info from mbox 4. - * Else, quit trying to collect tid info. - */ - mbox = 4; - pf = 4; - rc = t4_query_params(padap, mbox, pf, 0, 7, para, val); - if (rc < 0) { - cudbg_err->sys_err = rc; - goto err1; - } - } else { - cudbg_err->sys_err = rc; - goto err1; - } - } + case LE_ET_TCAM_CON: + tid->aftid_base = tmp_region->start; + tid->aftid_end = tmp_region->nentries; + break; - tid->ftid_base = val[0]; - tid->nftids = val[1] - val[0] + 1; - /*active filter region*/ - if (val[2] != val[3]) { -#ifdef notyet - tid->flags |= FW_OFLD_CONN; -#endif - tid->aftid_base = val[2]; - tid->aftid_end = val[3]; - } - tid->ntids = val[4]; - tid->natids = min_t(u32, tid->ntids / 2, MAX_ATIDS_A); - tid->stid_base = val[5]; - tid->nstids = val[6] - val[5] + 1; - - if (chip_id(padap) >= CHELSIO_T6) { - para[0] = FW_PARAM_PFVF_A(HPFILTER_START); - para[1] = FW_PARAM_PFVF_A(HPFILTER_END); - rc = t4_query_params(padap, mbox, pf, 0, 2, para, val); - if (rc < 0) { - cudbg_err->sys_err = rc; - goto err1; - } + case LE_ET_TCAM_SERVER: + tid->stid_base = tmp_region->start; + tid->nstids = tmp_region->nentries; + break; + + case LE_ET_TCAM_FILTER: + tid->ftid_base = tmp_region->start; + tid->nftids = tmp_region->nentries; + break; + + case LE_ET_TCAM_CLIP: + tid1->clip_base = tmp_region->start; + tid1->nclip = tmp_region->nentries; + break; + + case LE_ET_TCAM_ROUTING: + tid1->route_base = tmp_region->start; + tid1->nroute = tmp_region->nentries; + break; - tid->hpftid_base = val[0]; - tid->nhpftids = val[1] - val[0] + 1; + case LE_ET_HASH_CON: + tid->hash_base = tmp_region->start; + tid1->nhash = tmp_region->nentries; + break; + } + tmp_region = (struct cudbg_letcam_region *) + (((u8 *)tmp_region) + + letcam.region_hdr_size); } - if (chip_id(padap) <= CHELSIO_T5) { - tid->sb = t4_read_reg(padap, A_LE_DB_SERVER_INDEX) / 4; - tid->hash_base /= 4; - } else - tid->sb = t4_read_reg(padap, A_LE_DB_SRVR_START_INDEX); + /* Free up region_buff */ + release_scratch_buff(®ion_buff, dbg_buff); /*UO context range*/ para[0] = FW_PARAM_PFVF_A(ETHOFLD_START); para[1] = FW_PARAM_PFVF_A(ETHOFLD_END); - rc = t4_query_params(padap, mbox, pf, 0, 2, para, val); - if (rc < 0) { - cudbg_err->sys_err = rc; - goto err1; - } + for (pf = 0; pf <= M_PCIE_FW_MASTER; pf++) { + rc = t4_query_params(padap, padap->mbox, pf, 0, 2, para, val); + if (rc || !val[0] || !val[1]) + continue; + + if (!tid->nuotids) + tid->uotid_base = val[0]; + else + tid->uotid_base = min(tid->uotid_base, val[0]); - if (val[0] != val[1]) { - tid->uotid_base = val[0]; - tid->nuotids = val[1] - val[0] + 1; + tid->nuotids += val[1] - val[0] + 1; } + tid->IP_users = t4_read_reg(padap, A_LE_DB_ACT_CNT_IPV4); tid->IPv6_users = t4_read_reg(padap, A_LE_DB_ACT_CNT_IPV6); @@ -3426,48 +3305,26 @@ static int collect_tid(struct cudbg_init *pdbg_init, #undef FW_PARAM_DEV_A #undef MAX_ATIDS_A - rc = write_compression_hdr(&scratch_buff, dbg_buff); - if (rc) - goto err1; - rc = compress_buff(&scratch_buff, dbg_buff); - + WRITE_AND_COMPRESS_SCRATCH_BUFF(&scratch_buff, dbg_buff); err1: - end_synchronized_op(padap, 0); release_scratch_buff(&scratch_buff, dbg_buff); -err: return rc; } -static int collect_tx_rate(struct cudbg_init *pdbg_init, - struct cudbg_buffer *dbg_buff, - struct cudbg_error *cudbg_err) +int cudbg_collect_tx_rate(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err) { - struct cudbg_buffer 
scratch_buff; struct adapter *padap = pdbg_init->adap; + struct cudbg_buffer scratch_buff; struct tx_rate *tx_rate; - u32 size; int rc; - size = sizeof(struct tx_rate); - scratch_buff.size = size; - - rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff); - if (rc) - goto err; - + GET_SCRATCH_BUFF(dbg_buff, sizeof(*tx_rate), &scratch_buff); tx_rate = (struct tx_rate *)scratch_buff.data; t4_get_chan_txrate(padap, tx_rate->nrate, tx_rate->orate); tx_rate->nchan = padap->chip_params->nchan; - - rc = write_compression_hdr(&scratch_buff, dbg_buff); - if (rc) - goto err1; - - rc = compress_buff(&scratch_buff, dbg_buff); - -err1: - release_scratch_buff(&scratch_buff, dbg_buff); -err: + WRITE_AND_RELEASE_SCRATCH_BUFF(&scratch_buff, dbg_buff); return rc; } @@ -3478,7 +3335,8 @@ static inline void cudbg_tcamxy2valmask(u64 x, u64 y, u8 *addr, u64 *mask) memcpy(addr, (char *)&y + 2, ETH_ALEN); } -static void mps_rpl_backdoor(struct adapter *padap, struct fw_ldst_mps_rplc *mps_rplc) +static void cudbg_mps_rpl_backdoor(struct adapter *padap, + struct fw_ldst_mps_rplc *mps_rplc) { if (is_t5(padap)) { mps_rplc->rplc255_224 = htonl(t4_read_reg(padap, @@ -3505,27 +3363,22 @@ static void mps_rpl_backdoor(struct adapter *padap, struct fw_ldst_mps_rplc *mps mps_rplc->rplc31_0 = htonl(t4_read_reg(padap, A_MPS_VF_RPLCT_MAP0)); } -static int collect_mps_tcam(struct cudbg_init *pdbg_init, - struct cudbg_buffer *dbg_buff, - struct cudbg_error *cudbg_err) +int cudbg_collect_mps_tcam(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err) { - struct cudbg_buffer scratch_buff; struct adapter *padap = pdbg_init->adap; struct cudbg_mps_tcam *tcam = NULL; u32 size = 0, i, n, total_size = 0; - u32 ctl, data2; + struct cudbg_buffer scratch_buff; u64 tcamy, tcamx, val; + u32 ctl, data2; int rc; n = padap->chip_params->mps_tcam_size; size = sizeof(struct cudbg_mps_tcam) * n; - scratch_buff.size = size; - - rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff); - if (rc) - goto err; + GET_SCRATCH_BUFF(dbg_buff, size, &scratch_buff); memset(scratch_buff.data, 0, size); - tcam = (struct cudbg_mps_tcam *)scratch_buff.data; for (i = 0; i < n; i++) { if (chip_id(padap) >= CHELSIO_T6) { @@ -3559,8 +3412,8 @@ static int collect_mps_tcam(struct cudbg_init *pdbg_init, if (tcam->lookup_type && (tcam->lookup_type != M_DATALKPTYPE)) { /* Inner header VNI */ - tcam->vniy = ((data2 & F_DATAVIDH2) << 23) | - (G_DATAVIDH1(data2) << 16) | + tcam->vniy = (((data2 & F_DATAVIDH2) | + (G_DATAVIDH1(data2))) << 16) | G_VIDL(val); tcam->dip_hit = data2 & F_DATADIPHIT; } else { @@ -3580,8 +3433,8 @@ static int collect_mps_tcam(struct cudbg_init *pdbg_init, if (tcam->lookup_type && (tcam->lookup_type != M_DATALKPTYPE)) { /* Inner header VNI mask */ - tcam->vnix = ((data2 & F_DATAVIDH2) << 23) | - (G_DATAVIDH1(data2) << 16) | + tcam->vnix = (((data2 & F_DATAVIDH2) | + (G_DATAVIDH1(data2))) << 16) | G_VIDL(val); } } else { @@ -3618,16 +3471,13 @@ static int collect_mps_tcam(struct cudbg_init *pdbg_init, htons(V_FW_LDST_CMD_FID(FW_LDST_MPS_RPLC) | V_FW_LDST_CMD_IDX(i)); - rc = begin_synchronized_op(padap, NULL, - SLEEP_OK | INTR_OK, "t4cudm"); - if (rc == 0) { + if (is_fw_attached(pdbg_init)) { rc = t4_wr_mbox(padap, padap->mbox, &ldst_cmd, sizeof(ldst_cmd), &ldst_cmd); - end_synchronized_op(padap, 0); } - if (rc) - mps_rpl_backdoor(padap, &mps_rplc); + if (rc || !is_fw_attached(pdbg_init)) + cudbg_mps_rpl_backdoor(padap, &mps_rplc); else mps_rplc = ldst_cmd.u.mps.rplc; @@ -3636,7 +3486,7 @@ 
static int collect_mps_tcam(struct cudbg_init *pdbg_init, tcam->rplc[2] = ntohl(mps_rplc.rplc95_64); tcam->rplc[3] = ntohl(mps_rplc.rplc127_96); if (padap->chip_params->mps_rplc_size > - CUDBG_MAX_RPLC_SIZE) { + CUDBG_MAX_RPLC_SIZE) { tcam->rplc[4] = ntohl(mps_rplc.rplc159_128); tcam->rplc[5] = ntohl(mps_rplc.rplc191_160); tcam->rplc[6] = ntohl(mps_rplc.rplc223_192); @@ -3659,61 +3509,42 @@ static int collect_mps_tcam(struct cudbg_init *pdbg_init, } scratch_buff.size = total_size; - rc = write_compression_hdr(&scratch_buff, dbg_buff); - if (rc) - goto err1; - - rc = compress_buff(&scratch_buff, dbg_buff); - + WRITE_AND_COMPRESS_SCRATCH_BUFF(&scratch_buff, dbg_buff); err1: scratch_buff.size = size; release_scratch_buff(&scratch_buff, dbg_buff); -err: return rc; } -static int collect_pcie_config(struct cudbg_init *pdbg_init, - struct cudbg_buffer *dbg_buff, - struct cudbg_error *cudbg_err) +int cudbg_collect_pcie_config(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err) { - struct cudbg_buffer scratch_buff; struct adapter *padap = pdbg_init->adap; + struct cudbg_buffer scratch_buff; u32 size, *value, j; int i, rc, n; size = sizeof(u32) * NUM_PCIE_CONFIG_REGS; n = sizeof(t5_pcie_config_array) / (2 * sizeof(u32)); - scratch_buff.size = size; - - rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff); - if (rc) - goto err; - + GET_SCRATCH_BUFF(dbg_buff, size, &scratch_buff); value = (u32 *)scratch_buff.data; for (i = 0; i < n; i++) { for (j = t5_pcie_config_array[i][0]; j <= t5_pcie_config_array[i][1]; j += 4) { - *value++ = t4_hw_pci_read_cfg4(padap, j); + *value = t4_hw_pci_read_cfg4(padap, j); + value++; } } - - rc = write_compression_hdr(&scratch_buff, dbg_buff); - if (rc) - goto err1; - - rc = compress_buff(&scratch_buff, dbg_buff); - -err1: - release_scratch_buff(&scratch_buff, dbg_buff); -err: + WRITE_AND_RELEASE_SCRATCH_BUFF(&scratch_buff, dbg_buff); return rc; } static int cudbg_read_tid(struct cudbg_init *pdbg_init, u32 tid, struct cudbg_tid_data *tid_data) { - int i, cmd_retry = 8; struct adapter *padap = pdbg_init->adap; + int i, cmd_retry = 8; u32 val; /* Fill REQ_DATA regs with 0's */ @@ -3738,9 +3569,9 @@ static int cudbg_read_tid(struct cudbg_init *pdbg_init, u32 tid, val = (val >> S_DBGICMDBUSY) & 1; cmd_retry--; if (!cmd_retry) { - if (pdbg_init->verbose) - pdbg_init->print("%s(): Timeout waiting for non-busy\n", - __func__); + pdbg_init->print("%s(): Timeout waiting for " + "non-busy tid: 0x%x\n", + __func__, tid); return CUDBG_SYSTEM_ERROR; } } @@ -3750,8 +3581,7 @@ static int cudbg_read_tid(struct cudbg_init *pdbg_init, u32 tid, val = t4_read_reg(padap, A_LE_DB_DBGI_RSP_STATUS); tid_data->dbig_rsp_stat = val; if (!(val & 1)) { - if (pdbg_init->verbose) - pdbg_init->print("%s(): DBGI command failed\n", __func__); + pdbg_init->print("%s(): DBGI command failed\n", __func__); return CUDBG_SYSTEM_ERROR; } @@ -3762,104 +3592,222 @@ static int cudbg_read_tid(struct cudbg_init *pdbg_init, u32 tid, (i << 2)); tid_data->tid = tid; + return 0; +} + +static int cudbg_letcam_cmp(const void *a, const void *b) +{ + const struct cudbg_letcam_region *rega = + (const struct cudbg_letcam_region *)a; + const struct cudbg_letcam_region *regb = + (const struct cudbg_letcam_region *)b; + + if (rega->start < regb->start) + return -1; + if (rega->start > regb->start) + return 1; + + if (rega->type < regb->type) + return -1; + if (rega->type > regb->type) + return 1; return 0; } -static int collect_le_tcam(struct cudbg_init *pdbg_init, - struct 
cudbg_buffer *dbg_buff, - struct cudbg_error *cudbg_err) +static u8 cudbg_letcam_get_regions(struct cudbg_init *pdbg_init, + struct cudbg_letcam *letcam, + struct cudbg_letcam_region *le_region) { - struct cudbg_buffer scratch_buff; + struct cudbg_letcam_region *cur_region, *next_region; struct adapter *padap = pdbg_init->adap; - struct cudbg_tcam tcam_region = {0}; - struct cudbg_tid_data *tid_data = NULL; - u32 value, bytes = 0, bytes_left = 0; - u32 i; - int rc, size; + u32 value, *reg_arr; + u8 i, n = 0; /* Get the LE regions */ - value = t4_read_reg(padap, A_LE_DB_TID_HASHBASE); /* Get hash base - index */ - tcam_region.tid_hash_base = value; + reg_arr = chip_id(padap) > CHELSIO_T5 ? letcam_region_reg_array : + t5_letcam_region_reg_array; + + cur_region = le_region; + for (i = 0; i < LE_ET_TCAM_MAX; i++) { + if (!reg_arr[i]) + continue; - /* Get routing table index */ - value = t4_read_reg(padap, A_LE_DB_ROUTING_TABLE_INDEX); - tcam_region.routing_start = value; + /* Only consider HASH region if it's enabled */ + if (i == LE_ET_HASH_CON) { + value = t4_read_reg(padap, A_LE_DB_CONFIG); + if (!(value & F_HASHEN)) + continue; + } - /*Get clip table index */ - value = t4_read_reg(padap, A_LE_DB_CLIP_TABLE_INDEX); - tcam_region.clip_start = value; + /* Only consider regions that are enabled */ + value = t4_read_reg(padap, reg_arr[i]); - /* Get filter table index */ - value = t4_read_reg(padap, A_LE_DB_FILTER_TABLE_INDEX); - tcam_region.filter_start = value; + /* Each TID occupies 4 entries on T5 TCAM. */ + if (chip_id(padap) < CHELSIO_T6) + value >>= 2; - /* Get server table index */ - value = t4_read_reg(padap, A_LE_DB_SERVER_INDEX); - tcam_region.server_start = value; + if (value >= letcam->max_tid) + continue; - /* Check whether hash is enabled and calculate the max tids */ - value = t4_read_reg(padap, A_LE_DB_CONFIG); - if ((value >> S_HASHEN) & 1) { - value = t4_read_reg(padap, A_LE_DB_HASH_CONFIG); - if (chip_id(padap) > CHELSIO_T5) - tcam_region.max_tid = (value & 0xFFFFF) + - tcam_region.tid_hash_base; - else { /* for T5 */ - value = G_HASHTIDSIZE(value); - value = 1 << value; - tcam_region.max_tid = value + - tcam_region.tid_hash_base; - } - } else /* hash not enabled */ - tcam_region.max_tid = CUDBG_MAX_TCAM_TID; + cur_region->type = i; + cur_region->start = value; + cur_region++; + n++; + } + + /* T5 doesn't have any register to read active region start + * since it always start from 0. So, explicitly add active + * region entry for T5 here. 
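Note: the size of each LE TCAM region is not read from hardware; it is derived after sorting the regions by their start index, which is what the qsort and the loop that follows do. Below is a standalone sketch of just that derivation. The structure is a trimmed stand-in carrying only the fields the calculation needs; it is not the driver's struct cudbg_letcam_region.

        /*
         * Minimal sketch: sort regions by start index, then each region's
         * entry count is the gap to the next region's start; the last
         * region extends to max_tid.
         */
        #include <stdint.h>
        #include <stdlib.h>

        struct letcam_region_sketch {
                uint8_t  type;       /* region identifier (placeholder) */
                uint32_t start;      /* first TID index in this region */
                uint32_t nentries;   /* filled in by fill_region_sizes() */
        };

        static int
        region_cmp(const void *a, const void *b)
        {
                const struct letcam_region_sketch *ra = a, *rb = b;

                if (ra->start != rb->start)
                        return (ra->start < rb->start ? -1 : 1);
                return (ra->type < rb->type ? -1 :
                    (ra->type > rb->type ? 1 : 0));
        }

        static void
        fill_region_sizes(struct letcam_region_sketch *r, int n,
            uint32_t max_tid)
        {
                int i;

                qsort(r, n, sizeof(*r), region_cmp);
                for (i = 0; i < n; i++)
                        r[i].nentries =
                            (i == n - 1 ? max_tid : r[i + 1].start) -
                            r[i].start;
        }

For example, regions starting at 0, 1024 and 4096 with max_tid 8192 come out as 1024, 3072 and 4096 entries respectively.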
+ */ + if (chip_id(padap) < CHELSIO_T6) { + cur_region->type = LE_ET_TCAM_CON; + cur_region->start = 0; + cur_region++; + n++; + } + + qsort(le_region, n, sizeof(struct cudbg_letcam_region), cudbg_letcam_cmp); + + cur_region = le_region; + next_region = le_region + 1; + for (i = 0; i < n; i++, cur_region++, next_region++) { + if (i == n - 1) + cur_region->nentries = letcam->max_tid - + cur_region->start; + else + cur_region->nentries = next_region->start - + cur_region->start; + } + + return n; +} + +int cudbg_collect_le_tcam(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err) +{ + struct cudbg_letcam *out_letcam, letcam = {{ 0 }}; + struct cudbg_letcam_region *out_region, *le_region; + struct cudbg_buffer scratch_buff, region_buff; + struct adapter *padap = pdbg_init->adap; + struct cudbg_tid_data *tid_data = NULL; + u32 bytes = 0, bytes_left = 0; + u32 i, size; + u8 type; + int rc; - size = sizeof(struct cudbg_tid_data) * tcam_region.max_tid; - size += sizeof(struct cudbg_tcam); + letcam.ver_hdr.signature = CUDBG_ENTITY_SIGNATURE; + letcam.ver_hdr.revision = CUDBG_LETCAM_REV; + letcam.ver_hdr.size = sizeof(struct cudbg_letcam) - + sizeof(struct cudbg_ver_hdr); + + letcam.max_tid = calculate_max_tids(pdbg_init); + letcam.region_hdr_size = sizeof(struct cudbg_letcam_region); + letcam.tid_data_hdr_size = sizeof(struct cudbg_tid_data); + + /* Get separate region scratch buffer to store region info. + * The final scratch buffer allocated later will be freed up + * after every CUDBG_CHUNK_SIZE max is filled up and written. + * This causes the region info to be lost. Hence, the reason + * to allocate a separate buffer for storing region info. + * + * This region info is needed below for determining which + * region the TID belongs to and skip subsequent TIDs for + * IPv6 entries. + */ + region_buff.size = LE_ET_TCAM_MAX * letcam.region_hdr_size; + GET_SCRATCH_BUFF(dbg_buff, CUDBG_CHUNK_SIZE, ®ion_buff); + le_region = (struct cudbg_letcam_region *)(region_buff.data); + letcam.nregions = cudbg_letcam_get_regions(pdbg_init, &letcam, le_region); + + size = sizeof(letcam); + size += letcam.nregions * letcam.region_hdr_size; + size += letcam.max_tid * letcam.tid_data_hdr_size; scratch_buff.size = size; - rc = write_compression_hdr(&scratch_buff, dbg_buff); + rc = write_compression_hdr(pdbg_init, &scratch_buff, dbg_buff); if (rc) goto err; - rc = get_scratch_buff(dbg_buff, CUDBG_CHUNK_SIZE, &scratch_buff); - if (rc) - goto err; + /* LETCAM entity is stored in following format: + * + * ==================================================================== + * | letcam_hdr | letcam_region_0 |...| letcam_region_n | letcam_data | + * ==================================================================== + * + * Get scratch buffer to store everything above. This buffer + * will be allocated after the region scratch buffer allocated + * earlier above. 
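Note: the flattened layout described in the comment above (letcam header, then nregions region headers, then TID records) is easiest to see from the consumer side. The sketch below is illustrative only: the header structure is a trimmed stand-in, and hdr_size stands for the size of the on-wire letcam header, which a parser would take from the dump's version header rather than from sizeof().

        #include <stdint.h>

        struct letcam_hdr_sketch {
                uint32_t nregions;          /* region headers that follow */
                uint32_t region_hdr_size;   /* producer's region header size */
                uint32_t tid_data_hdr_size; /* producer's TID record size */
        };

        /* Region headers start right after the letcam header. */
        static const uint8_t *
        letcam_regions(const uint8_t *buf, uint32_t hdr_size)
        {
                return (buf + hdr_size);
        }

        /* The i-th TID record follows the last region header. */
        static const uint8_t *
        letcam_tid_entry(const struct letcam_hdr_sketch *hdr,
            const uint8_t *buf, uint32_t hdr_size, uint32_t i)
        {
                return (buf + hdr_size +
                    hdr->nregions * hdr->region_hdr_size +
                    i * hdr->tid_data_hdr_size);
        }

Because record and header sizes come from the dump itself, a newer producer can grow the structures without breaking older parsers that walk the layout this way.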
+ */ + GET_SCRATCH_BUFF(dbg_buff, CUDBG_CHUNK_SIZE, &scratch_buff); + bytes_left = CUDBG_CHUNK_SIZE; + bytes = 0; - memcpy(scratch_buff.data, &tcam_region, sizeof(struct cudbg_tcam)); + out_letcam = (struct cudbg_letcam *)scratch_buff.data; + memcpy(out_letcam, &letcam, sizeof(letcam)); + bytes_left -= sizeof(letcam); + bytes += sizeof(letcam); - tid_data = (struct cudbg_tid_data *)(((struct cudbg_tcam *) - scratch_buff.data) + 1); - bytes_left = CUDBG_CHUNK_SIZE - sizeof(struct cudbg_tcam); - bytes = sizeof(struct cudbg_tcam); + out_region = (struct cudbg_letcam_region *)(out_letcam + 1); + memcpy(out_region, le_region, letcam.nregions * letcam.region_hdr_size); + bytes_left -= letcam.nregions * letcam.region_hdr_size; + bytes += letcam.nregions * letcam.region_hdr_size; + + tid_data = (struct cudbg_tid_data *)(out_region + letcam.nregions); /* read all tid */ - for (i = 0; i < tcam_region.max_tid; i++) { + for (i = 0; i < letcam.max_tid;) { if (bytes_left < sizeof(struct cudbg_tid_data)) { scratch_buff.size = bytes; - rc = compress_buff(&scratch_buff, dbg_buff); + rc = compress_buff(pdbg_init, &scratch_buff, dbg_buff); if (rc) goto err1; scratch_buff.size = CUDBG_CHUNK_SIZE; release_scratch_buff(&scratch_buff, dbg_buff); /* new alloc */ - rc = get_scratch_buff(dbg_buff, CUDBG_CHUNK_SIZE, - &scratch_buff); - if (rc) - goto err; - + GET_SCRATCH_BUFF(dbg_buff, CUDBG_CHUNK_SIZE, + &scratch_buff); tid_data = (struct cudbg_tid_data *)(scratch_buff.data); bytes_left = CUDBG_CHUNK_SIZE; bytes = 0; } rc = cudbg_read_tid(pdbg_init, i, tid_data); - if (rc) { - cudbg_err->sys_err = rc; - goto err1; + /* We have already written the letcam header, + * so there's no way to go back and undo it. + * Instead, mark current tid larger than + * max_tid. When parser encounters the larger + * tid value, it'll break immediately. 
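Note: because the letcam header (and with it max_tid) has already been written by the time a TID read fails, the failure is signalled in-band by emitting one record whose tid equals max_tid and stopping, as the comment above explains. A consumer-side sketch of that convention, using a trimmed stand-in record type rather than the driver's struct cudbg_tid_data:

        #include <stdint.h>
        #include <stdio.h>

        struct tid_record_sketch {
                uint32_t tid;
                /* DBGI response words would follow in the real record. */
        };

        /* Count usable records; stop at the first out-of-range tid. */
        static uint32_t
        count_valid_tids(const struct tid_record_sketch *rec, uint32_t nrec,
            uint32_t max_tid)
        {
                uint32_t i;

                for (i = 0; i < nrec; i++) {
                        if (rec[i].tid >= max_tid) {
                                printf("partial LE TCAM dump: stopped at "
                                    "record %u\n", (unsigned)i);
                                break;
                        }
                }
                return (i);
        }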
+ */ + tid_data->tid = letcam.max_tid; + bytes_left -= sizeof(struct cudbg_tid_data); + bytes += sizeof(struct cudbg_tid_data); + cudbg_err->sys_warn = CUDBG_STATUS_PARTIAL_DATA; + goto stop; + } + + /* IPv6 take 2 or more tids based on region */ + if (cudbg_letcam_is_ipv6_entry(tid_data, &letcam, le_region)) { + type = cudbg_letcam_get_type(tid_data->tid, &letcam, + le_region); + if (chip_id(padap) > CHELSIO_T5) { + /* T6 CLIP TCAM IPv6 takes 4 entries */ + if (type == LE_ET_TCAM_CLIP) + i += 4; + else + i += 2; + } else { + /* T5 Filter region IPv6 takes 4 entries */ + if (type == LE_ET_TCAM_FILTER) + i += 4; + else + i += 2; + } + } else { + i++; } tid_data++; @@ -3867,45 +3815,41 @@ static int collect_le_tcam(struct cudbg_init *pdbg_init, bytes += sizeof(struct cudbg_tid_data); } +stop: if (bytes) { scratch_buff.size = bytes; - rc = compress_buff(&scratch_buff, dbg_buff); + rc = compress_buff(pdbg_init, &scratch_buff, dbg_buff); } err1: scratch_buff.size = CUDBG_CHUNK_SIZE; release_scratch_buff(&scratch_buff, dbg_buff); + release_scratch_buff(®ion_buff, dbg_buff); err: return rc; } -static int collect_ma_indirect(struct cudbg_init *pdbg_init, - struct cudbg_buffer *dbg_buff, - struct cudbg_error *cudbg_err) +int cudbg_collect_ma_indirect(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err) { - struct cudbg_buffer scratch_buff; struct adapter *padap = pdbg_init->adap; + struct cudbg_buffer scratch_buff; struct ireg_buf *ma_indr = NULL; - u32 size, j; int i, rc, n; + u32 size, j; if (chip_id(padap) < CHELSIO_T6) { if (pdbg_init->verbose) pdbg_init->print("MA indirect available only in T6\n"); - rc = CUDBG_STATUS_ENTITY_NOT_FOUND; - goto err; + return CUDBG_STATUS_ENTITY_NOT_FOUND; } n = sizeof(t6_ma_ireg_array) / (4 * sizeof(u32)); size = sizeof(struct ireg_buf) * n * 2; - scratch_buff.size = size; - - rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff); - if (rc) - goto err; - + GET_SCRATCH_BUFF(dbg_buff, size, &scratch_buff); ma_indr = (struct ireg_buf *)scratch_buff.data; - + cudbg_access_lock_acquire(pdbg_init); for (i = 0; i < n; i++) { struct ireg_field *ma_fli = &ma_indr->tp_pio; u32 *buff = ma_indr->outbuf; @@ -3914,17 +3858,13 @@ static int collect_ma_indirect(struct cudbg_init *pdbg_init, ma_fli->ireg_data = t6_ma_ireg_array[i][1]; ma_fli->ireg_local_offset = t6_ma_ireg_array[i][2]; ma_fli->ireg_offset_range = t6_ma_ireg_array[i][3]; - t4_read_indirect(padap, ma_fli->ireg_addr, ma_fli->ireg_data, buff, ma_fli->ireg_offset_range, ma_fli->ireg_local_offset); - ma_indr++; - } n = sizeof(t6_ma_ireg_array2) / (4 * sizeof(u32)); - for (i = 0; i < n; i++) { struct ireg_field *ma_fli = &ma_indr->tp_pio; u32 *buff = ma_indr->outbuf; @@ -3932,7 +3872,6 @@ static int collect_ma_indirect(struct cudbg_init *pdbg_init, ma_fli->ireg_addr = t6_ma_ireg_array2[i][0]; ma_fli->ireg_data = t6_ma_ireg_array2[i][1]; ma_fli->ireg_local_offset = t6_ma_ireg_array2[i][2]; - for (j = 0; j < t6_ma_ireg_array2[i][3]; j++) { t4_read_indirect(padap, ma_fli->ireg_addr, ma_fli->ireg_data, buff, 1, @@ -3942,46 +3881,32 @@ static int collect_ma_indirect(struct cudbg_init *pdbg_init, } ma_indr++; } - - rc = write_compression_hdr(&scratch_buff, dbg_buff); - if (rc) - goto err1; - - rc = compress_buff(&scratch_buff, dbg_buff); - -err1: - release_scratch_buff(&scratch_buff, dbg_buff); -err: + cudbg_access_lock_release(pdbg_init); + WRITE_AND_RELEASE_SCRATCH_BUFF(&scratch_buff, dbg_buff); return rc; } -static int collect_hma_indirect(struct cudbg_init 
*pdbg_init, +int cudbg_collect_hma_indirect(struct cudbg_init *pdbg_init, struct cudbg_buffer *dbg_buff, struct cudbg_error *cudbg_err) { - struct cudbg_buffer scratch_buff; struct adapter *padap = pdbg_init->adap; + struct cudbg_buffer scratch_buff; struct ireg_buf *hma_indr = NULL; - u32 size; int i, rc, n; + u32 size; if (chip_id(padap) < CHELSIO_T6) { if (pdbg_init->verbose) pdbg_init->print("HMA indirect available only in T6\n"); - rc = CUDBG_STATUS_ENTITY_NOT_FOUND; - goto err; + return CUDBG_STATUS_ENTITY_NOT_FOUND; } n = sizeof(t6_hma_ireg_array) / (4 * sizeof(u32)); size = sizeof(struct ireg_buf) * n; - scratch_buff.size = size; - - rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff); - if (rc) - goto err; - + GET_SCRATCH_BUFF(dbg_buff, size, &scratch_buff); hma_indr = (struct ireg_buf *)scratch_buff.data; - + cudbg_access_lock_acquire(pdbg_init); for (i = 0; i < n; i++) { struct ireg_field *hma_fli = &hma_indr->tp_pio; u32 *buff = hma_indr->outbuf; @@ -3990,48 +3915,32 @@ static int collect_hma_indirect(struct cudbg_init *pdbg_init, hma_fli->ireg_data = t6_hma_ireg_array[i][1]; hma_fli->ireg_local_offset = t6_hma_ireg_array[i][2]; hma_fli->ireg_offset_range = t6_hma_ireg_array[i][3]; - t4_read_indirect(padap, hma_fli->ireg_addr, hma_fli->ireg_data, buff, hma_fli->ireg_offset_range, hma_fli->ireg_local_offset); - hma_indr++; - } - - rc = write_compression_hdr(&scratch_buff, dbg_buff); - if (rc) - goto err1; - - rc = compress_buff(&scratch_buff, dbg_buff); - -err1: - release_scratch_buff(&scratch_buff, dbg_buff); -err: + cudbg_access_lock_release(pdbg_init); + WRITE_AND_RELEASE_SCRATCH_BUFF(&scratch_buff, dbg_buff); return rc; } -static int collect_pcie_indirect(struct cudbg_init *pdbg_init, - struct cudbg_buffer *dbg_buff, - struct cudbg_error *cudbg_err) +int cudbg_collect_pcie_indirect(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err) { - struct cudbg_buffer scratch_buff; struct adapter *padap = pdbg_init->adap; + struct cudbg_buffer scratch_buff; struct ireg_buf *ch_pcie; - u32 size; int i, rc, n; + u32 size; n = sizeof(t5_pcie_pdbg_array) / (4 * sizeof(u32)); size = sizeof(struct ireg_buf) * n * 2; - scratch_buff.size = size; - - rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff); - if (rc) - goto err; - + GET_SCRATCH_BUFF(dbg_buff, size, &scratch_buff); ch_pcie = (struct ireg_buf *)scratch_buff.data; - /*PCIE_PDBG*/ + cudbg_access_lock_acquire(pdbg_init); for (i = 0; i < n; i++) { struct ireg_field *pcie_pio = &ch_pcie->tp_pio; u32 *buff = ch_pcie->outbuf; @@ -4040,14 +3949,12 @@ static int collect_pcie_indirect(struct cudbg_init *pdbg_init, pcie_pio->ireg_data = t5_pcie_pdbg_array[i][1]; pcie_pio->ireg_local_offset = t5_pcie_pdbg_array[i][2]; pcie_pio->ireg_offset_range = t5_pcie_pdbg_array[i][3]; - t4_read_indirect(padap, pcie_pio->ireg_addr, pcie_pio->ireg_data, buff, pcie_pio->ireg_offset_range, pcie_pio->ireg_local_offset); - ch_pcie++; } @@ -4061,39 +3968,28 @@ static int collect_pcie_indirect(struct cudbg_init *pdbg_init, pcie_pio->ireg_data = t5_pcie_cdbg_array[i][1]; pcie_pio->ireg_local_offset = t5_pcie_cdbg_array[i][2]; pcie_pio->ireg_offset_range = t5_pcie_cdbg_array[i][3]; - t4_read_indirect(padap, pcie_pio->ireg_addr, pcie_pio->ireg_data, buff, pcie_pio->ireg_offset_range, pcie_pio->ireg_local_offset); - ch_pcie++; } - - rc = write_compression_hdr(&scratch_buff, dbg_buff); - if (rc) - goto err1; - - rc = compress_buff(&scratch_buff, dbg_buff); - -err1: - 
release_scratch_buff(&scratch_buff, dbg_buff); -err: + cudbg_access_lock_release(pdbg_init); + WRITE_AND_RELEASE_SCRATCH_BUFF(&scratch_buff, dbg_buff); return rc; - } -static int collect_tp_indirect(struct cudbg_init *pdbg_init, - struct cudbg_buffer *dbg_buff, - struct cudbg_error *cudbg_err) +int cudbg_collect_tp_indirect(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err) { - struct cudbg_buffer scratch_buff; struct adapter *padap = pdbg_init->adap; + struct cudbg_buffer scratch_buff; struct ireg_buf *ch_tp_pio; - u32 size; int i, rc, n = 0; + u32 size; if (is_t5(padap)) n = sizeof(t5_tp_pio_array) / (4 * sizeof(u32)); @@ -4101,14 +3997,8 @@ static int collect_tp_indirect(struct cudbg_init *pdbg_init, n = sizeof(t6_tp_pio_array) / (4 * sizeof(u32)); size = sizeof(struct ireg_buf) * n * 3; - scratch_buff.size = size; - - rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff); - if (rc) - goto err; - + GET_SCRATCH_BUFF(dbg_buff, size, &scratch_buff); ch_tp_pio = (struct ireg_buf *)scratch_buff.data; - /* TP_PIO*/ for (i = 0; i < n; i++) { struct ireg_field *tp_pio = &ch_tp_pio->tp_pio; @@ -4125,10 +4015,8 @@ static int collect_tp_indirect(struct cudbg_init *pdbg_init, tp_pio->ireg_local_offset = t6_tp_pio_array[i][2]; tp_pio->ireg_offset_range = t6_tp_pio_array[i][3]; } - t4_tp_pio_read(padap, buff, tp_pio->ireg_offset_range, - tp_pio->ireg_local_offset, true); - + tp_pio->ireg_local_offset, true); ch_tp_pio++; } @@ -4153,10 +4041,8 @@ static int collect_tp_indirect(struct cudbg_init *pdbg_init, tp_pio->ireg_local_offset = t6_tp_tm_pio_array[i][2]; tp_pio->ireg_offset_range = t6_tp_tm_pio_array[i][3]; } - t4_tp_tm_pio_read(padap, buff, tp_pio->ireg_offset_range, - tp_pio->ireg_local_offset, true); - + tp_pio->ireg_local_offset, true); ch_tp_pio++; } @@ -4185,44 +4071,68 @@ static int collect_tp_indirect(struct cudbg_init *pdbg_init, tp_pio->ireg_offset_range = t6_tp_mib_index_array[i][3]; } - t4_tp_mib_read(padap, buff, tp_pio->ireg_offset_range, - tp_pio->ireg_local_offset, true); - + tp_pio->ireg_local_offset, true); ch_tp_pio++; } + WRITE_AND_RELEASE_SCRATCH_BUFF(&scratch_buff, dbg_buff); + return rc; +} - rc = write_compression_hdr(&scratch_buff, dbg_buff); - if (rc) - goto err1; - rc = compress_buff(&scratch_buff, dbg_buff); +static int cudbg_read_sge_qbase_indirect_reg(struct adapter *padap, + struct sge_qbase_reg_field *sge_qbase, + u32 pf_vf_no, int isPF) +{ + u32 *buff; -err1: - release_scratch_buff(&scratch_buff, dbg_buff); -err: - return rc; + if (isPF) { + if (pf_vf_no >= 8) + return CUDBG_STATUS_INVALID_INDEX; + + buff = sge_qbase->pf_data_value[pf_vf_no]; + } else { + if (pf_vf_no >= 256) + return CUDBG_STATUS_INVALID_INDEX; + + buff = sge_qbase->vf_data_value[pf_vf_no]; + pf_vf_no += 8; + /* in SGE_QBASE_INDEX, + * Qbase map index. Entries 0->7 are PF0->7, Entries 8->263 are VFID0->256. 
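Note: the queue-base map is reached through one index register and four data registers, with PFs at indices 0-7 and VF n at index n + 8, as the comment above states. A standalone sketch of that indirect access pattern follows; reg_write32()/reg_read32() and the tiny fake register file are placeholders so the example runs outside the driver, and the register names in the comments are only meant to mirror SGE_QBASE_INDEX and SGE_QBASE_MAP0-3.

        #include <stdbool.h>
        #include <stdint.h>

        static uint32_t fake_regs[16];  /* stand-in register file */

        static void
        reg_write32(uint32_t addr, uint32_t val)
        {
                fake_regs[addr % 16] = val;
        }

        static uint32_t
        reg_read32(uint32_t addr)
        {
                return (fake_regs[addr % 16]);
        }

        struct qbase_regs_sketch {
                uint32_t index_reg;     /* e.g. SGE_QBASE_INDEX */
                uint32_t data_reg[4];   /* e.g. SGE_QBASE_MAP0..3 */
        };

        static int
        read_qbase(const struct qbase_regs_sketch *r, uint32_t fn, bool is_pf,
            uint32_t out[4])
        {
                uint32_t idx = is_pf ? fn : fn + 8;     /* VFs start at 8 */
                int i;

                if ((is_pf && fn >= 8) || (!is_pf && fn >= 256))
                        return (-1);                    /* invalid index */

                reg_write32(r->index_reg, idx);
                for (i = 0; i < 4; i++)
                        out[i] = reg_read32(r->data_reg[i]);
                return (0);
        }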
+ */ + } + + t4_write_reg(padap, sge_qbase->reg_addr, pf_vf_no); + *buff++ = t4_read_reg(padap, sge_qbase->reg_data[0]); + *buff++ = t4_read_reg(padap, sge_qbase->reg_data[1]); + *buff++ = t4_read_reg(padap, sge_qbase->reg_data[2]); + *buff++ = t4_read_reg(padap, sge_qbase->reg_data[3]); + + return 0; } -static int collect_sge_indirect(struct cudbg_init *pdbg_init, - struct cudbg_buffer *dbg_buff, - struct cudbg_error *cudbg_err) +int cudbg_collect_sge_indirect(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err) { - struct cudbg_buffer scratch_buff; struct adapter *padap = pdbg_init->adap; + struct sge_qbase_reg_field *sge_qbase; + struct cudbg_buffer scratch_buff; struct ireg_buf *ch_sge_dbg; + int i, rc, pf, vf; + u8 secollect = 0; u32 size; - int i, rc; - - size = sizeof(struct ireg_buf) * 2; - scratch_buff.size = size; - rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff); - if (rc) - goto err; + if (pdbg_init->dbg_params[CUDBG_SECOLLECT_PARAM].param_type == + CUDBG_SECOLLECT_PARAM) + secollect = 1; + size = 2 * sizeof(*ch_sge_dbg); + if (secollect) + size += sizeof(*sge_qbase); + GET_SCRATCH_BUFF(dbg_buff, size, &scratch_buff); ch_sge_dbg = (struct ireg_buf *)scratch_buff.data; - + cudbg_access_lock_acquire(pdbg_init); for (i = 0; i < 2; i++) { struct ireg_field *sge_pio = &ch_sge_dbg->tp_pio; u32 *buff = ch_sge_dbg->outbuf; @@ -4231,39 +4141,55 @@ static int collect_sge_indirect(struct cudbg_init *pdbg_init, sge_pio->ireg_data = t5_sge_dbg_index_array[i][1]; sge_pio->ireg_local_offset = t5_sge_dbg_index_array[i][2]; sge_pio->ireg_offset_range = t5_sge_dbg_index_array[i][3]; - t4_read_indirect(padap, sge_pio->ireg_addr, sge_pio->ireg_data, buff, sge_pio->ireg_offset_range, sge_pio->ireg_local_offset); - ch_sge_dbg++; } + cudbg_access_lock_release(pdbg_init); - rc = write_compression_hdr(&scratch_buff, dbg_buff); - if (rc) - goto err1; + if (is_t5(padap) || !secollect) + goto out; - rc = compress_buff(&scratch_buff, dbg_buff); + scratch_buff.offset = 2 * sizeof(*ch_sge_dbg); -err1: - release_scratch_buff(&scratch_buff, dbg_buff); -err: + sge_qbase = (struct sge_qbase_reg_field *)(scratch_buff.data + scratch_buff.offset); + sge_qbase->reg_addr = t6_sge_qbase_index_array[0]; + /* 1 addr reg SGE_QBASE_INDEX and 4 data reg SGE_QBASE_MAP[0-3] */ + sge_qbase->reg_data[0] = t6_sge_qbase_index_array[1]; + sge_qbase->reg_data[1] = t6_sge_qbase_index_array[2]; + sge_qbase->reg_data[2] = t6_sge_qbase_index_array[3]; + sge_qbase->reg_data[3] = t6_sge_qbase_index_array[4]; + for (pf = 0; pf < 8; pf++) { + rc = cudbg_read_sge_qbase_indirect_reg(padap, sge_qbase, pf, 1); + if (rc) + break; + } + + for (vf = 0; vf < padap->chip_params->vfcount; vf++) { + rc = cudbg_read_sge_qbase_indirect_reg(padap, sge_qbase, vf, 0); + if (rc) + break; + } + sge_qbase->vfcount = padap->chip_params->vfcount; + +out: + WRITE_AND_RELEASE_SCRATCH_BUFF(&scratch_buff, dbg_buff); return rc; } -static int collect_full(struct cudbg_init *pdbg_init, - struct cudbg_buffer *dbg_buff, - struct cudbg_error *cudbg_err) +int cudbg_collect_full(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err) { - struct cudbg_buffer scratch_buff; - struct adapter *padap = pdbg_init->adap; u32 reg_addr, reg_data, reg_local_offset, reg_offset_range; + struct adapter *padap = pdbg_init->adap; + struct cudbg_buffer scratch_buff; + int rc, nreg = 0; u32 *sp; - int rc; - int nreg = 0; /* Collect Registers: * TP_DBG_SCHED_TX (0x7e40 + 0x6a), @@ -4281,43 
+4207,31 @@ static int collect_full(struct cudbg_init *pdbg_init, nreg = 7; scratch_buff.size = nreg * sizeof(u32); - - rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff); - if (rc) - goto err; - + GET_SCRATCH_BUFF(dbg_buff, scratch_buff.size, &scratch_buff); sp = (u32 *)scratch_buff.data; /* TP_DBG_SCHED_TX */ reg_local_offset = t5_tp_pio_array[3][2] + 0xa; reg_offset_range = 1; - t4_tp_pio_read(padap, sp, reg_offset_range, reg_local_offset, true); - sp++; /* TP_DBG_SCHED_RX */ reg_local_offset = t5_tp_pio_array[3][2] + 0xb; reg_offset_range = 1; - t4_tp_pio_read(padap, sp, reg_offset_range, reg_local_offset, true); - sp++; /* TP_DBG_CSIDE_INT */ reg_local_offset = t5_tp_pio_array[9][2] + 0xf; reg_offset_range = 1; - t4_tp_pio_read(padap, sp, reg_offset_range, reg_local_offset, true); - sp++; /* TP_DBG_ESIDE_INT */ reg_local_offset = t5_tp_pio_array[8][2] + 3; reg_offset_range = 1; - t4_tp_pio_read(padap, sp, reg_offset_range, reg_local_offset, true); - sp++; /* PCIE_CDEBUG_INDEX[AppData0] */ @@ -4325,10 +4239,7 @@ static int collect_full(struct cudbg_init *pdbg_init, reg_data = t5_pcie_cdbg_array[0][1]; reg_local_offset = t5_pcie_cdbg_array[0][2] + 2; reg_offset_range = 1; - - t4_read_indirect(padap, reg_addr, reg_data, sp, reg_offset_range, - reg_local_offset); - + cudbg_pcie_cdbg_read(pdbg_init, sp, reg_offset_range, reg_local_offset); sp++; if (is_t6(padap)) { @@ -4337,49 +4248,84 @@ static int collect_full(struct cudbg_init *pdbg_init, reg_data = t5_pcie_cdbg_array[0][1]; reg_local_offset = t5_pcie_cdbg_array[0][2] + 3; reg_offset_range = 1; - - t4_read_indirect(padap, reg_addr, reg_data, sp, - reg_offset_range, reg_local_offset); - + cudbg_pcie_cdbg_read(pdbg_init, sp, reg_offset_range, + reg_local_offset); sp++; } /* SGE_DEBUG_DATA_HIGH_INDEX_10 */ *sp = t4_read_reg(padap, A_SGE_DEBUG_DATA_HIGH_INDEX_10); + WRITE_AND_RELEASE_SCRATCH_BUFF(&scratch_buff, dbg_buff); + return rc; +} - rc = write_compression_hdr(&scratch_buff, dbg_buff); - if (rc) - goto err1; +static int read_vpd_reg(struct adapter *padap, int addr, int length, + u8 *read_out) +{ + int i; + int j = 0; + int data; - rc = compress_buff(&scratch_buff, dbg_buff); + /* buffer to store data we read from VPD.*/ + /* With extra buffer since se_read reads dword aligned.*/ + u8 vpd_data[MAX_VPD_DATA_LEN]; + int addr_diff; + int base_addr = addr & ~3; + + /* Read next dword b/c if we read addr 0x2 for 4 bytes, we need to + * read both addr 0x0 and addr 0x4. + */ + int max_addr; + + /* checking (addr + length) is multiple of 4 or not */ + if (((addr + length) & 3) == 0) + max_addr = addr + length; + else + max_addr = ((addr + length) & ~3) + 4; + + /* Read from VPD and put each character into the buffer. + * se_read() returns value that is byte swapped so we mask MSB starting + * from right. + */ + for (i = base_addr; i < max_addr; i += 4) { + t4_seeprom_read(padap, i, &data); + /* Undo t4_seeprom_read swizzle, cudbg expects it to be LE. */ + data = cpu_to_le32(data); + vpd_data[j++] = (data & 0x000000ff); + vpd_data[j++] = (data & 0x0000ff00) >> 8; + vpd_data[j++] = (data & 0x00ff0000) >> 16; + vpd_data[j++] = (data & 0xff000000) >> 24; + } + vpd_data[j] = '\0'; /* end string*/ + + /* Need to figure out where in our buffer to start printing our string + * because se_read() reads from dword aligned addresses. For example, + * reading 0xae2 will return the whole dword from 0xae0-0xae3. 
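Note: the dword-alignment arithmetic above is easiest to check with the 0xae2 case spelled out. The snippet below reproduces only the address math (aligned base, rounded-up end, and the offset of the first wanted byte), not the SEEPROM read itself.

        #include <stdint.h>
        #include <stdio.h>

        int
        main(void)
        {
                uint32_t addr = 0xae2, length = 4;
                uint32_t base_addr = addr & ~3u;        /* 0xae0 */
                uint32_t end = addr + length;           /* 0xae6 */
                uint32_t max_addr = (end & 3) == 0 ? end :
                    (end & ~3u) + 4;                    /* 0xae8 */
                uint32_t ndwords = (max_addr - base_addr) / 4;  /* 2 dwords */
                uint32_t addr_diff = addr - base_addr;  /* first byte wanted */

                printf("read dwords at 0x%x and 0x%x (%u total), "
                    "then copy bytes %u..%u of the buffer\n",
                    base_addr, max_addr - 4, ndwords,
                    addr_diff, addr_diff + length - 1);
                return (0);
        }

For addr 0xae2 and length 4 this prints dwords 0xae0 and 0xae4 with a copy of buffer bytes 2..5, matching the comment's example.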
+ */ + addr_diff = addr - base_addr; + for (i = addr_diff, j = 0; i < (addr_diff + length); i++) + read_out[j++] = vpd_data[i]; + + read_out[j] = '\0'; + + return 0; -err1: - release_scratch_buff(&scratch_buff, dbg_buff); -err: - return rc; } -static int collect_vpd_data(struct cudbg_init *pdbg_init, - struct cudbg_buffer *dbg_buff, - struct cudbg_error *cudbg_err) +int cudbg_collect_vpd_data(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err) { -#ifdef notyet - struct cudbg_buffer scratch_buff; struct adapter *padap = pdbg_init->adap; struct struct_vpd_data *vpd_data; - char vpd_ver[4]; - u32 fw_vers; - u32 size; + struct cudbg_buffer scratch_buff; + char vpd_ver[VPD_VER_LEN + 2] = { 0 }; + u32 fw_vers = 0; int rc; - size = sizeof(struct struct_vpd_data); - scratch_buff.size = size; - - rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff); - if (rc) - goto err; - + GET_SCRATCH_BUFF(dbg_buff, sizeof(*vpd_data), &scratch_buff); vpd_data = (struct struct_vpd_data *)scratch_buff.data; + memset(vpd_data, 0, sizeof(*vpd_data)); if (is_t5(padap)) { read_vpd_reg(padap, SN_REG_ADDR, SN_MAX_LEN, vpd_data->sn); @@ -4394,7 +4340,7 @@ static int collect_vpd_data(struct cudbg_init *pdbg_init, } if (is_fw_attached(pdbg_init)) { - rc = t4_get_scfg_version(padap, &vpd_data->scfg_vers); + rc = t4_get_scfg_version(padap, &vpd_data->scfg_vers); } else { rc = 1; } @@ -4403,8 +4349,10 @@ static int collect_vpd_data(struct cudbg_init *pdbg_init, /* Now trying with backdoor mechanism */ rc = read_vpd_reg(padap, SCFG_VER_ADDR, SCFG_VER_LEN, (u8 *)&vpd_data->scfg_vers); - if (rc) - goto err1; + if (rc) { + cudbg_debug(pdbg_init, "FAIL - reading serial config version. Continuing...\n"); + rc = 0; + } } if (is_fw_attached(pdbg_init)) { @@ -4417,38 +4365,65 @@ static int collect_vpd_data(struct cudbg_init *pdbg_init, /* Now trying with backdoor mechanism */ rc = read_vpd_reg(padap, VPD_VER_ADDR, VPD_VER_LEN, (u8 *)vpd_ver); - if (rc) - goto err1; - /* read_vpd_reg return string of stored hex - * converting hex string to char string - * vpd version is 2 bytes only */ - sprintf(vpd_ver, "%c%c\n", vpd_ver[0], vpd_ver[1]); - vpd_data->vpd_vers = simple_strtoul(vpd_ver, NULL, 16); + if (rc) { + cudbg_debug(pdbg_init, "FAIL - reading VPD version. Continuing...\n"); + rc = 0; + } else { + /* read_vpd_reg return string of stored hex + * converting hex string to char string + * vpd version is 2 bytes only + */ + snprintf(vpd_ver, VPD_VER_LEN + 2, "%c%c\n", vpd_ver[0], vpd_ver[1]); + vpd_data->vpd_vers = simple_strtoul(vpd_ver, NULL, 16); + } } /* Get FW version if it's not already filled in */ fw_vers = padap->params.fw_vers; if (!fw_vers) { - rc = t4_get_fw_version(padap, &fw_vers); - if (rc) - goto err1; + rc = t4_get_flash_params(padap); + if (rc) { + cudbg_debug(pdbg_init, "FAIL - reading flash params for fw version. Continuing...\n"); + rc = 0; + } else { + rc = t4_get_fw_version(padap, &fw_vers); + if (rc) { + cudbg_debug(pdbg_init, "FAIL - reading fw version. 
Continuing...\n"); + fw_vers = 0; + rc = 0; + } + } } vpd_data->fw_major = G_FW_HDR_FW_VER_MAJOR(fw_vers); vpd_data->fw_minor = G_FW_HDR_FW_VER_MINOR(fw_vers); vpd_data->fw_micro = G_FW_HDR_FW_VER_MICRO(fw_vers); vpd_data->fw_build = G_FW_HDR_FW_VER_BUILD(fw_vers); + WRITE_AND_RELEASE_SCRATCH_BUFF(&scratch_buff, dbg_buff); + return rc; +} - rc = write_compression_hdr(&scratch_buff, dbg_buff); - if (rc) - goto err1; +int cudbg_collect_upload(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err) +{ + struct adapter *padap = pdbg_init->adap; + struct cudbg_buffer scratch_buff; + u32 param, *value; + int rc; - rc = compress_buff(&scratch_buff, dbg_buff); + if (!is_fw_attached(pdbg_init)) + return CUDBG_SYSTEM_ERROR; + GET_SCRATCH_BUFF(dbg_buff, sizeof(u32), &scratch_buff); + value = (u32 *)scratch_buff.data; + param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | + V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_LOAD)); + rc = t4_query_params(padap, padap->mbox, padap->pf, 0, 1, ¶m, value); + if (rc < 0) + goto err1; + WRITE_AND_COMPRESS_SCRATCH_BUFF(&scratch_buff, dbg_buff); err1: release_scratch_buff(&scratch_buff, dbg_buff); -err: return rc; -#endif - return (CUDBG_STATUS_NOT_IMPLEMENTED); } diff --git a/sys/dev/cxgbe/cudbg/cudbg_lib.h b/sys/dev/cxgbe/cudbg/cudbg_lib.h index 6a67b9c3924..b3c3cf5df5c 100644 --- a/sys/dev/cxgbe/cudbg/cudbg_lib.h +++ b/sys/dev/cxgbe/cudbg/cudbg_lib.h @@ -34,222 +34,396 @@ #define min_t(type, _a, _b) (((type)(_a) < (type)(_b)) ? (type)(_a) : (type)(_b)) #endif -static int collect_reg_dump(struct cudbg_init *, struct cudbg_buffer *, - struct cudbg_error *); -static int collect_fw_devlog(struct cudbg_init *, struct cudbg_buffer *, - struct cudbg_error *); -static int collect_cim_qcfg(struct cudbg_init *, struct cudbg_buffer *, - struct cudbg_error *); -static int collect_cim_la(struct cudbg_init *, struct cudbg_buffer *, - struct cudbg_error *); -static int collect_cim_ma_la(struct cudbg_init *, struct cudbg_buffer *, - struct cudbg_error *); -static int collect_cim_obq_ulp0(struct cudbg_init *, struct cudbg_buffer *, - struct cudbg_error *); -static int collect_cim_obq_ulp1(struct cudbg_init *, struct cudbg_buffer *, - struct cudbg_error *); -static int collect_cim_obq_ulp2(struct cudbg_init *, struct cudbg_buffer *, - struct cudbg_error *); -static int collect_cim_obq_ulp3(struct cudbg_init *, struct cudbg_buffer *, - struct cudbg_error *); -static int collect_cim_obq_sge(struct cudbg_init *, struct cudbg_buffer *, - struct cudbg_error *); -static int collect_cim_obq_ncsi(struct cudbg_init *, struct cudbg_buffer *, - struct cudbg_error *); -static int collect_cim_ibq_tp0(struct cudbg_init *, struct cudbg_buffer *, - struct cudbg_error *); -static int collect_cim_ibq_tp1(struct cudbg_init *, struct cudbg_buffer *, - struct cudbg_error *); -static int collect_cim_ibq_ulp(struct cudbg_init *, struct cudbg_buffer *, - struct cudbg_error *); -static int collect_cim_ibq_sge0(struct cudbg_init *, struct cudbg_buffer *, - struct cudbg_error *); -static int collect_cim_ibq_sge1(struct cudbg_init *, struct cudbg_buffer *, - struct cudbg_error *); -static int collect_cim_ibq_ncsi(struct cudbg_init *, struct cudbg_buffer *, - struct cudbg_error *); -static int collect_edc0_meminfo(struct cudbg_init *, struct cudbg_buffer *, - struct cudbg_error *); -static int collect_edc1_meminfo(struct cudbg_init *, struct cudbg_buffer *, - struct cudbg_error *); -static int collect_mc0_meminfo(struct cudbg_init *, struct cudbg_buffer *, - struct cudbg_error 
*); -static int collect_mc1_meminfo(struct cudbg_init *, struct cudbg_buffer *, - struct cudbg_error *); -static int collect_rss(struct cudbg_init *, struct cudbg_buffer *, - struct cudbg_error *); -static int collect_rss_key(struct cudbg_init *, struct cudbg_buffer *, - struct cudbg_error *); -static int collect_rss_pf_config(struct cudbg_init *, struct cudbg_buffer *, - struct cudbg_error *); -static int collect_rss_vf_config(struct cudbg_init *, struct cudbg_buffer *, - struct cudbg_error *); -static int collect_rss_config(struct cudbg_init *, struct cudbg_buffer *, - struct cudbg_error *); -static int collect_path_mtu(struct cudbg_init *, struct cudbg_buffer *, - struct cudbg_error *); -static int collect_sw_state(struct cudbg_init *, struct cudbg_buffer *, - struct cudbg_error *); -int collect_wtp_data(struct cudbg_init *, struct cudbg_buffer *, - struct cudbg_error *); -static int collect_pm_stats(struct cudbg_init *, struct cudbg_buffer *, - struct cudbg_error *); -static int collect_hw_sched(struct cudbg_init *, struct cudbg_buffer *, - struct cudbg_error *); -static int collect_tcp_stats(struct cudbg_init *, struct cudbg_buffer *, - struct cudbg_error *); -static int collect_tp_err_stats(struct cudbg_init *, struct cudbg_buffer *, - struct cudbg_error *); -static int collect_fcoe_stats(struct cudbg_init *, struct cudbg_buffer *, - struct cudbg_error *); -static int collect_rdma_stats(struct cudbg_init *, struct cudbg_buffer *, - struct cudbg_error *); -static int collect_tp_indirect(struct cudbg_init *, struct cudbg_buffer *, - struct cudbg_error *); -static int collect_sge_indirect(struct cudbg_init *, struct cudbg_buffer *, - struct cudbg_error *); -static int collect_cpl_stats(struct cudbg_init *, struct cudbg_buffer *, - struct cudbg_error *); -static int collect_ddp_stats(struct cudbg_init *, struct cudbg_buffer *, - struct cudbg_error *); -static int collect_wc_stats(struct cudbg_init *, struct cudbg_buffer *, - struct cudbg_error *); -static int collect_ulprx_la(struct cudbg_init *, struct cudbg_buffer *, - struct cudbg_error *); -static int collect_lb_stats(struct cudbg_init *, struct cudbg_buffer *, - struct cudbg_error *); -static int collect_tp_la(struct cudbg_init *, struct cudbg_buffer *, - struct cudbg_error *); -static int collect_meminfo(struct cudbg_init *, struct cudbg_buffer *, - struct cudbg_error *); -static int collect_cim_pif_la(struct cudbg_init *, struct cudbg_buffer *, - struct cudbg_error *); -static int collect_clk_info(struct cudbg_init *, struct cudbg_buffer *, - struct cudbg_error *); -static int collect_obq_sge_rx_q0(struct cudbg_init *, struct cudbg_buffer *, - struct cudbg_error *); -static int collect_obq_sge_rx_q1(struct cudbg_init *, struct cudbg_buffer *, - struct cudbg_error *); -static int collect_macstats(struct cudbg_init *, struct cudbg_buffer *, - struct cudbg_error *); -static int collect_pcie_indirect(struct cudbg_init *, struct cudbg_buffer *, - struct cudbg_error *); -static int collect_pm_indirect(struct cudbg_init *, struct cudbg_buffer *, - struct cudbg_error *); -static int collect_full(struct cudbg_init *, struct cudbg_buffer *, - struct cudbg_error *); -static int collect_tx_rate(struct cudbg_init *, struct cudbg_buffer *, - struct cudbg_error *); -static int collect_tid(struct cudbg_init *, struct cudbg_buffer *, - struct cudbg_error *); -static int collect_pcie_config(struct cudbg_init *, struct cudbg_buffer *, - struct cudbg_error *); -static int collect_dump_context(struct cudbg_init *, struct cudbg_buffer *, - struct 
cudbg_error *); -static int collect_mps_tcam(struct cudbg_init *, struct cudbg_buffer *, - struct cudbg_error *); -static int collect_vpd_data(struct cudbg_init *, struct cudbg_buffer *, - struct cudbg_error *); -static int collect_le_tcam(struct cudbg_init *, struct cudbg_buffer *, - struct cudbg_error *); -static int collect_cctrl(struct cudbg_init *, struct cudbg_buffer *, - struct cudbg_error *); -static int collect_ma_indirect(struct cudbg_init *, struct cudbg_buffer *, - struct cudbg_error *); -static int collect_ulptx_la(struct cudbg_init *, struct cudbg_buffer *, - struct cudbg_error *); -static int collect_up_cim_indirect(struct cudbg_init *, struct cudbg_buffer *, - struct cudbg_error *); -static int collect_pbt_tables(struct cudbg_init *, struct cudbg_buffer *, - struct cudbg_error *); -static int collect_mbox_log(struct cudbg_init *, struct cudbg_buffer *, - struct cudbg_error *); -static int collect_hma_indirect(struct cudbg_init *, struct cudbg_buffer *, - struct cudbg_error *); +int cudbg_collect_reg_dump(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err); +int cudbg_collect_fw_devlog(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err); +int cudbg_collect_cim_qcfg(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err); +int cudbg_collect_cim_la(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err); +int cudbg_collect_cim_ma_la(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err); +int cudbg_collect_cim_obq_ulp0(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err); +int cudbg_collect_cim_obq_ulp1(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err); +int cudbg_collect_cim_obq_ulp2(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err); +int cudbg_collect_cim_obq_ulp3(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err); +int cudbg_collect_cim_obq_sge(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err); +int cudbg_collect_cim_obq_ncsi(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err); +int cudbg_collect_cim_ibq_tp0(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err); +int cudbg_collect_cim_ibq_tp1(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err); +int cudbg_collect_cim_ibq_ulp(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err); +int cudbg_collect_cim_ibq_sge0(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err); +int cudbg_collect_cim_ibq_sge1(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err); +int cudbg_collect_cim_ibq_ncsi(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err); +int cudbg_collect_edc0_meminfo(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err); +int cudbg_collect_edc1_meminfo(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err); +int cudbg_collect_mc0_meminfo(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error 
*cudbg_err); +int cudbg_collect_mc1_meminfo(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err); +int cudbg_collect_rss(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err); +int cudbg_collect_rss_key(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err); +int cudbg_collect_rss_pf_config(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err); +int cudbg_collect_rss_vf_config(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err); +int cudbg_collect_rss_config(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err); +int cudbg_collect_path_mtu(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err); +int cudbg_collect_sw_state(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err); +int cudbg_collect_wtp_data(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err); +int cudbg_collect_pm_stats(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err); +int cudbg_collect_hw_sched(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err); +int cudbg_collect_tcp_stats(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err); +int cudbg_collect_tp_err_stats(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err); +int cudbg_collect_fcoe_stats(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err); +int cudbg_collect_rdma_stats(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err); +int cudbg_collect_tp_indirect(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err); +int cudbg_collect_sge_indirect(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err); +int cudbg_collect_cpl_stats(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err); +int cudbg_collect_ddp_stats(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err); +int cudbg_collect_wc_stats(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err); +int cudbg_collect_ulprx_la(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err); +int cudbg_collect_lb_stats(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err); +int cudbg_collect_tp_la(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err); +int cudbg_collect_meminfo(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err); +int cudbg_collect_cim_pif_la(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err); +int cudbg_collect_clk_info(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err); +int cudbg_collect_obq_sge_rx_q0(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err); +int cudbg_collect_obq_sge_rx_q1(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err); +int 
cudbg_collect_macstats(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err); +int cudbg_collect_pcie_indirect(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err); +int cudbg_collect_pm_indirect(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err); +int cudbg_collect_full(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err); +int cudbg_collect_tx_rate(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err); +int cudbg_collect_tid(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err); +int cudbg_collect_pcie_config(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err); +int cudbg_collect_dump_context(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err); +int cudbg_collect_mps_tcam(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err); +int cudbg_collect_vpd_data(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err); +int cudbg_collect_le_tcam(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err); +int cudbg_collect_cctrl(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err); +int cudbg_collect_ma_indirect(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err); +int cudbg_collect_ulptx_la(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err); +int cudbg_collect_up_cim_indirect(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err); +int cudbg_collect_pbt_tables(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err); +int cudbg_collect_mbox_log(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err); +int cudbg_collect_hma_indirect(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err); +int cudbg_collect_hma_meminfo(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err); +int cudbg_collect_upload(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err); +int cudbg_collect_module_eeprom(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err); -static int (*process_entity[]) - (struct cudbg_init *, struct cudbg_buffer *, struct cudbg_error *) = { - collect_reg_dump, - collect_fw_devlog, - collect_cim_la, /*3*/ - collect_cim_ma_la, - collect_cim_qcfg, - collect_cim_ibq_tp0, - collect_cim_ibq_tp1, - collect_cim_ibq_ulp, - collect_cim_ibq_sge0, - collect_cim_ibq_sge1, - collect_cim_ibq_ncsi, - collect_cim_obq_ulp0, - collect_cim_obq_ulp1, /*13*/ - collect_cim_obq_ulp2, - collect_cim_obq_ulp3, - collect_cim_obq_sge, - collect_cim_obq_ncsi, - collect_edc0_meminfo, - collect_edc1_meminfo, - collect_mc0_meminfo, - collect_mc1_meminfo, - collect_rss, /*22*/ - collect_rss_pf_config, - collect_rss_key, - collect_rss_vf_config, - collect_rss_config, /*26*/ - collect_path_mtu, /*27*/ - collect_sw_state, - collect_wtp_data, - collect_pm_stats, - collect_hw_sched, - collect_tcp_stats, - collect_tp_err_stats, - collect_fcoe_stats, - collect_rdma_stats, 
- collect_tp_indirect, - collect_sge_indirect, - collect_cpl_stats, - collect_ddp_stats, - collect_wc_stats, - collect_ulprx_la, - collect_lb_stats, - collect_tp_la, - collect_meminfo, - collect_cim_pif_la, - collect_clk_info, - collect_obq_sge_rx_q0, - collect_obq_sge_rx_q1, - collect_macstats, - collect_pcie_indirect, - collect_pm_indirect, - collect_full, - collect_tx_rate, - collect_tid, - collect_pcie_config, - collect_dump_context, - collect_mps_tcam, - collect_vpd_data, - collect_le_tcam, - collect_cctrl, - collect_ma_indirect, - collect_ulptx_la, +static int (*process_entity[])(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err) = { + cudbg_collect_reg_dump, + cudbg_collect_fw_devlog, + cudbg_collect_cim_la, /*3*/ + cudbg_collect_cim_ma_la, + cudbg_collect_cim_qcfg, + cudbg_collect_cim_ibq_tp0, + cudbg_collect_cim_ibq_tp1, + cudbg_collect_cim_ibq_ulp, + cudbg_collect_cim_ibq_sge0, + cudbg_collect_cim_ibq_sge1, + cudbg_collect_cim_ibq_ncsi, + cudbg_collect_cim_obq_ulp0, + cudbg_collect_cim_obq_ulp1, /*13*/ + cudbg_collect_cim_obq_ulp2, + cudbg_collect_cim_obq_ulp3, + cudbg_collect_cim_obq_sge, + cudbg_collect_cim_obq_ncsi, + cudbg_collect_edc0_meminfo, + cudbg_collect_edc1_meminfo, + cudbg_collect_mc0_meminfo, + cudbg_collect_mc1_meminfo, + cudbg_collect_rss, /*22*/ + cudbg_collect_rss_pf_config, + cudbg_collect_rss_key, + cudbg_collect_rss_vf_config, + cudbg_collect_rss_config, /*26*/ + cudbg_collect_path_mtu, /*27*/ + cudbg_collect_sw_state, + cudbg_collect_wtp_data, + cudbg_collect_pm_stats, + cudbg_collect_hw_sched, + cudbg_collect_tcp_stats, + cudbg_collect_tp_err_stats, + cudbg_collect_fcoe_stats, + cudbg_collect_rdma_stats, + cudbg_collect_tp_indirect, + cudbg_collect_sge_indirect, + cudbg_collect_cpl_stats, + cudbg_collect_ddp_stats, + cudbg_collect_wc_stats, + cudbg_collect_ulprx_la, + cudbg_collect_lb_stats, + cudbg_collect_tp_la, + cudbg_collect_meminfo, + cudbg_collect_cim_pif_la, + cudbg_collect_clk_info, + cudbg_collect_obq_sge_rx_q0, + cudbg_collect_obq_sge_rx_q1, + cudbg_collect_macstats, + cudbg_collect_pcie_indirect, + cudbg_collect_pm_indirect, + cudbg_collect_full, + cudbg_collect_tx_rate, + cudbg_collect_tid, + cudbg_collect_pcie_config, + cudbg_collect_dump_context, + cudbg_collect_mps_tcam, + cudbg_collect_vpd_data, + cudbg_collect_le_tcam, + cudbg_collect_cctrl, + cudbg_collect_ma_indirect, + cudbg_collect_ulptx_la, NULL, /* ext entity */ - collect_up_cim_indirect, - collect_pbt_tables, - collect_mbox_log, - collect_hma_indirect, + cudbg_collect_up_cim_indirect, + cudbg_collect_pbt_tables, + cudbg_collect_mbox_log, + cudbg_collect_hma_indirect, + cudbg_collect_hma_meminfo, + cudbg_collect_upload, + NULL, /* queue descriptors - Driver specific */ + cudbg_collect_module_eeprom, }; +static int ATTRIBUTE_UNUSED entity_priority_list[] = { + CUDBG_MBOX_LOG, + CUDBG_QDESC, + CUDBG_REG_DUMP, + CUDBG_DEV_LOG, + CUDBG_CIM_LA, + CUDBG_CIM_MA_LA, + CUDBG_CIM_QCFG, + CUDBG_CIM_IBQ_TP0, + CUDBG_CIM_IBQ_TP1, + CUDBG_CIM_IBQ_ULP, + CUDBG_CIM_IBQ_SGE0, + CUDBG_CIM_IBQ_SGE1, + CUDBG_CIM_IBQ_NCSI, + CUDBG_CIM_OBQ_ULP0, + CUDBG_CIM_OBQ_ULP1, + CUDBG_CIM_OBQ_ULP2, + CUDBG_CIM_OBQ_ULP3, + CUDBG_CIM_OBQ_SGE, + CUDBG_CIM_OBQ_NCSI, + CUDBG_EDC0, + CUDBG_EDC1, + CUDBG_MC0, + CUDBG_MC1, + CUDBG_RSS, + CUDBG_RSS_PF_CONF, + CUDBG_RSS_KEY, + CUDBG_RSS_VF_CONF, + CUDBG_RSS_CONF, + CUDBG_PATH_MTU, + CUDBG_SW_STATE, + CUDBG_WTP, + CUDBG_PM_STATS, + CUDBG_HW_SCHED, + CUDBG_TCP_STATS, + CUDBG_TP_ERR_STATS, + CUDBG_FCOE_STATS, + CUDBG_RDMA_STATS, + 
CUDBG_TP_INDIRECT, + CUDBG_SGE_INDIRECT, + CUDBG_CPL_STATS, + CUDBG_DDP_STATS, + CUDBG_WC_STATS, + CUDBG_ULPRX_LA, + CUDBG_LB_STATS, + CUDBG_TP_LA, + CUDBG_MEMINFO, + CUDBG_CIM_PIF_LA, + CUDBG_CLK, + CUDBG_CIM_OBQ_RXQ0, + CUDBG_CIM_OBQ_RXQ1, + CUDBG_MAC_STATS, + CUDBG_PCIE_INDIRECT, + CUDBG_PM_INDIRECT, + CUDBG_FULL, + CUDBG_TX_RATE, + CUDBG_TID_INFO, + CUDBG_PCIE_CONFIG, + CUDBG_DUMP_CONTEXT, + CUDBG_MPS_TCAM, + CUDBG_VPD_DATA, + CUDBG_LE_TCAM, + CUDBG_CCTRL, + CUDBG_MA_INDIRECT, + CUDBG_ULPTX_LA, + CUDBG_EXT_ENTITY, + CUDBG_UP_CIM_INDIRECT, + CUDBG_PBT_TABLE, + CUDBG_HMA_INDIRECT, + CUDBG_HMA, + CUDBG_UPLOAD, + CUDBG_MOD_EEPROM, +}; + struct large_entity { int entity_code; int skip_flag; int priority; /* 1 is high priority */ }; -static int read_cim_ibq(struct cudbg_init *, struct cudbg_buffer *, - struct cudbg_error * , int); -static int read_cim_obq(struct cudbg_init *, struct cudbg_buffer *, - struct cudbg_error *, int); +static inline void cudbg_access_lock_acquire(struct cudbg_init *dbg_init) +{ + if (dbg_init->lock_cb) + dbg_init->lock_cb(dbg_init->access_lock); +} + +static inline void cudbg_access_lock_release(struct cudbg_init *dbg_init) +{ + if (dbg_init->unlock_cb) + dbg_init->unlock_cb(dbg_init->access_lock); +} + int get_entity_hdr(void *outbuf, int i, u32 size, struct cudbg_entity_hdr **); -void skip_entity(int entity_code); -void reset_skip_entity(void); -int is_large_entity(int entity_code); +void skip_entity(struct large_entity *, int large_entity_list_size, + int entity_code); +void reset_skip_entity(struct large_entity *, int large_entity_list_size); +int is_large_entity(struct large_entity *, int large_entity_list_size, + int entity_code); + +int cudbg_get_mem_region(struct struct_meminfo *meminfo, + const char *region_name, + struct struct_mem_desc *mem_desc); +void cudbg_get_mem_relative(struct struct_meminfo *meminfo, + u32 *out_base, u32 *out_end, + u8 *mem_type); +int cudbg_dump_context_size(struct adapter *padap); +void align_debug_buffer(struct cudbg_buffer *dbg_buff, + struct cudbg_entity_hdr *entity_hdr); #endif diff --git a/sys/dev/cxgbe/cudbg/cudbg_lib_common.h b/sys/dev/cxgbe/cudbg/cudbg_lib_common.h index 16fbe291e37..cefd8d1a7cb 100644 --- a/sys/dev/cxgbe/cudbg/cudbg_lib_common.h +++ b/sys/dev/cxgbe/cudbg/cudbg_lib_common.h @@ -57,9 +57,7 @@ * multiple times. 
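Note: the lock_cb/unlock_cb/access_lock hooks let the collector take whatever lock the embedding driver registered, without the cudbg library depending on a particular locking primitive. Below is a minimal userland sketch of the same pattern; the pthread mutex and the sketch_* names are only there to keep the example self-contained, the in-kernel driver would plug in its own lock and callbacks instead.

        #include <pthread.h>

        struct dbg_init_sketch {
                void (*lock_cb)(void *);        /* optional */
                void (*unlock_cb)(void *);      /* optional */
                void *access_lock;              /* opaque lock handle */
        };

        static pthread_mutex_t sketch_mtx = PTHREAD_MUTEX_INITIALIZER;

        static void sketch_lock(void *arg)   { pthread_mutex_lock(arg); }
        static void sketch_unlock(void *arg) { pthread_mutex_unlock(arg); }

        /* Library side: only call the hooks if they were registered. */
        static void
        access_lock_acquire(struct dbg_init_sketch *di)
        {
                if (di->lock_cb)
                        di->lock_cb(di->access_lock);
        }

        static void
        access_lock_release(struct dbg_init_sketch *di)
        {
                if (di->unlock_cb)
                        di->unlock_cb(di->access_lock);
        }

        /* Driver side: wire in the chosen lock. */
        static struct dbg_init_sketch di_example = {
                .lock_cb = sketch_lock,
                .unlock_cb = sketch_unlock,
                .access_lock = &sketch_mtx,
        };

        static void
        demo(void)
        {
                access_lock_acquire(&di_example);
                /* indirect registers would be read here */
                access_lock_release(&di_example);
        }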
*/ -#ifndef CUDBG_LITE #include "common/t4_hw.h" -#endif #define CUDBG_SF_MAX_SECTOR (FLASH_CUDBG_START_SEC + FLASH_CUDBG_NSECS) #define CUDBG_SF_SECTOR_SIZE SF_SEC_SIZE @@ -69,6 +67,12 @@ #define CUDBG_EXT_DATA_BIT 0 #define CUDBG_EXT_DATA_VALID (1 << CUDBG_EXT_DATA_BIT) +enum cudbg_compression_type { + CUDBG_COMPRESSION_FASTLZ = 0, + CUDBG_COMPRESSION_NONE = 1, + CUDBG_COMPRESSION_ZLIB, +}; + struct cudbg_hdr { u32 signature; u32 hdr_len; @@ -78,7 +82,8 @@ struct cudbg_hdr { u32 hdr_flags; u16 max_entities; u8 chip_ver; - u8 reserved1; + u8 reserved1:4; + u8 compress_type:4; u32 reserved[8]; }; @@ -114,6 +119,11 @@ struct cudbg_error { int app_err; }; +struct cudbg_private { + struct cudbg_init dbg_init; + struct cudbg_flash_sec_info *psec_info; +}; + struct cudbg_flash_sec_info { int par_sec; /* Represent partially filled sector no */ int par_sec_offset; /* Offset in partially filled sector */ @@ -127,11 +137,6 @@ struct cudbg_flash_sec_info { u8 sec_bitmap[8]; }; -struct cudbg_private { - struct cudbg_init dbg_init; - struct cudbg_flash_sec_info sec_info; -}; - #define HTONL_NIBBLE(data) ( \ (((uint32_t)(data) >> 28) & 0x0000000F) | \ (((uint32_t)(data) >> 20) & 0x000000F0) | \ @@ -153,22 +158,21 @@ struct cudbg_private { #define CUDBG_FL_MINOR_VERSION 1 #define CUDBG_FL_BUILD_VERSION 0 -void update_skip_size(struct cudbg_flash_sec_info *, u32); -int write_compression_hdr(struct cudbg_buffer *, struct cudbg_buffer *); -int compress_buff(struct cudbg_buffer *, struct cudbg_buffer *); +u32 get_skip_size(struct cudbg_flash_sec_info *psec_info); +void update_skip_size(struct cudbg_flash_sec_info *psec_info, u32 size); +void cudbg_update_entity_hdr(struct cudbg_init *pdbg_init, u32 size); +int write_compression_hdr(struct cudbg_init *, struct cudbg_buffer *, + struct cudbg_buffer *); +int compress_buff(struct cudbg_init *, struct cudbg_buffer *, + struct cudbg_buffer *); int get_scratch_buff(struct cudbg_buffer *, u32, struct cudbg_buffer *); +int get_scratch_buff_aligned(struct cudbg_buffer *pdbg_buff, u32 size, + struct cudbg_buffer *pscratch_buff, u32 align); void release_scratch_buff(struct cudbg_buffer *, struct cudbg_buffer *); -int decompress_buffer(struct cudbg_buffer *, struct cudbg_buffer *); -int validate_buffer(struct cudbg_buffer *compressed_buffer); -int decompress_buffer_wrapper(struct cudbg_buffer *pc_buff, - struct cudbg_buffer *pdc_buff); -int get_entity_rev(struct cudbg_ver_hdr *ver_hdr); -void sort_t(void *base, int num, int size, - int (*cmp_func)(const void *, const void *), - void (*swap_func)(void *, void *, int size)); -int cudbg_read_flash(void *handle, void *data, u32 size, int data_flag); +u16 get_entity_rev(struct cudbg_ver_hdr *ver_hdr); int cudbg_write_flash(void *handle, u64 timestamp, void *data, u32 start_offset, u32 start_hdr_offset, u32 cur_entity_size, u32 ext_size); +int cudbg_sge_ctxt_check_valid(u32 *buf, int type); #endif diff --git a/sys/dev/cxgbe/cudbg/cudbg_wtp.c b/sys/dev/cxgbe/cudbg/cudbg_wtp.c index a72534d987e..a63cb9ffa24 100644 --- a/sys/dev/cxgbe/cudbg/cudbg_wtp.c +++ b/sys/dev/cxgbe/cudbg/cudbg_wtp.c @@ -35,10 +35,8 @@ __FBSDID("$FreeBSD$"); #include "cudbg.h" #include "cudbg_lib_common.h" #include "cudbg_entity.h" +#include "cudbg_lib.h" -int collect_wtp_data(struct cudbg_init *pdbg_init, - struct cudbg_buffer *dbg_buff, - struct cudbg_error *cudbg_err); /*SGE_DEBUG Registers.*/ #define TP_MIB_SIZE 0x5e @@ -259,17 +257,17 @@ struct tp_mib_type tp_mib[] = { static u32 read_sge_debug_data(struct cudbg_init *pdbg_init, u32 *sge_dbg_reg) { 
struct adapter *padap = pdbg_init->adap; - u32 value; + u32 valuelo = 0, valuehi = 0; int i = 0; for (i = 0; i <= 15; i++) { t4_write_reg(padap, A_SGE_DEBUG_INDEX, (u32)i); - value = t4_read_reg(padap, A_SGE_DEBUG_DATA_LOW); - /*printf("LOW 0x%08x\n", value);*/ - sge_dbg_reg[(i << 1) | 1] = HTONL_NIBBLE(value); - value = t4_read_reg(padap, A_SGE_DEBUG_DATA_HIGH); - /*printf("HIGH 0x%08x\n", value);*/ - sge_dbg_reg[(i << 1)] = HTONL_NIBBLE(value); + valuelo = t4_read_reg(padap, A_SGE_DEBUG_DATA_LOW); + valuehi = t4_read_reg(padap, A_SGE_DEBUG_DATA_HIGH); + /*printf("LOW 0x%08x\n", valuelo);*/ + sge_dbg_reg[(i << 1) | 1] = HTONL_NIBBLE(valuelo); + /*printf("HIGH 0x%08x\n", valuehi);*/ + sge_dbg_reg[(i << 1)] = HTONL_NIBBLE(valuehi); } return 0; } @@ -277,12 +275,11 @@ static u32 read_sge_debug_data(struct cudbg_init *pdbg_init, u32 *sge_dbg_reg) static u32 read_tp_mib_data(struct cudbg_init *pdbg_init, struct tp_mib_data **ppTp_Mib) { - struct adapter *padap = pdbg_init->adap; u32 i = 0; for (i = 0; i < TP_MIB_SIZE; i++) { - t4_tp_mib_read(padap, &tp_mib[i].value, 1, - (u32)tp_mib[i].addr, true); + t4_tp_mib_read(pdbg_init->adap, &tp_mib[i].value, 1, + (u32)tp_mib[i].addr, true); } *ppTp_Mib = (struct tp_mib_data *)&tp_mib[0]; @@ -498,7 +495,7 @@ static int t5_wtp_data(struct cudbg_init *pdbg_init, /* Get TP_DBG_CSIDE registers*/ for (i = 0; i < 4; i++) { t4_tp_pio_read(padap, &value, 1, (u32)(A_TP_DBG_CSIDE_RX0 + i), - true); + true); wtp->utx_tpcside.sop[i] = ((value >> 28) & 0xF);/*bits 28:31*/ wtp->utx_tpcside.eop[i] = ((value >> 24) & 0xF);/*bits 24:27*/ @@ -513,7 +510,7 @@ static int t5_wtp_data(struct cudbg_init *pdbg_init, /* TP_DBG_ESIDE*/ for (i = 0; i < 4; i++) { t4_tp_pio_read(padap, &value, 1, (u32)(A_TP_DBG_ESIDE_PKT0 + i), - true); + true); wtp->tpeside_mps.sop[i] = ((value >> 28) & 0xF); /*bits 28:31*/ wtp->tpeside_mps.eop[i] = ((value >> 24) & 0xF); /*bits 24:27*/ @@ -778,7 +775,7 @@ static int t5_wtp_data(struct cudbg_init *pdbg_init, /* Get TP_DBG_CSIDE_TX registers*/ for (i = 0; i < 4; i++) { t4_tp_pio_read(padap, &value, 1, (u32)(A_TP_DBG_CSIDE_RX0 + i), - true); + true); wtp->tpcside_csw.sop[i] = ((value >> 28) & 0xF);/*bits 28:31*/ wtp->tpcside_csw.eop[i] = ((value >> 24) & 0xF);/*bits 24:27*/ @@ -861,7 +858,7 @@ static int t5_wtp_data(struct cudbg_init *pdbg_init, /*Get TP debug CSIDE Tx registers*/ for (i = 0; i < 2; i++) { t4_tp_pio_read(padap, &value, 1, (u32)(A_TP_DBG_CSIDE_TX0 + i), - true); + true); wtp->utx_tpcside_tx.sop[i] = ((value >> 28) & 0xF);/*bits 28:31 */ @@ -885,7 +882,7 @@ static int t5_wtp_data(struct cudbg_init *pdbg_init, /*Get TP debug Eside PKTx*/ for (i = 0; i < 4; i++) { t4_tp_pio_read(padap, &value, 1, (u32)(A_TP_DBG_ESIDE_PKT0 + i), - true); + true); wtp->tp_dbg_eside_pktx.sop[i] = ((value >> 12) & 0xF); wtp->tp_dbg_eside_pktx.eop[i] = ((value >> 8) & 0xF); @@ -918,12 +915,11 @@ static int t5_wtp_data(struct cudbg_init *pdbg_init, wtp->pcie_core_dmai.eop[2] = ((value >> 16) & 0xFF); /*bit 16:23*/ wtp->pcie_core_dmai.eop[3] = ((value >> 24) & 0xFF); /*bit 24:31*/ - rc = write_compression_hdr(&scratch_buff, dbg_buff); - + rc = write_compression_hdr(pdbg_init, &scratch_buff, dbg_buff); if (rc) goto err1; - rc = compress_buff(&scratch_buff, dbg_buff); + rc = compress_buff(pdbg_init, &scratch_buff, dbg_buff); err1: release_scratch_buff(&scratch_buff, dbg_buff); @@ -1021,7 +1017,7 @@ static int t6_wtp_data(struct cudbg_init *pdbg_init, /* Get TP_DBG_CSIDE registers*/ for (i = 0; i < 4; i++) { t4_tp_pio_read(padap, &value, 1, 
(u32)(A_TP_DBG_CSIDE_RX0 + i), - true); + true); wtp->utx_tpcside.sop[i] = ((value >> 28) & 0xF);/*bits 28:31*/ wtp->utx_tpcside.eop[i] = ((value >> 24) & 0xF);/*bits 24:27*/ @@ -1031,8 +1027,7 @@ static int t6_wtp_data(struct cudbg_init *pdbg_init, for (i = 0; i < 4; i++) { t4_tp_pio_read(padap, &value, 1, (u32)(A_TP_DBG_ESIDE_PKT0 + i), - true); - + true); wtp->tpeside_mps.sop[i] = ((value >> 28) & 0xF); /*bits 28:31*/ wtp->tpeside_mps.eop[i] = ((value >> 24) & 0xF); /*bits 24:27*/ @@ -1105,7 +1100,7 @@ static int t6_wtp_data(struct cudbg_init *pdbg_init, for (i = 0; i < 2; i++) { t4_tp_pio_read(padap, &value, 1, (u32)(A_TP_DBG_CSIDE_TX0 + i), - true); + true); wtp->utx_tpcside_tx.sop[i] = ((value >> 28) & 0xF);/*bits 28:31 */ @@ -1130,7 +1125,7 @@ static int t6_wtp_data(struct cudbg_init *pdbg_init, /*Get TP debug Eside PKTx*/ for (i = 0; i < 4; i++) { t4_tp_pio_read(padap, &value, 1, (u32)(A_TP_DBG_ESIDE_PKT0 + i), - true); + true); wtp->tp_dbg_eside_pktx.sop[i] = ((value >> 12) & 0xF); wtp->tp_dbg_eside_pktx.eop[i] = ((value >> 8) & 0xF); @@ -1281,12 +1276,11 @@ static int t6_wtp_data(struct cudbg_init *pdbg_init, } wtp->xgm_mps.err = (err & 0xFF); - rc = write_compression_hdr(&scratch_buff, dbg_buff); - + rc = write_compression_hdr(pdbg_init, &scratch_buff, dbg_buff); if (rc) goto err1; - rc = compress_buff(&scratch_buff, dbg_buff); + rc = compress_buff(pdbg_init, &scratch_buff, dbg_buff); err1: release_scratch_buff(&scratch_buff, dbg_buff); @@ -1294,9 +1288,9 @@ static int t6_wtp_data(struct cudbg_init *pdbg_init, return rc; } -int collect_wtp_data(struct cudbg_init *pdbg_init, - struct cudbg_buffer *dbg_buff, - struct cudbg_error *cudbg_err) +int cudbg_collect_wtp_data(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err) { struct adapter *padap = pdbg_init->adap; int rc = -1; diff --git a/sys/dev/cxgbe/cudbg/fastlz.c b/sys/dev/cxgbe/cudbg/fastlz.c index 41e5c99b287..1757741ab2f 100644 --- a/sys/dev/cxgbe/cudbg/fastlz.c +++ b/sys/dev/cxgbe/cudbg/fastlz.c @@ -27,102 +27,19 @@ __FBSDID("$FreeBSD$"); #include "osdep.h" +#include "fastlz_common.h" #include "fastlz.h" -#if !defined(FASTLZ__COMPRESSOR) && !defined(FASTLZ_DECOMPRESSOR) - -/* - * Always check for bound when decompressing. - * Generally it is best to leave it defined. - */ -#define FASTLZ_SAFE - -#if defined(WIN32) || defined(__NT__) || defined(_WIN32) || defined(__WIN32__) -#if defined(_MSC_VER) || defined(__GNUC__) -/* #include */ -#pragma warning(disable : 4242) -#pragma warning(disable : 4244) -#endif -#endif - -/* - * Give hints to the compiler for branch prediction optimization. - */ -#if defined(__GNUC__) && (__GNUC__ > 2) -#define FASTLZ_EXPECT_CONDITIONAL(c) (__builtin_expect((c), 1)) -#define FASTLZ_UNEXPECT_CONDITIONAL(c) (__builtin_expect((c), 0)) -#else -#define FASTLZ_EXPECT_CONDITIONAL(c) (c) -#define FASTLZ_UNEXPECT_CONDITIONAL(c) (c) -#endif - -/* - * Use inlined functions for supported systems. - */ -#if defined(__GNUC__) || defined(__DMC__) || defined(__POCC__) ||\ - defined(__WATCOMC__) || defined(__SUNPRO_C) -#define FASTLZ_INLINE inline -#elif defined(__BORLANDC__) || defined(_MSC_VER) || defined(__LCC__) -#define FASTLZ_INLINE __inline -#else -#define FASTLZ_INLINE -#endif - -/* - * Prevent accessing more than 8-bit at once, except on x86 architectures. 
- */ -#if !defined(FASTLZ_STRICT_ALIGN) -#define FASTLZ_STRICT_ALIGN -#if defined(__i386__) || defined(__386) /* GNU C, Sun Studio */ -#undef FASTLZ_STRICT_ALIGN -#elif defined(__i486__) || defined(__i586__) || defined(__i686__) /* GNU C */ -#undef FASTLZ_STRICT_ALIGN -#elif defined(_M_IX86) /* Intel, MSVC */ -#undef FASTLZ_STRICT_ALIGN -#elif defined(__386) -#undef FASTLZ_STRICT_ALIGN -#elif defined(_X86_) /* MinGW */ -#undef FASTLZ_STRICT_ALIGN -#elif defined(__I86__) /* Digital Mars */ -#undef FASTLZ_STRICT_ALIGN -#endif -#endif - -/* - * FIXME: use preprocessor magic to set this on different platforms! - */ - -#define MAX_COPY 32 -#define MAX_LEN 264 /* 256 + 8 */ -#define MAX_DISTANCE 8192 - -#if !defined(FASTLZ_STRICT_ALIGN) -#define FASTLZ_READU16(p) (*((const unsigned short *)(p))) -#else -#define FASTLZ_READU16(p) ((p)[0] | (p)[1]<<8) -#endif - -#define HASH_LOG 13 -#define HASH_SIZE (1 << HASH_LOG) -#define HASH_MASK (HASH_SIZE - 1) -#define HASH_FUNCTION(v, p) {\ - v = FASTLZ_READU16(p);\ - v ^= FASTLZ_READU16(p + 1)^\ - (v>>(16 - HASH_LOG));\ - v &= HASH_MASK;\ - } +#if !defined(FASTLZ_COMPRESSOR) #undef FASTLZ_LEVEL #define FASTLZ_LEVEL 1 #undef FASTLZ_COMPRESSOR -#undef FASTLZ_DECOMPRESSOR #define FASTLZ_COMPRESSOR fastlz1_compress -#define FASTLZ_DECOMPRESSOR fastlz1_decompress -static FASTLZ_INLINE int FASTLZ_COMPRESSOR(const void *input, int length, +static FASTLZ_INLINE int FASTLZ_COMPRESSOR(unsigned char *hash_table, + const void *input, int length, void *output); -static FASTLZ_INLINE int FASTLZ_DECOMPRESSOR(const void *input, int length, - void *output, int maxout); #include "fastlz.c" #undef FASTLZ_LEVEL @@ -133,63 +50,47 @@ static FASTLZ_INLINE int FASTLZ_DECOMPRESSOR(const void *input, int length, #define MAX_FARDISTANCE (65535 + MAX_DISTANCE - 1) #undef FASTLZ_COMPRESSOR -#undef FASTLZ_DECOMPRESSOR #define FASTLZ_COMPRESSOR fastlz2_compress -#define FASTLZ_DECOMPRESSOR fastlz2_decompress -static FASTLZ_INLINE int FASTLZ_COMPRESSOR(const void *input, int length, +static FASTLZ_INLINE int FASTLZ_COMPRESSOR(unsigned char *hash_table, + const void *input, int length, void *output); -static FASTLZ_INLINE int FASTLZ_DECOMPRESSOR(const void *input, int length, - void *output, int maxout); #include "fastlz.c" -int fastlz_compress(const void *input, int length, void *output) +int fastlz_compress(unsigned char *hash_table, const void *input, int length, + void *output) { /* for short block, choose fastlz1 */ if (length < 65536) - return fastlz1_compress(input, length, output); + return fastlz1_compress(hash_table, input, length, output); /* else... 
*/ - return fastlz2_compress(input, length, output); -} - -int fastlz_decompress(const void *input, int length, void *output, int maxout) -{ - /* magic identifier for compression level */ - int level = ((*(const unsigned char *)input) >> 5) + 1; - - if (level == 1) - return fastlz1_decompress(input, length, output, maxout); - if (level == 2) - return fastlz2_decompress(input, length, output, maxout); - - /* unknown level, trigger error */ - return 0; + return fastlz2_compress(hash_table, input, length, output); } -int fastlz_compress_level(int level, const void *input, int length, +int fastlz_compress_level(unsigned char *hash_table, int level, + const void *input, int length, void *output) { if (level == 1) - return fastlz1_compress(input, length, output); + return fastlz1_compress(hash_table, input, length, output); if (level == 2) - return fastlz2_compress(input, length, output); + return fastlz2_compress(hash_table, input, length, output); return 0; } -#else /* !defined(FASTLZ_COMPRESSOR) && !defined(FASTLZ_DECOMPRESSOR) */ - +#else /* !defined(FASTLZ_COMPRESSOR) */ -static FASTLZ_INLINE int FASTLZ_COMPRESSOR(const void *input, int length, +static FASTLZ_INLINE int FASTLZ_COMPRESSOR(unsigned char *hash_table, + const void *input, int length, void *output) { const unsigned char *ip = (const unsigned char *) input; const unsigned char *ip_bound = ip + length - 2; const unsigned char *ip_limit = ip + length - 12; unsigned char *op = (unsigned char *) output; - static const unsigned char *g_htab[HASH_SIZE]; - const unsigned char **htab = g_htab; + const unsigned char **htab = (const unsigned char **)hash_table; const unsigned char **hslot; unsigned int hval; @@ -209,7 +110,7 @@ static FASTLZ_INLINE int FASTLZ_COMPRESSOR(const void *input, int length, } /* initializes hash table */ - for (hslot = htab; hslot < htab + HASH_SIZE; hslot++) + for (hslot = htab; hslot < htab + FASTLZ_HASH_SIZE; hslot++) *hslot = ip; /* we start with literal copy */ @@ -229,6 +130,9 @@ static FASTLZ_INLINE int FASTLZ_COMPRESSOR(const void *input, int length, /* comparison starting-point */ const unsigned char *anchor = ip; + if (!anchor) + return 0; + /* check for a run */ #if FASTLZ_LEVEL == 2 if (ip[0] == ip[-1] && @@ -427,129 +331,4 @@ static FASTLZ_INLINE int FASTLZ_COMPRESSOR(const void *input, int length, return op - (unsigned char *)output; } - -static FASTLZ_INLINE int FASTLZ_DECOMPRESSOR(const void *input, int length, - void *output, int maxout) -{ - const unsigned char *ip = (const unsigned char *) input; - const unsigned char *ip_limit = ip + length; - unsigned char *op = (unsigned char *) output; - unsigned char *op_limit = op + maxout; - unsigned int ctrl = (*ip++) & 31; - int loop = 1; - - do { - const unsigned char *ref = op; - unsigned int len = ctrl >> 5; - unsigned int ofs = (ctrl & 31) << 8; - - if (ctrl >= 32) { -#if FASTLZ_LEVEL == 2 - unsigned char code; -#endif - len--; - ref -= ofs; - if (len == 7 - 1) -#if FASTLZ_LEVEL == 1 - len += *ip++; - ref -= *ip++; -#else - do { - code = *ip++; - len += code; - } while (code == 255); - code = *ip++; - ref -= code; - - /* match from 16-bit distance */ - if (FASTLZ_UNEXPECT_CONDITIONAL(code == 255)) - if (FASTLZ_EXPECT_CONDITIONAL(ofs == - (31 << 8))) { - ofs = (*ip++) << 8; - ofs += *ip++; - ref = op - ofs - MAX_DISTANCE; - } -#endif - -#ifdef FASTLZ_SAFE - if (FASTLZ_UNEXPECT_CONDITIONAL(op + len + 3 > - op_limit)) - return 0; - - if (FASTLZ_UNEXPECT_CONDITIONAL(ref - 1 < - (unsigned char *)output) - ) - return 0; -#endif - - if 
(FASTLZ_EXPECT_CONDITIONAL(ip < ip_limit)) - ctrl = *ip++; - else - loop = 0; - - if (ref == op) { - /* optimize copy for a run */ - unsigned char b = ref[-1]; - *op++ = b; - *op++ = b; - *op++ = b; - for (; len; --len) - *op++ = b; - } else { -#if !defined(FASTLZ_STRICT_ALIGN) - const unsigned short *p; - unsigned short *q; -#endif - /* copy from reference */ - ref--; - *op++ = *ref++; - *op++ = *ref++; - *op++ = *ref++; - -#if !defined(FASTLZ_STRICT_ALIGN) - /* copy a byte, so that now it's word aligned */ - if (len & 1) { - *op++ = *ref++; - len--; - } - - /* copy 16-bit at once */ - q = (unsigned short *) op; - op += len; - p = (const unsigned short *) ref; - for (len >>= 1; len > 4; len -= 4) { - *q++ = *p++; - *q++ = *p++; - *q++ = *p++; - *q++ = *p++; - } - for (; len; --len) - *q++ = *p++; -#else - for (; len; --len) - *op++ = *ref++; -#endif - } - } else { - ctrl++; -#ifdef FASTLZ_SAFE - if (FASTLZ_UNEXPECT_CONDITIONAL(op + ctrl > op_limit)) - return 0; - if (FASTLZ_UNEXPECT_CONDITIONAL(ip + ctrl > ip_limit)) - return 0; -#endif - - *op++ = *ip++; - for (--ctrl; ctrl; ctrl--) - *op++ = *ip++; - - loop = FASTLZ_EXPECT_CONDITIONAL(ip < ip_limit); - if (loop) - ctrl = *ip++; - } - } while (FASTLZ_EXPECT_CONDITIONAL(loop)); - - return op - (unsigned char *)output; -} - -#endif /* !defined(FASTLZ_COMPRESSOR) && !defined(FASTLZ_DECOMPRESSOR) */ +#endif /* !defined(FASTLZ_COMPRESSOR) */ diff --git a/sys/dev/cxgbe/cudbg/fastlz.h b/sys/dev/cxgbe/cudbg/fastlz.h index 5aa474fa5a8..395e3d8c332 100644 --- a/sys/dev/cxgbe/cudbg/fastlz.h +++ b/sys/dev/cxgbe/cudbg/fastlz.h @@ -36,27 +36,17 @@ #define FASTLZ_VERSION_STRING "0.1.0" -struct cudbg_buffer; +int fastlz_compress(unsigned char *hash_table, const void *input, int length, + void *output); -int fastlz_compress(const void *input, int length, void *output); -int fastlz_compress_level(int level, const void *input, int length, - void *output); -int fastlz_decompress(const void *input, int length, void *output, int maxout); +int fastlz_compress_level(unsigned char *hash_table, int level, + const void *input, int length, void *output); /* prototypes */ - -int write_magic(struct cudbg_buffer *); -int detect_magic(struct cudbg_buffer *); - -int write_to_buf(void *, u32, u32 *, void *, u32); -int read_from_buf(void *, u32, u32 *, void *, u32); - -int write_chunk_header(struct cudbg_buffer *, int, int, unsigned long, - unsigned long, unsigned long); - -int read_chunk_header(struct cudbg_buffer *, int* , int*, unsigned long*, - unsigned long*, unsigned long*); - -unsigned long block_compress(const unsigned char *, unsigned long length, - unsigned char *); +struct cudbg_init; +struct cudbg_buffer; +int write_magic(struct cudbg_init *, struct cudbg_buffer *); +int write_to_buf(struct cudbg_init *, void *, u32, u32 *, void *, u32); +int write_chunk_header(struct cudbg_init *, struct cudbg_buffer *, int, int, + unsigned long, unsigned long, unsigned long); #endif /* FASTLZ_H */ diff --git a/sys/dev/cxgbe/cudbg/fastlz_api.c b/sys/dev/cxgbe/cudbg/fastlz_api.c index a513557ad35..3b705a76faf 100644 --- a/sys/dev/cxgbe/cudbg/fastlz_api.c +++ b/sys/dev/cxgbe/cudbg/fastlz_api.c @@ -73,50 +73,45 @@ static inline unsigned long update_adler32(unsigned long checksum, return (s2 << 16) + s1; } -int write_magic(struct cudbg_buffer *_out_buff) +int write_magic(struct cudbg_init *pdbg_init, struct cudbg_buffer *_out_buff) { - int rc; - - rc = write_to_buf(_out_buff->data, _out_buff->size, &_out_buff->offset, - sixpack_magic, 8); - - return rc; + return 
write_to_buf(pdbg_init, _out_buff->data, _out_buff->size, + &_out_buff->offset, sixpack_magic, 8); } -int write_to_buf(void *out_buf, u32 out_buf_size, u32 *offset, void *in_buf, - u32 in_buf_size) +int write_to_buf(struct cudbg_init *pdbg_init, void *out_buf, u32 out_buf_size, + u32 *offset, void *in_buf, u32 in_buf_size) { int rc = 0; - if (*offset >= out_buf_size) { - rc = CUDBG_STATUS_OUTBUFF_OVERFLOW; - goto err; - } + if (*offset >= out_buf_size) + return CUDBG_STATUS_OUTBUFF_OVERFLOW; - memcpy((char *)out_buf + *offset, in_buf, in_buf_size); - *offset = *offset + in_buf_size; + if (pdbg_init && pdbg_init->write_to_file_cb) { + /* Write only data to file. Header will be updated later. */ + rc = pdbg_init->write_to_file_cb(CUDBG_FILE_WRITE_DATA, 0, + (u8 *)in_buf, in_buf_size); + if (rc) + return rc; + + /* Update entity header size now since the buffer will + * be reused + */ + cudbg_update_entity_hdr(pdbg_init, in_buf_size); + } else { + memcpy((char *)out_buf + *offset, in_buf, in_buf_size); + *offset = *offset + in_buf_size; + } -err: return rc; } -int read_from_buf(void *in_buf, u32 in_buf_size, u32 *offset, void *out_buf, - u32 out_buf_size) -{ - if (in_buf_size - *offset < out_buf_size) - return 0; - - memcpy((char *)out_buf, (char *)in_buf + *offset, out_buf_size); - *offset = *offset + out_buf_size; - return out_buf_size; -} - -int write_chunk_header(struct cudbg_buffer *_outbuf, int id, int options, +int write_chunk_header(struct cudbg_init *pdbg_init, + struct cudbg_buffer *_outbuf, int id, int options, unsigned long size, unsigned long checksum, unsigned long extra) { unsigned char buffer[CUDBG_CHUNK_BUF_LEN]; - int rc = 0; buffer[0] = id & 255; buffer[1] = (unsigned char)(id >> 8); @@ -135,13 +130,12 @@ int write_chunk_header(struct cudbg_buffer *_outbuf, int id, int options, buffer[14] = (extra >> 16) & 255; buffer[15] = (extra >> 24) & 255; - rc = write_to_buf(_outbuf->data, _outbuf->size, &_outbuf->offset, - buffer, 16); - - return rc; + return write_to_buf(pdbg_init, _outbuf->data, _outbuf->size, + &_outbuf->offset, buffer, 16); } -int write_compression_hdr(struct cudbg_buffer *pin_buff, +int write_compression_hdr(struct cudbg_init *pdbg_init, + struct cudbg_buffer *pin_buff, struct cudbg_buffer *pout_buff) { struct cudbg_buffer tmp_buffer; @@ -151,6 +145,9 @@ int write_compression_hdr(struct cudbg_buffer *pin_buff, int rc; char *shown_name = "abc"; + if (fsize == 0) + return CUDBG_STATUS_NO_DATA; + /* Always release inner scratch buffer, before releasing outer. 
*/ rc = get_scratch_buff(pout_buff, 10, &tmp_buffer); @@ -159,7 +156,7 @@ int write_compression_hdr(struct cudbg_buffer *pin_buff, buffer = (unsigned char *)tmp_buffer.data; - rc = write_magic(pout_buff); + rc = write_magic(pdbg_init, pout_buff); if (rc) goto err1; @@ -180,22 +177,22 @@ int write_compression_hdr(struct cudbg_buffer *pin_buff, checksum = update_adler32(checksum, shown_name, (int)strlen(shown_name)+1); - rc = write_chunk_header(pout_buff, 1, 0, + rc = write_chunk_header(pdbg_init, pout_buff, 1, 0, 10+(unsigned long)strlen(shown_name)+1, checksum, 0); if (rc) goto err1; - rc = write_to_buf(pout_buff->data, pout_buff->size, + rc = write_to_buf(pdbg_init, pout_buff->data, pout_buff->size, &(pout_buff->offset), buffer, 10); if (rc) goto err1; - rc = write_to_buf(pout_buff->data, pout_buff->size, - &(pout_buff->offset), shown_name, - (u32)strlen(shown_name)+1); + rc = write_to_buf(pdbg_init, pout_buff->data, pout_buff->size, + &(pout_buff->offset), shown_name, + (u32)strlen(shown_name)+1); if (rc) goto err1; @@ -206,8 +203,10 @@ int write_compression_hdr(struct cudbg_buffer *pin_buff, return rc; } -int compress_buff(struct cudbg_buffer *pin_buff, struct cudbg_buffer *pout_buff) +int compress_buff(struct cudbg_init *pdbg_init, struct cudbg_buffer *pin_buff, + struct cudbg_buffer *pout_buff) { + unsigned char *hash_table = pdbg_init->hash_table; struct cudbg_buffer tmp_buffer; struct cudbg_hdr *cudbg_hdr; unsigned long checksum; @@ -231,22 +230,24 @@ int compress_buff(struct cudbg_buffer *pin_buff, struct cudbg_buffer *pout_buff) switch (compress_method) { case 1: - chunk_size = fastlz_compress_level(level, pin_buff->data, + chunk_size = fastlz_compress_level(hash_table, level, + pin_buff->data, bytes_read, result); checksum = update_adler32(1L, result, chunk_size); + /* This check is for debugging Bug #28806 */ if ((chunk_size > 62000) && (cudbg_hdr->reserved[7] < (u32) chunk_size)) /* 64512 */ cudbg_hdr->reserved[7] = (u32) chunk_size; - rc = write_chunk_header(pout_buff, 17, 1, chunk_size, checksum, - bytes_read); + rc = write_chunk_header(pdbg_init, pout_buff, 17, 1, chunk_size, + checksum, bytes_read); if (rc) goto err_put_buff; - rc = write_to_buf(pout_buff->data, pout_buff->size, + rc = write_to_buf(pdbg_init, pout_buff->data, pout_buff->size, &pout_buff->offset, result, chunk_size); if (rc) @@ -259,13 +260,13 @@ int compress_buff(struct cudbg_buffer *pin_buff, struct cudbg_buffer *pout_buff) default: checksum = update_adler32(1L, pin_buff->data, bytes_read); - rc = write_chunk_header(pout_buff, 17, 0, bytes_read, checksum, - bytes_read); + rc = write_chunk_header(pdbg_init, pout_buff, 17, 0, bytes_read, + checksum, bytes_read); if (rc) goto err_put_buff; - rc = write_to_buf(pout_buff->data, pout_buff->size, + rc = write_to_buf(pdbg_init, pout_buff->data, pout_buff->size, &pout_buff->offset, pin_buff->data, bytes_read); if (rc) @@ -279,253 +280,3 @@ int compress_buff(struct cudbg_buffer *pin_buff, struct cudbg_buffer *pout_buff) err: return rc; } - -/* return non-zero if magic sequence is detected */ -/* warning: reset the read pointer to the beginning of the file */ -int detect_magic(struct cudbg_buffer *_c_buff) -{ - unsigned char buffer[8]; - size_t bytes_read; - int c; - - bytes_read = read_from_buf(_c_buff->data, _c_buff->size, - &_c_buff->offset, buffer, 8); - - if (bytes_read < 8) - return 0; - - for (c = 0; c < 8; c++) - if (buffer[c] != sixpack_magic[c]) - return 0; - - return -1; -} - -static inline unsigned long readU16(const unsigned char *ptr) -{ - return 
ptr[0]+(ptr[1]<<8); -} - -static inline unsigned long readU32(const unsigned char *ptr) -{ - return ptr[0]+(ptr[1]<<8)+(ptr[2]<<16)+(ptr[3]<<24); -} - -int read_chunk_header(struct cudbg_buffer *pc_buff, int *pid, int *poptions, - unsigned long *psize, unsigned long *pchecksum, - unsigned long *pextra) -{ - unsigned char buffer[CUDBG_CHUNK_BUF_LEN]; - int byte_r = read_from_buf(pc_buff->data, pc_buff->size, - &pc_buff->offset, buffer, 16); - if (byte_r == 0) - return 0; - - *pid = readU16(buffer) & 0xffff; - *poptions = readU16(buffer+2) & 0xffff; - *psize = readU32(buffer+4) & 0xffffffff; - *pchecksum = readU32(buffer+8) & 0xffffffff; - *pextra = readU32(buffer+12) & 0xffffffff; - return 0; -} - -int validate_buffer(struct cudbg_buffer *compressed_buffer) -{ - if (!detect_magic(compressed_buffer)) - return CUDBG_STATUS_INVALID_BUFF; - - return 0; -} - -int decompress_buffer(struct cudbg_buffer *pc_buff, - struct cudbg_buffer *pd_buff) -{ - struct cudbg_buffer tmp_compressed_buffer; - struct cudbg_buffer tmp_decompressed_buffer; - unsigned char *compressed_buffer; - unsigned char *decompressed_buffer; - unsigned char buffer[CUDBG_MIN_COMPR_LEN]; - unsigned long chunk_size; - unsigned long chunk_checksum; - unsigned long chunk_extra; - unsigned long checksum; - unsigned long total_extracted = 0; - unsigned long r; - unsigned long remaining; - unsigned long bytes_read; - u32 decompressed_size = 0; - int chunk_id, chunk_options, rc; - - if (pd_buff->size < 2 * CUDBG_BLOCK_SIZE) - return CUDBG_STATUS_SMALL_BUFF; - - rc = get_scratch_buff(pd_buff, CUDBG_BLOCK_SIZE, - &tmp_compressed_buffer); - - if (rc) - goto err_cbuff; - - rc = get_scratch_buff(pd_buff, CUDBG_BLOCK_SIZE, - &tmp_decompressed_buffer); - if (rc) - goto err_dcbuff; - - compressed_buffer = (unsigned char *)tmp_compressed_buffer.data; - decompressed_buffer = (unsigned char *)tmp_decompressed_buffer.data; - - /* main loop */ - - for (;;) { - if (pc_buff->offset > pc_buff->size) - break; - - rc = read_chunk_header(pc_buff, &chunk_id, &chunk_options, - &chunk_size, &chunk_checksum, - &chunk_extra); - if (rc != 0) - break; - - /* skip 8+16 */ - if ((chunk_id == 1) && (chunk_size > 10) && - (chunk_size < CUDBG_BLOCK_SIZE)) { - - bytes_read = read_from_buf(pc_buff->data, pc_buff->size, - &pc_buff->offset, buffer, - chunk_size); - - if (bytes_read == 0) - return 0; - - checksum = update_adler32(1L, buffer, chunk_size); - if (checksum != chunk_checksum) - return CUDBG_STATUS_CHKSUM_MISSMATCH; - - decompressed_size = (u32)readU32(buffer); - - if (pd_buff->size < decompressed_size) { - - pd_buff->size = 2 * CUDBG_BLOCK_SIZE + - decompressed_size; - pc_buff->offset -= chunk_size + 16; - return CUDBG_STATUS_SMALL_BUFF; - } - total_extracted = 0; - - } - - if (chunk_size > CUDBG_BLOCK_SIZE) { - /* Release old allocated memory */ - release_scratch_buff(&tmp_decompressed_buffer, pd_buff); - release_scratch_buff(&tmp_compressed_buffer, pd_buff); - - /* allocate new memory with chunk_size size */ - rc = get_scratch_buff(pd_buff, chunk_size, - &tmp_compressed_buffer); - if (rc) - goto err_cbuff; - - rc = get_scratch_buff(pd_buff, chunk_size, - &tmp_decompressed_buffer); - if (rc) - goto err_dcbuff; - - compressed_buffer = (unsigned char *)tmp_compressed_buffer.data; - decompressed_buffer = (unsigned char *)tmp_decompressed_buffer.data; - } - - if ((chunk_id == 17) && decompressed_size) { - /* uncompressed */ - switch (chunk_options) { - /* stored, simply copy to output */ - case 0: - total_extracted += chunk_size; - remaining = chunk_size; - 
checksum = 1L; - for (;;) { - /* Write a funtion for this */ - r = (CUDBG_BLOCK_SIZE < remaining) ? - CUDBG_BLOCK_SIZE : remaining; - bytes_read = - read_from_buf(pc_buff->data, - pc_buff->size, - &pc_buff->offset, buffer, - r); - - if (bytes_read == 0) - return 0; - - write_to_buf(pd_buff->data, - pd_buff->size, - &pd_buff->offset, buffer, - bytes_read); - checksum = update_adler32(checksum, - buffer, - bytes_read); - remaining -= bytes_read; - - /* verify everything is written - * correctly */ - if (checksum != chunk_checksum) - return - CUDBG_STATUS_CHKSUM_MISSMATCH; - } - - break; - - /* compressed using FastLZ */ - case 1: - bytes_read = read_from_buf(pc_buff->data, - pc_buff->size, - &pc_buff->offset, - compressed_buffer, - chunk_size); - - if (bytes_read == 0) - return 0; - - checksum = update_adler32(1L, compressed_buffer, - chunk_size); - total_extracted += chunk_extra; - - /* verify that the chunk data is correct */ - if (checksum != chunk_checksum) { - return CUDBG_STATUS_CHKSUM_MISSMATCH; - } else { - /* decompress and verify */ - remaining = - fastlz_decompress(compressed_buffer, - chunk_size, - decompressed_buffer, - chunk_extra); - - if (remaining != chunk_extra) { - rc = - CUDBG_STATUS_DECOMPRESS_FAIL; - goto err; - } else { - write_to_buf(pd_buff->data, - pd_buff->size, - &pd_buff->offset, - decompressed_buffer, - chunk_extra); - } - } - break; - - default: - break; - } - - } - - } - -err: - release_scratch_buff(&tmp_decompressed_buffer, pd_buff); -err_dcbuff: - release_scratch_buff(&tmp_compressed_buffer, pd_buff); - -err_cbuff: - return rc; -} - diff --git a/sys/dev/cxgbe/cudbg/fastlz_common.h b/sys/dev/cxgbe/cudbg/fastlz_common.h new file mode 100644 index 00000000000..7e3b0151190 --- /dev/null +++ b/sys/dev/cxgbe/cudbg/fastlz_common.h @@ -0,0 +1,141 @@ +#ifndef __FASTLZ_COMMON_H__ +#define __FASTLZ_COMMON_H__ + +#define FASTLZ_HASH_LOG 13 +#define FASTLZ_HASH_SIZE (1 << FASTLZ_HASH_LOG) +#define FASTLZ_HASH_MASK (FASTLZ_HASH_SIZE - 1) + +/* + * Always check for bound when decompressing. + * Generally it is best to leave it defined. + */ +#define FASTLZ_SAFE + +#if defined(WIN32) || defined(__NT__) || defined(_WIN32) || defined(__WIN32__) +#if defined(_MSC_VER) || defined(__GNUC__) +/* #include */ +#pragma warning(disable : 4242) +#pragma warning(disable : 4244) +/* 4214 - nonstandard extension used : bit field types other than int */ +#pragma warning(disable : 4214) +#endif +#endif + +/* + * Give hints to the compiler for branch prediction optimization. + */ +#if defined(__GNUC__) && (__GNUC__ > 2) +#define FASTLZ_EXPECT_CONDITIONAL(c) (__builtin_expect((c), 1)) +#define FASTLZ_UNEXPECT_CONDITIONAL(c) (__builtin_expect((c), 0)) +#else +#define FASTLZ_EXPECT_CONDITIONAL(c) (c) +#define FASTLZ_UNEXPECT_CONDITIONAL(c) (c) +#endif + +/* + * Use inlined functions for supported systems. + */ +#if defined(__GNUC__) || defined(__DMC__) || defined(__POCC__) ||\ + defined(__WATCOMC__) || defined(__SUNPRO_C) +#define FASTLZ_INLINE inline +#elif defined(__BORLANDC__) || defined(_MSC_VER) || defined(__LCC__) +#define FASTLZ_INLINE __inline +#else +#define FASTLZ_INLINE +#endif + +/* + * Prevent accessing more than 8-bit at once, except on x86 architectures. 
+ */ +#if !defined(FASTLZ_STRICT_ALIGN) +#define FASTLZ_STRICT_ALIGN +#if defined(__i386__) || defined(__386) /* GNU C, Sun Studio */ +#undef FASTLZ_STRICT_ALIGN +#elif defined(__i486__) || defined(__i586__) || defined(__i686__) /* GNU C */ +#undef FASTLZ_STRICT_ALIGN +#elif defined(_M_IX86) /* Intel, MSVC */ +#undef FASTLZ_STRICT_ALIGN +#elif defined(__386) +#undef FASTLZ_STRICT_ALIGN +#elif defined(_X86_) /* MinGW */ +#undef FASTLZ_STRICT_ALIGN +#elif defined(__I86__) /* Digital Mars */ +#undef FASTLZ_STRICT_ALIGN +#endif +#endif + +/* + * FIXME: use preprocessor magic to set this on different platforms! + */ + +#define MAX_COPY 32 +#define MAX_LEN 264 /* 256 + 8 */ +#define MAX_DISTANCE 8192 + +#if !defined(FASTLZ_STRICT_ALIGN) +#define FASTLZ_READU16(p) (*((const unsigned short *)(p))) +#else +#define FASTLZ_READU16(p) ((p)[0] | (p)[1]<<8) +#endif + +#define HASH_FUNCTION(v, p) {\ + v = FASTLZ_READU16(p);\ + v ^= FASTLZ_READU16(p + 1)^\ + (v>>(16 - FASTLZ_HASH_LOG));\ + v &= FASTLZ_HASH_MASK;\ + } + +extern unsigned char sixpack_magic[8]; + +#define CUDBG_BLOCK_SIZE (63*1024) +#define CUDBG_CHUNK_BUF_LEN 16 +#define CUDBG_MIN_COMPR_LEN 32 /*min data length for applying compression*/ + +/* + * Use inlined functions for supported systems. + */ +#if defined(__GNUC__) || defined(__DMC__) || defined(__POCC__) || \ + defined(__WATCOMC__) || defined(__SUNPRO_C) + +#elif defined(__BORLANDC__) || defined(_MSC_VER) || defined(__LCC__) +#define inline __inline +#else +#define inline +#endif + +/* for Adler-32 checksum algorithm, see RFC 1950 Section 8.2 */ + +#define ADLER32_BASE 65521 + +static inline unsigned long update_adler32(unsigned long checksum, + const void *buf, int len) +{ + const unsigned char *ptr = (const unsigned char *)buf; + unsigned long s1 = checksum & 0xffff; + unsigned long s2 = (checksum >> 16) & 0xffff; + + while (len > 0) { + unsigned k = len < 5552 ? 
len : 5552; + len -= k; + + while (k >= 8) { + s1 += *ptr++; s2 += s1; + s1 += *ptr++; s2 += s1; + s1 += *ptr++; s2 += s1; + s1 += *ptr++; s2 += s1; + s1 += *ptr++; s2 += s1; + s1 += *ptr++; s2 += s1; + s1 += *ptr++; s2 += s1; + s1 += *ptr++; s2 += s1; + k -= 8; + } + + while (k-- > 0) { + s1 += *ptr++; s2 += s1; + } + s1 = s1 % ADLER32_BASE; + s2 = s2 % ADLER32_BASE; + } + return (s2 << 16) + s1; +} +#endif /* __FASTLZ_COMMON_H__ */ diff --git a/sys/dev/cxgbe/t4_ioctl.h b/sys/dev/cxgbe/t4_ioctl.h index f3bb7d8b4aa..82d94deaaed 100644 --- a/sys/dev/cxgbe/t4_ioctl.h +++ b/sys/dev/cxgbe/t4_ioctl.h @@ -66,6 +66,7 @@ enum { T4_SET_FILTER_MASK, /* set filter mask (hashfilter mode) */ T4_HOLD_CLIP_ADDR, /* add ref on an IP in the CLIP */ T4_RELEASE_CLIP_ADDR, /* remove ref from an IP in the CLIP */ + T4_CUDBG_DUMP_COPY, /* copy out any saved debug dump */ }; struct t4_reg { @@ -441,4 +442,5 @@ struct t4_clip_addr { #define CHELSIO_T4_SET_FILTER_MASK _IOW('f', T4_SET_FILTER_MASK, uint32_t) #define CHELSIO_T4_HOLD_CLIP_ADDR _IOW('f', T4_HOLD_CLIP_ADDR, struct t4_clip_addr) #define CHELSIO_T4_RELEASE_CLIP_ADDR _IOW('f', T4_RELEASE_CLIP_ADDR, struct t4_clip_addr) +#define CHELSIO_T4_CUDBG_DUMP_COPY _IOWR('f', T4_CUDBG_DUMP_COPY, struct t4_cudbg_dump) #endif diff --git a/sys/dev/cxgbe/t4_main.c b/sys/dev/cxgbe/t4_main.c index 67500b0c44b..e2ea6cdb77d 100644 --- a/sys/dev/cxgbe/t4_main.c +++ b/sys/dev/cxgbe/t4_main.c @@ -608,6 +608,16 @@ SYSCTL_INT(_hw_cxgbe, OID_AUTO, pcie_relaxed_ordering, CTLFLAG_RDTUN, &pcie_relaxed_ordering, 0, "PCIe Relaxed Ordering: 0 = disable, 1 = enable, 2 = leave alone"); +static int t4_dump_on_fatal_err = 1; /* XXX: for testing. */ +SYSCTL_INT(_hw_cxgbe, OID_AUTO, dump_on_fatal_err, CTLFLAG_RWTUN, + &t4_dump_on_fatal_err, 0, "generate debug dump on fatal errors"); + +static int t4_dump_to_card_flash = 0; +SYSCTL_INT(_hw_cxgbe, OID_AUTO, dump_to_card_flash, CTLFLAG_RWTUN, + &t4_dump_to_card_flash, 0, "write debug dump to card's flash"); + +static uint8_t t4_cudbg_bitmap[CUDBG_MAX_BITMAP_LEN]; + static int t4_panic_on_fatal_err = 0; SYSCTL_INT(_hw_cxgbe, OID_AUTO, panic_on_fatal_err, CTLFLAG_RWTUN, &t4_panic_on_fatal_err, 0, "panic on fatal errors"); @@ -832,7 +842,12 @@ static int load_fw(struct adapter *, struct t4_data *); static int load_cfg(struct adapter *, struct t4_data *); static int load_boot(struct adapter *, struct t4_bootrom *); static int load_bootcfg(struct adapter *, struct t4_data *); +static void cudbg_reg_lock(void *); +static void cudbg_reg_unlock(void *); +static int cudbg_read_mem(struct cudbg_init *, uint8_t, uint32_t, uint32_t, + uint8_t *); static int cudbg_dump(struct adapter *, struct t4_cudbg_dump *); +static int cudbg_dump_copy(struct adapter *, struct t4_cudbg_dump *); static void free_offload_policy(struct t4_offload_policy *); static int set_offload_policy(struct adapter *, struct t4_offload_policy *); static int read_card_mem(struct adapter *, int, struct t4_mem_range *); @@ -842,7 +857,7 @@ static int hold_clip_addr(struct adapter *, struct t4_clip_addr *); static int release_clip_addr(struct adapter *, struct t4_clip_addr *); #ifdef TCP_OFFLOAD static int toe_capability(struct vi_info *, bool); -static void t4_async_event(void *, int); +static void t4_async_event(struct adapter *); #endif #ifdef KERN_TLS static int ktls_capability(struct adapter *, bool); @@ -853,6 +868,7 @@ static uint64_t vi_get_counter(struct ifnet *, ift_counter); static uint64_t cxgbe_get_counter(struct ifnet *, ift_counter); static void enable_vxlan_rx(struct adapter 
*); static void reset_adapter(void *, int); +static void fatal_err_task(void *, int); struct { uint16_t device; @@ -1155,13 +1171,14 @@ t4_attach(device_t dev) callout_init(&sc->ktls_tick, 1); -#ifdef TCP_OFFLOAD - TASK_INIT(&sc->async_event_task, 0, t4_async_event, sc); -#endif - refcount_init(&sc->vxlan_refcount, 0); TASK_INIT(&sc->reset_task, 0, reset_adapter, sc); + TASK_INIT(&sc->fatal_err_task, 0, fatal_err_task, sc); + + mtx_init(&sc->dump_lock, "debug dumps", 0, MTX_DEF); + cv_init(&sc->dump_cv, "debug dumps cv"); + STAILQ_INIT(&sc->dump_list); sc->ctrlq_oid = SYSCTL_ADD_NODE(device_get_sysctl_ctx(sc->dev), SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)), OID_AUTO, "ctrlq", @@ -1694,9 +1711,8 @@ t4_detach_common(device_t dev) } } -#ifdef TCP_OFFLOAD - taskqueue_drain(taskqueue_thread, &sc->async_event_task); -#endif + taskqueue_drain(taskqueue_thread, &sc->reset_task); + taskqueue_drain(taskqueue_thread, &sc->fatal_err_task); for (i = 0; i < sc->intr_count; i++) t4_free_irq(sc, &sc->irq[i]); @@ -2226,6 +2242,7 @@ t4_resume(device_t dev) mtx_lock(&sc->reg_lock); sc->flags &= ~HW_OFF_LIMITS; mtx_unlock(&sc->reg_lock); + atomic_store_int(&sc->ignore_err_intr, 0); if (sc->flags & FULL_INIT_DONE) { rc = adapter_full_init(sc); @@ -3518,26 +3535,106 @@ delayed_panic(void *arg) panic("%s: panic on fatal error", device_get_nameunit(sc->dev)); } -void -t4_fatal_err(struct adapter *sc, bool fw_error) +#define CUDBG_SCRATCH_BUFFER_LEN (1024 * 1024) +static void +generate_cudbg_dump(struct adapter *sc) { + int rc; + struct cudbg_init ci = {0}, *cudbg = &ci; /* XXX: ~550B */ + void *handle = NULL, *buf = NULL, *cudbg_buf = NULL; + uint32_t cudbg_buflen, dump_len; + struct saved_cudbg_dump *saved_dump; - t4_shutdown_adapter(sc); - log(LOG_ALERT, "%s: encountered fatal error, adapter stopped.\n", - device_get_nameunit(sc->dev)); - if (fw_error) { - if (sc->flags & CHK_MBOX_ACCESS) - ASSERT_SYNCHRONIZED_OP(sc); - sc->flags |= ADAP_ERR; + /* buf is large, don't block if no memory is available */ + dump_len = CUDBG_SCRATCH_BUFFER_LEN; + buf = malloc(dump_len, M_CXGBE, M_NOWAIT | M_ZERO); + if (buf == NULL) { + rc = ENOMEM; + goto done; + } + + cudbg_buflen = 132000; + cudbg_buf = malloc(cudbg_buflen, M_CXGBE, M_NOWAIT | M_ZERO); + if (cudbg_buf == NULL) { + rc = ENOMEM; + goto done; + } + + init_cudbg_hdr(&cudbg->header); + cudbg->adap = sc; + cudbg->print = (cudbg_print_cb)printf; + cudbg->mc_collect_cb = cudbg_read_mem; + cudbg->write_to_file_cb = NULL; + cudbg->sw_state_buf = NULL; + cudbg->sw_state_buflen = 0; + cudbg->access_lock = &sc->reg_lock; + cudbg->lock_cb = cudbg_reg_lock; + cudbg->unlock_cb = cudbg_reg_unlock; + cudbg->yield_cb = NULL; + if (t4_dump_to_card_flash) + cudbg->use_flash = 1; + if (sc->debug_flags & DF_VERBOSE_CUDBG) + cudbg->verbose = 1; +#if 1 + memset(t4_cudbg_bitmap, 0xff, sizeof(t4_cudbg_bitmap)); + clrbit(t4_cudbg_bitmap, CUDBG_ALL); + clrbit(t4_cudbg_bitmap, CUDBG_EDC0); + clrbit(t4_cudbg_bitmap, CUDBG_EDC1); + clrbit(t4_cudbg_bitmap, CUDBG_MC0); + clrbit(t4_cudbg_bitmap, CUDBG_MC1); +#endif + MPASS(sizeof(cudbg->dbg_bitmap) == sizeof(t4_cudbg_bitmap)); + memcpy(cudbg->dbg_bitmap, t4_cudbg_bitmap, sizeof(cudbg->dbg_bitmap)); + + rc = cudbg_hello2(cudbg, &handle, cudbg_buf, &cudbg_buflen); + if (rc != 0) { + CH_ALERT(sc, "sizeof(cudbg_init) = %zd, cudbg_buflen %d.\n", + sizeof(ci), cudbg_buflen); + goto done; + } + + rc = cudbg_collect(handle, buf, &dump_len); + if (rc != 0) { + CH_ALERT(sc, "failed to generate debug dump: %d. 
" + "scratch buffer %d, dump_len %d\n", rc, + CUDBG_SCRATCH_BUFFER_LEN, dump_len); } else { - ADAPTER_LOCK(sc); - sc->flags |= ADAP_ERR; - ADAPTER_UNLOCK(sc); + saved_dump = malloc(sizeof(*saved_dump) + dump_len, M_CXGBE, + M_NOWAIT); + if (saved_dump == NULL) { + rc = ENOMEM; + goto done; + } + memcpy(saved_dump->dump_buf, buf, dump_len); + saved_dump->dump_len = dump_len; + CH_ALERT(sc, "generated debug dump of %dB @ %p\n", dump_len, + saved_dump->dump_buf); + + mtx_lock(&sc->dump_lock); + STAILQ_INSERT_TAIL(&sc->dump_list, saved_dump, link); + cv_signal(&sc->dump_cv); + mtx_unlock(&sc->dump_lock); } +done: + if (handle != NULL) + cudbg_bye(handle); + free(cudbg_buf, M_CXGBE); + free(buf, M_CXGBE); +} + +static void +fatal_err_task(void *arg, int pending) +{ + struct adapter *sc = arg; + #ifdef TCP_OFFLOAD - taskqueue_enqueue(taskqueue_thread, &sc->async_event_task); + /* Let the ULDs know that there is a problem. */ + t4_async_event(sc); #endif + if (t4_dump_on_fatal_err) + generate_cudbg_dump(sc); + if (t4_panic_on_fatal_err) { CH_ALERT(sc, "panicking on fatal error (after 30s).\n"); callout_reset(&fatal_callout, hz * 30, delayed_panic, sc); @@ -3547,6 +3644,20 @@ t4_fatal_err(struct adapter *sc, bool fw_error) } } +void +t4_fatal_err(struct adapter *sc) +{ + /* + * Stop the adapter immediately but defer rest of the handling to a + * different thread. The current thread is either an ithread (err intr) + * or is already in a synchronized op (mbox error). + */ + t4_shutdown_adapter(sc); + atomic_store_int(&sc->ignore_err_intr, 1); + taskqueue_enqueue(taskqueue_thread, &sc->fatal_err_task); + CH_ALERT(sc, "encountered fatal error, adapter stopped.\n"); +} + void t4_add_adapter(struct adapter *sc) { @@ -9117,13 +9228,15 @@ sysctl_cpl_stats(SYSCTL_HANDLER_ARGS) if (sb == NULL) return (ENOMEM); - mtx_lock(&sc->reg_lock); + rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4cpst"); + if (rc != 0) + goto done; if (hw_off_limits(sc)) rc = ENXIO; else - t4_tp_get_cpl_stats(sc, &stats, 0); - mtx_unlock(&sc->reg_lock); - if (rc) + t4_tp_get_cpl_stats(sc, &stats, 1); + end_synchronized_op(sc, 0); + if (rc != 0) goto done; if (sc->chip_params->nchan > 2) { @@ -9163,20 +9276,22 @@ sysctl_ddp_stats(SYSCTL_HANDLER_ARGS) if (sb == NULL) return (ENOMEM); - mtx_lock(&sc->reg_lock); + rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ddst"); + if (rc != 0) + goto done; if (hw_off_limits(sc)) rc = ENXIO; else t4_get_usm_stats(sc, &stats, 1); - mtx_unlock(&sc->reg_lock); - if (rc == 0) { - sbuf_printf(sb, "Frames: %u\n", stats.frames); - sbuf_printf(sb, "Octets: %ju\n", stats.octets); - sbuf_printf(sb, "Drops: %u", stats.drops); - rc = sbuf_finish(sb); - } + end_synchronized_op(sc, 0); + if (rc != 0) + goto done; + sbuf_printf(sb, "Frames: %u\n", stats.frames); + sbuf_printf(sb, "Octets: %ju\n", stats.octets); + sbuf_printf(sb, "Drops: %u", stats.drops); + rc = sbuf_finish(sb); +done: sbuf_delete(sb); - return (rc); } @@ -9196,21 +9311,23 @@ sysctl_tid_stats(SYSCTL_HANDLER_ARGS) if (sb == NULL) return (ENOMEM); - mtx_lock(&sc->reg_lock); + rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4tist"); + if (rc != 0) + goto done; if (hw_off_limits(sc)) rc = ENXIO; else t4_tp_get_tid_stats(sc, &stats, 1); - mtx_unlock(&sc->reg_lock); - if (rc == 0) { - sbuf_printf(sb, "Delete: %u\n", stats.del); - sbuf_printf(sb, "Invalidate: %u\n", stats.inv); - sbuf_printf(sb, "Active: %u\n", stats.act); - sbuf_printf(sb, "Passive: %u", stats.pas); - rc = sbuf_finish(sb); - } + end_synchronized_op(sc, 
0); + if (rc != 0) + goto done; + sbuf_printf(sb, "Delete: %u\n", stats.del); + sbuf_printf(sb, "Invalidate: %u\n", stats.inv); + sbuf_printf(sb, "Active: %u\n", stats.act); + sbuf_printf(sb, "Passive: %u", stats.pas); + rc = sbuf_finish(sb); +done: sbuf_delete(sb); - return (rc); } @@ -9378,14 +9495,16 @@ sysctl_fcoe_stats(SYSCTL_HANDLER_ARGS) if (rc != 0) return (rc); - mtx_lock(&sc->reg_lock); + rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4fcst"); + if (rc != 0) + return (rc); if (hw_off_limits(sc)) rc = ENXIO; else { for (i = 0; i < nchan; i++) t4_get_fcoe_stats(sc, i, &stats[i], 1); } - mtx_unlock(&sc->reg_lock); + end_synchronized_op(sc, 0); if (rc != 0) return (rc); @@ -9438,7 +9557,9 @@ sysctl_hw_sched(SYSCTL_HANDLER_ARGS) if (sb == NULL) return (ENOMEM); - mtx_lock(&sc->reg_lock); + rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4hwsc"); + if (rc != 0) + goto done; if (hw_off_limits(sc)) { rc = ENXIO; goto done; @@ -9472,7 +9593,7 @@ sysctl_hw_sched(SYSCTL_HANDLER_ARGS) } rc = sbuf_finish(sb); done: - mtx_unlock(&sc->reg_lock); + end_synchronized_op(sc, 0); sbuf_delete(sb); return (rc); } @@ -10262,12 +10383,14 @@ sysctl_rdma_stats(SYSCTL_HANDLER_ARGS) if (rc != 0) return (rc); - mtx_lock(&sc->reg_lock); + rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4rdst"); + if (rc != 0) + return (rc); if (hw_off_limits(sc)) rc = ENXIO; else t4_tp_get_rdma_stats(sc, &stats, 0); - mtx_unlock(&sc->reg_lock); + end_synchronized_op(sc, 0); if (rc != 0) return (rc); @@ -10296,12 +10419,14 @@ sysctl_tcp_stats(SYSCTL_HANDLER_ARGS) if (rc != 0) return (rc); - mtx_lock(&sc->reg_lock); + rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4tcst"); + if (rc != 0) + return (rc); if (hw_off_limits(sc)) rc = ENXIO; else t4_tp_get_tcp_stats(sc, &v4, &v6, 0); - mtx_unlock(&sc->reg_lock); + end_synchronized_op(sc, 0); if (rc != 0) return (rc); @@ -10434,12 +10559,14 @@ sysctl_tp_err_stats(SYSCTL_HANDLER_ARGS) if (rc != 0) return (rc); - mtx_lock(&sc->reg_lock); + rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4trst"); + if (rc != 0) + return (rc); if (hw_off_limits(sc)) rc = ENXIO; else t4_tp_get_err_stats(sc, &stats, 0); - mtx_unlock(&sc->reg_lock); + end_synchronized_op(sc, 0); if (rc != 0) return (rc); @@ -10515,12 +10642,14 @@ sysctl_tnl_stats(SYSCTL_HANDLER_ARGS) if (rc != 0) return(rc); - mtx_lock(&sc->reg_lock); + rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4tsst"); + if (rc != 0) + return (rc); if (hw_off_limits(sc)) rc = ENXIO; else t4_tp_get_tnl_stats(sc, &stats, 1); - mtx_unlock(&sc->reg_lock); + end_synchronized_op(sc, 0); if (rc != 0) return (rc); @@ -11568,49 +11697,137 @@ load_bootcfg(struct adapter *sc, struct t4_data *bc) return (rc); } +static void +cudbg_reg_lock(void *arg) +{ + struct mtx *reg_lock = arg; + + mtx_lock(reg_lock); +} + +static void +cudbg_reg_unlock(void *arg) +{ + struct mtx *reg_lock = arg; + + mtx_unlock(reg_lock); +} + +/* + * Returns -ve errno on error. 
+ */ +static int +cudbg_read_mem(struct cudbg_init *pdbg_init, uint8_t mem_type, uint32_t start, + uint32_t size, uint8_t *buf) +{ + struct adapter *sc = pdbg_init->adap; + uint32_t addr; + int rc; + + rc = -validate_mt_off_len(sc, mem_type, start, size, &addr); + if (rc == 0) + rc = -read_via_memwin(sc, 1, addr, (void *)buf, size); + + return (rc); +} + static int cudbg_dump(struct adapter *sc, struct t4_cudbg_dump *dump) { int rc; - struct cudbg_init *cudbg; - void *handle, *buf; + struct cudbg_init ci = {0}, *cudbg = &ci; /* XXX: ~550B */ + void *handle = NULL, *buf = NULL, *cudbg_buf = NULL; + uint32_t cudbg_buflen; /* buf is large, don't block if no memory is available */ buf = malloc(dump->len, M_CXGBE, M_NOWAIT | M_ZERO); - if (buf == NULL) - return (ENOMEM); + if (buf == NULL) { + rc = ENOMEM; + goto done; + } - handle = cudbg_alloc_handle(); - if (handle == NULL) { + cudbg_buflen = 132000; + cudbg_buf = malloc(cudbg_buflen, M_CXGBE, M_NOWAIT | M_ZERO); + if (cudbg_buf == NULL) { rc = ENOMEM; goto done; } - cudbg = cudbg_get_init(handle); + init_cudbg_hdr(&cudbg->header); cudbg->adap = sc; cudbg->print = (cudbg_print_cb)printf; - -#ifndef notyet - device_printf(sc->dev, "%s: wr_flash %u, len %u, data %p.\n", - __func__, dump->wr_flash, dump->len, dump->data); -#endif - + cudbg->mc_collect_cb = cudbg_read_mem; + cudbg->write_to_file_cb = NULL; + cudbg->sw_state_buf = NULL; + cudbg->sw_state_buflen = 0; + cudbg->access_lock = &sc->reg_lock; + cudbg->lock_cb = cudbg_reg_lock; + cudbg->unlock_cb = cudbg_reg_unlock; + cudbg->yield_cb = NULL; if (dump->wr_flash) cudbg->use_flash = 1; + if (sc->debug_flags & DF_VERBOSE_CUDBG) + cudbg->verbose = 1; MPASS(sizeof(cudbg->dbg_bitmap) == sizeof(dump->bitmap)); memcpy(cudbg->dbg_bitmap, dump->bitmap, sizeof(cudbg->dbg_bitmap)); - rc = cudbg_collect(handle, buf, &dump->len); - if (rc != 0) + rc = cudbg_hello2(cudbg, &handle, cudbg_buf, &cudbg_buflen); + if (rc != 0) { + CH_ALERT(sc, "sizeof(cudbg_init) = %zd, cudbg_buflen %d.\n", + sizeof(ci), cudbg_buflen); goto done; + } - rc = copyout(buf, dump->data, dump->len); + rc = cudbg_collect(handle, buf, &dump->len); + if (rc == 0) + rc = copyout(buf, dump->data, dump->len); done: - cudbg_free_handle(handle); + if (handle != NULL) + cudbg_bye(handle); + free(cudbg_buf, M_CXGBE); free(buf, M_CXGBE); return (rc); } +static int +cudbg_dump_copy(struct adapter *sc, struct t4_cudbg_dump *dump) +{ + struct saved_cudbg_dump *scd = NULL; + int rc = 0; + + mtx_lock(&sc->dump_lock); + while ((scd = STAILQ_FIRST(&sc->dump_list)) == NULL) { + rc = cv_wait_sig(&sc->dump_cv, &sc->dump_lock); + if (rc == ERESTART) + continue; + if (rc != 0) + break; + } + if (scd != NULL && rc == 0) { + if (dump->len < scd->dump_len) + rc = ENOMEM; + else + STAILQ_REMOVE_HEAD(&sc->dump_list, link); + dump->len = scd->dump_len; + } + mtx_unlock(&sc->dump_lock); + if (rc != 0) + return (rc); + + MPASS(scd != NULL); + rc = copyout(scd->dump_buf, dump->data, dump->len); + if (rc == 0) + free(scd, M_CXGBE); + else { + /* Put it back. 
*/ + mtx_lock(&sc->dump_lock); + STAILQ_INSERT_HEAD(&sc->dump_list, scd, link); + mtx_unlock(&sc->dump_lock); + } + + return (rc); +} + static void free_offload_policy(struct t4_offload_policy *op) { @@ -12078,6 +12295,20 @@ t4_os_link_changed(struct port_info *pi) } } +void t4_os_reg_lock(struct adapter *sc) +{ + mtx_lock(&sc->reg_lock); +} + +void t4_os_reg_lock_assert(struct adapter *sc) +{ + mtx_assert(&sc->reg_lock, MA_OWNED); +} + +void t4_os_reg_unlock(struct adapter *sc) +{ + mtx_unlock(&sc->reg_lock); +} void t4_iterate(void (*func)(struct adapter *, void *), void *arg) { @@ -12235,6 +12466,9 @@ t4_ioctl(struct cdev *dev, unsigned long cmd, caddr_t data, int fflag, case CHELSIO_T4_RELEASE_CLIP_ADDR: rc = release_clip_addr(sc, (struct t4_clip_addr *)data); break; + case CHELSIO_T4_CUDBG_DUMP_COPY: + rc = cudbg_dump_copy(sc, (struct t4_cudbg_dump *)data); + break; default: rc = ENOTTY; } @@ -12466,10 +12700,9 @@ t4_deactivate_uld(struct adapter *sc, int id) } static void -t4_async_event(void *arg, int n) +t4_async_event(struct adapter *sc) { struct uld_info *ui; - struct adapter *sc = (struct adapter *)arg; if (begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4async") != 0) return; diff --git a/sys/dev/cxgbe/t4_sge.c b/sys/dev/cxgbe/t4_sge.c index 1d0e334896b..3552f546d07 100644 --- a/sys/dev/cxgbe/t4_sge.c +++ b/sys/dev/cxgbe/t4_sge.c @@ -1302,7 +1302,7 @@ t4_intr_err(void *arg) uint32_t v; const bool verbose = (sc->debug_flags & DF_VERBOSE_SLOWINTR) != 0; - if (sc->flags & ADAP_ERR) + if (atomic_load_int(&sc->ignore_err_intr) != 0) return; v = t4_read_reg(sc, MYPF_REG(A_PL_PF_INT_CAUSE)); diff --git a/usr.sbin/cxgbetool/cxgbetool.c b/usr.sbin/cxgbetool/cxgbetool.c index 77f092123de..870a8e17f78 100644 --- a/usr.sbin/cxgbetool/cxgbetool.c +++ b/usr.sbin/cxgbetool/cxgbetool.c @@ -52,7 +52,13 @@ __FBSDID("$FreeBSD$"); #include #include +typedef uint8_t u8; +typedef uint16_t u16; +typedef uint32_t u32; +typedef uint64_t u64; + #include "t4_ioctl.h" +#include "cudbg/cudbg.h" #include "tcb_common.h" #define in_range(val, lo, hi) ( val < 0 || (val <= hi && val >= lo)) @@ -96,6 +102,7 @@ usage(FILE *fp) "\tclip hold|release hold/release an address\n" "\tclip list list the CLIP table\n" "\tcontext show an SGE context\n" + "\tdumpcopy copy out saved dump\n" "\tdumpstate dump chip state\n" "\tfilter [ ] ... 
set a filter\n" "\tfilter delete|clear [prio 1] delete a filter\n" @@ -2067,6 +2074,9 @@ dumpstate(int argc, const char *argv[]) dump.wr_flash = 0; memset(&dump.bitmap, 0xff, sizeof(dump.bitmap)); + clrbit(&dump.bitmap, CUDBG_ALL); + clrbit(&dump.bitmap, CUDBG_MC0); + clrbit(&dump.bitmap, CUDBG_MC1); dump.len = 8 * 1024 * 1024; dump.data = malloc(dump.len); if (dump.data == NULL) { @@ -2091,6 +2101,42 @@ dumpstate(int argc, const char *argv[]) return (rc); } +static int +dumpcopy(int argc, const char *argv[]) +{ + int rc, fd; + struct t4_cudbg_dump dump = {0}; + const char *fname = argv[0]; + + if (argc != 1) { + warnx("dumpcopy: incorrect number of arguments."); + return (EINVAL); + } + + dump.len = 8 * 1024 * 1024; + dump.data = malloc(dump.len); + if (dump.data == NULL) { + return (ENOMEM); + } + + rc = doit(CHELSIO_T4_CUDBG_DUMP_COPY, &dump); + if (rc != 0) + goto done; + + fd = open(fname, O_CREAT | O_TRUNC | O_EXCL | O_WRONLY, + S_IRUSR | S_IRGRP | S_IROTH); + if (fd < 0) { + warn("open(%s)", fname); + rc = errno; + goto done; + } + write(fd, dump.data, dump.len); + close(fd); +done: + free(dump.data); + return (rc); +} + static int read_mem(uint32_t addr, uint32_t len, void (*output)(uint32_t *, uint32_t)) { @@ -3625,6 +3671,8 @@ run_cmd(int argc, const char *argv[]) rc = filter_cmd(argc, argv, 1); else if (!strcmp(cmd, "clip")) rc = clip_cmd(argc, argv); + else if (!strcmp(cmd, "dumpcopy")) + rc = dumpcopy(argc, argv); else { rc = EINVAL; warnx("invalid command \"%s\"", cmd);
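For illustration, a minimal sketch of how a cudbg collector can use the lock callbacks now carried in struct cudbg_init (access_lock, lock_cb and unlock_cb, which the driver wires up to the adapter's register lock via cudbg_reg_lock/cudbg_reg_unlock) to serialize access to an indexed address/data register pair. The helper name, register arguments and loop are illustrative only, not part of this change.

static void
cudbg_read_indexed(struct cudbg_init *pdbg_init, u32 addr_reg, u32 data_reg,
    u32 *vals, u32 nregs, u32 start_idx)
{
	struct adapter *padap = pdbg_init->adap;

	/* Take the adapter's register lock through the callback, if set. */
	cudbg_access_lock_acquire(pdbg_init);
	while (nregs--) {
		/* Select the index, then read the corresponding data. */
		t4_write_reg(padap, addr_reg, start_idx++);
		*vals++ = t4_read_reg(padap, data_reg);
	}
	cudbg_access_lock_release(pdbg_init);
}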
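Since fastlz.c no longer keeps a static hash table, every caller of fastlz_compress() or fastlz_compress_level() must pass in scratch space with room for FASTLZ_HASH_SIZE pointer-sized entries (the driver supplies it through cudbg_init's hash_table). The table does not need to be zeroed; the compressor initializes every slot before use. A minimal userland-style sketch follows; the helper and its allocation strategy are illustrative, and sizing the output buffer (FastLZ's documentation asks for roughly 5% of headroom over the input) is left to the caller.

#include <stdlib.h>

#include "fastlz_common.h"	/* FASTLZ_HASH_SIZE */
#include "fastlz.h"

static int
compress_block(const void *in, int inlen, void *out)
{
	unsigned char *htab;
	int outlen;

	/* Scratch space for FASTLZ_HASH_SIZE pointers; no zeroing needed. */
	htab = malloc(FASTLZ_HASH_SIZE * sizeof(const unsigned char *));
	if (htab == NULL)
		return (0);
	outlen = fastlz_compress(htab, in, inlen, out);
	free(htab);
	return (outlen);
}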
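The chunk checksums that compress_buff() emits are plain Adler-32 values seeded with 1 (see RFC 1950), computed with the update_adler32() helper from fastlz_common.h. A consumer walking the chunk stream can therefore verify a chunk payload as sketched below; the helper name is illustrative.

#include "fastlz_common.h"	/* update_adler32() */

static int
chunk_checksum_ok(const void *payload, int len, unsigned long expected)
{
	/* Same seed (1) and routine the writer used. */
	return (update_adler32(1L, payload, len) == expected);
}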