@@ -1376,193 +1376,192 @@ static inline struct Scsi_Host *bnx2i_sess_get_shost(struct bnx2i_sess *sess)
extern struct bnx2i_hba *bnx2i_map_pcidev_to_hba(struct pci_dev *pdev);
static inline char *pci_alloc_consistent_esx(struct pci_dev *pdev, size_t size,
					     dma_addr_t *mapping)
{
	/*
	 * Allocate 'size' bytes of physically contiguous, DMA-able memory
	 * from the per-HBA vmkernel memory pool, zero it, and create a
	 * bidirectional streaming DMA mapping for it.
	 *
	 * Returns the kernel virtual address (bus address via *mapping),
	 * or NULL if the hba/pool cannot be found or the pool allocation
	 * fails.  Must be paired with pci_free_consistent_esx().
	 *
	 * NOTE(review): this block was reconstructed from a diff hunk whose
	 * +/- markers were lost; the two vmk_MemPoolAlloc() call styles were
	 * re-separated into their DDK-version branches — confirm against the
	 * pristine driver source.
	 */
	char *virt_mem;
	VMK_ReturnStatus status;
#if (VMWARE_ESX_DDK_VERSION == 41000)
	vmk_MachPage pfn;
	vmk_MemPoolAllocProps pool_alloc_props;
#else
	vmk_MPN pfn;
	vmk_MpnRange range;
	vmk_MemPoolAllocRequest alloc_request;
	vmk_MemPoolAllocProps pool_alloc_props;
#endif /* (VMWARE_ESX_DDK_VERSION == 41000) */
	struct bnx2i_hba *hba = bnx2i_map_pcidev_to_hba(pdev);

	if (!hba || !hba->bnx2i_pool)
		return NULL;

#if (VMWARE_ESX_DDK_VERSION == 41000)
	/* 4.1 DDK: page-count based vmk_MemPoolAlloc() signature. */
	pool_alloc_props.alignment = 0;
	if (dma_get_required_mask(&pdev->dev) >= pdev->dma_mask)
		pool_alloc_props.maxPage = VMK_MEMPOOL_MAXPAGE_LOW;
	else
		pool_alloc_props.maxPage = VMK_MEMPOOL_MAXPAGE_ANY;

	status = vmk_MemPoolAlloc(hba->bnx2i_pool, &pool_alloc_props,
				  1 << get_order(size), VMK_FALSE, &pfn);
#else
	/* 5.0+ DDK: allocation-request based vmk_MemPoolAlloc() signature. */
	pool_alloc_props.physContiguity = VMK_MEM_PHYS_CONTIGUOUS;
	pool_alloc_props.creationTimeoutMS = VMK_TIMEOUT_NONBLOCKING;
	/* Constrain to <4GB only when the device cannot address everything
	 * the platform requires — mirrors the MAXPAGE_LOW choice above.
	 * NOTE(review): branch polarity reconstructed; verify. */
	if (dma_get_required_mask(&pdev->dev) >= pdev->dma_mask)
		pool_alloc_props.physRange = VMK_PHYS_ADDR_BELOW_4GB;
	else
		pool_alloc_props.physRange = VMK_PHYS_ADDR_ANY;

	alloc_request.numPages = 1 << get_order(size);
	alloc_request.numElements = 1;
	alloc_request.mpnRanges = &range;

	status = vmk_MemPoolAlloc(hba->bnx2i_pool, &pool_alloc_props,
				  &alloc_request);
#endif /* (VMWARE_ESX_DDK_VERSION == 41000) */
	if (unlikely(status != VMK_OK)) {
		printk("allocation failed size=%lu\n", size);
		/* Bail out now: 'pfn' (and 'range') are not valid here. */
		return NULL;
	}
#if (VMWARE_ESX_DDK_VERSION != 41000)
	pfn = range.startMPN;
#endif /* (VMWARE_ESX_DDK_VERSION != 41000) */
	virt_mem = page_to_virt(pfn_to_page(pfn));
	memset(virt_mem, 0, size);
	*mapping = pci_map_single(pdev, virt_mem, size, PCI_DMA_BIDIRECTIONAL);
	return virt_mem;
}
static inline void pci_free_consistent_esx(struct pci_dev *pdev, size_t size,
					   void *virt, dma_addr_t mapping)
{
	/*
	 * Release memory obtained from pci_alloc_consistent_esx(): tear down
	 * the streaming DMA mapping, then return the pages to the per-HBA
	 * vmkernel memory pool.  'size' must match the allocation size.
	 *
	 * NOTE(review): reconstructed from a marker-stripped diff hunk; the
	 * local declarations and the DDK-version #if split were restored by
	 * analogy with pci_alloc_consistent_esx() — confirm against the
	 * pristine driver source.
	 */
#if (VMWARE_ESX_DDK_VERSION == 41000)
	vmk_MachPage pfn;
#else
	vmk_MpnRange range;
	vmk_MemPoolAllocRequest alloc_request;
#endif /* (VMWARE_ESX_DDK_VERSION == 41000) */
	struct bnx2i_hba *hba = bnx2i_map_pcidev_to_hba(pdev);

	if (!hba) {
		printk("pci_free_consistent_esx: could not find the hba associated with the dma mapping to free.\n");
		return;
	}

	pci_unmap_single(pdev, mapping, size, PCI_DMA_BIDIRECTIONAL);
#if (VMWARE_ESX_DDK_VERSION == 41000)
	/* 4.1 DDK frees by machine page number. */
	pfn = virt_to_page(virt);
	vmk_MemPoolFree(&pfn);
#else
	/* 5.0+ DDK frees via the same request structure used to allocate. */
	range.startMPN = virt_to_page(virt);
	range.numPages = 1 << get_order(size);
	alloc_request.numPages = range.numPages;
	alloc_request.numElements = 1;
	alloc_request.mpnRanges = &range;
	vmk_MemPoolFree(&alloc_request);
#endif /* (VMWARE_ESX_DDK_VERSION == 41000) */
}
#if (VMWARE_ESX_DDK_VERSION >= 60000)
/*
 * Encode a LUN plus an optional second-level LUN id (sllid) into the
 * 8-byte SCSI LUN wire format.  When sllid is valid, bytes 0-1 carry the
 * first-level LUN (big-endian) and bytes 2-7 carry the top 48 bits of
 * sllid; otherwise fall back to the standard int_to_scsilun() encoding.
 *
 * NOTE(review): braces restored after diff-marker stripping; the byte
 * layout itself is unchanged from the original lines.
 */
static inline void bnx2i_int_to_scsilun_with_sec_lun_id(uint16_t lun,
					struct scsi_lun *scsi_lun,
					uint64_t sllid)
{
	if (sllid != VMKLNX_SCSI_INVALID_SECONDLEVEL_ID) {
		VMK_ASSERT(lun <= 255); /* Max LUN supported is 255. */
		memset(scsi_lun, 0, 8);
		scsi_lun->scsi_lun[0] = (lun >> 8) & 0xFF;
		scsi_lun->scsi_lun[1] = lun & 0xFF;
		scsi_lun->scsi_lun[2] = (uint8_t)((sllid >> 56) & 0xFF); /* sllid msb */
		scsi_lun->scsi_lun[3] = (uint8_t)((sllid >> 48) & 0xFF);
		scsi_lun->scsi_lun[4] = (uint8_t)((sllid >> 40) & 0xFF);
		scsi_lun->scsi_lun[5] = (uint8_t)((sllid >> 32) & 0xFF);
		scsi_lun->scsi_lun[6] = (uint8_t)((sllid >> 24) & 0xFF);
		scsi_lun->scsi_lun[7] = (uint8_t)((sllid >> 16) & 0xFF); /* sllid lsb */
	} else {
		int_to_scsilun(lun, scsi_lun);
	}
}
#endif /* (VMWARE_ESX_DDK_VERSION >= 60000) */
#endif /* __VMKLNX__ */
extern unsigned int cmd_cmpl_per_work;
extern unsigned int max_bnx2x_sessions;
extern unsigned int max_bnx2_sessions;
#ifdef __VMKLNX__
extern int bnx2i_max_sectors;
/*
* Function Prototypes
*/
extern int bnx2i_reg_device;
void bnx2i_identify_device(struct bnx2i_hba *hba, struct cnic_dev *dev);
void bnx2i_register_device(struct bnx2i_hba *hba, int force);
void bnx2i_check_nx2_dev_busy(void);
void bnx2i_get_link_state(struct bnx2i_hba *hba);
struct bnx2i_hba *bnx2i_map_netdev_to_hba(struct net_device *netdev);
/* ep_handle is the 64-bit endpoint cookie handed out at connect time.
 * (A conflicting duplicate prototype taking vmk_int64 — the pre-diff old
 * line — was removed; uint64_t is the retained signature.) */
void bnx2i_ep_disconnect(uint64_t ep_handle);
void bnx2i_ulp_init(struct cnic_dev *dev);
void bnx2i_ulp_exit(struct cnic_dev *dev);
void bnx2i_start(void *handle);
void bnx2i_stop(void *handle);
void bnx2i_reg_dev_all(void);
void bnx2i_unreg_dev_all(void);
struct bnx2i_hba *get_adapter_list_head(void);
void bnx2i_add_hba_to_adapter_list(struct bnx2i_hba *hba);
void bnx2i_remove_hba_from_adapter_list(struct bnx2i_hba *hba);
int bnx2i_ioctl_init(void);
void bnx2i_ioctl_cleanup(void);
struct bnx2i_conn *bnx2i_get_conn_from_id(struct bnx2i_hba *hba,
u16 iscsi_cid);
int bnx2i_alloc_ep_pool(void);
void bnx2i_release_ep_pool(void);
struct bnx2i_endpoint *bnx2i_ep_ofld_list_next(struct bnx2i_hba *hba);
struct bnx2i_endpoint *bnx2i_ep_destroy_list_next(struct bnx2i_hba *hba);
struct bnx2i_cmd *bnx2i_alloc_cmd(struct bnx2i_sess *sess);
void bnx2i_free_cmd(struct bnx2i_sess *sess, struct bnx2i_cmd *cmd);
int bnx2i_tcp_conn_active(struct bnx2i_conn *conn);
struct bnx2i_hba *bnx2i_find_hba_for_cnic(struct cnic_dev *cnic);
struct bnx2i_hba *bnx2i_get_hba_from_template(
struct scsi_transport_template *scsit);
struct bnx2i_hba *bnx2i_alloc_hba(struct cnic_dev *cnic);
void bnx2i_free_hba(struct bnx2i_hba *hba);
int bnx2i_process_new_cqes(struct bnx2i_conn *conn, int soft_irq, int num_cqes);
void bnx2i_process_scsi_resp(struct bnx2i_cmd *cmd,
struct iscsi_cmd_response *resp_cqe);
int bnx2i_process_nopin(struct bnx2i_conn *conn,
struct bnx2i_cmd *cmnd, char *data_buf, int data_len);
void bnx2i_update_cmd_sequence(struct bnx2i_sess *sess, u32 expsn, u32 maxsn);
void bnx2i_get_rq_buf(struct bnx2i_conn *conn, char *ptr, int len);
void bnx2i_put_rq_buf(struct bnx2i_conn *conn, int count);
int bnx2i_indicate_login_resp(struct bnx2i_conn *conn);
int bnx2i_indicate_logout_resp(struct bnx2i_conn *conn);
int bnx2i_indicate_async_mesg(struct bnx2i_conn *conn);
void bnx2i_iscsi_unmap_sg_list(struct bnx2i_hba *hba, struct bnx2i_cmd *cmd);
void bnx2i_iscsi_hba_cleanup(struct bnx2i_hba *hba);
void bnx2i_start_iscsi_hba_shutdown(struct bnx2i_hba *hba);
void bnx2i_iscsi_handle_ip_event(struct bnx2i_hba *hba);
int bnx2i_do_iscsi_sess_recovery(struct bnx2i_sess *sess, int err_code, int signal);
void bnx2i_return_failed_command(struct bnx2i_sess *sess,
struct scsi_cmnd *cmd, int resid, int err_code);
void bnx2i_fail_cmd(struct bnx2i_sess *sess, struct bnx2i_cmd *cmd);
int bnx2i_complete_cmd(struct bnx2i_sess *sess, struct bnx2i_cmd *cmd);