Fixed MTP to work with TWRP

This commit is contained in:
awab228 2018-06-19 23:16:04 +02:00
commit f6dfaef42e
50820 changed files with 20846062 additions and 0 deletions

View file

@ -0,0 +1,8 @@
# Low-level SCSI driver for the QLogic qla4xxx iSCSI HBA family.
config SCSI_QLA_ISCSI
	tristate "QLogic ISP4XXX and ISP82XX host adapter family support"
	depends on PCI && SCSI && NET
	select SCSI_ISCSI_ATTRS
	select ISCSI_BOOT_SYSFS
	---help---
	This driver supports the QLogic 40xx (ISP4XXX), 8022 (ISP82XX)
	and 8032 (ISP83XX) iSCSI host adapter family.

View file

@ -0,0 +1,5 @@
# Object files composing the qla4xxx iSCSI HBA driver module.
qla4xxx-y := ql4_os.o ql4_init.o ql4_mbx.o ql4_iocb.o ql4_isr.o \
		ql4_nx.o ql4_nvram.o ql4_dbg.o ql4_attr.o ql4_bsg.o ql4_83xx.o

# Built-in or modular depending on CONFIG_SCSI_QLA_ISCSI (y/m).
obj-$(CONFIG_SCSI_QLA_ISCSI) += qla4xxx.o

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,371 @@
/*
* QLogic iSCSI HBA Driver
* Copyright (c) 2003-2013 QLogic Corporation
*
* See LICENSE.qla4xxx for copyright and licensing details.
*/
#ifndef __QL483XX_H
#define __QL483XX_H
/* Indirectly Mapped Registers */
#define QLA83XX_FLASH_SPI_STATUS 0x2808E010
#define QLA83XX_FLASH_SPI_CONTROL 0x2808E014
#define QLA83XX_FLASH_STATUS 0x42100004
#define QLA83XX_FLASH_CONTROL 0x42110004
#define QLA83XX_FLASH_ADDR 0x42110008
#define QLA83XX_FLASH_WRDATA 0x4211000C
#define QLA83XX_FLASH_RDDATA 0x42110018
#define QLA83XX_FLASH_DIRECT_WINDOW 0x42110030
#define QLA83XX_FLASH_DIRECT_DATA(DATA) (0x42150000 | (0x0000FFFF&DATA))
/* Directly Mapped Registers in 83xx register table */
/* Flash access regs */
#define QLA83XX_FLASH_LOCK 0x3850
#define QLA83XX_FLASH_UNLOCK 0x3854
#define QLA83XX_FLASH_LOCK_ID 0x3500
/* Driver Lock regs */
#define QLA83XX_DRV_LOCK 0x3868
#define QLA83XX_DRV_UNLOCK 0x386C
#define QLA83XX_DRV_LOCK_ID 0x3504
#define QLA83XX_DRV_LOCKRECOVERY 0x379C
/* IDC version */
#define QLA83XX_IDC_VER_MAJ_VALUE 0x1
#define QLA83XX_IDC_VER_MIN_VALUE 0x0
/* IDC Registers : Driver Coexistence Defines */
#define QLA83XX_CRB_IDC_VER_MAJOR 0x3780
#define QLA83XX_CRB_IDC_VER_MINOR 0x3798
#define QLA83XX_IDC_DRV_CTRL 0x3790
#define QLA83XX_IDC_DRV_AUDIT 0x3794
#define QLA83XX_SRE_SHIM_CONTROL 0x0D200284
#define QLA83XX_PORT0_RXB_PAUSE_THRS 0x0B2003A4
#define QLA83XX_PORT1_RXB_PAUSE_THRS 0x0B2013A4
#define QLA83XX_PORT0_RXB_TC_MAX_CELL 0x0B200388
#define QLA83XX_PORT1_RXB_TC_MAX_CELL 0x0B201388
#define QLA83XX_PORT0_RXB_TC_STATS 0x0B20039C
#define QLA83XX_PORT1_RXB_TC_STATS 0x0B20139C
#define QLA83XX_PORT2_IFB_PAUSE_THRS 0x0B200704
#define QLA83XX_PORT3_IFB_PAUSE_THRS 0x0B201704
/* set value to pause threshold value */
#define QLA83XX_SET_PAUSE_VAL 0x0
#define QLA83XX_SET_TC_MAX_CELL_VAL 0x03FF03FF
#define QLA83XX_RESET_CONTROL 0x28084E50
#define QLA83XX_RESET_REG 0x28084E60
#define QLA83XX_RESET_PORT0 0x28084E70
#define QLA83XX_RESET_PORT1 0x28084E80
#define QLA83XX_RESET_PORT2 0x28084E90
#define QLA83XX_RESET_PORT3 0x28084EA0
#define QLA83XX_RESET_SRE_SHIM 0x28084EB0
#define QLA83XX_RESET_EPG_SHIM 0x28084EC0
#define QLA83XX_RESET_ETHER_PCS 0x28084ED0
/* qla_83xx_reg_tbl registers */
#define QLA83XX_PEG_HALT_STATUS1 0x34A8
#define QLA83XX_PEG_HALT_STATUS2 0x34AC
#define QLA83XX_PEG_ALIVE_COUNTER 0x34B0 /* FW_HEARTBEAT */
#define QLA83XX_FW_CAPABILITIES 0x3528
#define QLA83XX_CRB_DRV_ACTIVE 0x3788 /* IDC_DRV_PRESENCE */
#define QLA83XX_CRB_DEV_STATE 0x3784 /* IDC_DEV_STATE */
#define QLA83XX_CRB_DRV_STATE 0x378C /* IDC_DRV_ACK */
#define QLA83XX_CRB_DRV_SCRATCH 0x3548
#define QLA83XX_CRB_DEV_PART_INFO1 0x37E0
#define QLA83XX_CRB_DEV_PART_INFO2 0x37E4
#define QLA83XX_FW_VER_MAJOR 0x3550
#define QLA83XX_FW_VER_MINOR 0x3554
#define QLA83XX_FW_VER_SUB 0x3558
#define QLA83XX_NPAR_STATE 0x359C
#define QLA83XX_FW_IMAGE_VALID 0x35FC
#define QLA83XX_CMDPEG_STATE 0x3650
#define QLA83XX_ASIC_TEMP 0x37B4
#define QLA83XX_FW_API 0x356C
#define QLA83XX_DRV_OP_MODE 0x3570
/*
 * ISP83xx register offsets used for firmware-health / IDC monitoring.
 * NOTE(review): consumers outside this header appear to index this table
 * positionally, so the entry order is presumably significant -- confirm
 * against the qla8xxx register-table users before reordering.
 */
static const uint32_t qla4_83xx_reg_tbl[] = {
	QLA83XX_PEG_HALT_STATUS1,
	QLA83XX_PEG_HALT_STATUS2,
	QLA83XX_PEG_ALIVE_COUNTER,
	QLA83XX_CRB_DRV_ACTIVE,
	QLA83XX_CRB_DEV_STATE,
	QLA83XX_CRB_DRV_STATE,
	QLA83XX_CRB_DRV_SCRATCH,
	QLA83XX_CRB_DEV_PART_INFO1,
	QLA83XX_CRB_IDC_VER_MAJOR,
	QLA83XX_FW_VER_MAJOR,
	QLA83XX_FW_VER_MINOR,
	QLA83XX_FW_VER_SUB,
	QLA83XX_CMDPEG_STATE,
	QLA83XX_ASIC_TEMP,
};
#define QLA83XX_CRB_WIN_BASE 0x3800
#define QLA83XX_CRB_WIN_FUNC(f) (QLA83XX_CRB_WIN_BASE+((f)*4))
#define QLA83XX_SEM_LOCK_BASE 0x3840
#define QLA83XX_SEM_UNLOCK_BASE 0x3844
#define QLA83XX_SEM_LOCK_FUNC(f) (QLA83XX_SEM_LOCK_BASE+((f)*8))
#define QLA83XX_SEM_UNLOCK_FUNC(f) (QLA83XX_SEM_UNLOCK_BASE+((f)*8))
#define QLA83XX_LINK_STATE(f) (0x3698+((f) > 7 ? 4 : 0))
#define QLA83XX_LINK_SPEED(f) (0x36E0+(((f) >> 2) * 4))
#define QLA83XX_MAX_LINK_SPEED(f) (0x36F0+(((f) / 4) * 4))
#define QLA83XX_LINK_SPEED_FACTOR 10
/* FLASH API Defines */
#define QLA83xx_FLASH_MAX_WAIT_USEC 100
#define QLA83XX_FLASH_LOCK_TIMEOUT 10000
#define QLA83XX_FLASH_SECTOR_SIZE 65536
#define QLA83XX_DRV_LOCK_TIMEOUT 2000
#define QLA83XX_FLASH_SECTOR_ERASE_CMD 0xdeadbeef
#define QLA83XX_FLASH_WRITE_CMD 0xdacdacda
#define QLA83XX_FLASH_BUFFER_WRITE_CMD 0xcadcadca
#define QLA83XX_FLASH_READ_RETRY_COUNT 2000
#define QLA83XX_FLASH_STATUS_READY 0x6
#define QLA83XX_FLASH_BUFFER_WRITE_MIN 2
#define QLA83XX_FLASH_BUFFER_WRITE_MAX 64
#define QLA83XX_FLASH_STATUS_REG_POLL_DELAY 1
#define QLA83XX_ERASE_MODE 1
#define QLA83XX_WRITE_MODE 2
#define QLA83XX_DWORD_WRITE_MODE 3
#define QLA83XX_GLOBAL_RESET 0x38CC
#define QLA83XX_WILDCARD 0x38F0
#define QLA83XX_INFORMANT 0x38FC
#define QLA83XX_HOST_MBX_CTRL 0x3038
#define QLA83XX_FW_MBX_CTRL 0x303C
#define QLA83XX_BOOTLOADER_ADDR 0x355C
#define QLA83XX_BOOTLOADER_SIZE 0x3560
#define QLA83XX_FW_IMAGE_ADDR 0x3564
#define QLA83XX_MBX_INTR_ENABLE 0x1000
#define QLA83XX_MBX_INTR_MASK 0x1200
/* IDC Control Register bit defines */
#define DONTRESET_BIT0 0x1
#define GRACEFUL_RESET_BIT1 0x2
#define QLA83XX_HALT_STATUS_INFORMATIONAL (0x1 << 29)
#define QLA83XX_HALT_STATUS_FW_RESET (0x2 << 29)
#define QLA83XX_HALT_STATUS_UNRECOVERABLE (0x4 << 29)
/* Firmware image definitions */
#define QLA83XX_BOOTLOADER_FLASH_ADDR 0x10000
#define QLA83XX_BOOT_FROM_FLASH 0
#define QLA83XX_IDC_PARAM_ADDR 0x3e8020
/* Reset template definitions */
#define QLA83XX_MAX_RESET_SEQ_ENTRIES 16
#define QLA83XX_RESTART_TEMPLATE_SIZE 0x2000
#define QLA83XX_RESET_TEMPLATE_ADDR 0x4F0000
#define QLA83XX_RESET_SEQ_VERSION 0x0101
/* Reset template entry opcodes */
#define OPCODE_NOP 0x0000
#define OPCODE_WRITE_LIST 0x0001
#define OPCODE_READ_WRITE_LIST 0x0002
#define OPCODE_POLL_LIST 0x0004
#define OPCODE_POLL_WRITE_LIST 0x0008
#define OPCODE_READ_MODIFY_WRITE 0x0010
#define OPCODE_SEQ_PAUSE 0x0020
#define OPCODE_SEQ_END 0x0040
#define OPCODE_TMPL_END 0x0080
#define OPCODE_POLL_READ_LIST 0x0100
/* Template Header */
#define RESET_TMPLT_HDR_SIGNATURE 0xCAFE

/*
 * Header at the front of a flash-resident reset template; all fields are
 * little-endian as stored in flash.
 * NOTE(review): 'signature' is presumably validated against
 * RESET_TMPLT_HDR_SIGNATURE by the template loader -- confirm there.
 */
struct qla4_83xx_reset_template_hdr {
	__le16 version;
	__le16 signature;
	__le16 size;
	__le16 entries;
	__le16 hdr_size;
	__le16 checksum;
	__le16 init_seq_offset;
	__le16 start_seq_offset;
} __packed;

/* Common Entry Header: precedes each reset-sequence entry. */
struct qla4_83xx_reset_entry_hdr {
	__le16 cmd;
	__le16 size;
	__le16 count;
	__le16 delay;
} __packed;

/* Generic poll entry type. */
struct qla4_83xx_poll {
	__le32 test_mask;
	__le32 test_value;
} __packed;

/* Read modify write entry type. */
struct qla4_83xx_rmw {
	__le32 test_mask;
	__le32 xor_value;
	__le32 or_value;
	uint8_t shl;
	uint8_t shr;
	uint8_t index_a;
	uint8_t rsvd;
} __packed;

/* Generic Entry Item with 2 DWords. */
struct qla4_83xx_entry {
	__le32 arg1;
	__le32 arg2;
} __packed;

/* Generic Entry Item with 4 DWords. */
struct qla4_83xx_quad_entry {
	__le32 dr_addr;
	__le32 dr_value;
	__le32 ar_addr;
	__le32 ar_value;
} __packed;

/*
 * Host-side bookkeeping while parsing/running a reset template
 * (not a hardware layout, hence not __packed).
 */
struct qla4_83xx_reset_template {
	int seq_index;
	int seq_error;
	int array_index;
	uint32_t array[QLA83XX_MAX_RESET_SEQ_ENTRIES];
	uint8_t *buff;
	uint8_t *stop_offset;
	uint8_t *start_offset;
	uint8_t *init_offset;
	struct qla4_83xx_reset_template_hdr *hdr;
	uint8_t seq_end;
	uint8_t template_end;
};
/* POLLRD Entry */
struct qla83xx_minidump_entry_pollrd {
	struct qla8xxx_minidump_entry_hdr h;
	uint32_t select_addr;
	uint32_t read_addr;
	uint32_t select_value;
	uint16_t select_value_stride;
	uint16_t op_count;
	uint32_t poll_wait;
	uint32_t poll_mask;
	uint32_t data_size;
	uint32_t rsvd_1;
};

/* RDDFE Entry (8044 minidump). */
struct qla8044_minidump_entry_rddfe {
	struct qla8xxx_minidump_entry_hdr h;
	uint32_t addr_1;
	uint32_t value;
	uint8_t stride;
	uint8_t stride2;
	uint16_t count;
	uint32_t poll;
	uint32_t mask;
	uint32_t modify_mask;
	uint32_t data_size;
	uint32_t rsvd;
} __packed;

/* RDMDIO Entry (8044 minidump). */
struct qla8044_minidump_entry_rdmdio {
	struct qla8xxx_minidump_entry_hdr h;
	uint32_t addr_1;
	uint32_t addr_2;
	uint32_t value_1;
	uint8_t stride_1;
	uint8_t stride_2;
	uint16_t count;
	uint32_t poll;
	uint32_t mask;
	uint32_t value_2;
	uint32_t data_size;
} __packed;

/* POLLWR Entry (8044 minidump). */
struct qla8044_minidump_entry_pollwr {
	struct qla8xxx_minidump_entry_hdr h;
	uint32_t addr_1;
	uint32_t addr_2;
	uint32_t value_1;
	uint32_t value_2;
	uint32_t poll;
	uint32_t mask;
	uint32_t data_size;
	uint32_t rsvd;
} __packed;
/* RDMUX2 Entry */
struct qla83xx_minidump_entry_rdmux2 {
	struct qla8xxx_minidump_entry_hdr h;
	uint32_t select_addr_1;
	uint32_t select_addr_2;
	uint32_t select_value_1;
	uint32_t select_value_2;
	uint32_t op_count;
	uint32_t select_value_mask;
	uint32_t read_addr;
	uint8_t select_value_stride;
	uint8_t data_size;
	uint8_t rsvd[2];
};

/* POLLRDMWR Entry */
struct qla83xx_minidump_entry_pollrdmwr {
	struct qla8xxx_minidump_entry_hdr h;
	uint32_t addr_1;
	uint32_t addr_2;
	uint32_t value_1;
	uint32_t value_2;
	uint32_t poll_wait;
	uint32_t poll_mask;
	uint32_t modify_mask;
	uint32_t data_size;
};

/* IDC additional information exchanged during inter-driver coordination. */
struct qla4_83xx_idc_information {
	uint32_t request_desc; /* IDC request descriptor */
	uint32_t info1; /* IDC additional info */
	uint32_t info2; /* IDC additional info */
	uint32_t info3; /* IDC additional info */
};
/* PEX-DMA engine constants used by the minidump RDMEM fast path. */
#define QLA83XX_PEX_DMA_ENGINE_INDEX 8
#define QLA83XX_PEX_DMA_BASE_ADDRESS 0x77320000
#define QLA83XX_PEX_DMA_NUM_OFFSET 0x10000
#define QLA83XX_PEX_DMA_CMD_ADDR_LOW 0x0
#define QLA83XX_PEX_DMA_CMD_ADDR_HIGH 0x04
#define QLA83XX_PEX_DMA_CMD_STS_AND_CNTRL 0x08
#define QLA83XX_PEX_DMA_READ_SIZE (16 * 1024)
#define QLA83XX_PEX_DMA_MAX_WAIT (100 * 100) /* Max wait of 100 msecs */

/* Read Memory: For Pex-DMA */
struct qla4_83xx_minidump_entry_rdmem_pex_dma {
	struct qla8xxx_minidump_entry_hdr h;
	uint32_t desc_card_addr;
	uint16_t dma_desc_cmd;
	uint8_t rsvd[2];
	uint32_t start_dma_cmd;
	uint8_t rsvd2[12];
	uint32_t read_addr;
	uint32_t read_data_size;
};

/* DMA descriptor handed to the PEX-DMA engine (hardware layout). */
struct qla4_83xx_pex_dma_descriptor {
	struct {
		uint32_t read_data_size; /* 0-23: size, 24-31: rsvd */
		uint8_t rsvd[2];
		uint16_t dma_desc_cmd;
	} cmd;
	uint64_t src_addr;
	uint64_t dma_bus_addr; /* 0-3: desc-cmd, 4-7: pci-func,
				* 8-15: desc-cmd */
	uint8_t rsvd[24];
} __packed;
#endif

View file

@ -0,0 +1,351 @@
/*
* QLogic iSCSI HBA Driver
* Copyright (c) 2003-2013 QLogic Corporation
*
* See LICENSE.qla4xxx for copyright and licensing details.
*/
#include "ql4_def.h"
#include "ql4_glbl.h"
#include "ql4_dbg.h"
/*
 * Sysfs binary "fw_dump" read: copy the captured firmware dump buffer
 * (ha->fw_dump, ha->fw_dump_size bytes) out to userspace.
 * Returns -EINVAL on 40xx adapters (no 8xxx-style dump), 0 until the
 * dump has been armed for reading via the write side, otherwise the
 * number of bytes copied for this (off, count) window.
 */
static ssize_t
qla4_8xxx_sysfs_read_fw_dump(struct file *filep, struct kobject *kobj,
			     struct bin_attribute *ba, char *buf, loff_t off,
			     size_t count)
{
	struct scsi_qla_host *ha = to_qla_host(dev_to_shost(container_of(kobj,
					       struct device, kobj)));

	if (is_qla40XX(ha))
		return -EINVAL;

	/* Nothing to hand out until userspace set AF_82XX_DUMP_READING */
	if (!test_bit(AF_82XX_DUMP_READING, &ha->flags))
		return 0;

	return memory_read_from_buffer(buf, count, &off, ha->fw_dump,
				       ha->fw_dump_size);
}
/*
 * Sysfs binary "fw_dump" write: control interface for the dump buffer.
 * The first character of the written data selects the action:
 *   '0' - clear the dump-reading state and reload the minidump template,
 *   '1' - arm an already-captured dump for reading,
 *   '2' - under the IDC lock, request an adapter reset (which captures a
 *         new dump); this function may claim reset ownership for 8022,
 *         or for 8032/8042 when qla4_83xx_can_perform_reset() allows it.
 * Any other value is silently ignored.  Returns 'count' on success.
 */
static ssize_t
qla4_8xxx_sysfs_write_fw_dump(struct file *filep, struct kobject *kobj,
			      struct bin_attribute *ba, char *buf, loff_t off,
			      size_t count)
{
	struct scsi_qla_host *ha = to_qla_host(dev_to_shost(container_of(kobj,
					       struct device, kobj)));
	uint32_t dev_state;
	long reading;
	int ret = 0;

	if (is_qla40XX(ha))
		return -EINVAL;

	/* Commands are only accepted at offset 0 */
	if (off != 0)
		return ret;

	/* Only the first character matters; terminate the string after it */
	buf[1] = 0;
	ret = kstrtol(buf, 10, &reading);
	if (ret) {
		ql4_printk(KERN_ERR, ha, "%s: Invalid input. Return err %d\n",
			   __func__, ret);
		return ret;
	}

	switch (reading) {
	case 0:
		/* clear dump collection flags */
		if (test_and_clear_bit(AF_82XX_DUMP_READING, &ha->flags)) {
			clear_bit(AF_82XX_FW_DUMPED, &ha->flags);
			/* Reload minidump template */
			qla4xxx_alloc_fw_dump(ha);
			DEBUG2(ql4_printk(KERN_INFO, ha,
					  "Firmware template reloaded\n"));
		}
		break;
	case 1:
		/* Set flag to read dump */
		if (test_bit(AF_82XX_FW_DUMPED, &ha->flags) &&
		    !test_bit(AF_82XX_DUMP_READING, &ha->flags)) {
			set_bit(AF_82XX_DUMP_READING, &ha->flags);
			DEBUG2(ql4_printk(KERN_INFO, ha,
					  "Raw firmware dump ready for read on (%ld).\n",
					  ha->host_no));
		}
		break;
	case 2:
		/* Reset HBA and collect FW dump */
		ha->isp_ops->idc_lock(ha);
		dev_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DEV_STATE);
		if (dev_state == QLA8XXX_DEV_READY) {
			ql4_printk(KERN_INFO, ha, "%s: Setting Need reset\n",
				   __func__);
			qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
					    QLA8XXX_DEV_NEED_RESET);
			if (is_qla8022(ha) ||
			    ((is_qla8032(ha) || is_qla8042(ha)) &&
			     qla4_83xx_can_perform_reset(ha))) {
				set_bit(AF_8XXX_RST_OWNER, &ha->flags);
				set_bit(AF_FW_RECOVERY, &ha->flags);
				ql4_printk(KERN_INFO, ha, "%s: Reset owner is 0x%x\n",
					   __func__, ha->func_num);
			}
		} else
			ql4_printk(KERN_INFO, ha,
				   "%s: Reset not performed as device state is 0x%x\n",
				   __func__, dev_state);
		ha->isp_ops->idc_unlock(ha);
		break;
	default:
		/* do nothing */
		break;
	}
	return count;
}
/* Binary sysfs attribute backing /sys/.../fw_dump (owner read/write). */
static struct bin_attribute sysfs_fw_dump_attr = {
	.attr = {
		.name = "fw_dump",
		.mode = S_IRUSR | S_IWUSR,
	},
	.size = 0,
	.read = qla4_8xxx_sysfs_read_fw_dump,
	.write = qla4_8xxx_sysfs_write_fw_dump,
};

/* NULL-terminated list of binary attributes registered per host. */
static struct sysfs_entry {
	char *name;
	struct bin_attribute *attr;
} bin_file_entries[] = {
	{ "fw_dump", &sysfs_fw_dump_attr },
	{ NULL },
};
/*
 * Register every binary attribute in bin_file_entries on the host's
 * shost_gendev kobject.  Failures are logged but not fatal.
 */
void qla4_8xxx_alloc_sysfs_attr(struct scsi_qla_host *ha)
{
	struct kobject *kobj = &ha->host->shost_gendev.kobj;
	struct sysfs_entry *entry;

	for (entry = bin_file_entries; entry->name; entry++) {
		int err = sysfs_create_bin_file(kobj, entry->attr);

		if (err)
			ql4_printk(KERN_ERR, ha,
				   "Unable to create sysfs %s binary attribute (%d).\n",
				   entry->name, err);
	}
}
/* Unregister every binary attribute created by qla4_8xxx_alloc_sysfs_attr(). */
void qla4_8xxx_free_sysfs_attr(struct scsi_qla_host *ha)
{
	struct kobject *kobj = &ha->host->shost_gendev.kobj;
	struct sysfs_entry *entry;

	for (entry = bin_file_entries; entry->name; entry++)
		sysfs_remove_bin_file(kobj, entry->attr);
}
/* Scsi_Host attributes. */
/*
 * "fw_version" attribute: firmware version string.
 * 80XX parts report the build number in hex ("maj.min.patch (build)");
 * everything else reports four dotted decimal components.
 */
static ssize_t
qla4xxx_fw_version_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev));

	if (is_qla80XX(ha))
		return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%x)\n",
				ha->fw_info.fw_major, ha->fw_info.fw_minor,
				ha->fw_info.fw_patch, ha->fw_info.fw_build);

	return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d.%02d\n",
			ha->fw_info.fw_major, ha->fw_info.fw_minor,
			ha->fw_info.fw_patch, ha->fw_info.fw_build);
}
/* "serial_num": adapter serial number string. */
static ssize_t
qla4xxx_serial_num_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev));
	return snprintf(buf, PAGE_SIZE, "%s\n", ha->serial_number);
}

/* "iscsi_version": firmware iSCSI major.minor. */
static ssize_t
qla4xxx_iscsi_version_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev));
	return snprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->fw_info.iscsi_major,
			ha->fw_info.iscsi_minor);
}

/* "optrom_version": boot loader version (major.minor.patch.build). */
static ssize_t
qla4xxx_optrom_version_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev));
	return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d.%02d\n",
			ha->fw_info.bootload_major, ha->fw_info.bootload_minor,
			ha->fw_info.bootload_patch, ha->fw_info.bootload_build);
}

/* "board_id": adapter board identifier, hex. */
static ssize_t
qla4xxx_board_id_show(struct device *dev, struct device_attribute *attr,
		      char *buf)
{
	struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev));
	return snprintf(buf, PAGE_SIZE, "0x%08X\n", ha->board_id);
}

/*
 * "fw_state": refresh then report firmware_state and addl_fw_state.
 * NOTE(review): the return value of qla4xxx_get_firmware_state() is
 * ignored, so a failed refresh silently reports stale state.
 */
static ssize_t
qla4xxx_fw_state_show(struct device *dev, struct device_attribute *attr,
		      char *buf)
{
	struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev));

	qla4xxx_get_firmware_state(ha);
	return snprintf(buf, PAGE_SIZE, "0x%08X%8X\n", ha->firmware_state,
			ha->addl_fw_state);
}

/* "phy_port_cnt": physical port count; unsupported (-ENOSYS) on 40xx. */
static ssize_t
qla4xxx_phy_port_cnt_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev));

	if (is_qla40XX(ha))
		return -ENOSYS;

	return snprintf(buf, PAGE_SIZE, "0x%04X\n", ha->phy_port_cnt);
}

/* "phy_port_num": physical port number; unsupported (-ENOSYS) on 40xx. */
static ssize_t
qla4xxx_phy_port_num_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev));

	if (is_qla40XX(ha))
		return -ENOSYS;

	return snprintf(buf, PAGE_SIZE, "0x%04X\n", ha->phy_port_num);
}

/* "iscsi_func_cnt": iSCSI PCI function count; unsupported on 40xx. */
static ssize_t
qla4xxx_iscsi_func_cnt_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev));

	if (is_qla40XX(ha))
		return -ENOSYS;

	return snprintf(buf, PAGE_SIZE, "0x%04X\n", ha->iscsi_pci_func_cnt);
}

/* "hba_model": adapter model name string. */
static ssize_t
qla4xxx_hba_model_show(struct device *dev, struct device_attribute *attr,
		       char *buf)
{
	struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev));
	return snprintf(buf, PAGE_SIZE, "%s\n", ha->model_name);
}

/* "fw_timestamp": firmware build date and time. */
static ssize_t
qla4xxx_fw_timestamp_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev));
	return snprintf(buf, PAGE_SIZE, "%s %s\n", ha->fw_info.fw_build_date,
			ha->fw_info.fw_build_time);
}

/* "fw_build_user": user who built the firmware image. */
static ssize_t
qla4xxx_fw_build_user_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev));
	return snprintf(buf, PAGE_SIZE, "%s\n", ha->fw_info.fw_build_user);
}

/* "fw_ext_timestamp": extended firmware build timestamp. */
static ssize_t
qla4xxx_fw_ext_timestamp_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev));
	return snprintf(buf, PAGE_SIZE, "%s\n", ha->fw_info.extended_timestamp);
}
/*
 * "fw_load_src": human-readable firmware load source.
 * fw_load_source values 1..3 map to named sources; any other value
 * previously left load_src == NULL and passed it to snprintf("%s"),
 * which is undefined behavior in standard C (the kernel prints
 * "(null)").  Report "Unknown" explicitly instead.
 */
static ssize_t
qla4xxx_fw_load_src_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev));
	char *load_src = "Unknown";

	switch (ha->fw_info.fw_load_source) {
	case 1:
		load_src = "Flash Primary";
		break;
	case 2:
		load_src = "Flash Secondary";
		break;
	case 3:
		load_src = "Host Download";
		break;
	default:
		/* keep "Unknown" for unexpected firmware-reported values */
		break;
	}

	return snprintf(buf, PAGE_SIZE, "%s\n", load_src);
}
/*
 * "fw_uptime": refresh firmware info then report uptime as "secs.msecs".
 * NOTE(review): qla4xxx_about_firmware()'s return value is ignored; on
 * failure the previously cached uptime values are reported.
 */
static ssize_t
qla4xxx_fw_uptime_show(struct device *dev, struct device_attribute *attr,
		       char *buf)
{
	struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev));

	qla4xxx_about_firmware(ha);
	return snprintf(buf, PAGE_SIZE, "%u.%u secs\n", ha->fw_uptime_secs,
			ha->fw_uptime_msecs);
}
/* Read-only Scsi_Host sysfs attributes backed by the *_show helpers above. */
static DEVICE_ATTR(fw_version, S_IRUGO, qla4xxx_fw_version_show, NULL);
static DEVICE_ATTR(serial_num, S_IRUGO, qla4xxx_serial_num_show, NULL);
static DEVICE_ATTR(iscsi_version, S_IRUGO, qla4xxx_iscsi_version_show, NULL);
static DEVICE_ATTR(optrom_version, S_IRUGO, qla4xxx_optrom_version_show, NULL);
static DEVICE_ATTR(board_id, S_IRUGO, qla4xxx_board_id_show, NULL);
static DEVICE_ATTR(fw_state, S_IRUGO, qla4xxx_fw_state_show, NULL);
static DEVICE_ATTR(phy_port_cnt, S_IRUGO, qla4xxx_phy_port_cnt_show, NULL);
static DEVICE_ATTR(phy_port_num, S_IRUGO, qla4xxx_phy_port_num_show, NULL);
static DEVICE_ATTR(iscsi_func_cnt, S_IRUGO, qla4xxx_iscsi_func_cnt_show, NULL);
static DEVICE_ATTR(hba_model, S_IRUGO, qla4xxx_hba_model_show, NULL);
static DEVICE_ATTR(fw_timestamp, S_IRUGO, qla4xxx_fw_timestamp_show, NULL);
static DEVICE_ATTR(fw_build_user, S_IRUGO, qla4xxx_fw_build_user_show, NULL);
static DEVICE_ATTR(fw_ext_timestamp, S_IRUGO, qla4xxx_fw_ext_timestamp_show,
		   NULL);
static DEVICE_ATTR(fw_load_src, S_IRUGO, qla4xxx_fw_load_src_show, NULL);
static DEVICE_ATTR(fw_uptime, S_IRUGO, qla4xxx_fw_uptime_show, NULL);

/* NULL-terminated attribute list; exported for use outside this file. */
struct device_attribute *qla4xxx_host_attrs[] = {
	&dev_attr_fw_version,
	&dev_attr_serial_num,
	&dev_attr_iscsi_version,
	&dev_attr_optrom_version,
	&dev_attr_board_id,
	&dev_attr_fw_state,
	&dev_attr_phy_port_cnt,
	&dev_attr_phy_port_num,
	&dev_attr_iscsi_func_cnt,
	&dev_attr_hba_model,
	&dev_attr_fw_timestamp,
	&dev_attr_fw_build_user,
	&dev_attr_fw_ext_timestamp,
	&dev_attr_fw_load_src,
	&dev_attr_fw_uptime,
	NULL,
};

View file

@ -0,0 +1,873 @@
/*
* QLogic iSCSI HBA Driver
* Copyright (c) 2011-2013 QLogic Corporation
*
* See LICENSE.qla4xxx for copyright and licensing details.
*/
#include "ql4_def.h"
#include "ql4_glbl.h"
#include "ql4_bsg.h"
/*
 * BSG vendor command: read flash contents into the job's reply payload.
 * vendor_cmd[1] is the flash offset; the read length is the reply
 * payload length.  Returns 0 on success or a negative errno; when the
 * hardware was actually accessed the bsg job is also completed with a
 * DID_* result.  flash_state serializes concurrent flash operations.
 */
static int
qla4xxx_read_flash(struct bsg_job *bsg_job)
{
	struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
	struct scsi_qla_host *ha = to_qla_host(host);
	struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
	struct iscsi_bsg_request *bsg_req = bsg_job->request;
	uint32_t offset = 0;
	uint32_t length = 0;
	dma_addr_t flash_dma;
	uint8_t *flash = NULL;
	int rval = -EINVAL;

	bsg_reply->reply_payload_rcv_len = 0;

	if (unlikely(pci_channel_offline(ha->pdev)))
		goto leave;

	if (ql4xxx_reset_active(ha)) {
		ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__);
		rval = -EBUSY;
		goto leave;
	}

	/* Only one flash operation may be in flight at a time */
	if (ha->flash_state != QLFLASH_WAITING) {
		ql4_printk(KERN_ERR, ha, "%s: another flash operation "
			   "active\n", __func__);
		rval = -EBUSY;
		goto leave;
	}

	ha->flash_state = QLFLASH_READING;
	offset = bsg_req->rqst_data.h_vendor.vendor_cmd[1];
	length = bsg_job->reply_payload.payload_len;

	/* DMA-coherent bounce buffer for the firmware to fill */
	flash = dma_alloc_coherent(&ha->pdev->dev, length, &flash_dma,
				   GFP_KERNEL);
	if (!flash) {
		ql4_printk(KERN_ERR, ha, "%s: dma alloc failed for flash "
			   "data\n", __func__);
		rval = -ENOMEM;
		goto leave;
	}

	rval = qla4xxx_get_flash(ha, flash_dma, offset, length);
	if (rval) {
		ql4_printk(KERN_ERR, ha, "%s: get flash failed\n", __func__);
		bsg_reply->result = DID_ERROR << 16;
		rval = -EIO;
	} else {
		/* Copy the bounce buffer into the reply scatterlist */
		bsg_reply->reply_payload_rcv_len =
			sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
					    bsg_job->reply_payload.sg_cnt,
					    flash, length);
		bsg_reply->result = DID_OK << 16;
	}

	bsg_job_done(bsg_job, bsg_reply->result,
		     bsg_reply->reply_payload_rcv_len);
	dma_free_coherent(&ha->pdev->dev, length, flash, flash_dma);
leave:
	/* Always release the flash-state "lock" on exit */
	ha->flash_state = QLFLASH_WAITING;
	return rval;
}
/*
 * BSG vendor command: write the request payload to flash.
 * vendor_cmd[1] is the flash offset, vendor_cmd[2] are write options;
 * the length is the request payload length.  Returns 0 on success or a
 * negative errno; completes the bsg job when the hardware was accessed.
 */
static int
qla4xxx_update_flash(struct bsg_job *bsg_job)
{
	struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
	struct scsi_qla_host *ha = to_qla_host(host);
	struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
	struct iscsi_bsg_request *bsg_req = bsg_job->request;
	uint32_t length = 0;
	uint32_t offset = 0;
	uint32_t options = 0;
	dma_addr_t flash_dma;
	uint8_t *flash = NULL;
	int rval = -EINVAL;

	bsg_reply->reply_payload_rcv_len = 0;

	if (unlikely(pci_channel_offline(ha->pdev)))
		goto leave;

	if (ql4xxx_reset_active(ha)) {
		ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__);
		rval = -EBUSY;
		goto leave;
	}

	/* Only one flash operation may be in flight at a time */
	if (ha->flash_state != QLFLASH_WAITING) {
		ql4_printk(KERN_ERR, ha, "%s: another flash operation "
			   "active\n", __func__);
		rval = -EBUSY;
		goto leave;
	}

	ha->flash_state = QLFLASH_WRITING;
	length = bsg_job->request_payload.payload_len;
	offset = bsg_req->rqst_data.h_vendor.vendor_cmd[1];
	options = bsg_req->rqst_data.h_vendor.vendor_cmd[2];

	/* DMA-coherent bounce buffer holding the data to be written */
	flash = dma_alloc_coherent(&ha->pdev->dev, length, &flash_dma,
				   GFP_KERNEL);
	if (!flash) {
		ql4_printk(KERN_ERR, ha, "%s: dma alloc failed for flash "
			   "data\n", __func__);
		rval = -ENOMEM;
		goto leave;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
			  bsg_job->request_payload.sg_cnt, flash, length);

	rval = qla4xxx_set_flash(ha, flash_dma, offset, length, options);
	if (rval) {
		ql4_printk(KERN_ERR, ha, "%s: set flash failed\n", __func__);
		bsg_reply->result = DID_ERROR << 16;
		rval = -EIO;
	} else
		bsg_reply->result = DID_OK << 16;

	bsg_job_done(bsg_job, bsg_reply->result,
		     bsg_reply->reply_payload_rcv_len);
	dma_free_coherent(&ha->pdev->dev, length, flash, flash_dma);
leave:
	/* Always release the flash-state "lock" on exit */
	ha->flash_state = QLFLASH_WAITING;
	return rval;
}
/*
 * BSG vendor command: query the IP state for (acb_idx, ip_idx) taken
 * from vendor_cmd[1]/[2] and return the MBOX_REG_COUNT status words in
 * the reply payload.  Not supported on ISP4010.  Returns 0 on success
 * or a negative errno.
 */
static int
qla4xxx_get_acb_state(struct bsg_job *bsg_job)
{
	struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
	struct scsi_qla_host *ha = to_qla_host(host);
	struct iscsi_bsg_request *bsg_req = bsg_job->request;
	struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
	uint32_t status[MBOX_REG_COUNT];
	uint32_t acb_idx;
	uint32_t ip_idx;
	int rval = -EINVAL;

	bsg_reply->reply_payload_rcv_len = 0;

	if (unlikely(pci_channel_offline(ha->pdev)))
		goto leave;

	/* Only 4022 and above adapters are supported */
	if (is_qla4010(ha))
		goto leave;

	if (ql4xxx_reset_active(ha)) {
		ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__);
		rval = -EBUSY;
		goto leave;
	}

	/* Reply buffer must be able to hold all mailbox status words */
	if (bsg_job->reply_payload.payload_len < sizeof(status)) {
		ql4_printk(KERN_ERR, ha, "%s: invalid payload len %d\n",
			   __func__, bsg_job->reply_payload.payload_len);
		rval = -EINVAL;
		goto leave;
	}

	acb_idx = bsg_req->rqst_data.h_vendor.vendor_cmd[1];
	ip_idx = bsg_req->rqst_data.h_vendor.vendor_cmd[2];

	rval = qla4xxx_get_ip_state(ha, acb_idx, ip_idx, status);
	if (rval) {
		ql4_printk(KERN_ERR, ha, "%s: get ip state failed\n",
			   __func__);
		bsg_reply->result = DID_ERROR << 16;
		rval = -EIO;
	} else {
		bsg_reply->reply_payload_rcv_len =
			sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
					    bsg_job->reply_payload.sg_cnt,
					    status, sizeof(status));
		bsg_reply->result = DID_OK << 16;
	}

	bsg_job_done(bsg_job, bsg_reply->result,
		     bsg_reply->reply_payload_rcv_len);
leave:
	return rval;
}
/*
 * BSG vendor command: read NVRAM into the reply payload.
 * vendor_cmd[1] is the NVRAM offset; the length is the reply payload
 * length.  40xx adapters only, and offset+len is bounded by the
 * per-chip NVRAM size.  Returns 0 on success or a negative errno.
 */
static int
qla4xxx_read_nvram(struct bsg_job *bsg_job)
{
	struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
	struct scsi_qla_host *ha = to_qla_host(host);
	struct iscsi_bsg_request *bsg_req = bsg_job->request;
	struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
	uint32_t offset = 0;
	uint32_t len = 0;
	uint32_t total_len = 0;
	dma_addr_t nvram_dma;
	uint8_t *nvram = NULL;
	int rval = -EINVAL;

	bsg_reply->reply_payload_rcv_len = 0;

	if (unlikely(pci_channel_offline(ha->pdev)))
		goto leave;

	/* Only 40xx adapters are supported */
	if (!(is_qla4010(ha) || is_qla4022(ha) || is_qla4032(ha)))
		goto leave;

	if (ql4xxx_reset_active(ha)) {
		ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__);
		rval = -EBUSY;
		goto leave;
	}

	offset = bsg_req->rqst_data.h_vendor.vendor_cmd[1];
	len = bsg_job->reply_payload.payload_len;
	total_len = offset + len;

	/* total len should not be greater than max NVRAM size */
	if ((is_qla4010(ha) && total_len > QL4010_NVRAM_SIZE) ||
	    ((is_qla4022(ha) || is_qla4032(ha)) &&
	     total_len > QL40X2_NVRAM_SIZE)) {
		ql4_printk(KERN_ERR, ha, "%s: offset+len greater than max"
			   " nvram size, offset=%d len=%d\n",
			   __func__, offset, len);
		goto leave;
	}

	/* DMA-coherent bounce buffer for the firmware to fill */
	nvram = dma_alloc_coherent(&ha->pdev->dev, len, &nvram_dma,
				   GFP_KERNEL);
	if (!nvram) {
		ql4_printk(KERN_ERR, ha, "%s: dma alloc failed for nvram "
			   "data\n", __func__);
		rval = -ENOMEM;
		goto leave;
	}

	rval = qla4xxx_get_nvram(ha, nvram_dma, offset, len);
	if (rval) {
		ql4_printk(KERN_ERR, ha, "%s: get nvram failed\n", __func__);
		bsg_reply->result = DID_ERROR << 16;
		rval = -EIO;
	} else {
		bsg_reply->reply_payload_rcv_len =
			sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
					    bsg_job->reply_payload.sg_cnt,
					    nvram, len);
		bsg_reply->result = DID_OK << 16;
	}

	bsg_job_done(bsg_job, bsg_reply->result,
		     bsg_reply->reply_payload_rcv_len);
	dma_free_coherent(&ha->pdev->dev, len, nvram, nvram_dma);
leave:
	return rval;
}
/*
 * BSG vendor command: write the request payload to NVRAM at offset
 * vendor_cmd[1].  40xx adapters only; offset+len is bounded by the
 * per-chip NVRAM size.  Returns 0 on success or a negative errno.
 */
static int
qla4xxx_update_nvram(struct bsg_job *bsg_job)
{
	struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
	struct scsi_qla_host *ha = to_qla_host(host);
	struct iscsi_bsg_request *bsg_req = bsg_job->request;
	struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
	uint32_t offset = 0;
	uint32_t len = 0;
	uint32_t total_len = 0;
	dma_addr_t nvram_dma;
	uint8_t *nvram = NULL;
	int rval = -EINVAL;

	bsg_reply->reply_payload_rcv_len = 0;

	if (unlikely(pci_channel_offline(ha->pdev)))
		goto leave;

	/* Only 40xx adapters are supported */
	if (!(is_qla4010(ha) || is_qla4022(ha) || is_qla4032(ha)))
		goto leave;

	if (ql4xxx_reset_active(ha)) {
		ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__);
		rval = -EBUSY;
		goto leave;
	}

	offset = bsg_req->rqst_data.h_vendor.vendor_cmd[1];
	len = bsg_job->request_payload.payload_len;
	total_len = offset + len;

	/* total len should not be greater than max NVRAM size */
	if ((is_qla4010(ha) && total_len > QL4010_NVRAM_SIZE) ||
	    ((is_qla4022(ha) || is_qla4032(ha)) &&
	     total_len > QL40X2_NVRAM_SIZE)) {
		ql4_printk(KERN_ERR, ha, "%s: offset+len greater than max"
			   " nvram size, offset=%d len=%d\n",
			   __func__, offset, len);
		goto leave;
	}

	/* DMA-coherent bounce buffer holding the data to be written */
	nvram = dma_alloc_coherent(&ha->pdev->dev, len, &nvram_dma,
				   GFP_KERNEL);
	if (!nvram) {
		ql4_printk(KERN_ERR, ha, "%s: dma alloc failed for flash "
			   "data\n", __func__);
		rval = -ENOMEM;
		goto leave;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
			  bsg_job->request_payload.sg_cnt, nvram, len);

	rval = qla4xxx_set_nvram(ha, nvram_dma, offset, len);
	if (rval) {
		ql4_printk(KERN_ERR, ha, "%s: set nvram failed\n", __func__);
		bsg_reply->result = DID_ERROR << 16;
		rval = -EIO;
	} else
		bsg_reply->result = DID_OK << 16;

	bsg_job_done(bsg_job, bsg_reply->result,
		     bsg_reply->reply_payload_rcv_len);
	dma_free_coherent(&ha->pdev->dev, len, nvram, nvram_dma);
leave:
	return rval;
}
/*
 * BSG vendor command: restore factory defaults for the region selected
 * by vendor_cmd[1] with field parameters vendor_cmd[2]/[3].
 * Not supported on ISP4010.  Returns 0 on success or a negative errno;
 * completes the bsg job when the hardware was actually accessed.
 */
static int
qla4xxx_restore_defaults(struct bsg_job *bsg_job)
{
	struct Scsi_Host *shost = iscsi_job_to_shost(bsg_job);
	struct scsi_qla_host *ha = to_qla_host(shost);
	struct iscsi_bsg_request *req = bsg_job->request;
	struct iscsi_bsg_reply *reply = bsg_job->reply;
	uint32_t region, field0, field1;
	int rval = -EINVAL;

	reply->reply_payload_rcv_len = 0;

	if (unlikely(pci_channel_offline(ha->pdev)))
		goto leave;

	/* Not supported on ISP4010 */
	if (is_qla4010(ha))
		goto leave;

	if (ql4xxx_reset_active(ha)) {
		ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__);
		rval = -EBUSY;
		goto leave;
	}

	region = req->rqst_data.h_vendor.vendor_cmd[1];
	field0 = req->rqst_data.h_vendor.vendor_cmd[2];
	field1 = req->rqst_data.h_vendor.vendor_cmd[3];

	rval = qla4xxx_restore_factory_defaults(ha, region, field0, field1);
	if (rval) {
		ql4_printk(KERN_ERR, ha, "%s: set nvram failed\n", __func__);
		reply->result = DID_ERROR << 16;
		rval = -EIO;
	} else {
		reply->result = DID_OK << 16;
	}

	bsg_job_done(bsg_job, reply->result, reply->reply_payload_rcv_len);
leave:
	return rval;
}
/*
 * BSG vendor command: fetch the Address Control Block of the type given
 * in vendor_cmd[1] into the reply payload.  The reply buffer must be at
 * least sizeof(struct addr_ctrl_blk).  Not supported on ISP4010.
 * Returns 0 on success or a negative errno.
 */
static int
qla4xxx_bsg_get_acb(struct bsg_job *bsg_job)
{
	struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
	struct scsi_qla_host *ha = to_qla_host(host);
	struct iscsi_bsg_request *bsg_req = bsg_job->request;
	struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
	uint32_t acb_type = 0;
	uint32_t len = 0;
	dma_addr_t acb_dma;
	uint8_t *acb = NULL;
	int rval = -EINVAL;

	bsg_reply->reply_payload_rcv_len = 0;

	if (unlikely(pci_channel_offline(ha->pdev)))
		goto leave;

	/* Only 4022 and above adapters are supported */
	if (is_qla4010(ha))
		goto leave;

	if (ql4xxx_reset_active(ha)) {
		ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__);
		rval = -EBUSY;
		goto leave;
	}

	acb_type = bsg_req->rqst_data.h_vendor.vendor_cmd[1];
	len = bsg_job->reply_payload.payload_len;

	/* Reply buffer must be able to hold a full ACB */
	if (len < sizeof(struct addr_ctrl_blk)) {
		ql4_printk(KERN_ERR, ha, "%s: invalid acb len %d\n",
			   __func__, len);
		rval = -EINVAL;
		goto leave;
	}

	/* DMA-coherent bounce buffer for the firmware to fill */
	acb = dma_alloc_coherent(&ha->pdev->dev, len, &acb_dma, GFP_KERNEL);
	if (!acb) {
		ql4_printk(KERN_ERR, ha, "%s: dma alloc failed for acb "
			   "data\n", __func__);
		rval = -ENOMEM;
		goto leave;
	}

	rval = qla4xxx_get_acb(ha, acb_dma, acb_type, len);
	if (rval) {
		ql4_printk(KERN_ERR, ha, "%s: get acb failed\n", __func__);
		bsg_reply->result = DID_ERROR << 16;
		rval = -EIO;
	} else {
		bsg_reply->reply_payload_rcv_len =
			sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
					    bsg_job->reply_payload.sg_cnt,
					    acb, len);
		bsg_reply->result = DID_OK << 16;
	}

	bsg_job_done(bsg_job, bsg_reply->result,
		     bsg_reply->reply_payload_rcv_len);
	dma_free_coherent(&ha->pdev->dev, len, acb, acb_dma);
leave:
	return rval;
}
/*
 * Execute a raw diagnostic mailbox command supplied by userspace:
 * vendor_cmd[1..] provides the MBOX_REG_COUNT input registers; the
 * resulting status registers are appended to the bsg reply so the
 * application can inspect them.  The command is rejected while an
 * adapter reset is pending.  Always completes the bsg job.
 */
static void ql4xxx_execute_diag_cmd(struct bsg_job *bsg_job)
{
	struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
	struct scsi_qla_host *ha = to_qla_host(host);
	struct iscsi_bsg_request *bsg_req = bsg_job->request;
	struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
	uint8_t *rsp_ptr = NULL;
	uint32_t mbox_cmd[MBOX_REG_COUNT];
	uint32_t mbox_sts[MBOX_REG_COUNT];
	int status = QLA_ERROR;

	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: in\n", __func__));

	if (test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
		ql4_printk(KERN_INFO, ha, "%s: Adapter reset in progress. Invalid Request\n",
			   __func__);
		bsg_reply->result = DID_ERROR << 16;
		goto exit_diag_mem_test;
	}

	bsg_reply->reply_payload_rcv_len = 0;
	/* Input mailbox registers come straight from the vendor command */
	memcpy(mbox_cmd, &bsg_req->rqst_data.h_vendor.vendor_cmd[1],
	       sizeof(uint32_t) * MBOX_REG_COUNT);

	DEBUG2(ql4_printk(KERN_INFO, ha,
			  "%s: mbox_cmd: %08X %08X %08X %08X %08X %08X %08X %08X\n",
			  __func__, mbox_cmd[0], mbox_cmd[1], mbox_cmd[2],
			  mbox_cmd[3], mbox_cmd[4], mbox_cmd[5], mbox_cmd[6],
			  mbox_cmd[7]));

	status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 8, &mbox_cmd[0],
					 &mbox_sts[0]);

	DEBUG2(ql4_printk(KERN_INFO, ha,
			  "%s: mbox_sts: %08X %08X %08X %08X %08X %08X %08X %08X\n",
			  __func__, mbox_sts[0], mbox_sts[1], mbox_sts[2],
			  mbox_sts[3], mbox_sts[4], mbox_sts[5], mbox_sts[6],
			  mbox_sts[7]));

	if (status == QLA_SUCCESS)
		bsg_reply->result = DID_OK << 16;
	else
		bsg_reply->result = DID_ERROR << 16;

	/* Send mbox_sts to application */
	bsg_job->reply_len = sizeof(struct iscsi_bsg_reply) + sizeof(mbox_sts);
	rsp_ptr = ((uint8_t *)bsg_reply) + sizeof(struct iscsi_bsg_reply);
	memcpy(rsp_ptr, mbox_sts, sizeof(mbox_sts));

exit_diag_mem_test:
	DEBUG2(ql4_printk(KERN_INFO, ha,
			  "%s: bsg_reply->result = x%x, status = %s\n",
			  __func__, bsg_reply->result, STATUS(status)));

	bsg_job_done(bsg_job, bsg_reply->result,
		     bsg_reply->reply_payload_rcv_len);
}
/*
 * qla4_83xx_wait_for_loopback_config_comp - wait for a port-config change
 * @ha: adapter
 * @wait_for_link: when non-zero, additionally wait for the LINK UP event
 *
 * Waits IDC_COMP_TOV seconds on ha->idc_comp for the IDC-complete
 * notification; if that times out and ha->idc_extend_tmo is set, waits the
 * extended interval once more.  Optionally then waits on ha->link_up_comp.
 * On the abort paths the notify_* flags are cleared so the ISR stops
 * signalling these completions.
 *
 * Returns QLA_SUCCESS, or QLA_ERROR on timeout.
 */
static int qla4_83xx_wait_for_loopback_config_comp(struct scsi_qla_host *ha,
						   int wait_for_link)
{
	int status = QLA_SUCCESS;

	if (!wait_for_completion_timeout(&ha->idc_comp, (IDC_COMP_TOV * HZ))) {
		ql4_printk(KERN_INFO, ha, "%s: IDC Complete notification not received, Waiting for another %d timeout",
			   __func__, ha->idc_extend_tmo);
		/* NOTE(review): if idc_extend_tmo is 0 the first timeout is
		 * silently treated as success (falls through to the link
		 * wait) — confirm that is intended. */
		if (ha->idc_extend_tmo) {
			if (!wait_for_completion_timeout(&ha->idc_comp,
							 (ha->idc_extend_tmo * HZ))) {
				/* Extended wait also expired: stop the ISR
				 * from completing either completion. */
				ha->notify_idc_comp = 0;
				ha->notify_link_up_comp = 0;
				ql4_printk(KERN_WARNING, ha, "%s: Aborting: IDC Complete notification not received",
					   __func__);
				status = QLA_ERROR;
				goto exit_wait;
			} else {
				DEBUG2(ql4_printk(KERN_INFO, ha,
						  "%s: IDC Complete notification received\n",
						  __func__));
			}
		}
	} else {
		DEBUG2(ql4_printk(KERN_INFO, ha,
				  "%s: IDC Complete notification received\n",
				  __func__));
	}
	ha->notify_idc_comp = 0;

	if (wait_for_link) {
		if (!wait_for_completion_timeout(&ha->link_up_comp,
						 (IDC_COMP_TOV * HZ))) {
			ha->notify_link_up_comp = 0;
			ql4_printk(KERN_WARNING, ha, "%s: Aborting: LINK UP notification not received",
				   __func__);
			status = QLA_ERROR;
			goto exit_wait;
		} else {
			DEBUG2(ql4_printk(KERN_INFO, ha,
					  "%s: LINK UP notification received\n",
					  __func__));
		}
		ha->notify_link_up_comp = 0;
	}

exit_wait:
	return status;
}
/**
 * qla4_83xx_pre_loopback_config - switch the port into loopback mode
 * @ha: adapter
 * @mbox_cmd: diagnostic mailbox command; mbox_cmd[1] selects internal or
 *            external loopback
 *
 * Reads the current port configuration, refuses to proceed if a loopback
 * mode is already active, then enables the requested loopback bit and
 * disables DCBX before writing the configuration back.  Sets the
 * notify_idc_comp/notify_link_up_comp flags so the ISR completes the
 * waits performed by qla4_83xx_wait_for_loopback_config_comp().
 *
 * Returns QLA_SUCCESS or QLA_ERROR.
 */
static int qla4_83xx_pre_loopback_config(struct scsi_qla_host *ha,
					 uint32_t *mbox_cmd)
{
	uint32_t config = 0;
	int status = QLA_SUCCESS;

	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: in\n", __func__));

	status = qla4_83xx_get_port_config(ha, &config);
	if (status != QLA_SUCCESS)
		goto exit_pre_loopback_config;

	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Default port config=%08X\n",
			  __func__, config));

	if ((config & ENABLE_INTERNAL_LOOPBACK) ||
	    (config & ENABLE_EXTERNAL_LOOPBACK)) {
		ql4_printk(KERN_INFO, ha, "%s: Loopback diagnostics already in progress. Invalid request\n",
			   __func__);
		goto exit_pre_loopback_config;
	}

	if (mbox_cmd[1] == QL_DIAG_CMD_TEST_INT_LOOPBACK)
		config |= ENABLE_INTERNAL_LOOPBACK;

	if (mbox_cmd[1] == QL_DIAG_CMD_TEST_EXT_LOOPBACK)
		config |= ENABLE_EXTERNAL_LOOPBACK;

	/* DCBX must be off while the port runs in loopback mode */
	config &= ~ENABLE_DCBX;

	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: New port config=%08X\n",
			  __func__, config));

	ha->notify_idc_comp = 1;
	ha->notify_link_up_comp = 1;

	/* get the link state */
	qla4xxx_get_firmware_state(ha);

	status = qla4_83xx_set_port_config(ha, &config);
	if (status != QLA_SUCCESS) {
		ha->notify_idc_comp = 0;
		ha->notify_link_up_comp = 0;
		goto exit_pre_loopback_config;
	}

exit_pre_loopback_config:
	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: status = %s\n", __func__,
			  STATUS(status)));
	return status;
}
/*
 * qla4_83xx_post_loopback_config - restore normal port configuration
 * @ha: adapter
 * @mbox_cmd: the diagnostic command that was executed; mbox_cmd[1] tells
 *            which loopback bit to clear
 *
 * Clears the loopback bit set by the pre-config step, re-enables DCBX and
 * writes the configuration back.  If the write fails, an adapter reset is
 * scheduled and the loopback flag is cleared.
 *
 * Returns QLA_SUCCESS or QLA_ERROR.
 */
static int qla4_83xx_post_loopback_config(struct scsi_qla_host *ha,
					  uint32_t *mbox_cmd)
{
	uint32_t port_cfg = 0;
	int rc;

	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: in\n", __func__));

	rc = qla4_83xx_get_port_config(ha, &port_cfg);
	if (rc != QLA_SUCCESS)
		goto done;

	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: port config=%08X\n", __func__,
			  port_cfg));

	if (mbox_cmd[1] == QL_DIAG_CMD_TEST_INT_LOOPBACK)
		port_cfg &= ~ENABLE_INTERNAL_LOOPBACK;
	else if (mbox_cmd[1] == QL_DIAG_CMD_TEST_EXT_LOOPBACK)
		port_cfg &= ~ENABLE_EXTERNAL_LOOPBACK;

	port_cfg |= ENABLE_DCBX;

	DEBUG2(ql4_printk(KERN_INFO, ha,
			  "%s: Restore default port config=%08X\n", __func__,
			  port_cfg));

	ha->notify_idc_comp = 1;
	if (ha->addl_fw_state & FW_ADDSTATE_LINK_UP)
		ha->notify_link_up_comp = 1;

	rc = qla4_83xx_set_port_config(ha, &port_cfg);
	if (rc != QLA_SUCCESS) {
		ql4_printk(KERN_INFO, ha, "%s: Scheduling adapter reset\n",
			   __func__);
		set_bit(DPC_RESET_HA, &ha->dpc_flags);
		clear_bit(AF_LOOPBACK, &ha->flags);
	}

done:
	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: status = %s\n", __func__,
			  STATUS(rc)));
	return rc;
}
/*
 * qla4xxx_execute_diag_loopback_cmd - run an internal/external loopback test
 * @bsg_job: iscsi_bsg_job carrying the request/reply
 *
 * For 83xx/84xx adapters the port is first reconfigured into loopback mode
 * (and the completion waited for), the raw mailbox command is executed, the
 * mailbox status registers are appended to the reply, and finally the
 * default port configuration is restored.  The job is always completed via
 * bsg_job_done().
 */
static void qla4xxx_execute_diag_loopback_cmd(struct bsg_job *bsg_job)
{
	struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
	struct scsi_qla_host *ha = to_qla_host(host);
	struct iscsi_bsg_request *bsg_req = bsg_job->request;
	struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
	uint8_t *rsp_ptr = NULL;
	uint32_t mbox_cmd[MBOX_REG_COUNT];
	uint32_t mbox_sts[MBOX_REG_COUNT];
	int wait_for_link = 1;
	int status = QLA_ERROR;

	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: in\n", __func__));

	bsg_reply->reply_payload_rcv_len = 0;

	/* Reject if a loopback test or an adapter reset is already running */
	if (test_bit(AF_LOOPBACK, &ha->flags)) {
		ql4_printk(KERN_INFO, ha, "%s: Loopback Diagnostics already in progress. Invalid Request\n",
			   __func__);
		bsg_reply->result = DID_ERROR << 16;
		goto exit_loopback_cmd;
	}

	if (test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
		ql4_printk(KERN_INFO, ha, "%s: Adapter reset in progress. Invalid Request\n",
			   __func__);
		bsg_reply->result = DID_ERROR << 16;
		goto exit_loopback_cmd;
	}

	memcpy(mbox_cmd, &bsg_req->rqst_data.h_vendor.vendor_cmd[1],
	       sizeof(uint32_t) * MBOX_REG_COUNT);

	/* 83xx/84xx need the port switched into loopback mode first */
	if (is_qla8032(ha) || is_qla8042(ha)) {
		status = qla4_83xx_pre_loopback_config(ha, mbox_cmd);
		if (status != QLA_SUCCESS) {
			bsg_reply->result = DID_ERROR << 16;
			goto exit_loopback_cmd;
		}

		status = qla4_83xx_wait_for_loopback_config_comp(ha,
								 wait_for_link);
		if (status != QLA_SUCCESS) {
			bsg_reply->result = DID_TIME_OUT << 16;
			/* Pre-config succeeded, so fall through to restore
			 * the default port configuration. */
			goto restore;
		}
	}

	DEBUG2(ql4_printk(KERN_INFO, ha,
			  "%s: mbox_cmd: %08X %08X %08X %08X %08X %08X %08X %08X\n",
			  __func__, mbox_cmd[0], mbox_cmd[1], mbox_cmd[2],
			  mbox_cmd[3], mbox_cmd[4], mbox_cmd[5], mbox_cmd[6],
			  mbox_cmd[7]));

	status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 8, &mbox_cmd[0],
					 &mbox_sts[0]);

	if (status == QLA_SUCCESS)
		bsg_reply->result = DID_OK << 16;
	else
		bsg_reply->result = DID_ERROR << 16;

	DEBUG2(ql4_printk(KERN_INFO, ha,
			  "%s: mbox_sts: %08X %08X %08X %08X %08X %08X %08X %08X\n",
			  __func__, mbox_sts[0], mbox_sts[1], mbox_sts[2],
			  mbox_sts[3], mbox_sts[4], mbox_sts[5], mbox_sts[6],
			  mbox_sts[7]));

	/* Send mbox_sts to application */
	bsg_job->reply_len = sizeof(struct iscsi_bsg_reply) + sizeof(mbox_sts);
	rsp_ptr = ((uint8_t *)bsg_reply) + sizeof(struct iscsi_bsg_reply);
	memcpy(rsp_ptr, mbox_sts, sizeof(mbox_sts));

restore:
	if (is_qla8032(ha) || is_qla8042(ha)) {
		status = qla4_83xx_post_loopback_config(ha, mbox_cmd);
		if (status != QLA_SUCCESS) {
			bsg_reply->result = DID_ERROR << 16;
			goto exit_loopback_cmd;
		}

		/* for pre_loopback_config() wait for LINK UP only
		 * if PHY LINK is UP */
		if (!(ha->addl_fw_state & FW_ADDSTATE_LINK_UP))
			wait_for_link = 0;

		status = qla4_83xx_wait_for_loopback_config_comp(ha,
								 wait_for_link);
		if (status != QLA_SUCCESS) {
			bsg_reply->result = DID_TIME_OUT << 16;
			goto exit_loopback_cmd;
		}
	}

exit_loopback_cmd:
	DEBUG2(ql4_printk(KERN_INFO, ha,
			  "%s: bsg_reply->result = x%x, status = %s\n",
			  __func__, bsg_reply->result, STATUS(status)));

	bsg_job_done(bsg_job, bsg_reply->result,
		     bsg_reply->reply_payload_rcv_len);
}
/*
 * qla4xxx_execute_diag_test - dispatch a vendor diagnostic request
 * @bsg_job: iscsi_bsg_job carrying the request
 *
 * vendor_cmd[1] names the diagnostic mailbox command; for MBOX_CMD_DIAG_TEST
 * vendor_cmd[2] selects the sub-test.  Memory/flash sub-tests and LED
 * commands go through ql4xxx_execute_diag_cmd(), loopback sub-tests through
 * qla4xxx_execute_diag_loopback_cmd().
 *
 * Returns QLA_SUCCESS when a handler ran (the reply is sent regardless of
 * the test outcome), -EINVAL for unrecognized commands.
 */
static int qla4xxx_execute_diag_test(struct bsg_job *bsg_job)
{
	struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
	struct scsi_qla_host *ha = to_qla_host(host);
	struct iscsi_bsg_request *bsg_req = bsg_job->request;
	uint32_t diag_cmd;
	int rval = -EINVAL;

	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: in\n", __func__));

	diag_cmd = bsg_req->rqst_data.h_vendor.vendor_cmd[1];

	/* LED get/set are plain mailbox pass-throughs */
	if ((diag_cmd == MBOX_CMD_SET_LED_CONFIG) ||
	    (diag_cmd == MBOX_CMD_GET_LED_CONFIG)) {
		ql4xxx_execute_diag_cmd(bsg_job);
		return QLA_SUCCESS;
	}

	if (diag_cmd != MBOX_CMD_DIAG_TEST) {
		ql4_printk(KERN_ERR, ha, "%s: Invalid diag cmd: 0x%x\n",
			   __func__, diag_cmd);
		return rval;
	}

	switch (bsg_req->rqst_data.h_vendor.vendor_cmd[2]) {
	case QL_DIAG_CMD_TEST_DDR_SIZE:
	case QL_DIAG_CMD_TEST_DDR_RW:
	case QL_DIAG_CMD_TEST_ONCHIP_MEM_RW:
	case QL_DIAG_CMD_TEST_NVRAM:
	case QL_DIAG_CMD_TEST_FLASH_ROM:
	case QL_DIAG_CMD_TEST_DMA_XFER:
	case QL_DIAG_CMD_SELF_DDR_RW:
	case QL_DIAG_CMD_SELF_ONCHIP_MEM_RW:
		/* Execute diag test for adapter RAM/FLASH.  Always return
		 * success as we want to send bsg_reply to the application. */
		ql4xxx_execute_diag_cmd(bsg_job);
		rval = QLA_SUCCESS;
		break;

	case QL_DIAG_CMD_TEST_INT_LOOPBACK:
	case QL_DIAG_CMD_TEST_EXT_LOOPBACK:
		/* Execute diag test for network.  Always return success as
		 * we want to send bsg_reply to the application. */
		qla4xxx_execute_diag_loopback_cmd(bsg_job);
		rval = QLA_SUCCESS;
		break;

	default:
		ql4_printk(KERN_ERR, ha, "%s: Invalid diag test: 0x%x\n",
			   __func__,
			   bsg_req->rqst_data.h_vendor.vendor_cmd[2]);
	}

	return rval;
}
/**
 * qla4xxx_process_vendor_specific - handle vendor specific bsg request
 * @bsg_job: iscsi_bsg_job to handle
 *
 * Dispatches on vendor_cmd[0] to the flash/NVRAM/ACB/diagnostic handlers.
 * Unknown vendor commands complete the job with DID_ERROR and return
 * -ENOSYS.
 **/
int qla4xxx_process_vendor_specific(struct bsg_job *bsg_job)
{
	struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
	struct iscsi_bsg_request *bsg_req = bsg_job->request;
	struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
	struct scsi_qla_host *ha = to_qla_host(host);

	switch (bsg_req->rqst_data.h_vendor.vendor_cmd[0]) {
	case QLISCSI_VND_READ_FLASH:
		return qla4xxx_read_flash(bsg_job);

	case QLISCSI_VND_UPDATE_FLASH:
		return qla4xxx_update_flash(bsg_job);

	case QLISCSI_VND_GET_ACB_STATE:
		return qla4xxx_get_acb_state(bsg_job);

	case QLISCSI_VND_READ_NVRAM:
		return qla4xxx_read_nvram(bsg_job);

	case QLISCSI_VND_UPDATE_NVRAM:
		return qla4xxx_update_nvram(bsg_job);

	case QLISCSI_VND_RESTORE_DEFAULTS:
		return qla4xxx_restore_defaults(bsg_job);

	case QLISCSI_VND_GET_ACB:
		return qla4xxx_bsg_get_acb(bsg_job);

	case QLISCSI_VND_DIAG_TEST:
		return qla4xxx_execute_diag_test(bsg_job);

	default:
		/* Log the vendor command that failed to match (not the BSG
		 * msgcode, which the caller has already validated). */
		ql4_printk(KERN_ERR, ha, "%s: invalid BSG vendor command: "
			   "0x%x\n", __func__,
			   bsg_req->rqst_data.h_vendor.vendor_cmd[0]);
		bsg_reply->result = (DID_ERROR << 16);
		bsg_reply->reply_payload_rcv_len = 0;
		bsg_job_done(bsg_job, bsg_reply->result,
			     bsg_reply->reply_payload_rcv_len);
		return -ENOSYS;
	}
}
/**
 * qla4xxx_bsg_request - handle bsg request from ISCSI transport
 * @bsg_job: iscsi_bsg_job to handle
 *
 * Only host vendor messages are supported; anything else is logged and
 * rejected with -ENOSYS.
 */
int qla4xxx_bsg_request(struct bsg_job *bsg_job)
{
	struct iscsi_bsg_request *bsg_req = bsg_job->request;
	struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
	struct scsi_qla_host *ha = to_qla_host(host);

	if (bsg_req->msgcode == ISCSI_BSG_HST_VENDOR)
		return qla4xxx_process_vendor_specific(bsg_job);

	ql4_printk(KERN_ERR, ha, "%s: invalid BSG command: 0x%x\n",
		   __func__, bsg_req->msgcode);
	return -ENOSYS;
}

View file

@ -0,0 +1,32 @@
/*
* QLogic iSCSI HBA Driver
* Copyright (c) 2011 QLogic Corporation
*
* See LICENSE.qla4xxx for copyright and licensing details.
*/
#ifndef __QL4_BSG_H
#define __QL4_BSG_H
/* BSG Vendor specific commands */
#define QLISCSI_VND_READ_FLASH 1
#define QLISCSI_VND_UPDATE_FLASH 2
#define QLISCSI_VND_GET_ACB_STATE 3
#define QLISCSI_VND_READ_NVRAM 4
#define QLISCSI_VND_UPDATE_NVRAM 5
#define QLISCSI_VND_RESTORE_DEFAULTS 6
#define QLISCSI_VND_GET_ACB 7
#define QLISCSI_VND_DIAG_TEST 8
/* QLISCSI_VND_DIAG_TEST sub code */
#define QL_DIAG_CMD_TEST_DDR_SIZE 0x2
#define QL_DIAG_CMD_TEST_DDR_RW 0x3
#define QL_DIAG_CMD_TEST_ONCHIP_MEM_RW 0x4
#define QL_DIAG_CMD_TEST_NVRAM 0x5 /* Only ISP4XXX */
#define QL_DIAG_CMD_TEST_FLASH_ROM 0x6
#define QL_DIAG_CMD_TEST_INT_LOOPBACK 0x7
#define QL_DIAG_CMD_TEST_EXT_LOOPBACK 0x8
#define QL_DIAG_CMD_TEST_DMA_XFER 0x9 /* Only ISP4XXX */
#define QL_DIAG_CMD_SELF_DDR_RW 0xC
#define QL_DIAG_CMD_SELF_ONCHIP_MEM_RW 0xD
#endif

View file

@ -0,0 +1,162 @@
/*
* QLogic iSCSI HBA Driver
* Copyright (c) 2003-2012 QLogic Corporation
*
* See LICENSE.qla4xxx for copyright and licensing details.
*/
#include "ql4_def.h"
#include "ql4_glbl.h"
#include "ql4_dbg.h"
#include "ql4_inline.h"
/*
 * qla4xxx_dump_buffer - hex-dump a buffer to the kernel log
 * @b: buffer to dump
 * @size: number of bytes to print
 *
 * Prints @size bytes, 16 per row, below a fixed column header.
 * NOTE(review): the per-byte printk() calls carry no KERN_ level; on
 * kernels that require KERN_CONT for continuations the rows may be split
 * across log lines — confirm against the target kernel.
 */
void qla4xxx_dump_buffer(void *b, uint32_t size)
{
	uint32_t cnt;
	uint8_t *c = b;

	printk(" 0 1 2 3 4 5 6 7 8 9 Ah Bh Ch Dh Eh "
	       "Fh\n");
	printk("------------------------------------------------------------"
	       "--\n");
	/* cnt is incremented inside the loop body (not in the for header)
	 * so the row break lands after every 16th byte. */
	for (cnt = 0; cnt < size; c++) {
		printk("%02x", *c);
		if (!(++cnt % 16))
			printk("\n");
		else
			printk(" ");
	}
	printk(KERN_INFO "\n");
}
/*
 * qla4xxx_dump_registers - print the adapter's register file to the log
 * @ha: adapter
 *
 * For 8022 adapters only the incoming mailbox registers are printed.  For
 * 40xx parts, prints mailboxes, flash/control registers, the chip-specific
 * u1/u2 register unions and (on 4022/4032) the page-1 shadow registers,
 * temporarily selecting the host-memory-config page to read req_q_out.
 */
void qla4xxx_dump_registers(struct scsi_qla_host *ha)
{
	uint8_t i;

	if (is_qla8022(ha)) {
		for (i = 1; i < MBOX_REG_COUNT; i++)
			printk(KERN_INFO "mailbox[%d] = 0x%08X\n",
			       i, readl(&ha->qla4_82xx_reg->mailbox_in[i]));
		return;
	}

	for (i = 0; i < MBOX_REG_COUNT; i++) {
		printk(KERN_INFO "0x%02X mailbox[%d] = 0x%08X\n",
		       (uint8_t) offsetof(struct isp_reg, mailbox[i]), i,
		       readw(&ha->reg->mailbox[i]));
	}

	printk(KERN_INFO "0x%02X flash_address = 0x%08X\n",
	       (uint8_t) offsetof(struct isp_reg, flash_address),
	       readw(&ha->reg->flash_address));
	printk(KERN_INFO "0x%02X flash_data = 0x%08X\n",
	       (uint8_t) offsetof(struct isp_reg, flash_data),
	       readw(&ha->reg->flash_data));
	printk(KERN_INFO "0x%02X ctrl_status = 0x%08X\n",
	       (uint8_t) offsetof(struct isp_reg, ctrl_status),
	       readw(&ha->reg->ctrl_status));

	/* u1 union layout depends on the chip generation */
	if (is_qla4010(ha)) {
		printk(KERN_INFO "0x%02X nvram = 0x%08X\n",
		       (uint8_t) offsetof(struct isp_reg, u1.isp4010.nvram),
		       readw(&ha->reg->u1.isp4010.nvram));
	} else if (is_qla4022(ha) || is_qla4032(ha)) {
		printk(KERN_INFO "0x%02X intr_mask = 0x%08X\n",
		       (uint8_t) offsetof(struct isp_reg, u1.isp4022.intr_mask),
		       readw(&ha->reg->u1.isp4022.intr_mask));
		printk(KERN_INFO "0x%02X nvram = 0x%08X\n",
		       (uint8_t) offsetof(struct isp_reg, u1.isp4022.nvram),
		       readw(&ha->reg->u1.isp4022.nvram));
		printk(KERN_INFO "0x%02X semaphore = 0x%08X\n",
		       (uint8_t) offsetof(struct isp_reg, u1.isp4022.semaphore),
		       readw(&ha->reg->u1.isp4022.semaphore));
	}

	printk(KERN_INFO "0x%02X req_q_in = 0x%08X\n",
	       (uint8_t) offsetof(struct isp_reg, req_q_in),
	       readw(&ha->reg->req_q_in));
	printk(KERN_INFO "0x%02X rsp_q_out = 0x%08X\n",
	       (uint8_t) offsetof(struct isp_reg, rsp_q_out),
	       readw(&ha->reg->rsp_q_out));

	if (is_qla4010(ha)) {
		printk(KERN_INFO "0x%02X ext_hw_conf = 0x%08X\n",
		       (uint8_t) offsetof(struct isp_reg, u2.isp4010.ext_hw_conf),
		       readw(&ha->reg->u2.isp4010.ext_hw_conf));
		printk(KERN_INFO "0x%02X port_ctrl = 0x%08X\n",
		       (uint8_t) offsetof(struct isp_reg, u2.isp4010.port_ctrl),
		       readw(&ha->reg->u2.isp4010.port_ctrl));
		printk(KERN_INFO "0x%02X port_status = 0x%08X\n",
		       (uint8_t) offsetof(struct isp_reg, u2.isp4010.port_status),
		       readw(&ha->reg->u2.isp4010.port_status));
		printk(KERN_INFO "0x%02X req_q_out = 0x%08X\n",
		       (uint8_t) offsetof(struct isp_reg, u2.isp4010.req_q_out),
		       readw(&ha->reg->u2.isp4010.req_q_out));
		printk(KERN_INFO "0x%02X gp_out = 0x%08X\n",
		       (uint8_t) offsetof(struct isp_reg, u2.isp4010.gp_out),
		       readw(&ha->reg->u2.isp4010.gp_out));
		printk(KERN_INFO "0x%02X gp_in = 0x%08X\n",
		       (uint8_t) offsetof(struct isp_reg, u2.isp4010.gp_in),
		       readw(&ha->reg->u2.isp4010.gp_in));
		printk(KERN_INFO "0x%02X port_err_status = 0x%08X\n", (uint8_t)
		       offsetof(struct isp_reg, u2.isp4010.port_err_status),
		       readw(&ha->reg->u2.isp4010.port_err_status));
	} else if (is_qla4022(ha) || is_qla4032(ha)) {
		printk(KERN_INFO "Page 0 Registers:\n");
		printk(KERN_INFO "0x%02X ext_hw_conf = 0x%08X\n", (uint8_t)
		       offsetof(struct isp_reg, u2.isp4022.p0.ext_hw_conf),
		       readw(&ha->reg->u2.isp4022.p0.ext_hw_conf));
		printk(KERN_INFO "0x%02X port_ctrl = 0x%08X\n", (uint8_t)
		       offsetof(struct isp_reg, u2.isp4022.p0.port_ctrl),
		       readw(&ha->reg->u2.isp4022.p0.port_ctrl));
		printk(KERN_INFO "0x%02X port_status = 0x%08X\n", (uint8_t)
		       offsetof(struct isp_reg, u2.isp4022.p0.port_status),
		       readw(&ha->reg->u2.isp4022.p0.port_status));
		printk(KERN_INFO "0x%02X gp_out = 0x%08X\n",
		       (uint8_t) offsetof(struct isp_reg, u2.isp4022.p0.gp_out),
		       readw(&ha->reg->u2.isp4022.p0.gp_out));
		printk(KERN_INFO "0x%02X gp_in = 0x%08X\n",
		       (uint8_t) offsetof(struct isp_reg, u2.isp4022.p0.gp_in),
		       readw(&ha->reg->u2.isp4022.p0.gp_in));
		printk(KERN_INFO "0x%02X port_err_status = 0x%08X\n", (uint8_t)
		       offsetof(struct isp_reg, u2.isp4022.p0.port_err_status),
		       readw(&ha->reg->u2.isp4022.p0.port_err_status));
		printk(KERN_INFO "Page 1 Registers:\n");
		/* Select the host-memory-config page to expose req_q_out,
		 * then switch back to the port-control/status page. */
		writel(HOST_MEM_CFG_PAGE & set_rmask(CSR_SCSI_PAGE_SELECT),
		       &ha->reg->ctrl_status);
		printk(KERN_INFO "0x%02X req_q_out = 0x%08X\n",
		       (uint8_t) offsetof(struct isp_reg, u2.isp4022.p1.req_q_out),
		       readw(&ha->reg->u2.isp4022.p1.req_q_out));
		writel(PORT_CTRL_STAT_PAGE & set_rmask(CSR_SCSI_PAGE_SELECT),
		       &ha->reg->ctrl_status);
	}
}
/*
 * qla4_8xxx_dump_peg_reg - log firmware PEG halt status registers
 * @ha: adapter
 *
 * Reads PEG_HALT_STATUS1/2 via the direct register window.  On 8022 the
 * per-PEG program counters (offset 0x3c into each CRB_PEG_NET block) are
 * printed too; 8032/8042 only log the two halt-status words.
 */
void qla4_8xxx_dump_peg_reg(struct scsi_qla_host *ha)
{
	uint32_t halt_status1, halt_status2;

	halt_status1 = qla4_8xxx_rd_direct(ha, QLA8XXX_PEG_HALT_STATUS1);
	halt_status2 = qla4_8xxx_rd_direct(ha, QLA8XXX_PEG_HALT_STATUS2);

	if (is_qla8022(ha)) {
		ql4_printk(KERN_INFO, ha,
			   "scsi(%ld): %s, ISP%04x Dumping hw/fw registers:\n"
			   " PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2: 0x%x,\n"
			   " PEG_NET_0_PC: 0x%x, PEG_NET_1_PC: 0x%x,\n"
			   " PEG_NET_2_PC: 0x%x, PEG_NET_3_PC: 0x%x,\n"
			   " PEG_NET_4_PC: 0x%x\n", ha->host_no, __func__,
			   ha->pdev->device, halt_status1, halt_status2,
			   qla4_82xx_rd_32(ha, QLA82XX_CRB_PEG_NET_0 + 0x3c),
			   qla4_82xx_rd_32(ha, QLA82XX_CRB_PEG_NET_1 + 0x3c),
			   qla4_82xx_rd_32(ha, QLA82XX_CRB_PEG_NET_2 + 0x3c),
			   qla4_82xx_rd_32(ha, QLA82XX_CRB_PEG_NET_3 + 0x3c),
			   qla4_82xx_rd_32(ha, QLA82XX_CRB_PEG_NET_4 + 0x3c));
	} else if (is_qla8032(ha) || is_qla8042(ha)) {
		ql4_printk(KERN_INFO, ha,
			   "scsi(%ld): %s, ISP%04x Dumping hw/fw registers:\n"
			   " PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2: 0x%x,\n",
			   ha->host_no, __func__, ha->pdev->device,
			   halt_status1, halt_status2);
	}
}

View file

@ -0,0 +1,62 @@
/*
* QLogic iSCSI HBA Driver
* Copyright (c) 2003-2012 QLogic Corporation
*
* See LICENSE.qla4xxx for copyright and licensing details.
*/
/*
* Driver debug definitions.
*/
/* #define QL_DEBUG */ /* DEBUG messages */
/* #define QL_DEBUG_LEVEL_3 */ /* Output function tracing */
/* #define QL_DEBUG_LEVEL_4 */
/* #define QL_DEBUG_LEVEL_5 */
/* #define QL_DEBUG_LEVEL_7 */
/* #define QL_DEBUG_LEVEL_9 */

#define QL_DEBUG_LEVEL_2 /* Always enable error messages */

/*
 * Each DEBUGn() macro expands to exactly one statement (do { } while (0))
 * with NO trailing semicolon, so a call like "if (c) DEBUGn(x); else ..."
 * remains valid.  DEBUG2/DEBUG3 are additionally gated at run time by the
 * ql4xextended_error_logging module parameter.
 */
#if defined(QL_DEBUG)
#define DEBUG(x)   do { x; } while (0)
#else
#define DEBUG(x)   do { } while (0)
#endif

#if defined(QL_DEBUG_LEVEL_2)
#define DEBUG2(x)   do { if (ql4xextended_error_logging == 2) x; } while (0)
#define DEBUG2_3(x)   do { x; } while (0)
#else /* */
#define DEBUG2(x)   do { } while (0)
#endif /* */

#if defined(QL_DEBUG_LEVEL_3)
#define DEBUG3(x)   do { if (ql4xextended_error_logging == 3) x; } while (0)
#else /* */
#define DEBUG3(x)   do { } while (0)
#if !defined(QL_DEBUG_LEVEL_2)
#define DEBUG2_3(x)   do { } while (0)
#endif /* */
#endif /* */

#if defined(QL_DEBUG_LEVEL_4)
#define DEBUG4(x)   do { x; } while (0)
#else /* */
#define DEBUG4(x)   do { } while (0)
#endif /* */

#if defined(QL_DEBUG_LEVEL_5)
#define DEBUG5(x)   do { x; } while (0)
#else /* */
#define DEBUG5(x)   do { } while (0)
#endif /* */

#if defined(QL_DEBUG_LEVEL_7)
#define DEBUG7(x)   do { x; } while (0)
#else /* */
#define DEBUG7(x)   do { } while (0)
#endif /* */

#if defined(QL_DEBUG_LEVEL_9)
#define DEBUG9(x)   do { x; } while (0)
#else /* */
#define DEBUG9(x)   do { } while (0)
#endif /* */

File diff suppressed because it is too large Load diff

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,293 @@
/*
* QLogic iSCSI HBA Driver
* Copyright (c) 2003-2013 QLogic Corporation
*
* See LICENSE.qla4xxx for copyright and licensing details.
*/
#ifndef __QLA4x_GBL_H
#define __QLA4x_GBL_H
struct iscsi_cls_conn;
int qla4xxx_hw_reset(struct scsi_qla_host *ha);
int ql4xxx_lock_drvr_wait(struct scsi_qla_host *a);
int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb *srb);
int qla4xxx_initialize_adapter(struct scsi_qla_host *ha, int is_reset);
int qla4xxx_soft_reset(struct scsi_qla_host *ha);
irqreturn_t qla4xxx_intr_handler(int irq, void *dev_id);
void qla4xxx_free_ddb(struct scsi_qla_host *ha, struct ddb_entry *ddb_entry);
void qla4xxx_process_aen(struct scsi_qla_host *ha, uint8_t process_aen);
int qla4xxx_get_dhcp_ip_address(struct scsi_qla_host *ha);
int qla4xxx_abort_task(struct scsi_qla_host *ha, struct srb *srb);
int qla4xxx_reset_lun(struct scsi_qla_host *ha, struct ddb_entry *ddb_entry,
uint64_t lun);
int qla4xxx_reset_target(struct scsi_qla_host *ha,
struct ddb_entry *ddb_entry);
int qla4xxx_get_flash(struct scsi_qla_host *ha, dma_addr_t dma_addr,
uint32_t offset, uint32_t len);
int qla4xxx_get_firmware_status(struct scsi_qla_host *ha);
int qla4xxx_get_firmware_state(struct scsi_qla_host *ha);
int qla4xxx_initialize_fw_cb(struct scsi_qla_host *ha);
/* FIXME: Goodness! this really wants a small struct to hold the
* parameters. On x86 the args will get passed on the stack! */
int qla4xxx_get_fwddb_entry(struct scsi_qla_host *ha,
uint16_t fw_ddb_index,
struct dev_db_entry *fw_ddb_entry,
dma_addr_t fw_ddb_entry_dma,
uint32_t *num_valid_ddb_entries,
uint32_t *next_ddb_index,
uint32_t *fw_ddb_device_state,
uint32_t *conn_err_detail,
uint16_t *tcp_source_port_num,
uint16_t *connection_id);
int qla4xxx_set_ddb_entry(struct scsi_qla_host * ha, uint16_t fw_ddb_index,
dma_addr_t fw_ddb_entry_dma, uint32_t *mbx_sts);
uint8_t qla4xxx_get_ifcb(struct scsi_qla_host *ha, uint32_t *mbox_cmd,
uint32_t *mbox_sts, dma_addr_t init_fw_cb_dma);
int qla4xxx_conn_close_sess_logout(struct scsi_qla_host *ha,
uint16_t fw_ddb_index,
uint16_t connection_id,
uint16_t option);
int qla4xxx_disable_acb(struct scsi_qla_host *ha);
int qla4xxx_set_acb(struct scsi_qla_host *ha, uint32_t *mbox_cmd,
uint32_t *mbox_sts, dma_addr_t acb_dma);
int qla4xxx_get_acb(struct scsi_qla_host *ha, dma_addr_t acb_dma,
uint32_t acb_type, uint32_t len);
int qla4xxx_get_ip_state(struct scsi_qla_host *ha, uint32_t acb_idx,
uint32_t ip_idx, uint32_t *sts);
void qla4xxx_mark_device_missing(struct iscsi_cls_session *cls_session);
u16 rd_nvram_word(struct scsi_qla_host *ha, int offset);
u8 rd_nvram_byte(struct scsi_qla_host *ha, int offset);
void qla4xxx_get_crash_record(struct scsi_qla_host *ha);
int qla4xxx_is_nvram_configuration_valid(struct scsi_qla_host *ha);
int qla4xxx_about_firmware(struct scsi_qla_host *ha);
void qla4xxx_interrupt_service_routine(struct scsi_qla_host *ha,
uint32_t intr_status);
int qla4xxx_init_rings(struct scsi_qla_host *ha);
void qla4xxx_srb_compl(struct kref *ref);
struct srb *qla4xxx_del_from_active_array(struct scsi_qla_host *ha,
uint32_t index);
int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
uint32_t state, uint32_t conn_error);
void qla4xxx_dump_buffer(void *b, uint32_t size);
int qla4xxx_send_marker_iocb(struct scsi_qla_host *ha,
struct ddb_entry *ddb_entry, uint64_t lun, uint16_t mrkr_mod);
int qla4xxx_set_flash(struct scsi_qla_host *ha, dma_addr_t dma_addr,
uint32_t offset, uint32_t length, uint32_t options);
int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount,
uint8_t outCount, uint32_t *mbx_cmd, uint32_t *mbx_sts);
int qla4xxx_get_chap_index(struct scsi_qla_host *ha, char *username,
char *password, int bidi, uint16_t *chap_index);
int qla4xxx_set_chap(struct scsi_qla_host *ha, char *username, char *password,
uint16_t idx, int bidi);
void qla4xxx_queue_iocb(struct scsi_qla_host *ha);
void qla4xxx_complete_iocb(struct scsi_qla_host *ha);
int qla4xxx_get_sys_info(struct scsi_qla_host *ha);
int qla4xxx_iospace_config(struct scsi_qla_host *ha);
void qla4xxx_pci_config(struct scsi_qla_host *ha);
int qla4xxx_start_firmware(struct scsi_qla_host *ha);
irqreturn_t qla4xxx_intr_handler(int irq, void *dev_id);
uint16_t qla4xxx_rd_shdw_req_q_out(struct scsi_qla_host *ha);
uint16_t qla4xxx_rd_shdw_rsp_q_in(struct scsi_qla_host *ha);
int qla4xxx_request_irqs(struct scsi_qla_host *ha);
void qla4xxx_free_irqs(struct scsi_qla_host *ha);
void qla4xxx_process_response_queue(struct scsi_qla_host *ha);
void qla4xxx_wake_dpc(struct scsi_qla_host *ha);
void qla4xxx_get_conn_event_log(struct scsi_qla_host *ha);
void qla4xxx_mailbox_premature_completion(struct scsi_qla_host *ha);
void qla4xxx_dump_registers(struct scsi_qla_host *ha);
uint8_t qla4xxx_update_local_ifcb(struct scsi_qla_host *ha,
uint32_t *mbox_cmd,
uint32_t *mbox_sts,
struct addr_ctrl_blk *init_fw_cb,
dma_addr_t init_fw_cb_dma);
void qla4_8xxx_pci_config(struct scsi_qla_host *);
int qla4_8xxx_iospace_config(struct scsi_qla_host *ha);
int qla4_8xxx_load_risc(struct scsi_qla_host *);
irqreturn_t qla4_82xx_intr_handler(int irq, void *dev_id);
void qla4_82xx_queue_iocb(struct scsi_qla_host *ha);
void qla4_82xx_complete_iocb(struct scsi_qla_host *ha);
int qla4_82xx_crb_win_lock(struct scsi_qla_host *);
void qla4_82xx_crb_win_unlock(struct scsi_qla_host *);
int qla4_82xx_pci_get_crb_addr_2M(struct scsi_qla_host *, ulong *);
void qla4_82xx_wr_32(struct scsi_qla_host *, ulong, u32);
uint32_t qla4_82xx_rd_32(struct scsi_qla_host *, ulong);
int qla4_82xx_pci_mem_read_2M(struct scsi_qla_host *, u64, void *, int);
int qla4_82xx_pci_mem_write_2M(struct scsi_qla_host *ha, u64, void *, int);
int qla4_82xx_isp_reset(struct scsi_qla_host *ha);
void qla4_82xx_interrupt_service_routine(struct scsi_qla_host *ha,
uint32_t intr_status);
uint16_t qla4_82xx_rd_shdw_req_q_out(struct scsi_qla_host *ha);
uint16_t qla4_82xx_rd_shdw_rsp_q_in(struct scsi_qla_host *ha);
int qla4_8xxx_get_sys_info(struct scsi_qla_host *ha);
void qla4_8xxx_watchdog(struct scsi_qla_host *ha);
int qla4_8xxx_stop_firmware(struct scsi_qla_host *ha);
int qla4_8xxx_get_flash_info(struct scsi_qla_host *ha);
void qla4_82xx_enable_intrs(struct scsi_qla_host *ha);
void qla4_82xx_disable_intrs(struct scsi_qla_host *ha);
int qla4_8xxx_enable_msix(struct scsi_qla_host *ha);
void qla4_8xxx_disable_msix(struct scsi_qla_host *ha);
irqreturn_t qla4_8xxx_msi_handler(int irq, void *dev_id);
irqreturn_t qla4_8xxx_default_intr_handler(int irq, void *dev_id);
irqreturn_t qla4_8xxx_msix_rsp_q(int irq, void *dev_id);
void qla4xxx_mark_all_devices_missing(struct scsi_qla_host *ha);
void qla4xxx_dead_adapter_cleanup(struct scsi_qla_host *ha);
int qla4_82xx_idc_lock(struct scsi_qla_host *ha);
void qla4_82xx_idc_unlock(struct scsi_qla_host *ha);
int qla4_8xxx_device_state_handler(struct scsi_qla_host *ha);
void qla4_8xxx_need_qsnt_handler(struct scsi_qla_host *ha);
void qla4_8xxx_clear_drv_active(struct scsi_qla_host *ha);
void qla4_8xxx_set_drv_active(struct scsi_qla_host *ha);
int qla4xxx_conn_open(struct scsi_qla_host *ha, uint16_t fw_ddb_index);
int qla4xxx_set_param_ddbentry(struct scsi_qla_host *ha,
struct ddb_entry *ddb_entry,
struct iscsi_cls_conn *cls_conn,
uint32_t *mbx_sts);
int qla4xxx_session_logout_ddb(struct scsi_qla_host *ha,
struct ddb_entry *ddb_entry, int options);
int qla4xxx_req_ddb_entry(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
uint32_t *mbx_sts);
int qla4xxx_clear_ddb_entry(struct scsi_qla_host *ha, uint32_t fw_ddb_index);
int qla4xxx_send_passthru0(struct iscsi_task *task);
void qla4xxx_free_ddb_index(struct scsi_qla_host *ha);
int qla4xxx_get_mgmt_data(struct scsi_qla_host *ha, uint16_t fw_ddb_index,
uint16_t stats_size, dma_addr_t stats_dma);
void qla4xxx_update_session_conn_param(struct scsi_qla_host *ha,
struct ddb_entry *ddb_entry);
void qla4xxx_update_session_conn_fwddb_param(struct scsi_qla_host *ha,
struct ddb_entry *ddb_entry);
int qla4xxx_bootdb_by_index(struct scsi_qla_host *ha,
struct dev_db_entry *fw_ddb_entry,
dma_addr_t fw_ddb_entry_dma, uint16_t ddb_index);
int qla4xxx_get_chap(struct scsi_qla_host *ha, char *username,
char *password, uint16_t idx);
int qla4xxx_get_nvram(struct scsi_qla_host *ha, dma_addr_t nvram_dma,
uint32_t offset, uint32_t size);
int qla4xxx_set_nvram(struct scsi_qla_host *ha, dma_addr_t nvram_dma,
uint32_t offset, uint32_t size);
int qla4xxx_restore_factory_defaults(struct scsi_qla_host *ha,
uint32_t region, uint32_t field0,
uint32_t field1);
int qla4xxx_get_ddb_index(struct scsi_qla_host *ha, uint16_t *ddb_index);
void qla4xxx_login_flash_ddb(struct iscsi_cls_session *cls_session);
int qla4xxx_unblock_ddb(struct iscsi_cls_session *cls_session);
int qla4xxx_unblock_flash_ddb(struct iscsi_cls_session *cls_session);
int qla4xxx_flash_ddb_change(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
struct ddb_entry *ddb_entry, uint32_t state);
int qla4xxx_ddb_change(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
struct ddb_entry *ddb_entry, uint32_t state);
void qla4xxx_build_ddb_list(struct scsi_qla_host *ha, int is_reset);
int qla4xxx_post_aen_work(struct scsi_qla_host *ha,
enum iscsi_host_event_code aen_code,
uint32_t data_size, uint8_t *data);
int qla4xxx_ping_iocb(struct scsi_qla_host *ha, uint32_t options,
uint32_t payload_size, uint32_t pid, uint8_t *ipaddr);
int qla4xxx_post_ping_evt_work(struct scsi_qla_host *ha,
uint32_t status, uint32_t pid,
uint32_t data_size, uint8_t *data);
int qla4xxx_flashdb_by_index(struct scsi_qla_host *ha,
struct dev_db_entry *fw_ddb_entry,
dma_addr_t fw_ddb_entry_dma, uint16_t ddb_index);
/* BSG Functions */
int qla4xxx_bsg_request(struct bsg_job *bsg_job);
int qla4xxx_process_vendor_specific(struct bsg_job *bsg_job);
void qla4xxx_arm_relogin_timer(struct ddb_entry *ddb_entry);
int qla4xxx_get_minidump_template(struct scsi_qla_host *ha,
dma_addr_t phys_addr);
int qla4xxx_req_template_size(struct scsi_qla_host *ha);
void qla4_8xxx_alloc_sysfs_attr(struct scsi_qla_host *ha);
void qla4_8xxx_free_sysfs_attr(struct scsi_qla_host *ha);
void qla4xxx_alloc_fw_dump(struct scsi_qla_host *ha);
int qla4_82xx_try_start_fw(struct scsi_qla_host *ha);
int qla4_8xxx_need_reset(struct scsi_qla_host *ha);
int qla4_82xx_md_rd_32(struct scsi_qla_host *ha, uint32_t off, uint32_t *data);
int qla4_82xx_md_wr_32(struct scsi_qla_host *ha, uint32_t off, uint32_t data);
void qla4_82xx_rom_lock_recovery(struct scsi_qla_host *ha);
void qla4_82xx_queue_mbox_cmd(struct scsi_qla_host *ha, uint32_t *mbx_cmd,
int incount);
void qla4_82xx_process_mbox_intr(struct scsi_qla_host *ha, int outcount);
void qla4xxx_queue_mbox_cmd(struct scsi_qla_host *ha, uint32_t *mbx_cmd,
int incount);
void qla4xxx_process_mbox_intr(struct scsi_qla_host *ha, int outcount);
void qla4_8xxx_dump_peg_reg(struct scsi_qla_host *ha);
void qla4_83xx_disable_intrs(struct scsi_qla_host *ha);
void qla4_83xx_enable_intrs(struct scsi_qla_host *ha);
int qla4_83xx_start_firmware(struct scsi_qla_host *ha);
irqreturn_t qla4_83xx_intr_handler(int irq, void *dev_id);
void qla4_83xx_interrupt_service_routine(struct scsi_qla_host *ha,
uint32_t intr_status);
int qla4_83xx_isp_reset(struct scsi_qla_host *ha);
void qla4_83xx_queue_iocb(struct scsi_qla_host *ha);
void qla4_83xx_complete_iocb(struct scsi_qla_host *ha);
uint32_t qla4_83xx_rd_reg(struct scsi_qla_host *ha, ulong addr);
void qla4_83xx_wr_reg(struct scsi_qla_host *ha, ulong addr, uint32_t val);
int qla4_83xx_rd_reg_indirect(struct scsi_qla_host *ha, uint32_t addr,
uint32_t *data);
int qla4_83xx_wr_reg_indirect(struct scsi_qla_host *ha, uint32_t addr,
uint32_t data);
int qla4_83xx_drv_lock(struct scsi_qla_host *ha);
void qla4_83xx_drv_unlock(struct scsi_qla_host *ha);
void qla4_83xx_rom_lock_recovery(struct scsi_qla_host *ha);
void qla4_83xx_queue_mbox_cmd(struct scsi_qla_host *ha, uint32_t *mbx_cmd,
int incount);
void qla4_83xx_process_mbox_intr(struct scsi_qla_host *ha, int outcount);
void qla4_83xx_read_reset_template(struct scsi_qla_host *ha);
void qla4_83xx_set_idc_dontreset(struct scsi_qla_host *ha);
int qla4_83xx_idc_dontreset(struct scsi_qla_host *ha);
int qla4_83xx_lockless_flash_read_u32(struct scsi_qla_host *ha,
uint32_t flash_addr, uint8_t *p_data,
int u32_word_count);
void qla4_83xx_clear_idc_dontreset(struct scsi_qla_host *ha);
void qla4_83xx_need_reset_handler(struct scsi_qla_host *ha);
int qla4_83xx_flash_read_u32(struct scsi_qla_host *ha, uint32_t flash_addr,
uint8_t *p_data, int u32_word_count);
void qla4_83xx_get_idc_param(struct scsi_qla_host *ha);
void qla4_8xxx_set_rst_ready(struct scsi_qla_host *ha);
void qla4_8xxx_clear_rst_ready(struct scsi_qla_host *ha);
int qla4_8xxx_device_bootstrap(struct scsi_qla_host *ha);
void qla4_8xxx_get_minidump(struct scsi_qla_host *ha);
int qla4_8xxx_intr_disable(struct scsi_qla_host *ha);
int qla4_8xxx_intr_enable(struct scsi_qla_host *ha);
int qla4_8xxx_set_param(struct scsi_qla_host *ha, int param);
int qla4_8xxx_update_idc_reg(struct scsi_qla_host *ha);
int qla4_83xx_post_idc_ack(struct scsi_qla_host *ha);
void qla4_83xx_disable_pause(struct scsi_qla_host *ha);
void qla4_83xx_enable_mbox_intrs(struct scsi_qla_host *ha);
int qla4_83xx_can_perform_reset(struct scsi_qla_host *ha);
int qla4xxx_get_default_ddb(struct scsi_qla_host *ha, uint32_t options,
dma_addr_t dma_addr);
int qla4xxx_get_uni_chap_at_index(struct scsi_qla_host *ha, char *username,
char *password, uint16_t chap_index);
int qla4xxx_disable_acb(struct scsi_qla_host *ha);
int qla4xxx_set_acb(struct scsi_qla_host *ha, uint32_t *mbox_cmd,
uint32_t *mbox_sts, dma_addr_t acb_dma);
int qla4xxx_get_acb(struct scsi_qla_host *ha, dma_addr_t acb_dma,
uint32_t acb_type, uint32_t len);
int qla4_84xx_config_acb(struct scsi_qla_host *ha, int acb_config);
int qla4_8xxx_ms_mem_write_128b(struct scsi_qla_host *ha,
uint64_t addr, uint32_t *data, uint32_t count);
uint8_t qla4xxx_set_ipaddr_state(uint8_t fw_ipaddr_state);
int qla4_83xx_get_port_config(struct scsi_qla_host *ha, uint32_t *config);
int qla4_83xx_set_port_config(struct scsi_qla_host *ha, uint32_t *config);
int qla4_8xxx_check_init_adapter_retry(struct scsi_qla_host *ha);
int qla4_83xx_is_detached(struct scsi_qla_host *ha);
int qla4xxx_sysfs_ddb_export(struct scsi_qla_host *ha);
extern int ql4xextended_error_logging;
extern int ql4xdontresethba;
extern int ql4xenablemsix;
extern int ql4xmdcapmask;
extern int ql4xenablemd;
extern struct device_attribute *qla4xxx_host_attrs[];
#endif /* _QLA4x_GBL_H */

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,96 @@
/*
* QLogic iSCSI HBA Driver
* Copyright (c) 2003-2013 QLogic Corporation
*
* See LICENSE.qla4xxx for copyright and licensing details.
*/
/*
 * qla4xxx_lookup_ddb_by_fw_index - map a firmware DDB index to the driver's
 * internal device database entry.
 * @ha: Pointer to host adapter structure.
 * @fw_ddb_index: Firmware's device database index.
 *
 * Returns the internal ddb_entry for @fw_ddb_index, or NULL when the index
 * is out of range or no device occupies that slot.
 */
static inline struct ddb_entry *
qla4xxx_lookup_ddb_by_fw_index(struct scsi_qla_host *ha, uint32_t fw_ddb_index)
{
	struct ddb_entry *entry = NULL;

	if (fw_ddb_index < MAX_DDB_ENTRIES &&
	    ha->fw_ddb_index_map[fw_ddb_index] !=
	    (struct ddb_entry *) INVALID_ENTRY)
		entry = ha->fw_ddb_index_map[fw_ddb_index];

	DEBUG3(printk("scsi%d: %s: ddb [%d], ddb_entry = %p\n",
		      ha->host_no, __func__, fw_ddb_index, entry));

	return entry;
}
/*
 * __qla4xxx_enable_intrs - unmask SCSI interrupts; caller must hold
 * ha->hardware_lock.
 *
 * ISP4022/4032 use the isp4022 intr_mask register; other parts use the
 * ctrl_status register.  The readl() after each writel() flushes the posted
 * PCI write.  Sets AF_INTERRUPTS_ON so the driver knows interrupts are live.
 */
static inline void
__qla4xxx_enable_intrs(struct scsi_qla_host *ha)
{
	/* is_qla4022()/is_qla4032() are boolean predicates; use logical OR
	 * instead of the original bitwise '|'. */
	if (is_qla4022(ha) || is_qla4032(ha)) {
		writel(set_rmask(IMR_SCSI_INTR_ENABLE),
		       &ha->reg->u1.isp4022.intr_mask);
		readl(&ha->reg->u1.isp4022.intr_mask);
	} else {
		writel(set_rmask(CSR_SCSI_INTR_ENABLE), &ha->reg->ctrl_status);
		readl(&ha->reg->ctrl_status);
	}
	set_bit(AF_INTERRUPTS_ON, &ha->flags);
}
/*
 * __qla4xxx_disable_intrs - mask SCSI interrupts; caller must hold
 * ha->hardware_lock.
 *
 * Mirror of __qla4xxx_enable_intrs(): clears the enable mask in the
 * chip-specific register, flushes the posted write with readl(), and clears
 * AF_INTERRUPTS_ON.
 */
static inline void
__qla4xxx_disable_intrs(struct scsi_qla_host *ha)
{
	/* is_qla4022()/is_qla4032() are boolean predicates; use logical OR
	 * instead of the original bitwise '|'. */
	if (is_qla4022(ha) || is_qla4032(ha)) {
		writel(clr_rmask(IMR_SCSI_INTR_ENABLE),
		       &ha->reg->u1.isp4022.intr_mask);
		readl(&ha->reg->u1.isp4022.intr_mask);
	} else {
		writel(clr_rmask(CSR_SCSI_INTR_ENABLE), &ha->reg->ctrl_status);
		readl(&ha->reg->ctrl_status);
	}
	clear_bit(AF_INTERRUPTS_ON, &ha->flags);
}
/* Locked wrapper around __qla4xxx_enable_intrs(). */
static inline void
qla4xxx_enable_intrs(struct scsi_qla_host *ha)
{
	unsigned long lock_flags;

	spin_lock_irqsave(&ha->hardware_lock, lock_flags);
	__qla4xxx_enable_intrs(ha);
	spin_unlock_irqrestore(&ha->hardware_lock, lock_flags);
}
/* Locked wrapper around __qla4xxx_disable_intrs(). */
static inline void
qla4xxx_disable_intrs(struct scsi_qla_host *ha)
{
	unsigned long lock_flags;

	spin_lock_irqsave(&ha->hardware_lock, lock_flags);
	__qla4xxx_disable_intrs(ha);
	spin_unlock_irqrestore(&ha->hardware_lock, lock_flags);
}
/* Classify a CHAP entry: BIT_7 set means local CHAP, clear means BIDI. */
static inline int qla4xxx_get_chap_type(struct ql4_chap_table *chap_entry)
{
	return (chap_entry->flags & BIT_7) ? LOCAL_CHAP : BIDI_CHAP;
}

View file

@ -0,0 +1,552 @@
/*
* QLogic iSCSI HBA Driver
* Copyright (c) 2003-2013 QLogic Corporation
*
* See LICENSE.qla4xxx for copyright and licensing details.
*/
#include "ql4_def.h"
#include "ql4_glbl.h"
#include "ql4_dbg.h"
#include "ql4_inline.h"
#include <scsi/scsi_tcq.h>
/*
 * qla4xxx_space_in_req_ring - check for @req_cnt free request-ring entries
 * @ha: Pointer to host adapter structure.
 * @req_cnt: number of entries the caller wants to queue
 *
 * When the cached free count looks too small, re-derive it from the
 * firmware's shadow out-pointer.  Returns 1 when there is room (keeping a
 * two-entry safety margin), 0 otherwise.
 */
static int
qla4xxx_space_in_req_ring(struct scsi_qla_host *ha, uint16_t req_cnt)
{
	uint16_t out;

	/* Refresh the cached free-entry count when it looks exhausted. */
	if ((req_cnt + 2) >= ha->req_q_count) {
		out = (uint16_t) ha->isp_ops->rd_shdw_req_q_out(ha);
		if (ha->request_in < out)
			ha->req_q_count = out - ha->request_in;
		else
			ha->req_q_count = REQUEST_QUEUE_DEPTH -
					  (ha->request_in - out);
	}

	return ((req_cnt + 2) < ha->req_q_count) ? 1 : 0;
}
/* Bump the request-ring producer index/pointer, wrapping at ring end. */
static void qla4xxx_advance_req_ring_ptr(struct scsi_qla_host *ha)
{
	ha->request_in++;
	ha->request_ptr++;
	if (ha->request_in == REQUEST_QUEUE_DEPTH) {
		ha->request_in = 0;
		ha->request_ptr = ha->request_ring;
	}
}
/**
 * qla4xxx_get_req_pkt - hand out (and zero) the next request-queue entry
 * @ha: Pointer to host adapter structure.
 * @queue_entry: Pointer to pointer to queue entry structure
 *
 * On success, stores the current producer slot in *@queue_entry (zeroed),
 * advances the producer pointer, and charges one entry against the cached
 * free count.  Returns QLA_SUCCESS, or QLA_ERROR when the ring is full.
 **/
static int qla4xxx_get_req_pkt(struct scsi_qla_host *ha,
			       struct queue_entry **queue_entry)
{
	uint16_t req_cnt = 1;

	if (!qla4xxx_space_in_req_ring(ha, req_cnt))
		return QLA_ERROR;

	*queue_entry = ha->request_ptr;
	memset(*queue_entry, 0, sizeof(**queue_entry));

	qla4xxx_advance_req_ring_ptr(ha);
	ha->req_q_count -= req_cnt;

	return QLA_SUCCESS;
}
/**
 * qla4xxx_send_marker_iocb - issues marker iocb to HBA
 * @ha: Pointer to host adapter structure.
 * @ddb_entry: Pointer to device database entry
 * @lun: SCSI LUN
 * @mrkr_mod: marker modifier
 *
 * This routine issues a marker IOCB.  Returns QLA_SUCCESS, or QLA_ERROR
 * when no request-queue entry is available.
 **/
int qla4xxx_send_marker_iocb(struct scsi_qla_host *ha,
	struct ddb_entry *ddb_entry, uint64_t lun, uint16_t mrkr_mod)
{
	struct qla4_marker_entry *marker_entry;
	unsigned long flags = 0;
	uint8_t status = QLA_SUCCESS;

	/* Acquire hardware specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Get pointer to the queue entry for the marker */
	if (qla4xxx_get_req_pkt(ha, (struct queue_entry **) &marker_entry) !=
	    QLA_SUCCESS) {
		status = QLA_ERROR;
		goto exit_send_marker;
	}

	/* Put the marker in the request queue */
	marker_entry->hdr.entryType = ET_MARKER;
	marker_entry->hdr.entryCount = 1;
	marker_entry->target = cpu_to_le16(ddb_entry->fw_ddb_index);
	marker_entry->modifier = cpu_to_le16(mrkr_mod);
	int_to_scsilun(lun, &marker_entry->lun);

	/* Make the IOCB visible in memory before ringing the doorbell. */
	wmb();

	/* Tell ISP it's got a new I/O request */
	ha->isp_ops->queue_iocb(ha);

exit_send_marker:
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return status;
}
/*
 * Claim the next request-ring slot as a continuation IOCB and initialize
 * its header.  Caller must have verified ring space beforehand.
 */
static struct continuation_t1_entry *
qla4xxx_alloc_cont_entry(struct scsi_qla_host *ha)
{
	struct continuation_t1_entry *entry;

	entry = (struct continuation_t1_entry *)ha->request_ptr;
	qla4xxx_advance_req_ring_ptr(ha);

	/* Load packet defaults */
	entry->hdr.entryType = ET_CONTINUE;
	entry->hdr.entryCount = 1;
	entry->hdr.systemDefined = (uint8_t) cpu_to_le16(ha->request_in);

	return entry;
}
/*
 * Compute how many IOCBs are needed for @dsds data segments: one command
 * IOCB holds COMMAND_SEG segments, each continuation IOCB holds
 * CONTINUE_SEG more.
 */
static uint16_t qla4xxx_calc_request_entries(uint16_t dsds)
{
	uint16_t iocbs = 1;
	uint16_t extra;

	if (dsds > COMMAND_SEG) {
		extra = dsds - COMMAND_SEG;
		iocbs += extra / CONTINUE_SEG;
		if (extra % CONTINUE_SEG)
			iocbs++;
	}

	return iocbs;
}
/*
 * qla4xxx_build_scsi_iocbs - fill the command IOCB's data-segment list from
 * the command's DMA scatter/gather list, allocating continuation IOCBs as
 * the per-IOCB segment slots run out.
 * @srb: SCSI request block carrying the command and adapter pointers
 * @cmd_entry: command IOCB whose dataseg array is filled first
 * @tot_dsds: number of mapped scatter/gather elements to walk
 */
static void qla4xxx_build_scsi_iocbs(struct srb *srb,
				     struct command_t3_entry *cmd_entry,
				     uint16_t tot_dsds)
{
	struct scsi_qla_host *ha;
	uint16_t avail_dsds;
	struct data_seg_a64 *cur_dsd;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;

	cmd = srb->cmd;
	ha = srb->ha;

	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		/* No data being transferred */
		cmd_entry->ttlByteCnt = __constant_cpu_to_le32(0);
		return;
	}

	avail_dsds = COMMAND_SEG;
	cur_dsd = (struct data_seg_a64 *) & (cmd_entry->dataseg[0]);

	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		dma_addr_t sle_dma;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			struct continuation_t1_entry *cont_entry;

			cont_entry = qla4xxx_alloc_cont_entry(ha);
			cur_dsd =
				(struct data_seg_a64 *)
				&cont_entry->dataseg[0];
			avail_dsds = CONTINUE_SEG;
		}

		/* Store the 64-bit DMA address split into low/high words. */
		sle_dma = sg_dma_address(sg);
		cur_dsd->base.addrLow = cpu_to_le32(LSDW(sle_dma));
		cur_dsd->base.addrHigh = cpu_to_le32(MSDW(sle_dma));
		cur_dsd->count = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;

		cur_dsd++;
	}
}
/*
 * qla4_83xx_queue_iocb - tell the 83xx ISP new request(s) were queued by
 * writing the producer index; the readl() flushes the posted write.
 */
void qla4_83xx_queue_iocb(struct scsi_qla_host *ha)
{
	writel(ha->request_in, &ha->qla4_83xx_reg->req_q_in);
	readl(&ha->qla4_83xx_reg->req_q_in);
}
/*
 * qla4_83xx_complete_iocb - tell the 83xx ISP the driver consumed
 * response(s) by writing the consumer index; readl() flushes the write.
 */
void qla4_83xx_complete_iocb(struct scsi_qla_host *ha)
{
	writel(ha->response_out, &ha->qla4_83xx_reg->rsp_q_out);
	readl(&ha->qla4_83xx_reg->rsp_q_out);
}
/**
 * qla4_82xx_queue_iocb - Tell ISP it's got new request(s)
 * @ha: pointer to host adapter structure.
 *
 * This routine notifies the ISP that one or more new request
 * queue entries have been placed on the request queue, by writing the
 * new producer index through the 82xx doorbell register.
 **/
void qla4_82xx_queue_iocb(struct scsi_qla_host *ha)
{
	/* The original computed a doorbell value
	 * (0x14 | func_num << 5 | request_in << 16) into a local that was
	 * never used; only request_in is actually written.  Dead code
	 * removed. */
	qla4_82xx_wr_32(ha, ha->nx_db_wr_ptr, ha->request_in);
}
/**
 * qla4_82xx_complete_iocb - Tell ISP we're done with response(s)
 * @ha: pointer to host adapter structure.
 *
 * This routine notifies the ISP that one or more response/completion
 * queue entries have been processed by the driver.
 * This also clears the interrupt.
 **/
void qla4_82xx_complete_iocb(struct scsi_qla_host *ha)
{
	writel(ha->response_out, &ha->qla4_82xx_reg->rsp_q_out);
	/* Read back to flush the posted write. */
	readl(&ha->qla4_82xx_reg->rsp_q_out);
}
/**
 * qla4xxx_queue_iocb - Tell ISP it's got new request(s)
 * @ha: pointer to host adapter structure.
 *
 * This routine notifies the ISP that one or more new request
 * queue entries have been placed on the request queue.
 **/
void qla4xxx_queue_iocb(struct scsi_qla_host *ha)
{
	writel(ha->request_in, &ha->reg->req_q_in);
	/* Read back to flush the posted write. */
	readl(&ha->reg->req_q_in);
}
/**
 * qla4xxx_complete_iocb - Tell ISP we're done with response(s)
 * @ha: pointer to host adapter structure.
 *
 * This routine notifies the ISP that one or more response/completion
 * queue entries have been processed by the driver.
 * This also clears the interrupt.
 **/
void qla4xxx_complete_iocb(struct scsi_qla_host *ha)
{
	writel(ha->response_out, &ha->reg->rsp_q_out);
	/* Read back to flush the posted write. */
	readl(&ha->reg->rsp_q_out);
}
/**
 * qla4xxx_send_command_to_isp - issues command to HBA
 * @ha: pointer to host adapter structure.
 * @srb: pointer to SCSI Request Block to be sent to ISP
 *
 * This routine is called by qla4xxx_queuecommand to build an ISP
 * command and pass it to the ISP for execution.  Returns QLA_SUCCESS,
 * or QLA_ERROR when the adapter is offline or the ring/IOCB budget is
 * exhausted (the DMA mapping is undone on the error path).
 **/
int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb * srb)
{
	struct scsi_cmnd *cmd = srb->cmd;
	struct ddb_entry *ddb_entry;
	struct command_t3_entry *cmd_entry;
	int nseg;
	uint16_t tot_dsds;
	uint16_t req_cnt;
	unsigned long flags;
	uint32_t index;
	char tag[2];

	/* Get real lun and adapter */
	ddb_entry = srb->ddb;

	tot_dsds = 0;

	/* Acquire hardware specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* The block-layer tag doubles as the IOCB handle. */
	index = (uint32_t)cmd->request->tag;

	/*
	 * Check to see if adapter is online before placing request on
	 * request queue.  If a reset occurs and a request is in the queue,
	 * the firmware will still attempt to process the request, retrieving
	 * garbage for pointers.
	 */
	if (!test_bit(AF_ONLINE, &ha->flags)) {
		DEBUG2(printk("scsi%ld: %s: Adapter OFFLINE! "
			      "Do not issue command.\n",
			      ha->host_no, __func__));
		goto queuing_error;
	}

	/* Calculate the number of request entries needed. */
	nseg = scsi_dma_map(cmd);
	if (nseg < 0)
		goto queuing_error;
	tot_dsds = nseg;

	req_cnt = qla4xxx_calc_request_entries(tot_dsds);
	if (!qla4xxx_space_in_req_ring(ha, req_cnt))
		goto queuing_error;

	/* total iocbs active */
	if ((ha->iocb_cnt + req_cnt) >= ha->iocb_hiwat)
		goto queuing_error;

	/* Build command packet */
	cmd_entry = (struct command_t3_entry *) ha->request_ptr;
	memset(cmd_entry, 0, sizeof(struct command_t3_entry));
	cmd_entry->hdr.entryType = ET_COMMAND;
	cmd_entry->handle = cpu_to_le32(index);
	cmd_entry->target = cpu_to_le16(ddb_entry->fw_ddb_index);

	int_to_scsilun(cmd->device->lun, &cmd_entry->lun);
	cmd_entry->ttlByteCnt = cpu_to_le32(scsi_bufflen(cmd));
	memcpy(cmd_entry->cdb, cmd->cmnd, cmd->cmd_len);
	cmd_entry->dataSegCnt = cpu_to_le16(tot_dsds);
	cmd_entry->hdr.entryCount = req_cnt;

	/* Set data transfer direction control flags
	 * NOTE: Look at data_direction bits iff there is data to be
	 *	 transferred, as the data direction bit is sometimes filled
	 *	 in when there is no data to be transferred */
	cmd_entry->control_flags = CF_NO_DATA;
	if (scsi_bufflen(cmd)) {
		if (cmd->sc_data_direction == DMA_TO_DEVICE)
			cmd_entry->control_flags = CF_WRITE;
		else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
			cmd_entry->control_flags = CF_READ;

		/* Roll full megabytes into total_mbytes_xferred. */
		ha->bytes_xfered += scsi_bufflen(cmd);
		if (ha->bytes_xfered & ~0xFFFFF){
			ha->total_mbytes_xferred += ha->bytes_xfered >> 20;
			ha->bytes_xfered &= 0xFFFFF;
		}
	}

	/* Set tagged queueing control flags */
	cmd_entry->control_flags |= CF_SIMPLE_TAG;
	if (scsi_populate_tag_msg(cmd, tag))
		switch (tag[0]) {
		case MSG_HEAD_TAG:
			cmd_entry->control_flags |= CF_HEAD_TAG;
			break;
		case MSG_ORDERED_TAG:
			cmd_entry->control_flags |= CF_ORDERED_TAG;
			break;
		}

	qla4xxx_advance_req_ring_ptr(ha);
	qla4xxx_build_scsi_iocbs(srb, cmd_entry, tot_dsds);
	/* Make the IOCB(s) visible in memory before ringing the doorbell. */
	wmb();

	srb->cmd->host_scribble = (unsigned char *)(unsigned long)index;

	/* update counters */
	srb->state = SRB_ACTIVE_STATE;
	srb->flags |= SRB_DMA_VALID;

	/* Track IOCB used */
	ha->iocb_cnt += req_cnt;
	srb->iocb_cnt = req_cnt;
	ha->req_q_count -= req_cnt;

	ha->isp_ops->queue_iocb(ha);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_SUCCESS;

queuing_error:
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_ERROR;
}
/*
 * qla4xxx_send_passthru0 - queue a PASSTHRU0 IOCB carrying an iSCSI PDU
 * @task: iSCSI task whose dd_data holds the pre-allocated DMA buffers
 *
 * Builds a passthru IOCB pointing at the task's request/response DMA
 * buffers and rings the doorbell.  Returns QLA_SUCCESS, or QLA_ERROR when
 * the request ring is full.
 */
int qla4xxx_send_passthru0(struct iscsi_task *task)
{
	struct passthru0 *passthru_iocb;
	struct iscsi_session *sess = task->conn->session;
	struct ddb_entry *ddb_entry = sess->dd_data;
	struct scsi_qla_host *ha = ddb_entry->ha;
	struct ql4_task_data *task_data = task->dd_data;
	uint16_t ctrl_flags = 0;
	unsigned long flags;
	int ret = QLA_ERROR;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	task_data->iocb_req_cnt = 1;
	/* Put the IOCB on the request queue */
	if (!qla4xxx_space_in_req_ring(ha, task_data->iocb_req_cnt))
		goto queuing_error;

	passthru_iocb = (struct passthru0 *) ha->request_ptr;

	memset(passthru_iocb, 0, sizeof(struct passthru0));
	passthru_iocb->hdr.entryType = ET_PASSTHRU0;
	passthru_iocb->hdr.systemDefined = SD_ISCSI_PDU;
	passthru_iocb->hdr.entryCount = task_data->iocb_req_cnt;
	passthru_iocb->handle = task->itt;
	passthru_iocb->target = cpu_to_le16(ddb_entry->fw_ddb_index);
	passthru_iocb->timeout = cpu_to_le16(PT_DEFAULT_TIMEOUT);

	/* Setup the out & in DSDs */
	if (task_data->req_len) {
		/* PDU data follows the iscsi_hdr already staged in the
		 * request buffer. */
		memcpy((uint8_t *)task_data->req_buffer +
		       sizeof(struct iscsi_hdr), task->data, task->data_count);
		ctrl_flags |= PT_FLAG_SEND_BUFFER;
		passthru_iocb->out_dsd.base.addrLow =
					cpu_to_le32(LSDW(task_data->req_dma));
		passthru_iocb->out_dsd.base.addrHigh =
					cpu_to_le32(MSDW(task_data->req_dma));
		passthru_iocb->out_dsd.count =
					cpu_to_le32(task->data_count +
						    sizeof(struct iscsi_hdr));
	}
	if (task_data->resp_len) {
		passthru_iocb->in_dsd.base.addrLow =
					cpu_to_le32(LSDW(task_data->resp_dma));
		passthru_iocb->in_dsd.base.addrHigh =
					cpu_to_le32(MSDW(task_data->resp_dma));
		passthru_iocb->in_dsd.count =
					cpu_to_le32(task_data->resp_len);
	}

	ctrl_flags |= (PT_FLAG_ISCSI_PDU | PT_FLAG_WAIT_4_RESPONSE);
	passthru_iocb->control_flags = cpu_to_le16(ctrl_flags);

	/* Update the request pointer */
	qla4xxx_advance_req_ring_ptr(ha);
	/* Make the IOCB visible in memory before ringing the doorbell. */
	wmb();

	/* Track IOCB used */
	ha->iocb_cnt += task_data->iocb_req_cnt;
	ha->req_q_count -= task_data->iocb_req_cnt;
	ha->isp_ops->queue_iocb(ha);
	ret = QLA_SUCCESS;

queuing_error:
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return ret;
}
/* Allocate a zeroed mailbox request block tied to @ha; NULL on OOM. */
static struct mrb *qla4xxx_get_new_mrb(struct scsi_qla_host *ha)
{
	struct mrb *new_mrb = kzalloc(sizeof(*new_mrb), GFP_KERNEL);

	if (new_mrb)
		new_mrb->ha = ha;

	return new_mrb;
}
/*
 * qla4xxx_send_mbox_iocb - queue a mailbox-command IOCB
 * @ha: Pointer to host adapter structure.
 * @mrb: mailbox request block to track the command; registered in
 *       ha->active_mrb_array under the chosen index on success
 * @in_mbox: eight 32-bit mailbox input words copied into the IOCB
 *
 * Returns QLA_SUCCESS, or QLA_ERROR when no request-queue entry is free.
 */
static int qla4xxx_send_mbox_iocb(struct scsi_qla_host *ha, struct mrb *mrb,
				  uint32_t *in_mbox)
{
	int rval = QLA_SUCCESS;
	uint32_t i;
	unsigned long flags;
	uint32_t index = 0;

	/* Acquire hardware specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Get pointer to the queue entry for the mailbox IOCB */
	rval = qla4xxx_get_req_pkt(ha, (struct queue_entry **) &(mrb->mbox));
	if (rval != QLA_SUCCESS)
		goto exit_mbox_iocb;

	index = ha->mrb_index;
	/* get valid mrb index - slot 0 is never used */
	for (i = 0; i < MAX_MRB; i++) {
		index++;
		if (index == MAX_MRB)
			index = 1;
		if (ha->active_mrb_array[index] == NULL) {
			ha->mrb_index = index;
			break;
		}
	}
	/* NOTE(review): if every slot is occupied the loop falls through and
	 * the mrb at 'index' is silently overwritten - confirm whether slot
	 * exhaustion can happen in practice. */

	mrb->iocb_cnt = 1;
	ha->active_mrb_array[index] = mrb;
	mrb->mbox->handle = index;
	mrb->mbox->hdr.entryType = ET_MBOX_CMD;
	mrb->mbox->hdr.entryCount = mrb->iocb_cnt;
	/* 32 bytes = eight 32-bit mailbox words */
	memcpy(mrb->mbox->in_mbox, in_mbox, 32);
	mrb->mbox_cmd = in_mbox[0];
	wmb();

	ha->iocb_cnt += mrb->iocb_cnt;
	ha->isp_ops->queue_iocb(ha);
exit_mbox_iocb:
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return rval;
}
/*
 * qla4xxx_ping_iocb - issue a MBOX_CMD_PING via a mailbox IOCB
 * @ha: Pointer to host adapter structure.
 * @options: ping option flags placed in mailbox word 1
 * @payload_size: ping payload size placed in mailbox word 6
 * @pid: caller's ping identifier stored in the mrb
 * @ipaddr: 16-byte address buffer (copied 4 bytes at a time into
 *          mailbox words 2-5)
 *
 * On success the mrb ownership passes to the completion path; on failure
 * it is freed here.  Returns QLA_SUCCESS or QLA_ERROR.
 */
int qla4xxx_ping_iocb(struct scsi_qla_host *ha, uint32_t options,
		      uint32_t payload_size, uint32_t pid, uint8_t *ipaddr)
{
	uint32_t in_mbox[8];
	struct mrb *mrb = NULL;
	int rval = QLA_SUCCESS;

	memset(in_mbox, 0, sizeof(in_mbox));

	mrb = qla4xxx_get_new_mrb(ha);
	if (!mrb) {
		DEBUG2(ql4_printk(KERN_WARNING, ha, "%s: fail to get new mrb\n",
				  __func__));
		rval = QLA_ERROR;
		goto exit_ping;
	}

	in_mbox[0] = MBOX_CMD_PING;
	in_mbox[1] = options;
	memcpy(&in_mbox[2], &ipaddr[0], 4);
	memcpy(&in_mbox[3], &ipaddr[4], 4);
	memcpy(&in_mbox[4], &ipaddr[8], 4);
	memcpy(&in_mbox[5], &ipaddr[12], 4);
	in_mbox[6] = payload_size;

	mrb->pid = pid;
	rval = qla4xxx_send_mbox_iocb(ha, mrb, in_mbox);
	if (rval != QLA_SUCCESS)
		goto exit_ping;

	/* mrb now owned by the completion handler - do not free. */
	return rval;
exit_ping:
	kfree(mrb);
	return rval;
}

File diff suppressed because it is too large Load diff

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,256 @@
/*
* QLogic iSCSI HBA Driver
* Copyright (c) 2003-2013 QLogic Corporation
*
* See LICENSE.qla4xxx for copyright and licensing details.
*/
#include "ql4_def.h"
#include "ql4_glbl.h"
#include "ql4_dbg.h"
#include "ql4_inline.h"
/*
 * eeprom_cmd - write @cmd to the NVRAM serial-interface register, read it
 * back to flush the posted write, then wait 1us for the pins to settle.
 */
static inline void eeprom_cmd(uint32_t cmd, struct scsi_qla_host *ha)
{
	writel(cmd, isp_nvram(ha));
	readl(isp_nvram(ha));
	udelay(1);
}
/* NVRAM size in 16-bit words: FM93C66A on 4010, FM93C86A otherwise. */
static inline int eeprom_size(struct scsi_qla_host *ha)
{
	if (is_qla4010(ha))
		return FM93C66A_SIZE_16;

	return FM93C86A_SIZE_16;
}
/* Number of serial address bits the fitted EEPROM expects. */
static inline int eeprom_no_addr_bits(struct scsi_qla_host *ha)
{
	if (is_qla4010(ha))
		return FM93C56A_NO_ADDR_BITS_16;

	return FM93C86A_NO_ADDR_BITS_16;
}
/* All supported parts transfer 16 data bits per NVRAM word. */
static inline int eeprom_no_data_bits(struct scsi_qla_host *ha)
{
	return FM93C56A_DATA_BITS_16;
}
/*
 * fm93c56a_select - assert the EEPROM chip-select line and cache the base
 * pin state in ha->eeprom_cmd_data for subsequent bit-banging.
 */
static int fm93c56a_select(struct scsi_qla_host * ha)
{
	DEBUG5(printk(KERN_ERR "fm93c56a_select:\n"));

	/* 0x000f0000 presumably write-enables the NVRAM control bits in the
	 * register's high half - TODO confirm against the register spec. */
	ha->eeprom_cmd_data = AUBURN_EEPROM_CS_1 | 0x000f0000;
	eeprom_cmd(ha->eeprom_cmd_data, ha);
	return 1;
}
/*
 * fm93c56a_clock_out_bits - serially clock the top @nbits of @value out on
 * the EEPROM DO line, MSB first.
 * @ha: Pointer to host adapter structure.
 * @value: bit pattern to transmit
 * @nbits: number of bits to clock out
 *
 * The DO pin is only rewritten when the bit value changes; every bit gets
 * a clock rise/fall pair.
 */
static void fm93c56a_clock_out_bits(struct scsi_qla_host *ha, int value,
				    int nbits)
{
	int i;
	int mask = 1 << (nbits - 1);
	int dataBit;
	/* Force the previous data bit to be different. */
	int previousBit = 0xffff;

	for (i = 0; i < nbits; i++) {
		dataBit = (value & mask) ? AUBURN_EEPROM_DO_1 :
					   AUBURN_EEPROM_DO_0;
		if (previousBit != dataBit) {
			/*
			 * If the bit changed, then change the DO state to
			 * match.
			 */
			eeprom_cmd(ha->eeprom_cmd_data | dataBit, ha);
			previousBit = dataBit;
		}
		eeprom_cmd(ha->eeprom_cmd_data | dataBit |
			   AUBURN_EEPROM_CLK_RISE, ha);
		eeprom_cmd(ha->eeprom_cmd_data | dataBit |
			   AUBURN_EEPROM_CLK_FALL, ha);
		value = value << 1;
	}
}

/*
 * fm93c56a_cmd - send a Microwire command: start bit, then the command
 * opcode bits, then the address bits.
 * @ha: Pointer to host adapter structure.
 * @cmd: command opcode (e.g. FM93C56A_READ)
 * @addr: word address within the EEPROM
 *
 * The two original identical bit-banging loops are factored into
 * fm93c56a_clock_out_bits(); behavior is unchanged.  Always returns 1.
 */
static int fm93c56a_cmd(struct scsi_qla_host * ha, int cmd, int addr)
{
	/* Clock in a zero, then do the start bit. */
	eeprom_cmd(ha->eeprom_cmd_data | AUBURN_EEPROM_DO_1, ha);
	eeprom_cmd(ha->eeprom_cmd_data | AUBURN_EEPROM_DO_1 |
		   AUBURN_EEPROM_CLK_RISE, ha);
	eeprom_cmd(ha->eeprom_cmd_data | AUBURN_EEPROM_DO_1 |
		   AUBURN_EEPROM_CLK_FALL, ha);

	fm93c56a_clock_out_bits(ha, cmd, FM93C56A_CMD_BITS);
	fm93c56a_clock_out_bits(ha, addr, eeprom_no_addr_bits(ha));

	return 1;
}
/* Drop the EEPROM chip-select line, ending the current transaction. */
static int fm93c56a_deselect(struct scsi_qla_host * ha)
{
	ha->eeprom_cmd_data = AUBURN_EEPROM_CS_0 | 0x000f0000;
	eeprom_cmd(ha->eeprom_cmd_data, ha);
	return 1;
}
/*
 * fm93c56a_datain - clock a data word in from the EEPROM DI line, MSB
 * first, one bit per clock rise/fall pair, and store it in *@value.
 * Always returns 1.
 */
static int fm93c56a_datain(struct scsi_qla_host *ha, unsigned short *value)
{
	int word = 0;
	int bit;
	int i;

	/* Read the data bits.  (Per the original note, the first bit is a
	 * dummy that is clocked over.) */
	for (i = 0; i < eeprom_no_data_bits(ha); i++) {
		eeprom_cmd(ha->eeprom_cmd_data | AUBURN_EEPROM_CLK_RISE, ha);
		eeprom_cmd(ha->eeprom_cmd_data | AUBURN_EEPROM_CLK_FALL, ha);
		bit = (readw(isp_nvram(ha)) & AUBURN_EEPROM_DI_1) ? 1 : 0;
		word = (word << 1) | bit;
	}

	*value = word;
	return 1;
}
/*
 * eeprom_readword - select the part, issue a READ for @eepromAddr, clock
 * the 16-bit word into *@value, then deselect.  Always returns 1.
 */
static int eeprom_readword(int eepromAddr, u16 * value,
			   struct scsi_qla_host * ha)
{
	fm93c56a_select(ha);
	fm93c56a_cmd(ha, FM93C56A_READ, eepromAddr);
	fm93c56a_datain(ha, value);
	fm93c56a_deselect(ha);
	return 1;
}
/*
 * rd_nvram_word - read the 16-bit NVRAM word at half-word @offset.
 * Hardware_lock must be held by the caller.
 */
u16 rd_nvram_word(struct scsi_qla_host * ha, int offset)
{
	u16 val = 0;

	/* NOTE: NVRAM uses half-word addresses */
	eeprom_readword(offset, &val, ha);
	return val;
}
/*
 * rd_nvram_byte - read one byte from NVRAM.
 *
 * NVRAM is addressed in 16-bit little-endian words, so fetch the word
 * containing @offset and select its high or low byte.
 */
u8 rd_nvram_byte(struct scsi_qla_host *ha, int offset)
{
	u16 word;

	/* For positive offsets, offset/2 equals (offset-1)/2 when offset is
	 * odd, so a single expression covers both halves of the word. */
	word = le16_to_cpu(rd_nvram_word(ha, offset / 2));

	return (offset & 0x1) ? (u8)(word >> 8) : (u8)(word & 0x00ff);
}
/*
 * qla4xxx_is_nvram_configuration_valid - checksum the whole NVRAM.
 *
 * Sums every 16-bit word under the hardware lock; a correctly programmed
 * part sums to zero.  Returns QLA_SUCCESS when valid, QLA_ERROR otherwise.
 */
int qla4xxx_is_nvram_configuration_valid(struct scsi_qla_host * ha)
{
	uint16_t sum = 0;
	uint32_t word;
	unsigned long flags;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	for (word = 0; word < eeprom_size(ha); word++)
		sum += rd_nvram_word(ha, word);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return (sum == 0) ? QLA_SUCCESS : QLA_ERROR;
}
/*************************************************************************
*
* Hardware Semaphore routines
*
*************************************************************************/
/*
 * ql4xxx_sem_spinlock - acquire a hardware semaphore, retrying once per
 * second for up to 30 seconds.
 * @ha: Pointer to host adapter structure.
 * @sem_mask: semaphore mask (high half selects the read-back bits)
 * @sem_bits: value that, when read back, indicates the lock was won
 *
 * Returns QLA_SUCCESS when acquired, QLA_ERROR on timeout.  Sleeps, so it
 * must not be called from atomic context.
 */
int ql4xxx_sem_spinlock(struct scsi_qla_host * ha, u32 sem_mask, u32 sem_bits)
{
	uint32_t value;
	unsigned long flags;
	unsigned int seconds = 30;

	DEBUG2(printk("scsi%ld : Trying to get SEM lock - mask= 0x%x, code = "
		      "0x%x\n", ha->host_no, sem_mask, sem_bits));
	do {
		/* Write the request and read back the grant atomically with
		 * respect to other register accesses. */
		spin_lock_irqsave(&ha->hardware_lock, flags);
		writel((sem_mask | sem_bits), isp_semaphore(ha));
		value = readw(isp_semaphore(ha));
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
		if ((value & (sem_mask >> 16)) == sem_bits) {
			DEBUG2(printk("scsi%ld : Got SEM LOCK - mask= 0x%x, "
				      "code = 0x%x\n", ha->host_no,
				      sem_mask, sem_bits));
			return QLA_SUCCESS;
		}
		ssleep(1);
	} while (--seconds);
	return QLA_ERROR;
}
/*
 * ql4xxx_sem_unlock - release a hardware semaphore by writing the bare
 * @sem_mask (grant bits cleared); the readl() flushes the posted write.
 */
void ql4xxx_sem_unlock(struct scsi_qla_host * ha, u32 sem_mask)
{
	unsigned long flags;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	writel(sem_mask, isp_semaphore(ha));
	readl(isp_semaphore(ha));
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	DEBUG2(printk("scsi%ld : UNLOCK SEM - mask= 0x%x\n", ha->host_no,
		      sem_mask));
}
/*
 * ql4xxx_sem_lock - single non-blocking attempt to take a hardware
 * semaphore.  Returns 1 when the lock was won, 0 otherwise (contrast with
 * ql4xxx_sem_spinlock(), which retries and returns QLA_* codes).
 */
int ql4xxx_sem_lock(struct scsi_qla_host * ha, u32 sem_mask, u32 sem_bits)
{
	uint32_t value;
	unsigned long flags;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	writel((sem_mask | sem_bits), isp_semaphore(ha));
	value = readw(isp_semaphore(ha));
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	if ((value & (sem_mask >> 16)) == sem_bits) {
		DEBUG2(printk("scsi%ld : Got SEM LOCK - mask= 0x%x, code = "
			      "0x%x, sema code=0x%x\n", ha->host_no,
			      sem_mask, sem_bits, value));
		return 1;
	}
	return 0;
}

View file

@ -0,0 +1,254 @@
/*
* QLogic iSCSI HBA Driver
* Copyright (c) 2003-2013 QLogic Corporation
*
* See LICENSE.qla4xxx for copyright and licensing details.
*/
#ifndef _QL4XNVRM_H_
#define _QL4XNVRM_H_
/**
* AM29LV Flash definitions
**/
#define FM93C56A_SIZE_8 0x100
#define FM93C56A_SIZE_16 0x80
#define FM93C66A_SIZE_8 0x200
#define FM93C66A_SIZE_16 0x100/* 4010 */
#define FM93C86A_SIZE_16 0x400/* 4022 */
#define FM93C56A_START 0x1
/* Commands */
#define FM93C56A_READ 0x2
#define FM93C56A_WEN 0x0
#define FM93C56A_WRITE 0x1
#define FM93C56A_WRITE_ALL 0x0
#define FM93C56A_WDS 0x0
#define FM93C56A_ERASE 0x3
#define FM93C56A_ERASE_ALL 0x0
/* Command Extensions */
#define FM93C56A_WEN_EXT 0x3
#define FM93C56A_WRITE_ALL_EXT 0x1
#define FM93C56A_WDS_EXT 0x0
#define FM93C56A_ERASE_ALL_EXT 0x2
/* Address Bits */
#define FM93C56A_NO_ADDR_BITS_16 8 /* 4010 */
#define FM93C56A_NO_ADDR_BITS_8 9 /* 4010 */
#define FM93C86A_NO_ADDR_BITS_16 10 /* 4022 */
/* Data Bits */
#define FM93C56A_DATA_BITS_16 16
#define FM93C56A_DATA_BITS_8 8
/* Special Bits */
#define FM93C56A_READ_DUMMY_BITS 1
#define FM93C56A_READY 0
#define FM93C56A_BUSY 1
#define FM93C56A_CMD_BITS 2
/* Auburn Bits */
#define AUBURN_EEPROM_DI 0x8
#define AUBURN_EEPROM_DI_0 0x0
#define AUBURN_EEPROM_DI_1 0x8
#define AUBURN_EEPROM_DO 0x4
#define AUBURN_EEPROM_DO_0 0x0
#define AUBURN_EEPROM_DO_1 0x4
#define AUBURN_EEPROM_CS 0x2
#define AUBURN_EEPROM_CS_0 0x0
#define AUBURN_EEPROM_CS_1 0x2
#define AUBURN_EEPROM_CLK_RISE 0x1
#define AUBURN_EEPROM_CLK_FALL 0x0
/**/
/* EEPROM format */
/**/
/*
 * BIOS boot-parameter block as stored in NVRAM.  Bitfield order matches
 * the on-NVRAM layout - do not reorder members.
 */
struct bios_params {
	uint16_t SpinUpDelay:1;
	uint16_t BIOSDisable:1;
	uint16_t MMAPEnable:1;
	uint16_t BootEnable:1;
	uint16_t Reserved0:12;
	uint8_t bootID0:7;		/* boot target 0 id */
	uint8_t bootID0Valid:1;
	uint8_t bootLUN0[8];
	uint8_t bootID1:7;		/* boot target 1 id */
	uint8_t bootID1Valid:1;
	uint8_t bootLUN1[8];
	uint16_t MaxLunsPerTarget;
	uint8_t Reserved1[10];
};
/* Per-port MAC configuration block within the isp4022 NVRAM layout. */
struct eeprom_port_cfg {

	/* MTU MAC 0 */
	u16 etherMtu_mac;

	/* Flow Control MAC 0 */
	u16 pauseThreshold_mac;
	u16 resumeThreshold_mac;
	u16 reserved[13];
};
/* Per-PCI-function configuration block within the isp4022 NVRAM layout. */
struct eeprom_function_cfg {
	u8 reserved[30];

	/* MAC ADDR */
	u8 macAddress[6];
	u8 macAddressSecondary[6];
	u16 subsysVendorId;
	u16 subsysDeviceId;
};
/*
 * On-NVRAM EEPROM layout: a union of the isp4010 and isp4022 formats.
 * Offsets in the trailing comments are byte addresses within the part;
 * field order and sizes mirror the hardware layout - do not reorder.
 */
struct eeprom_data {
	union {
		struct {	/* isp4010 */
			u8 asic_id[4];	/* x00 */
			u8 version;	/* x04 */
			u8 reserved;	/* x05 */
			u16 board_id;	/* x06 */
#define   EEPROM_BOARDID_ELDORADO    1
#define   EEPROM_BOARDID_PLACER      2

#define EEPROM_SERIAL_NUM_SIZE       16
			u8 serial_number[EEPROM_SERIAL_NUM_SIZE]; /* x08 */

			/* ExtHwConfig: */
			/* Offset = 24bytes
			 *
			 * | SSRAM Size|     |ST|PD|SDRAM SZ| W| B| SP	|  |
			 * |15|14|13|12|11|10| 9| 8| 7| 6| 5| 4| 3| 2| 1| 0|
			 * +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
			 */
			u16 ext_hw_conf;	/* x18 */
			u8 mac0[6];	/* x1A */
			u8 mac1[6];	/* x20 */
			u8 mac2[6];	/* x26 */
			u8 mac3[6];	/* x2C */
			u16 etherMtu;	/* x32 */
			u16 macConfig;	/* x34 */
#define  MAC_CONFIG_ENABLE_ANEG     0x0001
#define  MAC_CONFIG_ENABLE_PAUSE    0x0002
			u16 phyConfig;	/* x36 */
#define  PHY_CONFIG_PHY_ADDR_MASK             0x1f
#define  PHY_CONFIG_ENABLE_FW_MANAGEMENT_MASK 0x20
			u16 reserved_56;	/* x38 */

#define EEPROM_UNUSED_1_SIZE   2
			u8 unused_1[EEPROM_UNUSED_1_SIZE];	/* x3A */
			u16 bufletSize;	/* x3C */
			u16 bufletCount;	/* x3E */
			u16 bufletPauseThreshold;	/* x40 */
			u16 tcpWindowThreshold50;	/* x42 */
			u16 tcpWindowThreshold25;	/* x44 */
			u16 tcpWindowThreshold0;	/* x46 */
			u16 ipHashTableBaseHi;	/* x48 */
			u16 ipHashTableBaseLo;	/* x4A */
			u16 ipHashTableSize;	/* x4C */
			u16 tcpHashTableBaseHi;	/* x4E */
			u16 tcpHashTableBaseLo;	/* x50 */
			u16 tcpHashTableSize;	/* x52 */
			u16 ncbTableBaseHi;	/* x54 */
			u16 ncbTableBaseLo;	/* x56 */
			u16 ncbTableSize;	/* x58 */
			u16 drbTableBaseHi;	/* x5A */
			u16 drbTableBaseLo;	/* x5C */
			u16 drbTableSize;	/* x5E */

#define EEPROM_UNUSED_2_SIZE   4
			u8 unused_2[EEPROM_UNUSED_2_SIZE];	/* x60 */
			u16 ipReassemblyTimeout;	/* x64 */
			u16 tcpMaxWindowSizeHi;	/* x66 */
			u16 tcpMaxWindowSizeLo;	/* x68 */
			u32 net_ip_addr0;	/* x6A Added for TOE
						 * functionality. */
			u32 net_ip_addr1;	/* x6E */
			u32 scsi_ip_addr0;	/* x72 */
			u32 scsi_ip_addr1;	/* x76 */
#define EEPROM_UNUSED_3_SIZE   128	/* changed from 144 to account
					 * for ip addresses */
			u8 unused_3[EEPROM_UNUSED_3_SIZE];	/* x7A */
			u16 subsysVendorId_f0;	/* xFA */
			u16 subsysDeviceId_f0;	/* xFC */

			/* Address = 0x7F */
#define FM93C56A_SIGNATURE  0x9356
#define FM93C66A_SIGNATURE  0x9366
			u16 signature;	/* xFE */

#define EEPROM_UNUSED_4_SIZE   250
			u8 unused_4[EEPROM_UNUSED_4_SIZE];	/* x100 */
			u16 subsysVendorId_f1;	/* x1FA */
			u16 subsysDeviceId_f1;	/* x1FC */
			u16 checksum;	/* x1FE */
		} __attribute__ ((packed)) isp4010;
		struct {	/* isp4022 */
			u8 asicId[4];	/* x00 */
			u8 version;	/* x04 */
			u8 reserved_5;	/* x05 */
			u16 boardId;	/* x06 */
			u8 boardIdStr[16];	/* x08 */
			u8 serialNumber[16];	/* x18 */

			/* External Hardware Configuration */
			u16 ext_hw_conf;	/* x28 */

			/* MAC 0 CONFIGURATION */
			struct eeprom_port_cfg macCfg_port0;	/* x2A */

			/* MAC 1 CONFIGURATION */
			struct eeprom_port_cfg macCfg_port1;	/* x4A */

			/* DDR SDRAM Configuration */
			u16 bufletSize;	/* x6A */
			u16 bufletCount;	/* x6C */
			u16 tcpWindowThreshold50;	/* x6E */
			u16 tcpWindowThreshold25;	/* x70 */
			u16 tcpWindowThreshold0;	/* x72 */
			u16 ipHashTableBaseHi;	/* x74 */
			u16 ipHashTableBaseLo;	/* x76 */
			u16 ipHashTableSize;	/* x78 */
			u16 tcpHashTableBaseHi;	/* x7A */
			u16 tcpHashTableBaseLo;	/* x7C */
			u16 tcpHashTableSize;	/* x7E */
			u16 ncbTableBaseHi;	/* x80 */
			u16 ncbTableBaseLo;	/* x82 */
			u16 ncbTableSize;	/* x84 */
			u16 drbTableBaseHi;	/* x86 */
			u16 drbTableBaseLo;	/* x88 */
			u16 drbTableSize;	/* x8A */
			u16 reserved_142[4];	/* x8C */

			/* TCP/IP Parameters */
			u16 ipReassemblyTimeout;	/* x94 */
			u16 tcpMaxWindowSize;	/* x96 */
			u16 ipSecurity;	/* x98 */
			u8 reserved_156[294];	/* x9A */
			u16 qDebug[8];	/* QLOGIC USE ONLY   x1C0 */
			struct eeprom_function_cfg funcCfg_fn0;	/* x1D0 */
			u16 reserved_510;	/* x1FE */

			/* Address = 512 */
			u8 oemSpace[432];	/* x200 */
			struct bios_params sBIOSParams_fn1;	/* x3B0 */
			struct eeprom_function_cfg funcCfg_fn1;	/* x3D0 */
			u16 reserved_1022;	/* x3FE */

			/* Address = 1024 */
			u8 reserved_1024[464];	/* x400 */
			struct eeprom_function_cfg funcCfg_fn2;	/* x5D0 */
			u16 reserved_1534;	/* x5FE */

			/* Address = 1536 */
			u8 reserved_1536[432];	/* x600 */
			struct bios_params sBIOSParams_fn3;	/* x7B0 */
			struct eeprom_function_cfg funcCfg_fn3;	/* x7D0 */
			u16 checksum;	/* x7FE */
		} __attribute__ ((packed)) isp4022;
	};
};
#endif /* _QL4XNVRM_H_ */

File diff suppressed because it is too large Load diff

File diff suppressed because it is too large Load diff

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,8 @@
/*
* QLogic iSCSI HBA Driver
* Copyright (c) 2003-2013 QLogic Corporation
*
* See LICENSE.qla4xxx for copyright and licensing details.
*/
#define QLA4XXX_DRIVER_VERSION "5.04.00-k6"