Diffstat (limited to 'drivers/message')
-rw-r--r--  drivers/message/fusion/mptbase.c   |  14
-rw-r--r--  drivers/message/fusion/mptbase.h   |  34
-rw-r--r--  drivers/message/fusion/mptctl.c    |   4
-rw-r--r--  drivers/message/fusion/mptfc.c     |  24
-rw-r--r--  drivers/message/fusion/mptsas.c    |  55
-rw-r--r--  drivers/message/fusion/mptscsih.c  | 968
-rw-r--r--  drivers/message/fusion/mptscsih.h  |   2
-rw-r--r--  drivers/message/fusion/mptspi.c    |  24
-rw-r--r--  drivers/message/i2o/Kconfig        |  12
-rw-r--r--  drivers/message/i2o/bus-osm.c      |  23
-rw-r--r--  drivers/message/i2o/config-osm.c   |   2
-rw-r--r--  drivers/message/i2o/core.h         |  20
-rw-r--r--  drivers/message/i2o/device.c       | 339
-rw-r--r--  drivers/message/i2o/driver.c       |  12
-rw-r--r--  drivers/message/i2o/exec-osm.c     | 114
-rw-r--r--  drivers/message/i2o/i2o_block.c    | 190
-rw-r--r--  drivers/message/i2o/i2o_config.c   | 196
-rw-r--r--  drivers/message/i2o/i2o_lan.h      |  38
-rw-r--r--  drivers/message/i2o/i2o_proc.c     |   2
-rw-r--r--  drivers/message/i2o/i2o_scsi.c     |  89
-rw-r--r--  drivers/message/i2o/iop.c          | 356
-rw-r--r--  drivers/message/i2o/pci.c          |   7
22 files changed, 1229 insertions(+), 1296 deletions(-)
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index 4262a22adc2..537836068c4 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -313,13 +313,13 @@ mpt_reply(MPT_ADAPTER *ioc, u32 pa)
u32 log_info = le32_to_cpu(mr->u.reply.IOCLogInfo);
if (ioc->bus_type == FC)
mpt_fc_log_info(ioc, log_info);
- else if (ioc->bus_type == SCSI)
+ else if (ioc->bus_type == SPI)
mpt_sp_log_info(ioc, log_info);
else if (ioc->bus_type == SAS)
mpt_sas_log_info(ioc, log_info);
}
if (ioc_stat & MPI_IOCSTATUS_MASK) {
- if (ioc->bus_type == SCSI &&
+ if (ioc->bus_type == SPI &&
cb_idx != mpt_stm_index &&
cb_idx != mpt_lan_index)
mpt_sp_ioc_info(ioc, (u32)ioc_stat, mf);
@@ -1376,7 +1376,7 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
}
else if (pdev->device == MPI_MANUFACTPAGE_DEVID_53C1030) {
ioc->prod_name = "LSI53C1030";
- ioc->bus_type = SCSI;
+ ioc->bus_type = SPI;
/* 1030 Chip Fix. Disable Split transactions
* for PCIX. Set MOST bits to zero if Rev < C0( = 8).
*/
@@ -1389,7 +1389,7 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
}
else if (pdev->device == MPI_MANUFACTPAGE_DEVID_1030_53C1035) {
ioc->prod_name = "LSI53C1035";
- ioc->bus_type = SCSI;
+ ioc->bus_type = SPI;
}
else if (pdev->device == MPI_MANUFACTPAGE_DEVID_SAS1064) {
ioc->prod_name = "LSISAS1064";
@@ -3042,7 +3042,7 @@ mpt_downloadboot(MPT_ADAPTER *ioc, MpiFwHeader_t *pFwHeader, int sleepFlag)
/* Clear the internal flash bad bit - autoincrementing register,
* so must do two writes.
*/
- if (ioc->bus_type == SCSI) {
+ if (ioc->bus_type == SPI) {
/*
* 1030 and 1035 H/W errata, workaround to access
* the ClearFlashBadSignatureBit
@@ -3152,7 +3152,7 @@ KickStart(MPT_ADAPTER *ioc, int force, int sleepFlag)
int cnt,cntdn;
dinitprintk((KERN_WARNING MYNAM ": KickStarting %s!\n", ioc->name));
- if (ioc->bus_type == SCSI) {
+ if (ioc->bus_type == SPI) {
/* Always issue a Msg Unit Reset first. This will clear some
* SCSI bus hang conditions.
*/
@@ -3580,7 +3580,7 @@ initChainBuffers(MPT_ADAPTER *ioc)
dinitprintk((KERN_INFO MYNAM ": %s Now numSGE=%d num_sge=%d num_chain=%d\n",
ioc->name, numSGE, num_sge, num_chain));
- if (ioc->bus_type == SCSI)
+ if (ioc->bus_type == SPI)
num_chain *= MPT_SCSI_CAN_QUEUE;
else
num_chain *= MPT_FC_CAN_QUEUE;
diff --git a/drivers/message/fusion/mptbase.h b/drivers/message/fusion/mptbase.h
index bac8eb4186d..6c48d1f54ac 100644
--- a/drivers/message/fusion/mptbase.h
+++ b/drivers/message/fusion/mptbase.h
@@ -76,8 +76,8 @@
#define COPYRIGHT "Copyright (c) 1999-2005 " MODULEAUTHOR
#endif
-#define MPT_LINUX_VERSION_COMMON "3.03.04"
-#define MPT_LINUX_PACKAGE_NAME "@(#)mptlinux-3.03.04"
+#define MPT_LINUX_VERSION_COMMON "3.03.05"
+#define MPT_LINUX_PACKAGE_NAME "@(#)mptlinux-3.03.05"
#define WHAT_MAGIC_STRING "@" "(" "#" ")"
#define show_mptmod_ver(s,ver) \
@@ -321,7 +321,7 @@ typedef struct _SYSIF_REGS
* Dynamic Multi-Pathing specific stuff...
*/
-/* VirtDevice negoFlags field */
+/* VirtTarget negoFlags field */
#define MPT_TARGET_NO_NEGO_WIDE 0x01
#define MPT_TARGET_NO_NEGO_SYNC 0x02
#define MPT_TARGET_NO_NEGO_QAS 0x04
@@ -330,8 +330,7 @@ typedef struct _SYSIF_REGS
/*
* VirtDevice - FC LUN device or SCSI target device
*/
-typedef struct _VirtDevice {
- struct scsi_device *device;
+typedef struct _VirtTarget {
u8 tflags;
u8 ioc_id;
u8 target_id;
@@ -342,21 +341,18 @@ typedef struct _VirtDevice {
u8 negoFlags; /* bit field, see above */
u8 raidVolume; /* set, if RAID Volume */
u8 type; /* byte 0 of Inquiry data */
- u8 cflags; /* controller flags */
- u8 rsvd1raid;
- u16 fc_phys_lun;
- u16 fc_xlat_lun;
u32 num_luns;
u32 luns[8]; /* Max LUNs is 256 */
- u8 pad[4];
u8 inq_data[8];
- /* IEEE Registered Extended Identifier
- obtained via INQUIRY VPD page 0x83 */
- /* NOTE: Do not separate uniq_prepad and uniq_data
- as they are treateed as a single entity in the code */
- u8 uniq_prepad[8];
- u8 uniq_data[20];
- u8 pad2[4];
+} VirtTarget;
+
+typedef struct _VirtDevice {
+ VirtTarget *vtarget;
+ u8 ioc_id;
+ u8 bus_id;
+ u8 target_id;
+ u8 configured_lun;
+ u32 lun;
} VirtDevice;
/*
@@ -903,7 +899,7 @@ typedef struct _MPT_LOCAL_REPLY {
typedef enum {
FC,
- SCSI,
+ SPI,
SAS
} BUS_TYPE;
@@ -912,7 +908,7 @@ typedef struct _MPT_SCSI_HOST {
int port;
u32 pad0;
struct scsi_cmnd **ScsiLookup;
- VirtDevice **Targets;
+ VirtTarget **Targets;
MPT_LOCAL_REPLY *pLocal; /* used for internal commands */
struct timer_list timer;
/* Pool of memory for holding SCpnts before doing
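The header hunk above is the heart of this patch: the old per-ID VirtDevice is split into a shared per-target VirtTarget plus a thin per-LUN VirtDevice that carries little more than a back-pointer and its own lun. The standalone sketch below is a toy model of that ownership scheme, not driver code; the struct and function names are invented stand-ins, and only the num_luns refcount and the 8 x 32-bit LUN bitmap are taken from the structs above (the real driver manipulates them in its slave_alloc/slave_configure/slave_destroy hooks, shown later in the mptscsih.c hunks).

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Shared per-target state, one per SCSI ID (cf. VirtTarget). */
struct target_state {
	unsigned int num_luns;
	unsigned int luns[8];		/* 256-LUN bitmap: 8 x 32 bits */
};

/* Per-LUN state, one per scsi_device (cf. VirtDevice). */
struct lun_state {
	struct target_state *target;	/* back-pointer, like vdev->vtarget */
	unsigned int lun;
};

/* First LUN of a target sets up the shared state; later LUNs only bump the count. */
static struct lun_state *lun_alloc(struct target_state *t, unsigned int lun)
{
	struct lun_state *d = calloc(1, sizeof(*d));
	if (!d)
		return NULL;
	d->target = t;
	d->lun = lun;
	if (t->num_luns == 0)
		printf("first LUN: initialize per-target state\n");
	t->luns[lun >> 5] |= 1u << (lun % 32);	/* 32 LUNs per bitmap word */
	t->num_luns++;
	return d;
}

/* Last LUN going away means the per-target state can be torn down too. */
static void lun_destroy(struct lun_state *d)
{
	struct target_state *t = d->target;

	t->luns[d->lun >> 5] &= ~(1u << (d->lun % 32));
	if (--t->num_luns == 0)
		printf("last LUN: tear down per-target state\n");
	free(d);
}

int main(void)
{
	struct target_state t;
	struct lun_state *l0, *l1;

	memset(&t, 0, sizeof(t));
	l0 = lun_alloc(&t, 0);
	l1 = lun_alloc(&t, 1);
	if (!l0 || !l1)
		return 1;
	lun_destroy(l0);
	lun_destroy(l1);
	return 0;
}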
diff --git a/drivers/message/fusion/mptctl.c b/drivers/message/fusion/mptctl.c
index 602138f8544..959d2c5951b 100644
--- a/drivers/message/fusion/mptctl.c
+++ b/drivers/message/fusion/mptctl.c
@@ -1245,7 +1245,7 @@ mptctl_gettargetinfo (unsigned long arg)
MPT_ADAPTER *ioc;
struct Scsi_Host *sh;
MPT_SCSI_HOST *hd;
- VirtDevice *vdev;
+ VirtTarget *vdev;
char *pmem;
int *pdata;
IOCPage2_t *pIoc2;
@@ -1822,7 +1822,7 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
case MPI_FUNCTION_SCSI_IO_REQUEST:
if (ioc->sh) {
SCSIIORequest_t *pScsiReq = (SCSIIORequest_t *) mf;
- VirtDevice *pTarget = NULL;
+ VirtTarget *pTarget = NULL;
MPT_SCSI_HOST *hd = NULL;
int qtag = MPI_SCSIIO_CONTROL_UNTAGGED;
int scsidir = 0;
diff --git a/drivers/message/fusion/mptfc.c b/drivers/message/fusion/mptfc.c
index a628be9bbba..ba61e182885 100644
--- a/drivers/message/fusion/mptfc.c
+++ b/drivers/message/fusion/mptfc.c
@@ -84,13 +84,16 @@ static int mptfcTaskCtx = -1;
static int mptfcInternalCtx = -1; /* Used only for internal commands */
static struct scsi_host_template mptfc_driver_template = {
+ .module = THIS_MODULE,
.proc_name = "mptfc",
.proc_info = mptscsih_proc_info,
.name = "MPT FC Host",
.info = mptscsih_info,
.queuecommand = mptscsih_qcmd,
+ .target_alloc = mptscsih_target_alloc,
.slave_alloc = mptscsih_slave_alloc,
.slave_configure = mptscsih_slave_configure,
+ .target_destroy = mptscsih_target_destroy,
.slave_destroy = mptscsih_slave_destroy,
.change_queue_depth = mptscsih_change_queue_depth,
.eh_abort_handler = mptscsih_abort,
@@ -167,13 +170,15 @@ mptfc_probe(struct pci_dev *pdev, const struct pci_device_id *id)
printk(MYIOC_s_WARN_FMT
"Skipping because it's not operational!\n",
ioc->name);
- return -ENODEV;
+ error = -ENODEV;
+ goto out_mptfc_probe;
}
if (!ioc->active) {
printk(MYIOC_s_WARN_FMT "Skipping because it's disabled!\n",
ioc->name);
- return -ENODEV;
+ error = -ENODEV;
+ goto out_mptfc_probe;
}
/* Sanity check - ensure at least 1 port is INITIATOR capable
@@ -198,7 +203,8 @@ mptfc_probe(struct pci_dev *pdev, const struct pci_device_id *id)
printk(MYIOC_s_WARN_FMT
"Unable to register controller with SCSI subsystem\n",
ioc->name);
- return -1;
+ error = -1;
+ goto out_mptfc_probe;
}
spin_lock_irqsave(&ioc->FreeQlock, flags);
@@ -266,7 +272,7 @@ mptfc_probe(struct pci_dev *pdev, const struct pci_device_id *id)
mem = kmalloc(sz, GFP_ATOMIC);
if (mem == NULL) {
error = -ENOMEM;
- goto mptfc_probe_failed;
+ goto out_mptfc_probe;
}
memset(mem, 0, sz);
@@ -284,14 +290,14 @@ mptfc_probe(struct pci_dev *pdev, const struct pci_device_id *id)
mem = kmalloc(sz, GFP_ATOMIC);
if (mem == NULL) {
error = -ENOMEM;
- goto mptfc_probe_failed;
+ goto out_mptfc_probe;
}
memset(mem, 0, sz);
- hd->Targets = (VirtDevice **) mem;
+ hd->Targets = (VirtTarget **) mem;
dprintk((KERN_INFO
- " Targets @ %p, sz=%d\n", hd->Targets, sz));
+ " vdev @ %p, sz=%d\n", hd->Targets, sz));
/* Clear the TM flags
*/
@@ -330,13 +336,13 @@ mptfc_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if(error) {
dprintk((KERN_ERR MYNAM
"scsi_add_host failed\n"));
- goto mptfc_probe_failed;
+ goto out_mptfc_probe;
}
scsi_scan_host(sh);
return 0;
-mptfc_probe_failed:
+out_mptfc_probe:
mptscsih_remove(pdev);
return error;
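Beyond wiring up .target_alloc/.target_destroy, the mptfc_probe() hunks above replace the early returns with the single-exit goto style, so every failure path funnels through out_mptfc_probe and the common mptscsih_remove() cleanup. A minimal, self-contained illustration of that idiom follows; the names and error values are generic stand-ins, not the driver's own.

#include <stdio.h>
#include <stdlib.h>

/* Single-exit error handling: every failure sets 'error' and jumps to the
 * one cleanup label, so undo logic lives in exactly one place. */
static int probe(int operational, int active)
{
	int error = 0;
	char *buf = NULL;

	if (!operational) {
		error = -1;		/* stands in for -ENODEV */
		goto out_probe;
	}
	if (!active) {
		error = -1;
		goto out_probe;
	}
	buf = malloc(64);
	if (!buf) {
		error = -2;		/* stands in for -ENOMEM */
		goto out_probe;
	}
	printf("probe succeeded\n");
	free(buf);
	return 0;

out_probe:
	free(buf);			/* free(NULL) is a no-op, so this is safe */
	printf("probe failed: %d\n", error);
	return error;
}

int main(void)
{
	probe(1, 1);
	probe(0, 1);
	return 0;
}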
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
index e0a8bb8ba7d..17e9757e728 100644
--- a/drivers/message/fusion/mptsas.c
+++ b/drivers/message/fusion/mptsas.c
@@ -228,31 +228,35 @@ static void mptsas_print_expander_pg1(SasExpanderPage1_t *pg1)
* implement ->target_alloc.
*/
static int
-mptsas_slave_alloc(struct scsi_device *device)
+mptsas_slave_alloc(struct scsi_device *sdev)
{
- struct Scsi_Host *host = device->host;
+ struct Scsi_Host *host = sdev->host;
MPT_SCSI_HOST *hd = (MPT_SCSI_HOST *)host->hostdata;
struct sas_rphy *rphy;
struct mptsas_portinfo *p;
+ VirtTarget *vtarget;
VirtDevice *vdev;
- uint target = device->id;
+ struct scsi_target *starget;
int i;
- if ((vdev = hd->Targets[target]) != NULL)
- goto out;
-
vdev = kmalloc(sizeof(VirtDevice), GFP_KERNEL);
if (!vdev) {
printk(MYIOC_s_ERR_FMT "slave_alloc kmalloc(%zd) FAILED!\n",
hd->ioc->name, sizeof(VirtDevice));
return -ENOMEM;
}
-
memset(vdev, 0, sizeof(VirtDevice));
- vdev->tflags = MPT_TARGET_FLAGS_Q_YES|MPT_TARGET_FLAGS_VALID_INQUIRY;
vdev->ioc_id = hd->ioc->id;
+ sdev->hostdata = vdev;
+ starget = scsi_target(sdev);
+ vtarget = starget->hostdata;
+ vdev->vtarget = vtarget;
+ if (vtarget->num_luns == 0) {
+ vtarget->tflags = MPT_TARGET_FLAGS_Q_YES|MPT_TARGET_FLAGS_VALID_INQUIRY;
+ hd->Targets[sdev->id] = vtarget;
+ }
- rphy = dev_to_rphy(device->sdev_target->dev.parent);
+ rphy = dev_to_rphy(sdev->sdev_target->dev.parent);
list_for_each_entry(p, &hd->ioc->sas_topology, list) {
for (i = 0; i < p->num_phys; i++) {
if (p->phy_info[i].attached.sas_address ==
@@ -260,7 +264,7 @@ mptsas_slave_alloc(struct scsi_device *device)
vdev->target_id =
p->phy_info[i].attached.target;
vdev->bus_id = p->phy_info[i].attached.bus;
- hd->Targets[device->id] = vdev;
+ vdev->lun = sdev->lun;
goto out;
}
}
@@ -271,19 +275,24 @@ mptsas_slave_alloc(struct scsi_device *device)
return -ENODEV;
out:
- vdev->num_luns++;
- device->hostdata = vdev;
+ vtarget->ioc_id = vdev->ioc_id;
+ vtarget->target_id = vdev->target_id;
+ vtarget->bus_id = vdev->bus_id;
+ vtarget->num_luns++;
return 0;
}
static struct scsi_host_template mptsas_driver_template = {
+ .module = THIS_MODULE,
.proc_name = "mptsas",
.proc_info = mptscsih_proc_info,
.name = "MPT SPI Host",
.info = mptscsih_info,
.queuecommand = mptscsih_qcmd,
+ .target_alloc = mptscsih_target_alloc,
.slave_alloc = mptsas_slave_alloc,
.slave_configure = mptscsih_slave_configure,
+ .target_destroy = mptscsih_target_destroy,
.slave_destroy = mptscsih_slave_destroy,
.change_queue_depth = mptscsih_change_queue_depth,
.eh_abort_handler = mptscsih_abort,
@@ -986,7 +995,6 @@ mptsas_probe_hba_phys(MPT_ADAPTER *ioc, int *index)
goto out_free_port_info;
list_add_tail(&port_info->list, &ioc->sas_topology);
-
for (i = 0; i < port_info->num_phys; i++) {
mptsas_sas_phy_pg0(ioc, &port_info->phy_info[i],
(MPI_SAS_PHY_PGAD_FORM_PHY_NUMBER <<
@@ -1133,13 +1141,15 @@ mptsas_probe(struct pci_dev *pdev, const struct pci_device_id *id)
printk(MYIOC_s_WARN_FMT
"Skipping because it's not operational!\n",
ioc->name);
- return -ENODEV;
+ error = -ENODEV;
+ goto out_mptsas_probe;
}
if (!ioc->active) {
printk(MYIOC_s_WARN_FMT "Skipping because it's disabled!\n",
ioc->name);
- return -ENODEV;
+ error = -ENODEV;
+ goto out_mptsas_probe;
}
/* Sanity check - ensure at least 1 port is INITIATOR capable
@@ -1163,7 +1173,8 @@ mptsas_probe(struct pci_dev *pdev, const struct pci_device_id *id)
printk(MYIOC_s_WARN_FMT
"Unable to register controller with SCSI subsystem\n",
ioc->name);
- return -1;
+ error = -1;
+ goto out_mptsas_probe;
}
spin_lock_irqsave(&ioc->FreeQlock, flags);
@@ -1237,7 +1248,7 @@ mptsas_probe(struct pci_dev *pdev, const struct pci_device_id *id)
mem = kmalloc(sz, GFP_ATOMIC);
if (mem == NULL) {
error = -ENOMEM;
- goto mptsas_probe_failed;
+ goto out_mptsas_probe;
}
memset(mem, 0, sz);
@@ -1255,14 +1266,14 @@ mptsas_probe(struct pci_dev *pdev, const struct pci_device_id *id)
mem = kmalloc(sz, GFP_ATOMIC);
if (mem == NULL) {
error = -ENOMEM;
- goto mptsas_probe_failed;
+ goto out_mptsas_probe;
}
memset(mem, 0, sz);
- hd->Targets = (VirtDevice **) mem;
+ hd->Targets = (VirtTarget **) mem;
dprintk((KERN_INFO
- " Targets @ %p, sz=%d\n", hd->Targets, sz));
+ " vtarget @ %p, sz=%d\n", hd->Targets, sz));
/* Clear the TM flags
*/
@@ -1308,14 +1319,14 @@ mptsas_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (error) {
dprintk((KERN_ERR MYNAM
"scsi_add_host failed\n"));
- goto mptsas_probe_failed;
+ goto out_mptsas_probe;
}
mptsas_scan_sas_topology(ioc);
return 0;
-mptsas_probe_failed:
+out_mptsas_probe:
mptscsih_remove(pdev);
return error;
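The mptsas template above now lets the SCSI mid-layer manage per-target state through .target_alloc/.target_destroy while keeping a driver-specific slave_alloc for the per-LUN VirtDevice. The tiny simulation below only illustrates the calling order those hooks rely on: target_alloc once when a target is first found, slave_alloc/slave_configure per LUN, and target_destroy after the last LUN's slave_destroy. The function names and bodies here are hypothetical placeholders, not the mid-layer's API.

#include <stdio.h>

/* Hook ordering as the mid-layer drives it for one target with two LUNs. */
static int  target_alloc(int id)             { printf("target_alloc id=%d\n", id); return 0; }
static int  slave_alloc(int id, int lun)     { printf("slave_alloc id=%d lun=%d\n", id, lun); return 0; }
static int  slave_configure(int id, int lun) { printf("slave_configure id=%d lun=%d\n", id, lun); return 0; }
static void slave_destroy(int id, int lun)   { printf("slave_destroy id=%d lun=%d\n", id, lun); }
static void target_destroy(int id)           { printf("target_destroy id=%d\n", id); }

int main(void)
{
	int id = 3, nluns = 2, lun;

	target_alloc(id);
	for (lun = 0; lun < nluns; lun++) {
		slave_alloc(id, lun);
		slave_configure(id, lun);
	}
	for (lun = 0; lun < nluns; lun++)
		slave_destroy(id, lun);
	target_destroy(id);	/* only after the last LUN is gone */
	return 0;
}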
diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
index b7b9846ff3f..93a16fa3c4b 100644
--- a/drivers/message/fusion/mptscsih.c
+++ b/drivers/message/fusion/mptscsih.c
@@ -150,28 +150,29 @@ static int mptscsih_IssueTaskMgmt(MPT_SCSI_HOST *hd, u8 type, u8 channel, u8 tar
int mptscsih_ioc_reset(MPT_ADAPTER *ioc, int post_reset);
int mptscsih_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply);
-static void mptscsih_initTarget(MPT_SCSI_HOST *hd, int bus_id, int target_id, u8 lun, char *data, int dlen);
-static void mptscsih_setTargetNegoParms(MPT_SCSI_HOST *hd, VirtDevice *target, char byte56);
-static void mptscsih_set_dvflags(MPT_SCSI_HOST *hd, SCSIIORequest_t *pReq);
+static void mptscsih_initTarget(MPT_SCSI_HOST *hd, VirtTarget *vtarget, u8 lun, char *data, int dlen);
+static void mptscsih_setTargetNegoParms(MPT_SCSI_HOST *hd, VirtTarget *vtarget, char byte56);
static void mptscsih_setDevicePage1Flags (u8 width, u8 factor, u8 offset, int *requestedPtr, int *configurationPtr, u8 flags);
-static void mptscsih_no_negotiate(MPT_SCSI_HOST *hd, int target_id);
+static void mptscsih_no_negotiate(MPT_SCSI_HOST *hd, struct scsi_cmnd *sc);
static int mptscsih_writeSDP1(MPT_SCSI_HOST *hd, int portnum, int target, int flags);
static int mptscsih_writeIOCPage4(MPT_SCSI_HOST *hd, int target_id, int bus);
int mptscsih_scandv_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *r);
static int mptscsih_do_cmd(MPT_SCSI_HOST *hd, INTERNAL_CMD *iocmd);
-static int mptscsih_synchronize_cache(MPT_SCSI_HOST *hd, int portnum);
+static void mptscsih_synchronize_cache(MPT_SCSI_HOST *hd, VirtDevice *vdevice);
+static void mptscsih_negotiate_to_asyn_narrow(MPT_SCSI_HOST *hd, VirtTarget *vtarget);
+static int mptscsih_is_phys_disk(MPT_ADAPTER *ioc, int id);
static struct work_struct mptscsih_persistTask;
#ifdef MPTSCSIH_ENABLE_DOMAIN_VALIDATION
static int mptscsih_do_raid(MPT_SCSI_HOST *hd, u8 action, INTERNAL_CMD *io);
static void mptscsih_domainValidation(void *hd);
-static int mptscsih_is_phys_disk(MPT_ADAPTER *ioc, int id);
static void mptscsih_qas_check(MPT_SCSI_HOST *hd, int id);
static int mptscsih_doDv(MPT_SCSI_HOST *hd, int channel, int target);
static void mptscsih_dv_parms(MPT_SCSI_HOST *hd, DVPARAMETERS *dv,void *pPage);
static void mptscsih_fillbuf(char *buffer, int size, int index, int width);
static void mptscsih_set_dvflags_raid(MPT_SCSI_HOST *hd, int id);
+static void mptscsih_set_dvflags(MPT_SCSI_HOST *hd, struct scsi_cmnd *sc);
#endif
void mptscsih_remove(struct pci_dev *);
@@ -627,7 +628,7 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
dreplyprintk((KERN_NOTICE "Reply ha=%d id=%d lun=%d:\n"
"IOCStatus=%04xh SCSIState=%02xh SCSIStatus=%02xh\n"
"resid=%d bufflen=%d xfer_cnt=%d\n",
- ioc->id, pScsiReq->TargetID, pScsiReq->LUN[1],
+ ioc->id, sc->device->id, sc->device->lun,
status, scsi_state, scsi_status, sc->resid,
sc->request_bufflen, xfer_cnt));
@@ -641,7 +642,7 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
pScsiReply->ResponseInfo) {
printk(KERN_NOTICE "ha=%d id=%d lun=%d: "
"FCP_ResponseInfo=%08xh\n",
- ioc->id, pScsiReq->TargetID, pScsiReq->LUN[1],
+ ioc->id, sc->device->id, sc->device->lun,
le32_to_cpu(pScsiReply->ResponseInfo));
}
@@ -677,8 +678,8 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
sc->result = DID_RESET << 16;
/* GEM Workaround. */
- if (ioc->bus_type == SCSI)
- mptscsih_no_negotiate(hd, sc->device->id);
+ if (ioc->bus_type == SPI)
+ mptscsih_no_negotiate(hd, sc);
break;
case MPI_IOCSTATUS_SCSI_RESIDUAL_MISMATCH: /* 0x0049 */
@@ -892,16 +893,15 @@ mptscsih_flush_running_cmds(MPT_SCSI_HOST *hd)
* when a lun is disable by mid-layer.
* Do NOT access the referenced scsi_cmnd structure or
* members. Will cause either a paging or NULL ptr error.
- * @hd: Pointer to a SCSI HOST structure
- * @target: target id
- * @lun: lun
+ * @hd: Pointer to a SCSI HOST structure
+ * @vdevice: per device private data
*
* Returns: None.
*
* Called from slave_destroy.
*/
static void
-mptscsih_search_running_cmds(MPT_SCSI_HOST *hd, uint target, uint lun)
+mptscsih_search_running_cmds(MPT_SCSI_HOST *hd, VirtDevice *vdevice)
{
SCSIIORequest_t *mf = NULL;
int ii;
@@ -909,7 +909,7 @@ mptscsih_search_running_cmds(MPT_SCSI_HOST *hd, uint target, uint lun)
struct scsi_cmnd *sc;
dsprintk((KERN_INFO MYNAM ": search_running target %d lun %d max %d\n",
- target, lun, max));
+ vdevice->target_id, vdevice->lun, max));
for (ii=0; ii < max; ii++) {
if ((sc = hd->ScsiLookup[ii]) != NULL) {
@@ -919,7 +919,7 @@ mptscsih_search_running_cmds(MPT_SCSI_HOST *hd, uint target, uint lun)
dsprintk(( "search_running: found (sc=%p, mf = %p) target %d, lun %d \n",
hd->ScsiLookup[ii], mf, mf->TargetID, mf->LUN[1]));
- if ((mf->TargetID != ((u8)target)) || (mf->LUN[1] != ((u8) lun)))
+ if ((mf->TargetID != ((u8)vdevice->target_id)) || (mf->LUN[1] != ((u8) vdevice->lun)))
continue;
/* Cleanup
@@ -993,8 +993,10 @@ mptscsih_remove(struct pci_dev *pdev)
MPT_ADAPTER *ioc = pci_get_drvdata(pdev);
struct Scsi_Host *host = ioc->sh;
MPT_SCSI_HOST *hd;
+#ifdef MPTSCSIH_ENABLE_DOMAIN_VALIDATION
int count;
unsigned long flags;
+#endif
int sz1;
if(!host) {
@@ -1075,11 +1077,6 @@ mptscsih_shutdown(struct pci_dev *pdev)
hd = (MPT_SCSI_HOST *)host->hostdata;
- /* Flush the cache of this adapter
- */
- if(hd != NULL)
- mptscsih_synchronize_cache(hd, 0);
-
}
#ifdef CONFIG_PM
@@ -1286,7 +1283,7 @@ mptscsih_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
MPT_SCSI_HOST *hd;
MPT_FRAME_HDR *mf;
SCSIIORequest_t *pScsiReq;
- VirtDevice *pTarget = SCpnt->device->hostdata;
+ VirtDevice *vdev = SCpnt->device->hostdata;
int lun;
u32 datalen;
u32 scsictl;
@@ -1341,8 +1338,8 @@ mptscsih_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
/* Default to untagged. Once a target structure has been allocated,
* use the Inquiry data to determine if device supports tagged.
*/
- if (pTarget
- && (pTarget->tflags & MPT_TARGET_FLAGS_Q_YES)
+ if (vdev
+ && (vdev->vtarget->tflags & MPT_TARGET_FLAGS_Q_YES)
&& (SCpnt->device->tagged_supported)) {
scsictl = scsidir | MPI_SCSIIO_CONTROL_SIMPLEQ;
} else {
@@ -1351,8 +1348,8 @@ mptscsih_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
/* Use the above information to set up the message frame
*/
- pScsiReq->TargetID = (u8) pTarget->target_id;
- pScsiReq->Bus = pTarget->bus_id;
+ pScsiReq->TargetID = (u8) vdev->target_id;
+ pScsiReq->Bus = vdev->bus_id;
pScsiReq->ChainOffset = 0;
pScsiReq->Function = MPI_FUNCTION_SCSI_IO_REQUEST;
pScsiReq->CDBLength = SCpnt->cmd_len;
@@ -1403,8 +1400,8 @@ mptscsih_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
SCpnt->host_scribble = NULL;
#ifdef MPTSCSIH_ENABLE_DOMAIN_VALIDATION
- if (hd->ioc->bus_type == SCSI) {
- int dvStatus = hd->ioc->spi_data.dvStatus[pTarget->target_id];
+ if (hd->ioc->bus_type == SPI) {
+ int dvStatus = hd->ioc->spi_data.dvStatus[vdev->target_id];
int issueCmd = 1;
if (dvStatus || hd->ioc->spi_data.forceDv) {
@@ -1437,7 +1434,7 @@ mptscsih_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
/* Set the DV flags.
*/
if (dvStatus & MPT_SCSICFG_DV_NOT_DONE)
- mptscsih_set_dvflags(hd, pScsiReq);
+ mptscsih_set_dvflags(hd, SCpnt);
if (!issueCmd)
goto fail;
@@ -1741,6 +1738,7 @@ mptscsih_abort(struct scsi_cmnd * SCpnt)
u32 ctx2abort;
int scpnt_idx;
int retval;
+ VirtDevice *vdev;
/* If we can't locate our host adapter structure, return FAILED status.
*/
@@ -1790,8 +1788,9 @@ mptscsih_abort(struct scsi_cmnd * SCpnt)
hd->abortSCpnt = SCpnt;
+ vdev = SCpnt->device->hostdata;
retval = mptscsih_TMHandler(hd, MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
- SCpnt->device->channel, SCpnt->device->id, SCpnt->device->lun,
+ vdev->bus_id, vdev->target_id, vdev->lun,
ctx2abort, 2 /* 2 second timeout */);
printk (KERN_WARNING MYNAM ": %s: task abort: %s (sc=%p)\n",
@@ -1822,6 +1821,7 @@ mptscsih_dev_reset(struct scsi_cmnd * SCpnt)
{
MPT_SCSI_HOST *hd;
int retval;
+ VirtDevice *vdev;
/* If we can't locate our host adapter structure, return FAILED status.
*/
@@ -1839,8 +1839,9 @@ mptscsih_dev_reset(struct scsi_cmnd * SCpnt)
hd->ioc->name, SCpnt);
scsi_print_command(SCpnt);
+ vdev = SCpnt->device->hostdata;
retval = mptscsih_TMHandler(hd, MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET,
- SCpnt->device->channel, SCpnt->device->id,
+ vdev->bus_id, vdev->target_id,
0, 0, 5 /* 5 second timeout */);
printk (KERN_WARNING MYNAM ": %s: target reset: %s (sc=%p)\n",
@@ -1871,6 +1872,7 @@ mptscsih_bus_reset(struct scsi_cmnd * SCpnt)
{
MPT_SCSI_HOST *hd;
int retval;
+ VirtDevice *vdev;
/* If we can't locate our host adapter structure, return FAILED status.
*/
@@ -1888,8 +1890,9 @@ mptscsih_bus_reset(struct scsi_cmnd * SCpnt)
if (hd->timeouts < -1)
hd->timeouts++;
+ vdev = SCpnt->device->hostdata;
retval = mptscsih_TMHandler(hd, MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS,
- SCpnt->device->channel, 0, 0, 0, 5 /* 5 second timeout */);
+ vdev->bus_id, 0, 0, 0, 5 /* 5 second timeout */);
printk (KERN_WARNING MYNAM ": %s: bus reset: %s (sc=%p)\n",
hd->ioc->name,
@@ -2151,23 +2154,36 @@ mptscsih_bios_param(struct scsi_device * sdev, struct block_device *bdev,
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
* OS entry point to allow host driver to alloc memory
+ * for each scsi target. Called once per device the bus scan.
+ * Return non-zero if allocation fails.
+ */
+int
+mptscsih_target_alloc(struct scsi_target *starget)
+{
+ VirtTarget *vtarget;
+
+ vtarget = kmalloc(sizeof(VirtTarget), GFP_KERNEL);
+ if (!vtarget)
+ return -ENOMEM;
+ memset(vtarget, 0, sizeof(VirtTarget));
+ starget->hostdata = vtarget;
+ return 0;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ * OS entry point to allow host driver to alloc memory
* for each scsi device. Called once per device the bus scan.
* Return non-zero if allocation fails.
- * Init memory once per id (not LUN).
*/
int
-mptscsih_slave_alloc(struct scsi_device *device)
+mptscsih_slave_alloc(struct scsi_device *sdev)
{
- struct Scsi_Host *host = device->host;
+ struct Scsi_Host *host = sdev->host;
MPT_SCSI_HOST *hd = (MPT_SCSI_HOST *)host->hostdata;
+ VirtTarget *vtarget;
VirtDevice *vdev;
- uint target = device->id;
-
- if (hd == NULL)
- return -ENODEV;
-
- if ((vdev = hd->Targets[target]) != NULL)
- goto out;
+ struct scsi_target *starget;
vdev = kmalloc(sizeof(VirtDevice), GFP_KERNEL);
if (!vdev) {
@@ -2177,25 +2193,33 @@ mptscsih_slave_alloc(struct scsi_device *device)
}
memset(vdev, 0, sizeof(VirtDevice));
- vdev->tflags = MPT_TARGET_FLAGS_Q_YES;
vdev->ioc_id = hd->ioc->id;
- vdev->target_id = device->id;
- vdev->bus_id = device->channel;
- vdev->raidVolume = 0;
- hd->Targets[device->id] = vdev;
- if (hd->ioc->bus_type == SCSI) {
- if (hd->ioc->raid_data.isRaid & (1 << device->id)) {
- vdev->raidVolume = 1;
- ddvtprintk((KERN_INFO
- "RAID Volume @ id %d\n", device->id));
+ vdev->target_id = sdev->id;
+ vdev->bus_id = sdev->channel;
+ vdev->lun = sdev->lun;
+ sdev->hostdata = vdev;
+
+ starget = scsi_target(sdev);
+ vtarget = starget->hostdata;
+ vdev->vtarget = vtarget;
+
+ if (vtarget->num_luns == 0) {
+ hd->Targets[sdev->id] = vtarget;
+ vtarget->ioc_id = hd->ioc->id;
+ vtarget->tflags = MPT_TARGET_FLAGS_Q_YES;
+ vtarget->target_id = sdev->id;
+ vtarget->bus_id = sdev->channel;
+ if (hd->ioc->bus_type == SPI) {
+ if (hd->ioc->raid_data.isRaid & (1 << sdev->id)) {
+ vtarget->raidVolume = 1;
+ ddvtprintk((KERN_INFO
+ "RAID Volume @ id %d\n", sdev->id));
+ }
+ } else {
+ vtarget->tflags |= MPT_TARGET_FLAGS_VALID_INQUIRY;
}
- } else {
- vdev->tflags |= MPT_TARGET_FLAGS_VALID_INQUIRY;
}
-
- out:
- vdev->num_luns++;
- device->hostdata = vdev;
+ vtarget->num_luns++;
return 0;
}
@@ -2204,40 +2228,52 @@ mptscsih_slave_alloc(struct scsi_device *device)
* Called if no device present or device being unloaded
*/
void
-mptscsih_slave_destroy(struct scsi_device *device)
+mptscsih_target_destroy(struct scsi_target *starget)
{
- struct Scsi_Host *host = device->host;
- MPT_SCSI_HOST *hd = (MPT_SCSI_HOST *)host->hostdata;
- VirtDevice *vdev;
- uint target = device->id;
- uint lun = device->lun;
-
- if (hd == NULL)
- return;
-
- mptscsih_search_running_cmds(hd, target, lun);
-
- vdev = hd->Targets[target];
- vdev->luns[0] &= ~(1 << lun);
- if (--vdev->num_luns)
- return;
-
- kfree(hd->Targets[target]);
- hd->Targets[target] = NULL;
-
- if (hd->ioc->bus_type == SCSI) {
- if (mptscsih_is_phys_disk(hd->ioc, target)) {
- hd->ioc->spi_data.forceDv |= MPT_SCSICFG_RELOAD_IOC_PG3;
- } else {
- hd->ioc->spi_data.dvStatus[target] =
- MPT_SCSICFG_NEGOTIATE;
+ if (starget->hostdata)
+ kfree(starget->hostdata);
+ starget->hostdata = NULL;
+}
- if (!hd->negoNvram) {
- hd->ioc->spi_data.dvStatus[target] |=
- MPT_SCSICFG_DV_NOT_DONE;
+/*
+ * OS entry point to allow for host driver to free allocated memory
+ * Called if no device present or device being unloaded
+ */
+void
+mptscsih_slave_destroy(struct scsi_device *sdev)
+{
+ struct Scsi_Host *host = sdev->host;
+ MPT_SCSI_HOST *hd = (MPT_SCSI_HOST *)host->hostdata;
+ VirtTarget *vtarget;
+ VirtDevice *vdevice;
+ struct scsi_target *starget;
+
+ starget = scsi_target(sdev);
+ vtarget = starget->hostdata;
+ vdevice = sdev->hostdata;
+
+ mptscsih_search_running_cmds(hd, vdevice);
+ vtarget->luns[0] &= ~(1 << vdevice->lun);
+ vtarget->num_luns--;
+ if (vtarget->num_luns == 0) {
+ mptscsih_negotiate_to_asyn_narrow(hd, vtarget);
+ if (hd->ioc->bus_type == SPI) {
+ if (mptscsih_is_phys_disk(hd->ioc, vtarget->target_id)) {
+ hd->ioc->spi_data.forceDv |= MPT_SCSICFG_RELOAD_IOC_PG3;
+ } else {
+ hd->ioc->spi_data.dvStatus[vtarget->target_id] =
+ MPT_SCSICFG_NEGOTIATE;
+ if (!hd->negoNvram) {
+ hd->ioc->spi_data.dvStatus[vtarget->target_id] |=
+ MPT_SCSICFG_DV_NOT_DONE;
+ }
}
}
+ hd->Targets[sdev->id] = NULL;
}
+ mptscsih_synchronize_cache(hd, vdevice);
+ kfree(vdevice);
+ sdev->hostdata = NULL;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
@@ -2251,22 +2287,21 @@ mptscsih_slave_destroy(struct scsi_device *device)
int
mptscsih_change_queue_depth(struct scsi_device *sdev, int qdepth)
{
- MPT_SCSI_HOST *hd = (MPT_SCSI_HOST *)sdev->host->hostdata;
- VirtDevice *pTarget;
- int max_depth;
- int tagged;
-
- if (hd == NULL)
- return 0;
- if (!(pTarget = hd->Targets[sdev->id]))
- return 0;
-
- if (hd->ioc->bus_type == SCSI) {
- if (pTarget->tflags & MPT_TARGET_FLAGS_VALID_INQUIRY) {
- if (!(pTarget->tflags & MPT_TARGET_FLAGS_Q_YES))
+ MPT_SCSI_HOST *hd = (MPT_SCSI_HOST *)sdev->host->hostdata;
+ VirtTarget *vtarget;
+ struct scsi_target *starget;
+ int max_depth;
+ int tagged;
+
+ starget = scsi_target(sdev);
+ vtarget = starget->hostdata;
+
+ if (hd->ioc->bus_type == SPI) {
+ if (vtarget->tflags & MPT_TARGET_FLAGS_VALID_INQUIRY) {
+ if (!(vtarget->tflags & MPT_TARGET_FLAGS_Q_YES))
max_depth = 1;
- else if (((pTarget->inq_data[0] & 0x1f) == 0x00) &&
- (pTarget->minSyncFactor <= MPT_ULTRA160 ))
+ else if (((vtarget->inq_data[0] & 0x1f) == 0x00) &&
+ (vtarget->minSyncFactor <= MPT_ULTRA160 ))
max_depth = MPT_SCSI_CMD_PER_DEV_HIGH;
else
max_depth = MPT_SCSI_CMD_PER_DEV_LOW;
@@ -2295,64 +2330,58 @@ mptscsih_change_queue_depth(struct scsi_device *sdev, int qdepth)
* Return non-zero if fails.
*/
int
-mptscsih_slave_configure(struct scsi_device *device)
+mptscsih_slave_configure(struct scsi_device *sdev)
{
- struct Scsi_Host *sh = device->host;
- VirtDevice *pTarget;
+ struct Scsi_Host *sh = sdev->host;
+ VirtTarget *vtarget;
+ VirtDevice *vdevice;
+ struct scsi_target *starget;
MPT_SCSI_HOST *hd = (MPT_SCSI_HOST *)sh->hostdata;
+ int indexed_lun, lun_index;
- if ((hd == NULL) || (hd->Targets == NULL)) {
- return 0;
- }
+ starget = scsi_target(sdev);
+ vtarget = starget->hostdata;
+ vdevice = sdev->hostdata;
dsprintk((MYIOC_s_INFO_FMT
"device @ %p, id=%d, LUN=%d, channel=%d\n",
- hd->ioc->name, device, device->id, device->lun, device->channel));
- dsprintk((MYIOC_s_INFO_FMT
- "sdtr %d wdtr %d ppr %d inq length=%d\n",
- hd->ioc->name, device->sdtr, device->wdtr,
- device->ppr, device->inquiry_len));
-
- if (device->id > sh->max_id) {
+ hd->ioc->name, sdev, sdev->id, sdev->lun, sdev->channel));
+ if (hd->ioc->bus_type == SPI)
+ dsprintk((MYIOC_s_INFO_FMT
+ "sdtr %d wdtr %d ppr %d inq length=%d\n",
+ hd->ioc->name, sdev->sdtr, sdev->wdtr,
+ sdev->ppr, sdev->inquiry_len));
+
+ if (sdev->id > sh->max_id) {
/* error case, should never happen */
- scsi_adjust_queue_depth(device, 0, 1);
- goto slave_configure_exit;
- }
-
- pTarget = hd->Targets[device->id];
-
- if (pTarget == NULL) {
- /* Driver doesn't know about this device.
- * Kernel may generate a "Dummy Lun 0" which
- * may become a real Lun if a
- * "scsi add-single-device" command is executed
- * while the driver is active (hot-plug a
- * device). LSI Raid controllers need
- * queue_depth set to DEV_HIGH for this reason.
- */
- scsi_adjust_queue_depth(device, MSG_SIMPLE_TAG,
- MPT_SCSI_CMD_PER_DEV_HIGH);
+ scsi_adjust_queue_depth(sdev, 0, 1);
goto slave_configure_exit;
}
- mptscsih_initTarget(hd, device->channel, device->id, device->lun,
- device->inquiry, device->inquiry_len );
- mptscsih_change_queue_depth(device, MPT_SCSI_CMD_PER_DEV_HIGH);
+ vdevice->configured_lun=1;
+ lun_index = (vdevice->lun >> 5); /* 32 luns per lun_index */
+ indexed_lun = (vdevice->lun % 32);
+ vtarget->luns[lun_index] |= (1 << indexed_lun);
+ mptscsih_initTarget(hd, vtarget, sdev->lun, sdev->inquiry,
+ sdev->inquiry_len );
+ mptscsih_change_queue_depth(sdev, MPT_SCSI_CMD_PER_DEV_HIGH);
dsprintk((MYIOC_s_INFO_FMT
"Queue depth=%d, tflags=%x\n",
- hd->ioc->name, device->queue_depth, pTarget->tflags));
+ hd->ioc->name, sdev->queue_depth, vtarget->tflags));
- dsprintk((MYIOC_s_INFO_FMT
- "negoFlags=%x, maxOffset=%x, SyncFactor=%x\n",
- hd->ioc->name, pTarget->negoFlags, pTarget->maxOffset, pTarget->minSyncFactor));
+ if (hd->ioc->bus_type == SPI)
+ dsprintk((MYIOC_s_INFO_FMT
+ "negoFlags=%x, maxOffset=%x, SyncFactor=%x\n",
+ hd->ioc->name, vtarget->negoFlags, vtarget->maxOffset,
+ vtarget->minSyncFactor));
slave_configure_exit:
dsprintk((MYIOC_s_INFO_FMT
"tagged %d, simple %d, ordered %d\n",
- hd->ioc->name,device->tagged_supported, device->simple_tags,
- device->ordered_tags));
+ hd->ioc->name,sdev->tagged_supported, sdev->simple_tags,
+ sdev->ordered_tags));
return 0;
}
@@ -2370,16 +2399,14 @@ slave_configure_exit:
static void
mptscsih_copy_sense_data(struct scsi_cmnd *sc, MPT_SCSI_HOST *hd, MPT_FRAME_HDR *mf, SCSIIOReply_t *pScsiReply)
{
- VirtDevice *target;
+ VirtDevice *vdev;
SCSIIORequest_t *pReq;
u32 sense_count = le32_to_cpu(pScsiReply->SenseCount);
- int index;
/* Get target structure
*/
pReq = (SCSIIORequest_t *) mf;
- index = (int) pReq->TargetID;
- target = hd->Targets[index];
+ vdev = sc->device->hostdata;
if (sense_count) {
u8 *sense_data;
@@ -2393,7 +2420,7 @@ mptscsih_copy_sense_data(struct scsi_cmnd *sc, MPT_SCSI_HOST *hd, MPT_FRAME_HDR
/* Log SMART data (asc = 0x5D, non-IM case only) if required.
*/
if ((hd->ioc->events) && (hd->ioc->eventTypes & (1 << MPI_EVENT_SCSI_DEVICE_STATUS_CHANGE))) {
- if ((sense_data[12] == 0x5D) && (target->raidVolume == 0)) {
+ if ((sense_data[12] == 0x5D) && (vdev->vtarget->raidVolume == 0)) {
int idx;
MPT_ADAPTER *ioc = hd->ioc;
@@ -2403,7 +2430,7 @@ mptscsih_copy_sense_data(struct scsi_cmnd *sc, MPT_SCSI_HOST *hd, MPT_FRAME_HDR
ioc->events[idx].data[0] = (pReq->LUN[1] << 24) ||
(MPI_EVENT_SCSI_DEV_STAT_RC_SMART_DATA << 16) ||
- (pReq->Bus << 8) || pReq->TargetID;
+ (sc->device->channel << 8) || sc->device->id;
ioc->events[idx].data[1] = (sense_data[13] << 8) || sense_data[12];
@@ -2503,9 +2530,9 @@ mptscsih_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
/* 2. Chain Buffer initialization
*/
- /* 4. Renegotiate to all devices, if SCSI
+ /* 4. Renegotiate to all devices, if SPI
*/
- if (ioc->bus_type == SCSI) {
+ if (ioc->bus_type == SPI) {
dnegoprintk(("writeSDP1: ALL_IDS USE_NVRAM\n"));
mptscsih_writeSDP1(hd, 0, 0, MPT_SCSICFG_ALL_IDS | MPT_SCSICFG_USE_NVRAM);
}
@@ -2534,7 +2561,7 @@ mptscsih_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
/* 7. Set flag to force DV and re-read IOC Page 3
*/
- if (ioc->bus_type == SCSI) {
+ if (ioc->bus_type == SPI) {
ioc->spi_data.forceDv = MPT_SCSICFG_NEED_DV | MPT_SCSICFG_RELOAD_IOC_PG3;
ddvtprintk(("Set reload IOC Pg3 Flag\n"));
}
@@ -2576,7 +2603,7 @@ mptscsih_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply)
break;
case MPI_EVENT_IOC_BUS_RESET: /* 04 */
case MPI_EVENT_EXT_BUS_RESET: /* 05 */
- if (hd && (ioc->bus_type == SCSI) && (hd->soft_resets < -1))
+ if (hd && (ioc->bus_type == SPI) && (hd->soft_resets < -1))
hd->soft_resets++;
break;
case MPI_EVENT_LOGOUT: /* 09 */
@@ -2597,11 +2624,11 @@ mptscsih_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply)
case MPI_EVENT_INTEGRATED_RAID: /* 0B */
{
+#ifdef MPTSCSIH_ENABLE_DOMAIN_VALIDATION
pMpiEventDataRaid_t pRaidEventData =
(pMpiEventDataRaid_t) pEvReply->Data;
-#ifdef MPTSCSIH_ENABLE_DOMAIN_VALIDATION
/* Domain Validation Needed */
- if (ioc->bus_type == SCSI &&
+ if (ioc->bus_type == SPI &&
pRaidEventData->ReasonCode ==
MPI_EVENT_RAID_RC_DOMAIN_VAL_NEEDED)
mptscsih_set_dvflags_raid(hd, pRaidEventData->PhysDiskNum);
@@ -2632,8 +2659,7 @@ mptscsih_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply)
/*
* mptscsih_initTarget - Target, LUN alloc/free functionality.
* @hd: Pointer to MPT_SCSI_HOST structure
- * @bus_id: Bus number (?)
- * @target_id: SCSI target id
+ * @vtarget: per target private data
* @lun: SCSI LUN id
* @data: Pointer to data
* @dlen: Number of INQUIRY bytes
@@ -2646,15 +2672,14 @@ mptscsih_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply)
*
*/
static void
-mptscsih_initTarget(MPT_SCSI_HOST *hd, int bus_id, int target_id, u8 lun, char *data, int dlen)
+mptscsih_initTarget(MPT_SCSI_HOST *hd, VirtTarget *vtarget, u8 lun, char *data, int dlen)
{
- int indexed_lun, lun_index;
- VirtDevice *vdev;
SpiCfgData *pSpi;
char data_56;
+ int inq_len;
dinitprintk((MYIOC_s_INFO_FMT "initTarget bus=%d id=%d lun=%d hd=%p\n",
- hd->ioc->name, bus_id, target_id, lun, hd));
+ hd->ioc->name, vtarget->bus_id, vtarget->target_id, lun, hd));
/*
* If the peripheral qualifier filter is enabled then if the target reports a 0x1
@@ -2674,75 +2699,68 @@ mptscsih_initTarget(MPT_SCSI_HOST *hd, int bus_id, int target_id, u8 lun, char *
if (data[0] & 0xe0)
return;
- if ((vdev = hd->Targets[target_id]) == NULL) {
+ if (vtarget == NULL)
return;
- }
- lun_index = (lun >> 5); /* 32 luns per lun_index */
- indexed_lun = (lun % 32);
- vdev->luns[lun_index] |= (1 << indexed_lun);
-
- if (hd->ioc->bus_type == SCSI) {
- if ((data[0] == TYPE_PROCESSOR) && (hd->ioc->spi_data.Saf_Te)) {
- /* Treat all Processors as SAF-TE if
- * command line option is set */
- vdev->tflags |= MPT_TARGET_FLAGS_SAF_TE_ISSUED;
- mptscsih_writeIOCPage4(hd, target_id, bus_id);
- }else if ((data[0] == TYPE_PROCESSOR) &&
- !(vdev->tflags & MPT_TARGET_FLAGS_SAF_TE_ISSUED )) {
- if ( dlen > 49 ) {
- vdev->tflags |= MPT_TARGET_FLAGS_VALID_INQUIRY;
- if ( data[44] == 'S' &&
- data[45] == 'A' &&
- data[46] == 'F' &&
- data[47] == '-' &&
- data[48] == 'T' &&
- data[49] == 'E' ) {
- vdev->tflags |= MPT_TARGET_FLAGS_SAF_TE_ISSUED;
- mptscsih_writeIOCPage4(hd, target_id, bus_id);
- }
+ if (data)
+ vtarget->type = data[0];
+
+ if (hd->ioc->bus_type != SPI)
+ return;
+
+ if ((data[0] == TYPE_PROCESSOR) && (hd->ioc->spi_data.Saf_Te)) {
+ /* Treat all Processors as SAF-TE if
+ * command line option is set */
+ vtarget->tflags |= MPT_TARGET_FLAGS_SAF_TE_ISSUED;
+ mptscsih_writeIOCPage4(hd, vtarget->target_id, vtarget->bus_id);
+ }else if ((data[0] == TYPE_PROCESSOR) &&
+ !(vtarget->tflags & MPT_TARGET_FLAGS_SAF_TE_ISSUED )) {
+ if ( dlen > 49 ) {
+ vtarget->tflags |= MPT_TARGET_FLAGS_VALID_INQUIRY;
+ if ( data[44] == 'S' &&
+ data[45] == 'A' &&
+ data[46] == 'F' &&
+ data[47] == '-' &&
+ data[48] == 'T' &&
+ data[49] == 'E' ) {
+ vtarget->tflags |= MPT_TARGET_FLAGS_SAF_TE_ISSUED;
+ mptscsih_writeIOCPage4(hd, vtarget->target_id, vtarget->bus_id);
}
}
- if (!(vdev->tflags & MPT_TARGET_FLAGS_VALID_INQUIRY)) {
- if ( dlen > 8 ) {
- memcpy (vdev->inq_data, data, 8);
- } else {
- memcpy (vdev->inq_data, data, dlen);
- }
+ }
+ if (!(vtarget->tflags & MPT_TARGET_FLAGS_VALID_INQUIRY)) {
+ inq_len = dlen < 8 ? dlen : 8;
+ memcpy (vtarget->inq_data, data, inq_len);
+ /* If have not done DV, set the DV flag.
+ */
+ pSpi = &hd->ioc->spi_data;
+ if ((data[0] == TYPE_TAPE) || (data[0] == TYPE_PROCESSOR)) {
+ if (pSpi->dvStatus[vtarget->target_id] & MPT_SCSICFG_DV_NOT_DONE)
+ pSpi->dvStatus[vtarget->target_id] |= MPT_SCSICFG_NEED_DV;
+ }
+ vtarget->tflags |= MPT_TARGET_FLAGS_VALID_INQUIRY;
- /* If have not done DV, set the DV flag.
+ data_56 = 0x0F; /* Default to full capabilities if Inq data length is < 57 */
+ if (dlen > 56) {
+ if ( (!(vtarget->tflags & MPT_TARGET_FLAGS_VALID_56))) {
+ /* Update the target capabilities
*/
- pSpi = &hd->ioc->spi_data;
- if ((data[0] == TYPE_TAPE) || (data[0] == TYPE_PROCESSOR)) {
- if (pSpi->dvStatus[target_id] & MPT_SCSICFG_DV_NOT_DONE)
- pSpi->dvStatus[target_id] |= MPT_SCSICFG_NEED_DV;
- }
-
- vdev->tflags |= MPT_TARGET_FLAGS_VALID_INQUIRY;
-
-
- data_56 = 0x0F; /* Default to full capabilities if Inq data length is < 57 */
- if (dlen > 56) {
- if ( (!(vdev->tflags & MPT_TARGET_FLAGS_VALID_56))) {
- /* Update the target capabilities
- */
- data_56 = data[56];
- vdev->tflags |= MPT_TARGET_FLAGS_VALID_56;
- }
+ data_56 = data[56];
+ vtarget->tflags |= MPT_TARGET_FLAGS_VALID_56;
}
- mptscsih_setTargetNegoParms(hd, vdev, data_56);
- } else {
- /* Initial Inquiry may not request enough data bytes to
- * obtain byte 57. DV will; if target doesn't return
- * at least 57 bytes, data[56] will be zero. */
- if (dlen > 56) {
- if ( (!(vdev->tflags & MPT_TARGET_FLAGS_VALID_56))) {
- /* Update the target capabilities
- */
- data_56 = data[56];
- vdev->tflags |= MPT_TARGET_FLAGS_VALID_56;
- mptscsih_setTargetNegoParms(hd, vdev, data_56);
- }
+ }
+ mptscsih_setTargetNegoParms(hd, vtarget, data_56);
+ } else {
+ /* Initial Inquiry may not request enough data bytes to
+ * obtain byte 57. DV will; if target doesn't return
+ * at least 57 bytes, data[56] will be zero. */
+ if (dlen > 56) {
+ if ( (!(vtarget->tflags & MPT_TARGET_FLAGS_VALID_56))) {
+ /* Update the target capabilities
+ */
+ data_56 = data[56];
+ vtarget->tflags |= MPT_TARGET_FLAGS_VALID_56;
+ mptscsih_setTargetNegoParms(hd, vtarget, data_56);
}
}
}
@@ -2755,12 +2773,12 @@ mptscsih_initTarget(MPT_SCSI_HOST *hd, int bus_id, int target_id, u8 lun, char *
*
*/
static void
-mptscsih_setTargetNegoParms(MPT_SCSI_HOST *hd, VirtDevice *target, char byte56)
+mptscsih_setTargetNegoParms(MPT_SCSI_HOST *hd, VirtTarget *target, char byte56)
{
SpiCfgData *pspi_data = &hd->ioc->spi_data;
int id = (int) target->target_id;
int nvram;
- VirtDevice *vdev;
+ VirtTarget *vtarget;
int ii;
u8 width = MPT_NARROW;
u8 factor = MPT_ASYNC;
@@ -2905,9 +2923,9 @@ mptscsih_setTargetNegoParms(MPT_SCSI_HOST *hd, VirtDevice *target, char byte56)
ddvtprintk((KERN_INFO "Disabling QAS due to noQas=%02x on id=%d!\n", noQas, id));
for (ii = 0; ii < id; ii++) {
- if ( (vdev = hd->Targets[ii]) ) {
- vdev->negoFlags |= MPT_TARGET_NO_NEGO_QAS;
- mptscsih_writeSDP1(hd, 0, ii, vdev->negoFlags);
+ if ( (vtarget = hd->Targets[ii]) ) {
+ vtarget->negoFlags |= MPT_TARGET_NO_NEGO_QAS;
+ mptscsih_writeSDP1(hd, 0, ii, vtarget->negoFlags);
}
}
}
@@ -2926,105 +2944,17 @@ mptscsih_setTargetNegoParms(MPT_SCSI_HOST *hd, VirtDevice *target, char byte56)
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/* If DV disabled (negoNvram set to USE_NVARM) or if not LUN 0, return.
- * Else set the NEED_DV flag after Read Capacity Issued (disks)
- * or Mode Sense (cdroms).
- *
- * Tapes, initTarget will set this flag on completion of Inquiry command.
- * Called only if DV_NOT_DONE flag is set
- */
-static void
-mptscsih_set_dvflags(MPT_SCSI_HOST *hd, SCSIIORequest_t *pReq)
-{
- MPT_ADAPTER *ioc = hd->ioc;
- u8 cmd;
- SpiCfgData *pSpi;
-
- ddvtprintk((MYIOC_s_NOTE_FMT
- " set_dvflags: id=%d lun=%d negoNvram=%x cmd=%x\n",
- hd->ioc->name, pReq->TargetID, pReq->LUN[1], hd->negoNvram, pReq->CDB[0]));
-
- if ((pReq->LUN[1] != 0) || (hd->negoNvram != 0))
- return;
-
- cmd = pReq->CDB[0];
-
- if ((cmd == READ_CAPACITY) || (cmd == MODE_SENSE)) {
- pSpi = &ioc->spi_data;
- if ((ioc->raid_data.isRaid & (1 << pReq->TargetID)) && ioc->raid_data.pIocPg3) {
- /* Set NEED_DV for all hidden disks
- */
- Ioc3PhysDisk_t *pPDisk = ioc->raid_data.pIocPg3->PhysDisk;
- int numPDisk = ioc->raid_data.pIocPg3->NumPhysDisks;
-
- while (numPDisk) {
- pSpi->dvStatus[pPDisk->PhysDiskID] |= MPT_SCSICFG_NEED_DV;
- ddvtprintk(("NEED_DV set for phys disk id %d\n", pPDisk->PhysDiskID));
- pPDisk++;
- numPDisk--;
- }
- }
- pSpi->dvStatus[pReq->TargetID] |= MPT_SCSICFG_NEED_DV;
- ddvtprintk(("NEED_DV set for visible disk id %d\n", pReq->TargetID));
- }
-}
-
-/* mptscsih_raid_set_dv_flags()
- *
- * New or replaced disk. Set DV flag and schedule DV.
- */
-static void
-mptscsih_set_dvflags_raid(MPT_SCSI_HOST *hd, int id)
-{
- MPT_ADAPTER *ioc = hd->ioc;
- SpiCfgData *pSpi = &ioc->spi_data;
- Ioc3PhysDisk_t *pPDisk;
- int numPDisk;
-
- if (hd->negoNvram != 0)
- return;
-
- ddvtprintk(("DV requested for phys disk id %d\n", id));
- if (ioc->raid_data.pIocPg3) {
- pPDisk = ioc->raid_data.pIocPg3->PhysDisk;
- numPDisk = ioc->raid_data.pIocPg3->NumPhysDisks;
- while (numPDisk) {
- if (id == pPDisk->PhysDiskNum) {
- pSpi->dvStatus[pPDisk->PhysDiskID] =
- (MPT_SCSICFG_NEED_DV | MPT_SCSICFG_DV_NOT_DONE);
- pSpi->forceDv = MPT_SCSICFG_NEED_DV;
- ddvtprintk(("NEED_DV set for phys disk id %d\n",
- pPDisk->PhysDiskID));
- break;
- }
- pPDisk++;
- numPDisk--;
- }
-
- if (numPDisk == 0) {
- /* The physical disk that needs DV was not found
- * in the stored IOC Page 3. The driver must reload
- * this page. DV routine will set the NEED_DV flag for
- * all phys disks that have DV_NOT_DONE set.
- */
- pSpi->forceDv = MPT_SCSICFG_NEED_DV | MPT_SCSICFG_RELOAD_IOC_PG3;
- ddvtprintk(("phys disk %d not found. Setting reload IOC Pg3 Flag\n",id));
- }
- }
-}
-
-/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
* If no Target, bus reset on 1st I/O. Set the flag to
* prevent any future negotiations to this device.
*/
static void
-mptscsih_no_negotiate(MPT_SCSI_HOST *hd, int target_id)
+mptscsih_no_negotiate(MPT_SCSI_HOST *hd, struct scsi_cmnd *sc)
{
+ VirtDevice *vdev;
- if ((hd->Targets) && (hd->Targets[target_id] == NULL))
- hd->ioc->spi_data.dvStatus[target_id] |= MPT_SCSICFG_BLK_NEGO;
-
+ if ((vdev = sc->device->hostdata) != NULL)
+ hd->ioc->spi_data.dvStatus[vdev->target_id] |= MPT_SCSICFG_BLK_NEGO;
return;
}
@@ -3100,7 +3030,7 @@ mptscsih_writeSDP1(MPT_SCSI_HOST *hd, int portnum, int target_id, int flags)
MPT_ADAPTER *ioc = hd->ioc;
Config_t *pReq;
SCSIDevicePage1_t *pData;
- VirtDevice *pTarget=NULL;
+ VirtTarget *vtarget=NULL;
MPT_FRAME_HDR *mf;
dma_addr_t dataDma;
u16 req_idx;
@@ -3180,11 +3110,11 @@ mptscsih_writeSDP1(MPT_SCSI_HOST *hd, int portnum, int target_id, int flags)
/* If id is not a raid volume, get the updated
* transmission settings from the target structure.
*/
- if (hd->Targets && (pTarget = hd->Targets[id]) && !pTarget->raidVolume) {
- width = pTarget->maxWidth;
- factor = pTarget->minSyncFactor;
- offset = pTarget->maxOffset;
- negoFlags = pTarget->negoFlags;
+ if (hd->Targets && (vtarget = hd->Targets[id]) && !vtarget->raidVolume) {
+ width = vtarget->maxWidth;
+ factor = vtarget->minSyncFactor;
+ offset = vtarget->maxOffset;
+ negoFlags = vtarget->negoFlags;
}
#ifdef MPTSCSIH_ENABLE_DOMAIN_VALIDATION
@@ -3904,149 +3834,139 @@ mptscsih_do_cmd(MPT_SCSI_HOST *hd, INTERNAL_CMD *io)
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
- * mptscsih_synchronize_cache - Send SYNCHRONIZE_CACHE to all disks.
- * @hd: Pointer to MPT_SCSI_HOST structure
- * @portnum: IOC port number
+ * mptscsih_negotiate_to_asyn_narrow - Restore devices to default state
+ * @hd: Pointer to a SCSI HOST structure
+ * @vtarget: per device private data
*
* Uses the ISR, but with special processing.
* MUST be single-threaded.
*
- * Return: 0 on completion
*/
-static int
-mptscsih_synchronize_cache(MPT_SCSI_HOST *hd, int portnum)
+static void
+mptscsih_negotiate_to_asyn_narrow(MPT_SCSI_HOST *hd, VirtTarget *vtarget)
{
MPT_ADAPTER *ioc= hd->ioc;
- VirtDevice *pTarget;
- SCSIDevicePage1_t *pcfg1Data = NULL;
- INTERNAL_CMD iocmd;
+ SCSIDevicePage1_t *pcfg1Data;
CONFIGPARMS cfg;
- dma_addr_t cfg1_dma_addr = -1;
- ConfigPageHeader_t header1;
- int bus = 0;
- int id = 0;
- int lun;
- int indexed_lun, lun_index;
- int hostId = ioc->pfacts[portnum].PortSCSIID;
- int max_id;
- int requested, configuration, data;
- int doConfig = 0;
+ dma_addr_t cfg1_dma_addr;
+ ConfigPageHeader_t header;
+ int id;
+ int requested, configuration, data,i;
u8 flags, factor;
- max_id = ioc->sh->max_id - 1;
-
- /* Following parameters will not change
- * in this routine.
- */
- iocmd.cmd = SYNCHRONIZE_CACHE;
- iocmd.flags = 0;
- iocmd.physDiskNum = -1;
- iocmd.data = NULL;
- iocmd.data_dma = -1;
- iocmd.size = 0;
- iocmd.rsvd = iocmd.rsvd2 = 0;
-
- /* No SCSI hosts
- */
- if (hd->Targets == NULL)
- return 0;
-
- /* Skip the host
- */
- if (id == hostId)
- id++;
-
- /* Write SDP1 for all SCSI devices
- * Alloc memory and set up config buffer
- */
- if (ioc->bus_type == SCSI) {
- if (ioc->spi_data.sdp1length > 0) {
- pcfg1Data = (SCSIDevicePage1_t *)pci_alloc_consistent(ioc->pcidev,
- ioc->spi_data.sdp1length * 4, &cfg1_dma_addr);
-
- if (pcfg1Data != NULL) {
- doConfig = 1;
- header1.PageVersion = ioc->spi_data.sdp1version;
- header1.PageLength = ioc->spi_data.sdp1length;
- header1.PageNumber = 1;
- header1.PageType = MPI_CONFIG_PAGETYPE_SCSI_DEVICE;
- cfg.cfghdr.hdr = &header1;
- cfg.physAddr = cfg1_dma_addr;
- cfg.action = MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT;
- cfg.dir = 1;
- cfg.timeout = 0;
- }
- }
- }
+ if (ioc->bus_type != SPI)
+ return;
- /* loop through all devices on this port
- */
- while (bus < MPT_MAX_BUS) {
- iocmd.bus = bus;
- iocmd.id = id;
- pTarget = hd->Targets[(int)id];
+ if (!ioc->spi_data.sdp1length)
+ return;
- if (doConfig) {
+ pcfg1Data = (SCSIDevicePage1_t *)pci_alloc_consistent(ioc->pcidev,
+ ioc->spi_data.sdp1length * 4, &cfg1_dma_addr);
- /* Set the negotiation flags */
- if (pTarget && (pTarget = hd->Targets[id]) && !pTarget->raidVolume) {
- flags = pTarget->negoFlags;
- } else {
- flags = hd->ioc->spi_data.noQas;
- if (hd->ioc->spi_data.nvram && (hd->ioc->spi_data.nvram[id] != MPT_HOST_NVRAM_INVALID)) {
- data = hd->ioc->spi_data.nvram[id];
+ if (pcfg1Data == NULL)
+ return;
- if (data & MPT_NVRAM_WIDE_DISABLE)
- flags |= MPT_TARGET_NO_NEGO_WIDE;
+ header.PageVersion = ioc->spi_data.sdp1version;
+ header.PageLength = ioc->spi_data.sdp1length;
+ header.PageNumber = 1;
+ header.PageType = MPI_CONFIG_PAGETYPE_SCSI_DEVICE;
+ cfg.cfghdr.hdr = &header;
+ cfg.physAddr = cfg1_dma_addr;
+ cfg.action = MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT;
+ cfg.dir = 1;
+ cfg.timeout = 0;
- factor = (data & MPT_NVRAM_SYNC_MASK) >> MPT_NVRAM_SYNC_SHIFT;
- if ((factor == 0) || (factor == MPT_ASYNC))
- flags |= MPT_TARGET_NO_NEGO_SYNC;
- }
+ if (vtarget->raidVolume && ioc->raid_data.pIocPg3) {
+ for (i = 0; i < ioc->raid_data.pIocPg3->NumPhysDisks; i++) {
+ id = ioc->raid_data.pIocPg3->PhysDisk[i].PhysDiskID;
+ flags = hd->ioc->spi_data.noQas;
+ if (hd->ioc->spi_data.nvram && (hd->ioc->spi_data.nvram[id] != MPT_HOST_NVRAM_INVALID)) {
+ data = hd->ioc->spi_data.nvram[id];
+ if (data & MPT_NVRAM_WIDE_DISABLE)
+ flags |= MPT_TARGET_NO_NEGO_WIDE;
+ factor = (data & MPT_NVRAM_SYNC_MASK) >> MPT_NVRAM_SYNC_SHIFT;
+ if ((factor == 0) || (factor == MPT_ASYNC))
+ flags |= MPT_TARGET_NO_NEGO_SYNC;
}
-
- /* Force to async, narrow */
mptscsih_setDevicePage1Flags(0, MPT_ASYNC, 0, &requested,
- &configuration, flags);
+ &configuration, flags);
dnegoprintk(("syncronize cache: id=%d width=0 factor=MPT_ASYNC "
"offset=0 negoFlags=%x request=%x config=%x\n",
id, flags, requested, configuration));
pcfg1Data->RequestedParameters = cpu_to_le32(requested);
pcfg1Data->Reserved = 0;
pcfg1Data->Configuration = cpu_to_le32(configuration);
- cfg.pageAddr = (bus<<8) | id;
+ cfg.pageAddr = (vtarget->bus_id<<8) | id;
mpt_config(hd->ioc, &cfg);
}
+ } else {
+ flags = vtarget->negoFlags;
+ mptscsih_setDevicePage1Flags(0, MPT_ASYNC, 0, &requested,
+ &configuration, flags);
+ dnegoprintk(("syncronize cache: id=%d width=0 factor=MPT_ASYNC "
+ "offset=0 negoFlags=%x request=%x config=%x\n",
+ vtarget->target_id, flags, requested, configuration));
+ pcfg1Data->RequestedParameters = cpu_to_le32(requested);
+ pcfg1Data->Reserved = 0;
+ pcfg1Data->Configuration = cpu_to_le32(configuration);
+ cfg.pageAddr = (vtarget->bus_id<<8) | vtarget->target_id;
+ mpt_config(hd->ioc, &cfg);
+ }
- /* If target Ptr NULL or if this target is NOT a disk, skip.
- */
- if ((pTarget) && (pTarget->inq_data[0] == TYPE_DISK)){
- for (lun=0; lun <= MPT_LAST_LUN; lun++) {
- /* If LUN present, issue the command
- */
- lun_index = (lun >> 5); /* 32 luns per lun_index */
- indexed_lun = (lun % 32);
- if (pTarget->luns[lun_index] & (1<<indexed_lun)) {
- iocmd.lun = lun;
- (void) mptscsih_do_cmd(hd, &iocmd);
- }
- }
- }
+ if (pcfg1Data)
+ pci_free_consistent(ioc->pcidev, header.PageLength * 4, pcfg1Data, cfg1_dma_addr);
+}
- /* get next relevant device */
- id++;
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/**
+ * mptscsih_synchronize_cache - Send SYNCHRONIZE_CACHE to all disks.
+ * @hd: Pointer to a SCSI HOST structure
+ * @vtarget: per device private data
+ * @lun: lun
+ *
+ * Uses the ISR, but with special processing.
+ * MUST be single-threaded.
+ *
+ */
+static void
+mptscsih_synchronize_cache(MPT_SCSI_HOST *hd, VirtDevice *vdevice)
+{
+ INTERNAL_CMD iocmd;
- if (id == hostId)
- id++;
+ /* Following parameters will not change
+ * in this routine.
+ */
+ iocmd.cmd = SYNCHRONIZE_CACHE;
+ iocmd.flags = 0;
+ iocmd.physDiskNum = -1;
+ iocmd.data = NULL;
+ iocmd.data_dma = -1;
+ iocmd.size = 0;
+ iocmd.rsvd = iocmd.rsvd2 = 0;
+ iocmd.bus = vdevice->bus_id;
+ iocmd.id = vdevice->target_id;
+ iocmd.lun = (u8)vdevice->lun;
- if (id > max_id) {
- id = 0;
- bus++;
- }
- }
+ if ((vdevice->vtarget->type & TYPE_DISK) &&
+ (vdevice->configured_lun))
+ mptscsih_do_cmd(hd, &iocmd);
+}
- if (pcfg1Data) {
- pci_free_consistent(ioc->pcidev, header1.PageLength * 4, pcfg1Data, cfg1_dma_addr);
+/* Search IOC page 3 to determine if this is hidden physical disk
+ */
+/* Search IOC page 3 to determine if this is hidden physical disk
+ */
+static int
+mptscsih_is_phys_disk(MPT_ADAPTER *ioc, int id)
+{
+ int i;
+
+ if (!ioc->raid_data.isRaid || !ioc->raid_data.pIocPg3)
+ return 0;
+
+ for (i = 0; i < ioc->raid_data.pIocPg3->NumPhysDisks; i++) {
+ if (id == ioc->raid_data.pIocPg3->PhysDisk[i].PhysDiskID)
+ return 1;
}
return 0;
@@ -4101,8 +4021,8 @@ mptscsih_domainValidation(void *arg)
msleep(250);
- /* DV only to SCSI adapters */
- if (ioc->bus_type != SCSI)
+ /* DV only to SPI adapters */
+ if (ioc->bus_type != SPI)
continue;
/* Make sure everything looks ok */
@@ -4205,32 +4125,12 @@ mptscsih_domainValidation(void *arg)
return;
}
-/* Search IOC page 3 to determine if this is hidden physical disk
- */
-/* Search IOC page 3 to determine if this is hidden physical disk
- */
-static int
-mptscsih_is_phys_disk(MPT_ADAPTER *ioc, int id)
-{
- int i;
-
- if (!ioc->raid_data.isRaid || !ioc->raid_data.pIocPg3)
- return 0;
-
- for (i = 0; i < ioc->raid_data.pIocPg3->NumPhysDisks; i++) {
- if (id == ioc->raid_data.pIocPg3->PhysDisk[i].PhysDiskID)
- return 1;
- }
-
- return 0;
-}
-
/* Write SDP1 if no QAS has been enabled
*/
static void
mptscsih_qas_check(MPT_SCSI_HOST *hd, int id)
{
- VirtDevice *pTarget;
+ VirtTarget *vtarget;
int ii;
if (hd->Targets == NULL)
@@ -4243,11 +4143,11 @@ mptscsih_qas_check(MPT_SCSI_HOST *hd, int id)
if ((hd->ioc->spi_data.dvStatus[ii] & MPT_SCSICFG_DV_NOT_DONE) != 0)
continue;
- pTarget = hd->Targets[ii];
+ vtarget = hd->Targets[ii];
- if ((pTarget != NULL) && (!pTarget->raidVolume)) {
- if ((pTarget->negoFlags & hd->ioc->spi_data.noQas) == 0) {
- pTarget->negoFlags |= hd->ioc->spi_data.noQas;
+ if ((vtarget != NULL) && (!vtarget->raidVolume)) {
+ if ((vtarget->negoFlags & hd->ioc->spi_data.noQas) == 0) {
+ vtarget->negoFlags |= hd->ioc->spi_data.noQas;
dnegoprintk(("writeSDP1: id=%d flags=0\n", id));
mptscsih_writeSDP1(hd, 0, ii, 0);
}
@@ -4287,7 +4187,7 @@ static int
mptscsih_doDv(MPT_SCSI_HOST *hd, int bus_number, int id)
{
MPT_ADAPTER *ioc = hd->ioc;
- VirtDevice *pTarget;
+ VirtTarget *vtarget;
SCSIDevicePage1_t *pcfg1Data;
SCSIDevicePage0_t *pcfg0Data;
u8 *pbuf1;
@@ -4358,12 +4258,12 @@ mptscsih_doDv(MPT_SCSI_HOST *hd, int bus_number, int id)
iocmd.physDiskNum = -1;
iocmd.rsvd = iocmd.rsvd2 = 0;
- pTarget = hd->Targets[id];
+ vtarget = hd->Targets[id];
/* Use tagged commands if possible.
*/
- if (pTarget) {
- if (pTarget->tflags & MPT_TARGET_FLAGS_Q_YES)
+ if (vtarget) {
+ if (vtarget->tflags & MPT_TARGET_FLAGS_Q_YES)
iocmd.flags |= MPT_ICFLAG_TAGGED_CMD;
else {
if (hd->ioc->facts.FWVersion.Word < 0x01000600)
@@ -4579,7 +4479,7 @@ mptscsih_doDv(MPT_SCSI_HOST *hd, int bus_number, int id)
/* Reset the size for disks
*/
inq0 = (*pbuf1) & 0x1F;
- if ((inq0 == 0) && pTarget && !pTarget->raidVolume) {
+ if ((inq0 == 0) && vtarget && !vtarget->raidVolume) {
sz = 0x40;
iocmd.size = sz;
}
@@ -4589,8 +4489,7 @@ mptscsih_doDv(MPT_SCSI_HOST *hd, int bus_number, int id)
*/
if (inq0 == TYPE_PROCESSOR) {
mptscsih_initTarget(hd,
- bus,
- id,
+ vtarget,
lun,
pbuf1,
sz);
@@ -4604,22 +4503,22 @@ mptscsih_doDv(MPT_SCSI_HOST *hd, int bus_number, int id)
goto target_done;
if (sz == 0x40) {
- if ((pTarget->maxWidth == 1) && (pTarget->maxOffset) && (nfactor < 0x0A)
- && (pTarget->minSyncFactor > 0x09)) {
+ if ((vtarget->maxWidth == 1) && (vtarget->maxOffset) && (nfactor < 0x0A)
+ && (vtarget->minSyncFactor > 0x09)) {
if ((pbuf1[56] & 0x04) == 0)
;
else if ((pbuf1[56] & 0x01) == 1) {
- pTarget->minSyncFactor =
+ vtarget->minSyncFactor =
nfactor > MPT_ULTRA320 ? nfactor : MPT_ULTRA320;
} else {
- pTarget->minSyncFactor =
+ vtarget->minSyncFactor =
nfactor > MPT_ULTRA160 ? nfactor : MPT_ULTRA160;
}
- dv.max.factor = pTarget->minSyncFactor;
+ dv.max.factor = vtarget->minSyncFactor;
if ((pbuf1[56] & 0x02) == 0) {
- pTarget->negoFlags |= MPT_TARGET_NO_NEGO_QAS;
+ vtarget->negoFlags |= MPT_TARGET_NO_NEGO_QAS;
hd->ioc->spi_data.noQas = MPT_TARGET_NO_NEGO_QAS;
ddvprintk((MYIOC_s_NOTE_FMT
"DV: Start Basic noQas on id=%d due to pbuf1[56]=%x\n",
@@ -4702,8 +4601,7 @@ mptscsih_doDv(MPT_SCSI_HOST *hd, int bus_number, int id)
"DV:Inquiry compared id=%d, calling initTarget\n", ioc->name, id));
hd->ioc->spi_data.dvStatus[id] &= ~MPT_SCSICFG_DV_NOT_DONE;
mptscsih_initTarget(hd,
- bus,
- id,
+ vtarget,
lun,
pbuf1,
sz);
@@ -5204,7 +5102,7 @@ target_done:
static void
mptscsih_dv_parms(MPT_SCSI_HOST *hd, DVPARAMETERS *dv,void *pPage)
{
- VirtDevice *pTarget;
+ VirtTarget *vtarget;
SCSIDevicePage0_t *pPage0;
SCSIDevicePage1_t *pPage1;
int val = 0, data, configuration;
@@ -5224,11 +5122,11 @@ mptscsih_dv_parms(MPT_SCSI_HOST *hd, DVPARAMETERS *dv,void *pPage)
* already throttled back.
*/
negoFlags = hd->ioc->spi_data.noQas;
- if ((hd->Targets)&&((pTarget = hd->Targets[(int)id]) != NULL) && !pTarget->raidVolume) {
- width = pTarget->maxWidth;
- offset = pTarget->maxOffset;
- factor = pTarget->minSyncFactor;
- negoFlags |= pTarget->negoFlags;
+ if ((hd->Targets)&&((vtarget = hd->Targets[(int)id]) != NULL) && !vtarget->raidVolume) {
+ width = vtarget->maxWidth;
+ offset = vtarget->maxOffset;
+ factor = vtarget->minSyncFactor;
+ negoFlags |= vtarget->negoFlags;
} else {
if (hd->ioc->spi_data.nvram && (hd->ioc->spi_data.nvram[id] != MPT_HOST_NVRAM_INVALID)) {
data = hd->ioc->spi_data.nvram[id];
@@ -5430,11 +5328,11 @@ mptscsih_dv_parms(MPT_SCSI_HOST *hd, DVPARAMETERS *dv,void *pPage)
* or overwrite nvram (phys disks only).
*/
- if ((hd->Targets)&&((pTarget = hd->Targets[(int)id]) != NULL) && !pTarget->raidVolume ) {
- pTarget->maxWidth = dv->now.width;
- pTarget->maxOffset = dv->now.offset;
- pTarget->minSyncFactor = dv->now.factor;
- pTarget->negoFlags = dv->now.flags;
+ if ((hd->Targets)&&((vtarget = hd->Targets[(int)id]) != NULL) && !vtarget->raidVolume ) {
+ vtarget->maxWidth = dv->now.width;
+ vtarget->maxOffset = dv->now.offset;
+ vtarget->minSyncFactor = dv->now.factor;
+ vtarget->negoFlags = dv->now.flags;
} else {
/* Preserv all flags, use
* read-modify-write algorithm
@@ -5588,6 +5486,94 @@ mptscsih_fillbuf(char *buffer, int size, int index, int width)
break;
}
}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/* If DV is disabled (negoNvram set to USE_NVRAM) or if not LUN 0, return.
+ * Else set the NEED_DV flag after Read Capacity is issued (disks)
+ * or Mode Sense (cdroms).
+ *
+ * For tapes, initTarget will set this flag on completion of the Inquiry command.
+ * Called only if the DV_NOT_DONE flag is set.
+ */
+static void
+mptscsih_set_dvflags(MPT_SCSI_HOST *hd, struct scsi_cmnd *sc)
+{
+ MPT_ADAPTER *ioc = hd->ioc;
+ u8 cmd;
+ SpiCfgData *pSpi;
+
+ ddvtprintk((MYIOC_s_NOTE_FMT
+ " set_dvflags: id=%d lun=%d negoNvram=%x cmd=%x\n",
+		hd->ioc->name, sc->device->id, sc->device->lun, hd->negoNvram, sc->cmnd[0]));
+
+ if ((sc->device->lun != 0) || (hd->negoNvram != 0))
+ return;
+
+ cmd = sc->cmnd[0];
+
+ if ((cmd == READ_CAPACITY) || (cmd == MODE_SENSE)) {
+ pSpi = &ioc->spi_data;
+ if ((ioc->raid_data.isRaid & (1 << sc->device->id)) && ioc->raid_data.pIocPg3) {
+ /* Set NEED_DV for all hidden disks
+ */
+ Ioc3PhysDisk_t *pPDisk = ioc->raid_data.pIocPg3->PhysDisk;
+ int numPDisk = ioc->raid_data.pIocPg3->NumPhysDisks;
+
+ while (numPDisk) {
+ pSpi->dvStatus[pPDisk->PhysDiskID] |= MPT_SCSICFG_NEED_DV;
+ ddvtprintk(("NEED_DV set for phys disk id %d\n", pPDisk->PhysDiskID));
+ pPDisk++;
+ numPDisk--;
+ }
+ }
+ pSpi->dvStatus[sc->device->id] |= MPT_SCSICFG_NEED_DV;
+ ddvtprintk(("NEED_DV set for visible disk id %d\n", sc->device->id));
+ }
+}
+
+/* mptscsih_set_dvflags_raid()
+ *
+ * New or replaced disk. Set DV flag and schedule DV.
+ */
+static void
+mptscsih_set_dvflags_raid(MPT_SCSI_HOST *hd, int id)
+{
+ MPT_ADAPTER *ioc = hd->ioc;
+ SpiCfgData *pSpi = &ioc->spi_data;
+ Ioc3PhysDisk_t *pPDisk;
+ int numPDisk;
+
+ if (hd->negoNvram != 0)
+ return;
+
+ ddvtprintk(("DV requested for phys disk id %d\n", id));
+ if (ioc->raid_data.pIocPg3) {
+ pPDisk = ioc->raid_data.pIocPg3->PhysDisk;
+ numPDisk = ioc->raid_data.pIocPg3->NumPhysDisks;
+ while (numPDisk) {
+ if (id == pPDisk->PhysDiskNum) {
+ pSpi->dvStatus[pPDisk->PhysDiskID] =
+ (MPT_SCSICFG_NEED_DV | MPT_SCSICFG_DV_NOT_DONE);
+ pSpi->forceDv = MPT_SCSICFG_NEED_DV;
+ ddvtprintk(("NEED_DV set for phys disk id %d\n",
+ pPDisk->PhysDiskID));
+ break;
+ }
+ pPDisk++;
+ numPDisk--;
+ }
+
+ if (numPDisk == 0) {
+ /* The physical disk that needs DV was not found
+ * in the stored IOC Page 3. The driver must reload
+ * this page. DV routine will set the NEED_DV flag for
+ * all phys disks that have DV_NOT_DONE set.
+ */
+ pSpi->forceDv = MPT_SCSICFG_NEED_DV | MPT_SCSICFG_RELOAD_IOC_PG3;
+			ddvtprintk(("phys disk %d not found. Setting reload IOC Pg3 Flag\n", id));
+ }
+ }
+}
#endif /* ~MPTSCSIH_ENABLE_DOMAIN_VALIDATION */
EXPORT_SYMBOL(mptscsih_remove);
@@ -5599,7 +5585,9 @@ EXPORT_SYMBOL(mptscsih_resume);
EXPORT_SYMBOL(mptscsih_proc_info);
EXPORT_SYMBOL(mptscsih_info);
EXPORT_SYMBOL(mptscsih_qcmd);
+EXPORT_SYMBOL(mptscsih_target_alloc);
EXPORT_SYMBOL(mptscsih_slave_alloc);
+EXPORT_SYMBOL(mptscsih_target_destroy);
EXPORT_SYMBOL(mptscsih_slave_destroy);
EXPORT_SYMBOL(mptscsih_slave_configure);
EXPORT_SYMBOL(mptscsih_abort);
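The two hooks exported above follow the SCSI midlayer's standard target lifecycle: ->target_alloc() runs once per discovered target and may hang per-target state off starget->hostdata, and ->target_destroy() releases it. The sketch below only illustrates that convention with an assumed bookkeeping struct; the driver's real mptscsih_target_alloc()/mptscsih_target_destroy() live in an earlier hunk of mptscsih.c not shown here.

/* Illustrative sketch only -- not the fusion driver's implementation. */
#include <linux/slab.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>

struct example_vtarget {		/* assumed per-target bookkeeping */
	u8	negoFlags;
	u8	raidVolume;
};

static int example_target_alloc(struct scsi_target *starget)
{
	struct example_vtarget *vtarget;

	vtarget = kzalloc(sizeof(*vtarget), GFP_KERNEL);
	if (!vtarget)
		return -ENOMEM;

	starget->hostdata = vtarget;	/* later reachable via scsi_target(sdev)->hostdata */
	return 0;
}

static void example_target_destroy(struct scsi_target *starget)
{
	kfree(starget->hostdata);
	starget->hostdata = NULL;
}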
diff --git a/drivers/message/fusion/mptscsih.h b/drivers/message/fusion/mptscsih.h
index 971fda4b8b5..d3cba12f4bd 100644
--- a/drivers/message/fusion/mptscsih.h
+++ b/drivers/message/fusion/mptscsih.h
@@ -91,7 +91,9 @@ extern int mptscsih_resume(struct pci_dev *pdev);
extern int mptscsih_proc_info(struct Scsi_Host *host, char *buffer, char **start, off_t offset, int length, int func);
extern const char * mptscsih_info(struct Scsi_Host *SChost);
extern int mptscsih_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *));
+extern int mptscsih_target_alloc(struct scsi_target *starget);
extern int mptscsih_slave_alloc(struct scsi_device *device);
+extern void mptscsih_target_destroy(struct scsi_target *starget);
extern void mptscsih_slave_destroy(struct scsi_device *device);
extern int mptscsih_slave_configure(struct scsi_device *device);
extern int mptscsih_abort(struct scsi_cmnd * SCpnt);
diff --git a/drivers/message/fusion/mptspi.c b/drivers/message/fusion/mptspi.c
index 5c0e307d1d5..ce332a6085e 100644
--- a/drivers/message/fusion/mptspi.c
+++ b/drivers/message/fusion/mptspi.c
@@ -103,13 +103,16 @@ static int mptspiTaskCtx = -1;
static int mptspiInternalCtx = -1; /* Used only for internal commands */
static struct scsi_host_template mptspi_driver_template = {
+ .module = THIS_MODULE,
.proc_name = "mptspi",
.proc_info = mptscsih_proc_info,
.name = "MPT SPI Host",
.info = mptscsih_info,
.queuecommand = mptscsih_qcmd,
+ .target_alloc = mptscsih_target_alloc,
.slave_alloc = mptscsih_slave_alloc,
.slave_configure = mptscsih_slave_configure,
+ .target_destroy = mptscsih_target_destroy,
.slave_destroy = mptscsih_slave_destroy,
.change_queue_depth = mptscsih_change_queue_depth,
.eh_abort_handler = mptscsih_abort,
@@ -177,13 +180,15 @@ mptspi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
printk(MYIOC_s_WARN_FMT
"Skipping because it's not operational!\n",
ioc->name);
- return -ENODEV;
+ error = -ENODEV;
+ goto out_mptspi_probe;
}
if (!ioc->active) {
printk(MYIOC_s_WARN_FMT "Skipping because it's disabled!\n",
ioc->name);
- return -ENODEV;
+ error = -ENODEV;
+ goto out_mptspi_probe;
}
/* Sanity check - ensure at least 1 port is INITIATOR capable
@@ -208,7 +213,8 @@ mptspi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
printk(MYIOC_s_WARN_FMT
"Unable to register controller with SCSI subsystem\n",
ioc->name);
- return -1;
+ error = -1;
+ goto out_mptspi_probe;
}
spin_lock_irqsave(&ioc->FreeQlock, flags);
@@ -286,7 +292,7 @@ mptspi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
mem = kmalloc(sz, GFP_ATOMIC);
if (mem == NULL) {
error = -ENOMEM;
- goto mptspi_probe_failed;
+ goto out_mptspi_probe;
}
memset(mem, 0, sz);
@@ -304,14 +310,14 @@ mptspi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
mem = kmalloc(sz, GFP_ATOMIC);
if (mem == NULL) {
error = -ENOMEM;
- goto mptspi_probe_failed;
+ goto out_mptspi_probe;
}
memset(mem, 0, sz);
- hd->Targets = (VirtDevice **) mem;
+ hd->Targets = (VirtTarget **) mem;
dprintk((KERN_INFO
- " Targets @ %p, sz=%d\n", hd->Targets, sz));
+ " vdev @ %p, sz=%d\n", hd->Targets, sz));
/* Clear the TM flags
*/
@@ -385,13 +391,13 @@ mptspi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if(error) {
dprintk((KERN_ERR MYNAM
"scsi_add_host failed\n"));
- goto mptspi_probe_failed;
+ goto out_mptspi_probe;
}
scsi_scan_host(sh);
return 0;
-mptspi_probe_failed:
+out_mptspi_probe:
mptscsih_remove(pdev);
return error;
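The hunks above convert every early return in mptspi_probe() to the kernel's usual single-exit pattern, so a partially initialized host is torn down in one place (here, mptscsih_remove()). A minimal sketch of the shape, with placeholder helpers rather than driver functions:

/* Sketch of the single-exit error path mptspi_probe() now follows;
 * example_setup()/example_teardown() are placeholders.
 */
static int example_setup(void)     { return 0; }
static void example_teardown(void) { }

static int example_probe(void)
{
	int error;

	error = example_setup();
	if (error)
		goto out_example_probe;

	/* ...further setup steps, each jumping to the same label on failure... */

	return 0;

out_example_probe:
	example_teardown();		/* one place to unwind everything */
	return error;
}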
diff --git a/drivers/message/i2o/Kconfig b/drivers/message/i2o/Kconfig
index 43a942a29c2..fef67710388 100644
--- a/drivers/message/i2o/Kconfig
+++ b/drivers/message/i2o/Kconfig
@@ -24,6 +24,18 @@ config I2O
If unsure, say N.
+config I2O_LCT_NOTIFY_ON_CHANGES
+ bool "Enable LCT notification"
+ depends on I2O
+ default y
+ ---help---
+	  Only say N here if you have an I2O controller from SUN. The SUN
+	  firmware doesn't support LCT notification on changes. If this option
+	  is enabled on such a controller, the driver will hang in an endless
+	  loop. On all other controllers say Y.
+
+ If unsure, say Y.
+
config I2O_EXT_ADAPTEC
bool "Enable Adaptec extensions"
depends on I2O
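The new I2O_LCT_NOTIFY_ON_CHANGES option added above is consumed as a plain compile-time guard; the exec-osm.c hunk further down wraps the notification call exactly like this:

#ifdef CONFIG_I2O_LCT_NOTIFY_ON_CHANGES
	/* Ask the IOP to send a fresh LCT when its change indicator advances;
	 * skipped entirely on firmware (e.g. Sun's) that cannot handle it.
	 */
	i2o_exec_lct_notify(c, change_ind);
#endif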
diff --git a/drivers/message/i2o/bus-osm.c b/drivers/message/i2o/bus-osm.c
index 151b228e1cb..ac06f10c54e 100644
--- a/drivers/message/i2o/bus-osm.c
+++ b/drivers/message/i2o/bus-osm.c
@@ -17,7 +17,7 @@
#include <linux/i2o.h>
#define OSM_NAME "bus-osm"
-#define OSM_VERSION "$Rev$"
+#define OSM_VERSION "1.317"
#define OSM_DESCRIPTION "I2O Bus Adapter OSM"
static struct i2o_driver i2o_bus_driver;
@@ -39,18 +39,18 @@ static struct i2o_class_id i2o_bus_class_id[] = {
*/
static int i2o_bus_scan(struct i2o_device *dev)
{
- struct i2o_message __iomem *msg;
- u32 m;
+ struct i2o_message *msg;
- m = i2o_msg_get_wait(dev->iop, &msg, I2O_TIMEOUT_MESSAGE_GET);
- if (m == I2O_QUEUE_EMPTY)
+ msg = i2o_msg_get_wait(dev->iop, I2O_TIMEOUT_MESSAGE_GET);
+ if (IS_ERR(msg))
return -ETIMEDOUT;
- writel(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
- writel(I2O_CMD_BUS_SCAN << 24 | HOST_TID << 12 | dev->lct_data.tid,
- &msg->u.head[1]);
+ msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0);
+ msg->u.head[1] =
+ cpu_to_le32(I2O_CMD_BUS_SCAN << 24 | HOST_TID << 12 | dev->lct_data.
+ tid);
- return i2o_msg_post_wait(dev->iop, m, 60);
+ return i2o_msg_post_wait(dev->iop, msg, 60);
};
/**
@@ -59,8 +59,9 @@ static int i2o_bus_scan(struct i2o_device *dev)
*
* Returns count.
*/
-static ssize_t i2o_bus_store_scan(struct device *d, struct device_attribute *attr, const char *buf,
- size_t count)
+static ssize_t i2o_bus_store_scan(struct device *d,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct i2o_device *i2o_dev = to_i2o_device(d);
int rc;
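The i2o_bus_scan() hunk above is the template for the message-API conversion repeated through the rest of this patch: i2o_msg_get_wait() now returns a message pointer (or an ERR_PTR value) instead of an MFA handle, the frame is filled with cpu_to_le32() stores rather than writel(), and the message pointer itself is handed to i2o_msg_post_wait(). Condensed, with the controller, TID and command taken from the hunk above as placeholders:

#include <linux/i2o.h>

static int example_simple_request(struct i2o_controller *c, u32 tid)
{
	struct i2o_message *msg;

	msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
	if (IS_ERR(msg))
		return PTR_ERR(msg);	/* no inbound frame became free in time */

	/* Frames are now built in ordinary memory with endian-aware stores,
	 * not writel() into a __iomem mapping.
	 */
	msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0);
	msg->u.head[1] = cpu_to_le32(I2O_CMD_BUS_SCAN << 24 | HOST_TID << 12 | tid);

	return i2o_msg_post_wait(c, msg, 60);	/* or i2o_msg_post() for async sends */
}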
diff --git a/drivers/message/i2o/config-osm.c b/drivers/message/i2o/config-osm.c
index 10432f66520..3bba7aa82e5 100644
--- a/drivers/message/i2o/config-osm.c
+++ b/drivers/message/i2o/config-osm.c
@@ -22,7 +22,7 @@
#include <asm/uaccess.h>
#define OSM_NAME "config-osm"
-#define OSM_VERSION "1.248"
+#define OSM_VERSION "1.323"
#define OSM_DESCRIPTION "I2O Configuration OSM"
/* access mode user rw */
diff --git a/drivers/message/i2o/core.h b/drivers/message/i2o/core.h
index 9eefedb1621..90628562851 100644
--- a/drivers/message/i2o/core.h
+++ b/drivers/message/i2o/core.h
@@ -14,8 +14,6 @@
*/
/* Exec-OSM */
-extern struct bus_type i2o_bus_type;
-
extern struct i2o_driver i2o_exec_driver;
extern int i2o_exec_lct_get(struct i2o_controller *);
@@ -23,6 +21,8 @@ extern int __init i2o_exec_init(void);
extern void __exit i2o_exec_exit(void);
/* driver */
+extern struct bus_type i2o_bus_type;
+
extern int i2o_driver_dispatch(struct i2o_controller *, u32);
extern int __init i2o_driver_init(void);
@@ -33,19 +33,27 @@ extern int __init i2o_pci_init(void);
extern void __exit i2o_pci_exit(void);
/* device */
+extern struct device_attribute i2o_device_attrs[];
+
extern void i2o_device_remove(struct i2o_device *);
extern int i2o_device_parse_lct(struct i2o_controller *);
/* IOP */
extern struct i2o_controller *i2o_iop_alloc(void);
-extern void i2o_iop_free(struct i2o_controller *);
+
+/**
+ * i2o_iop_free - Free the i2o_controller struct
+ * @c: I2O controller to free
+ */
+static inline void i2o_iop_free(struct i2o_controller *c)
+{
+ i2o_pool_free(&c->in_msg);
+ kfree(c);
+}
extern int i2o_iop_add(struct i2o_controller *);
extern void i2o_iop_remove(struct i2o_controller *);
-/* config */
-extern int i2o_parm_issue(struct i2o_device *, int, void *, int, void *, int);
-
/* control registers relative to c->base */
#define I2O_IRQ_STATUS 0x30
#define I2O_IRQ_MASK 0x34
diff --git a/drivers/message/i2o/device.c b/drivers/message/i2o/device.c
index 8eb50cdb8ae..ee183053fa2 100644
--- a/drivers/message/i2o/device.c
+++ b/drivers/message/i2o/device.c
@@ -35,18 +35,18 @@
static inline int i2o_device_issue_claim(struct i2o_device *dev, u32 cmd,
u32 type)
{
- struct i2o_message __iomem *msg;
- u32 m;
+ struct i2o_message *msg;
- m = i2o_msg_get_wait(dev->iop, &msg, I2O_TIMEOUT_MESSAGE_GET);
- if (m == I2O_QUEUE_EMPTY)
- return -ETIMEDOUT;
+ msg = i2o_msg_get_wait(dev->iop, I2O_TIMEOUT_MESSAGE_GET);
+ if (IS_ERR(msg))
+ return PTR_ERR(msg);
- writel(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
- writel(cmd << 24 | HOST_TID << 12 | dev->lct_data.tid, &msg->u.head[1]);
- writel(type, &msg->body[0]);
+ msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0);
+ msg->u.head[1] =
+ cpu_to_le32(cmd << 24 | HOST_TID << 12 | dev->lct_data.tid);
+ msg->body[0] = cpu_to_le32(type);
- return i2o_msg_post_wait(dev->iop, m, 60);
+ return i2o_msg_post_wait(dev->iop, msg, 60);
}
/**
@@ -123,7 +123,6 @@ int i2o_device_claim_release(struct i2o_device *dev)
return rc;
}
-
/**
* i2o_device_release - release the memory for a I2O device
* @dev: I2O device which should be released
@@ -140,10 +139,10 @@ static void i2o_device_release(struct device *dev)
kfree(i2o_dev);
}
-
/**
- * i2o_device_class_show_class_id - Displays class id of I2O device
- * @cd: class device of which the class id should be displayed
+ * i2o_device_show_class_id - Displays class id of I2O device
+ * @dev: device of which the class id should be displayed
+ * @attr: pointer to device attribute
* @buf: buffer into which the class id should be printed
*
* Returns the number of bytes which are printed into the buffer.
@@ -159,15 +158,15 @@ static ssize_t i2o_device_show_class_id(struct device *dev,
}
/**
- * i2o_device_class_show_tid - Displays TID of I2O device
- * @cd: class device of which the TID should be displayed
- * @buf: buffer into which the class id should be printed
+ * i2o_device_show_tid - Displays TID of I2O device
+ * @dev: device of which the TID should be displayed
+ * @attr: pointer to device attribute
+ * @buf: buffer into which the TID should be printed
*
* Returns the number of bytes which are printed into the buffer.
*/
static ssize_t i2o_device_show_tid(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+ struct device_attribute *attr, char *buf)
{
struct i2o_device *i2o_dev = to_i2o_device(dev);
@@ -175,6 +174,7 @@ static ssize_t i2o_device_show_tid(struct device *dev,
return strlen(buf) + 1;
}
+/* I2O device attributes */
struct device_attribute i2o_device_attrs[] = {
__ATTR(class_id, S_IRUGO, i2o_device_show_class_id, NULL),
__ATTR(tid, S_IRUGO, i2o_device_show_tid, NULL),
@@ -193,12 +193,10 @@ static struct i2o_device *i2o_device_alloc(void)
{
struct i2o_device *dev;
- dev = kmalloc(sizeof(*dev), GFP_KERNEL);
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (!dev)
return ERR_PTR(-ENOMEM);
- memset(dev, 0, sizeof(*dev));
-
INIT_LIST_HEAD(&dev->list);
init_MUTEX(&dev->lock);
@@ -209,66 +207,6 @@ static struct i2o_device *i2o_device_alloc(void)
}
/**
- * i2o_setup_sysfs_links - Adds attributes to the I2O device
- * @cd: I2O class device which is added to the I2O device class
- *
- * This function get called when a I2O device is added to the class. It
- * creates the attributes for each device and creates user/parent symlink
- * if necessary.
- *
- * Returns 0 on success or negative error code on failure.
- */
-static void i2o_setup_sysfs_links(struct i2o_device *i2o_dev)
-{
- struct i2o_controller *c = i2o_dev->iop;
- struct i2o_device *tmp;
-
- /* create user entries for this device */
- tmp = i2o_iop_find_device(i2o_dev->iop, i2o_dev->lct_data.user_tid);
- if (tmp && tmp != i2o_dev)
- sysfs_create_link(&i2o_dev->device.kobj,
- &tmp->device.kobj, "user");
-
- /* create user entries refering to this device */
- list_for_each_entry(tmp, &c->devices, list)
- if (tmp->lct_data.user_tid == i2o_dev->lct_data.tid &&
- tmp != i2o_dev)
- sysfs_create_link(&tmp->device.kobj,
- &i2o_dev->device.kobj, "user");
-
- /* create parent entries for this device */
- tmp = i2o_iop_find_device(i2o_dev->iop, i2o_dev->lct_data.parent_tid);
- if (tmp && tmp != i2o_dev)
- sysfs_create_link(&i2o_dev->device.kobj,
- &tmp->device.kobj, "parent");
-
- /* create parent entries refering to this device */
- list_for_each_entry(tmp, &c->devices, list)
- if (tmp->lct_data.parent_tid == i2o_dev->lct_data.tid &&
- tmp != i2o_dev)
- sysfs_create_link(&tmp->device.kobj,
- &i2o_dev->device.kobj, "parent");
-}
-
-static void i2o_remove_sysfs_links(struct i2o_device *i2o_dev)
-{
- struct i2o_controller *c = i2o_dev->iop;
- struct i2o_device *tmp;
-
- sysfs_remove_link(&i2o_dev->device.kobj, "parent");
- sysfs_remove_link(&i2o_dev->device.kobj, "user");
-
- list_for_each_entry(tmp, &c->devices, list) {
- if (tmp->lct_data.parent_tid == i2o_dev->lct_data.tid)
- sysfs_remove_link(&tmp->device.kobj, "parent");
- if (tmp->lct_data.user_tid == i2o_dev->lct_data.tid)
- sysfs_remove_link(&tmp->device.kobj, "user");
- }
-}
-
-
-
-/**
* i2o_device_add - allocate a new I2O device and add it to the IOP
* @iop: I2O controller where the device is on
* @entry: LCT entry of the I2O device
@@ -282,33 +220,57 @@ static void i2o_remove_sysfs_links(struct i2o_device *i2o_dev)
static struct i2o_device *i2o_device_add(struct i2o_controller *c,
i2o_lct_entry * entry)
{
- struct i2o_device *dev;
+ struct i2o_device *i2o_dev, *tmp;
- dev = i2o_device_alloc();
- if (IS_ERR(dev)) {
+ i2o_dev = i2o_device_alloc();
+ if (IS_ERR(i2o_dev)) {
printk(KERN_ERR "i2o: unable to allocate i2o device\n");
- return dev;
+ return i2o_dev;
}
- dev->lct_data = *entry;
- dev->iop = c;
+ i2o_dev->lct_data = *entry;
- snprintf(dev->device.bus_id, BUS_ID_SIZE, "%d:%03x", c->unit,
- dev->lct_data.tid);
+ snprintf(i2o_dev->device.bus_id, BUS_ID_SIZE, "%d:%03x", c->unit,
+ i2o_dev->lct_data.tid);
- dev->device.parent = &c->device;
+ i2o_dev->iop = c;
+ i2o_dev->device.parent = &c->device;
- device_register(&dev->device);
+ device_register(&i2o_dev->device);
- list_add_tail(&dev->list, &c->devices);
+ list_add_tail(&i2o_dev->list, &c->devices);
- i2o_setup_sysfs_links(dev);
+ /* create user entries for this device */
+ tmp = i2o_iop_find_device(i2o_dev->iop, i2o_dev->lct_data.user_tid);
+ if (tmp && (tmp != i2o_dev))
+ sysfs_create_link(&i2o_dev->device.kobj, &tmp->device.kobj,
+ "user");
- i2o_driver_notify_device_add_all(dev);
+	/* create user entries referring to this device */
+ list_for_each_entry(tmp, &c->devices, list)
+ if ((tmp->lct_data.user_tid == i2o_dev->lct_data.tid)
+ && (tmp != i2o_dev))
+ sysfs_create_link(&tmp->device.kobj,
+ &i2o_dev->device.kobj, "user");
- pr_debug("i2o: device %s added\n", dev->device.bus_id);
+ /* create parent entries for this device */
+ tmp = i2o_iop_find_device(i2o_dev->iop, i2o_dev->lct_data.parent_tid);
+ if (tmp && (tmp != i2o_dev))
+ sysfs_create_link(&i2o_dev->device.kobj, &tmp->device.kobj,
+ "parent");
- return dev;
+	/* create parent entries referring to this device */
+ list_for_each_entry(tmp, &c->devices, list)
+ if ((tmp->lct_data.parent_tid == i2o_dev->lct_data.tid)
+ && (tmp != i2o_dev))
+ sysfs_create_link(&tmp->device.kobj,
+ &i2o_dev->device.kobj, "parent");
+
+ i2o_driver_notify_device_add_all(i2o_dev);
+
+ pr_debug("i2o: device %s added\n", i2o_dev->device.bus_id);
+
+ return i2o_dev;
}
/**
@@ -321,9 +283,22 @@ static struct i2o_device *i2o_device_add(struct i2o_controller *c,
*/
void i2o_device_remove(struct i2o_device *i2o_dev)
{
+ struct i2o_device *tmp;
+ struct i2o_controller *c = i2o_dev->iop;
+
i2o_driver_notify_device_remove_all(i2o_dev);
- i2o_remove_sysfs_links(i2o_dev);
+
+ sysfs_remove_link(&i2o_dev->device.kobj, "parent");
+ sysfs_remove_link(&i2o_dev->device.kobj, "user");
+
+ list_for_each_entry(tmp, &c->devices, list) {
+ if (tmp->lct_data.parent_tid == i2o_dev->lct_data.tid)
+ sysfs_remove_link(&tmp->device.kobj, "parent");
+ if (tmp->lct_data.user_tid == i2o_dev->lct_data.tid)
+ sysfs_remove_link(&tmp->device.kobj, "user");
+ }
list_del(&i2o_dev->list);
+
device_unregister(&i2o_dev->device);
}
@@ -341,56 +316,83 @@ int i2o_device_parse_lct(struct i2o_controller *c)
{
struct i2o_device *dev, *tmp;
i2o_lct *lct;
- int i;
- int max;
+ u32 *dlct = c->dlct.virt;
+ int max = 0, i = 0;
+ u16 table_size;
+ u32 buf;
down(&c->lct_lock);
kfree(c->lct);
- lct = c->dlct.virt;
+ buf = le32_to_cpu(*dlct++);
+ table_size = buf & 0xffff;
- c->lct = kmalloc(lct->table_size * 4, GFP_KERNEL);
- if (!c->lct) {
+ lct = c->lct = kmalloc(table_size * 4, GFP_KERNEL);
+ if (!lct) {
up(&c->lct_lock);
return -ENOMEM;
}
- if (lct->table_size * 4 > c->dlct.len) {
- memcpy(c->lct, c->dlct.virt, c->dlct.len);
- up(&c->lct_lock);
- return -EAGAIN;
- }
+ lct->lct_ver = buf >> 28;
+ lct->boot_tid = buf >> 16 & 0xfff;
+ lct->table_size = table_size;
+ lct->change_ind = le32_to_cpu(*dlct++);
+ lct->iop_flags = le32_to_cpu(*dlct++);
- memcpy(c->lct, c->dlct.virt, lct->table_size * 4);
-
- lct = c->lct;
-
- max = (lct->table_size - 3) / 9;
+ table_size -= 3;
pr_debug("%s: LCT has %d entries (LCT size: %d)\n", c->name, max,
lct->table_size);
- /* remove devices, which are not in the LCT anymore */
- list_for_each_entry_safe(dev, tmp, &c->devices, list) {
+ while (table_size > 0) {
+ i2o_lct_entry *entry = &lct->lct_entry[max];
int found = 0;
- for (i = 0; i < max; i++) {
- if (lct->lct_entry[i].tid == dev->lct_data.tid) {
+ buf = le32_to_cpu(*dlct++);
+ entry->entry_size = buf & 0xffff;
+ entry->tid = buf >> 16 & 0xfff;
+
+ entry->change_ind = le32_to_cpu(*dlct++);
+ entry->device_flags = le32_to_cpu(*dlct++);
+
+ buf = le32_to_cpu(*dlct++);
+ entry->class_id = buf & 0xfff;
+ entry->version = buf >> 12 & 0xf;
+ entry->vendor_id = buf >> 16;
+
+ entry->sub_class = le32_to_cpu(*dlct++);
+
+ buf = le32_to_cpu(*dlct++);
+ entry->user_tid = buf & 0xfff;
+ entry->parent_tid = buf >> 12 & 0xfff;
+ entry->bios_info = buf >> 24;
+
+ memcpy(&entry->identity_tag, dlct, 8);
+ dlct += 2;
+
+ entry->event_capabilities = le32_to_cpu(*dlct++);
+
+		/* add devices which are new in the LCT */
+ list_for_each_entry_safe(dev, tmp, &c->devices, list) {
+ if (entry->tid == dev->lct_data.tid) {
found = 1;
break;
}
}
if (!found)
- i2o_device_remove(dev);
+ i2o_device_add(c, entry);
+
+ table_size -= 9;
+ max++;
}
- /* add new devices, which are new in the LCT */
- for (i = 0; i < max; i++) {
+ /* remove devices, which are not in the LCT anymore */
+ list_for_each_entry_safe(dev, tmp, &c->devices, list) {
int found = 0;
- list_for_each_entry_safe(dev, tmp, &c->devices, list) {
+ for (i = 0; i < max; i++) {
if (lct->lct_entry[i].tid == dev->lct_data.tid) {
found = 1;
break;
@@ -398,14 +400,14 @@ int i2o_device_parse_lct(struct i2o_controller *c)
}
if (!found)
- i2o_device_add(c, &lct->lct_entry[i]);
+ i2o_device_remove(dev);
}
+
up(&c->lct_lock);
return 0;
}
-
/*
* Run time support routines
*/
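The reworked i2o_device_parse_lct() above now decodes the raw little-endian LCT buffer field by field instead of memcpy()ing it wholesale, which keeps the parsed copy host-endian on big-endian machines. The extraction is plain shift-and-mask work on each 32-bit word; as a standalone illustration of what the hunk does inline for the first word of an entry:

#include <linux/types.h>
#include <asm/byteorder.h>

/* Unpack word 0 of one LCT entry, exactly as the loop above does. */
static void example_unpack_entry_word0(__le32 w, u16 *entry_size, u16 *tid)
{
	u32 buf = le32_to_cpu(w);

	*entry_size = buf & 0xffff;		/* bits  0..15 of word 0 */
	*tid        = (buf >> 16) & 0xfff;	/* bits 16..27 of word 0 */
}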
@@ -419,13 +421,9 @@ int i2o_device_parse_lct(struct i2o_controller *c)
* ResultCount, ErrorInfoSize, BlockStatus and BlockSize.
*/
int i2o_parm_issue(struct i2o_device *i2o_dev, int cmd, void *oplist,
- int oplen, void *reslist, int reslen)
+ int oplen, void *reslist, int reslen)
{
- struct i2o_message __iomem *msg;
- u32 m;
- u32 *res32 = (u32 *) reslist;
- u32 *restmp = (u32 *) reslist;
- int len = 0;
+ struct i2o_message *msg;
int i = 0;
int rc;
struct i2o_dma res;
@@ -437,26 +435,27 @@ int i2o_parm_issue(struct i2o_device *i2o_dev, int cmd, void *oplist,
if (i2o_dma_alloc(dev, &res, reslen, GFP_KERNEL))
return -ENOMEM;
- m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
- if (m == I2O_QUEUE_EMPTY) {
+ msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
+ if (IS_ERR(msg)) {
i2o_dma_free(dev, &res);
- return -ETIMEDOUT;
+ return PTR_ERR(msg);
}
i = 0;
- writel(cmd << 24 | HOST_TID << 12 | i2o_dev->lct_data.tid,
- &msg->u.head[1]);
- writel(0, &msg->body[i++]);
- writel(0x4C000000 | oplen, &msg->body[i++]); /* OperationList */
- memcpy_toio(&msg->body[i], oplist, oplen);
+ msg->u.head[1] =
+ cpu_to_le32(cmd << 24 | HOST_TID << 12 | i2o_dev->lct_data.tid);
+ msg->body[i++] = cpu_to_le32(0x00000000);
+ msg->body[i++] = cpu_to_le32(0x4C000000 | oplen); /* OperationList */
+ memcpy(&msg->body[i], oplist, oplen);
i += (oplen / 4 + (oplen % 4 ? 1 : 0));
- writel(0xD0000000 | res.len, &msg->body[i++]); /* ResultList */
- writel(res.phys, &msg->body[i++]);
+ msg->body[i++] = cpu_to_le32(0xD0000000 | res.len); /* ResultList */
+ msg->body[i++] = cpu_to_le32(res.phys);
- writel(I2O_MESSAGE_SIZE(i + sizeof(struct i2o_message) / 4) |
- SGL_OFFSET_5, &msg->u.head[0]);
+ msg->u.head[0] =
+ cpu_to_le32(I2O_MESSAGE_SIZE(i + sizeof(struct i2o_message) / 4) |
+ SGL_OFFSET_5);
- rc = i2o_msg_post_wait_mem(c, m, 10, &res);
+ rc = i2o_msg_post_wait_mem(c, msg, 10, &res);
/* This only looks like a memory leak - don't "fix" it. */
if (rc == -ETIMEDOUT)
@@ -465,36 +464,7 @@ int i2o_parm_issue(struct i2o_device *i2o_dev, int cmd, void *oplist,
memcpy(reslist, res.virt, res.len);
i2o_dma_free(dev, &res);
- /* Query failed */
- if (rc)
- return rc;
- /*
- * Calculate number of bytes of Result LIST
- * We need to loop through each Result BLOCK and grab the length
- */
- restmp = res32 + 1;
- len = 1;
- for (i = 0; i < (res32[0] & 0X0000FFFF); i++) {
- if (restmp[0] & 0x00FF0000) { /* BlockStatus != SUCCESS */
- printk(KERN_WARNING
- "%s - Error:\n ErrorInfoSize = 0x%02x, "
- "BlockStatus = 0x%02x, BlockSize = 0x%04x\n",
- (cmd ==
- I2O_CMD_UTIL_PARAMS_SET) ? "PARAMS_SET" :
- "PARAMS_GET", res32[1] >> 24,
- (res32[1] >> 16) & 0xFF, res32[1] & 0xFFFF);
-
- /*
- * If this is the only request,than we return an error
- */
- if ((res32[0] & 0x0000FFFF) == 1) {
- return -((res32[1] >> 16) & 0xFF); /* -BlockStatus */
- }
- }
- len += restmp[0] & 0x0000FFFF; /* Length of res BLOCK */
- restmp += restmp[0] & 0x0000FFFF; /* Skip to next BLOCK */
- }
- return (len << 2); /* bytes used by result list */
+ return rc;
}
/*
@@ -503,28 +473,25 @@ int i2o_parm_issue(struct i2o_device *i2o_dev, int cmd, void *oplist,
int i2o_parm_field_get(struct i2o_device *i2o_dev, int group, int field,
void *buf, int buflen)
{
- u16 opblk[] = { 1, 0, I2O_PARAMS_FIELD_GET, group, 1, field };
+ u32 opblk[] = { cpu_to_le32(0x00000001),
+ cpu_to_le32((u16) group << 16 | I2O_PARAMS_FIELD_GET),
+ cpu_to_le32((s16) field << 16 | 0x00000001)
+ };
u8 *resblk; /* 8 bytes for header */
- int size;
-
- if (field == -1) /* whole group */
- opblk[4] = -1;
+ int rc;
resblk = kmalloc(buflen + 8, GFP_KERNEL | GFP_ATOMIC);
if (!resblk)
return -ENOMEM;
- size = i2o_parm_issue(i2o_dev, I2O_CMD_UTIL_PARAMS_GET, opblk,
- sizeof(opblk), resblk, buflen + 8);
+ rc = i2o_parm_issue(i2o_dev, I2O_CMD_UTIL_PARAMS_GET, opblk,
+ sizeof(opblk), resblk, buflen + 8);
memcpy(buf, resblk + 8, buflen); /* cut off header */
kfree(resblk);
- if (size > buflen)
- return buflen;
-
- return size;
+ return rc;
}
/*
@@ -534,12 +501,12 @@ int i2o_parm_field_get(struct i2o_device *i2o_dev, int group, int field,
* else return specific fields
* ibuf contains fieldindexes
*
- * if oper == I2O_PARAMS_LIST_GET, get from specific rows
- * if fieldcount == -1 return all fields
+ * if oper == I2O_PARAMS_LIST_GET, get from specific rows
+ * if fieldcount == -1 return all fields
* ibuf contains rowcount, keyvalues
- * else return specific fields
+ * else return specific fields
* fieldcount is # of fieldindexes
- * ibuf contains fieldindexes, rowcount, keyvalues
+ * ibuf contains fieldindexes, rowcount, keyvalues
*
* You could also use directly function i2o_issue_params().
*/
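Note the changed contract above: i2o_parm_issue(), and therefore i2o_parm_field_get(), no longer returns the byte count of the result list; it now returns 0 on success and a negative error code otherwise, and the result bytes come back in device (little-endian) order. Callers elsewhere in this patch are flipped accordingly (exec-osm.c, i2o_block.c). A minimal caller under the new convention, mirroring the vendor-id attribute further down:

#include <linux/kernel.h>
#include <linux/i2o.h>

static void example_show_vendor(struct i2o_device *i2o_dev)
{
	u16 id;	/* returned little-endian; width per parameter group 0x0000 */

	if (!i2o_parm_field_get(i2o_dev, 0x0000, 0, &id, sizeof(id)))
		printk(KERN_INFO "vendor id 0x%04x\n", le16_to_cpu(id));
}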
diff --git a/drivers/message/i2o/driver.c b/drivers/message/i2o/driver.c
index 0fb9c4e2ad4..64130227574 100644
--- a/drivers/message/i2o/driver.c
+++ b/drivers/message/i2o/driver.c
@@ -61,12 +61,10 @@ static int i2o_bus_match(struct device *dev, struct device_driver *drv)
};
/* I2O bus type */
-extern struct device_attribute i2o_device_attrs[];
-
struct bus_type i2o_bus_type = {
.name = "i2o",
.match = i2o_bus_match,
- .dev_attrs = i2o_device_attrs,
+ .dev_attrs = i2o_device_attrs
};
/**
@@ -219,14 +217,14 @@ int i2o_driver_dispatch(struct i2o_controller *c, u32 m)
/* cut of header from message size (in 32-bit words) */
size = (le32_to_cpu(msg->u.head[0]) >> 16) - 5;
- evt = kmalloc(size * 4 + sizeof(*evt), GFP_ATOMIC | __GFP_ZERO);
+ evt = kzalloc(size * 4 + sizeof(*evt), GFP_ATOMIC);
if (!evt)
return -ENOMEM;
evt->size = size;
evt->tcntxt = le32_to_cpu(msg->u.s.tcntxt);
evt->event_indicator = le32_to_cpu(msg->body[0]);
- memcpy(&evt->tcntxt, &msg->u.s.tcntxt, size * 4);
+ memcpy(&evt->data, &msg->body[1], size * 4);
list_for_each_entry_safe(dev, tmp, &c->devices, list)
if (dev->lct_data.tid == tid) {
@@ -349,12 +347,10 @@ int __init i2o_driver_init(void)
osm_info("max drivers = %d\n", i2o_max_drivers);
i2o_drivers =
- kmalloc(i2o_max_drivers * sizeof(*i2o_drivers), GFP_KERNEL);
+ kzalloc(i2o_max_drivers * sizeof(*i2o_drivers), GFP_KERNEL);
if (!i2o_drivers)
return -ENOMEM;
- memset(i2o_drivers, 0, i2o_max_drivers * sizeof(*i2o_drivers));
-
rc = bus_register(&i2o_bus_type);
if (rc < 0)
diff --git a/drivers/message/i2o/exec-osm.c b/drivers/message/i2o/exec-osm.c
index 9c339a2505b..9bb9859f6df 100644
--- a/drivers/message/i2o/exec-osm.c
+++ b/drivers/message/i2o/exec-osm.c
@@ -33,7 +33,7 @@
#include <linux/workqueue.h>
#include <linux/string.h>
#include <linux/slab.h>
-#include <linux/sched.h> /* wait_event_interruptible_timeout() needs this */
+#include <linux/sched.h> /* wait_event_interruptible_timeout() needs this */
#include <asm/param.h> /* HZ */
#include "core.h"
@@ -75,11 +75,9 @@ static struct i2o_exec_wait *i2o_exec_wait_alloc(void)
{
struct i2o_exec_wait *wait;
- wait = kmalloc(sizeof(*wait), GFP_KERNEL);
+ wait = kzalloc(sizeof(*wait), GFP_KERNEL);
if (!wait)
- return ERR_PTR(-ENOMEM);
-
- memset(wait, 0, sizeof(*wait));
+ return NULL;
INIT_LIST_HEAD(&wait->list);
@@ -114,13 +112,12 @@ static void i2o_exec_wait_free(struct i2o_exec_wait *wait)
* Returns 0 on success, negative error code on timeout or positive error
* code from reply.
*/
-int i2o_msg_post_wait_mem(struct i2o_controller *c, u32 m, unsigned long
- timeout, struct i2o_dma *dma)
+int i2o_msg_post_wait_mem(struct i2o_controller *c, struct i2o_message *msg,
+ unsigned long timeout, struct i2o_dma *dma)
{
DECLARE_WAIT_QUEUE_HEAD(wq);
struct i2o_exec_wait *wait;
static u32 tcntxt = 0x80000000;
- struct i2o_message __iomem *msg = i2o_msg_in_to_virt(c, m);
int rc = 0;
wait = i2o_exec_wait_alloc();
@@ -138,15 +135,15 @@ int i2o_msg_post_wait_mem(struct i2o_controller *c, u32 m, unsigned long
* We will only use transaction contexts >= 0x80000000 for POST WAIT,
* so we could find a POST WAIT reply easier in the reply handler.
*/
- writel(i2o_exec_driver.context, &msg->u.s.icntxt);
+ msg->u.s.icntxt = cpu_to_le32(i2o_exec_driver.context);
wait->tcntxt = tcntxt++;
- writel(wait->tcntxt, &msg->u.s.tcntxt);
+ msg->u.s.tcntxt = cpu_to_le32(wait->tcntxt);
/*
* Post the message to the controller. At some point later it will
* return. If we time out before it returns then complete will be zero.
*/
- i2o_msg_post(c, m);
+ i2o_msg_post(c, msg);
if (!wait->complete) {
wait->wq = &wq;
@@ -266,13 +263,14 @@ static int i2o_msg_post_wait_complete(struct i2o_controller *c, u32 m,
*
* Returns number of bytes printed into buffer.
*/
-static ssize_t i2o_exec_show_vendor_id(struct device *d, struct device_attribute *attr, char *buf)
+static ssize_t i2o_exec_show_vendor_id(struct device *d,
+ struct device_attribute *attr, char *buf)
{
struct i2o_device *dev = to_i2o_device(d);
u16 id;
- if (i2o_parm_field_get(dev, 0x0000, 0, &id, 2)) {
- sprintf(buf, "0x%04x", id);
+ if (!i2o_parm_field_get(dev, 0x0000, 0, &id, 2)) {
+ sprintf(buf, "0x%04x", le16_to_cpu(id));
return strlen(buf) + 1;
}
@@ -286,13 +284,15 @@ static ssize_t i2o_exec_show_vendor_id(struct device *d, struct device_attribute
*
* Returns number of bytes printed into buffer.
*/
-static ssize_t i2o_exec_show_product_id(struct device *d, struct device_attribute *attr, char *buf)
+static ssize_t i2o_exec_show_product_id(struct device *d,
+ struct device_attribute *attr,
+ char *buf)
{
struct i2o_device *dev = to_i2o_device(d);
u16 id;
- if (i2o_parm_field_get(dev, 0x0000, 1, &id, 2)) {
- sprintf(buf, "0x%04x", id);
+ if (!i2o_parm_field_get(dev, 0x0000, 1, &id, 2)) {
+ sprintf(buf, "0x%04x", le16_to_cpu(id));
return strlen(buf) + 1;
}
@@ -362,7 +362,9 @@ static void i2o_exec_lct_modified(struct i2o_controller *c)
if (i2o_device_parse_lct(c) != -EAGAIN)
change_ind = c->lct->change_ind + 1;
+#ifdef CONFIG_I2O_LCT_NOTIFY_ON_CHANGES
i2o_exec_lct_notify(c, change_ind);
+#endif
};
/**
@@ -385,23 +387,22 @@ static int i2o_exec_reply(struct i2o_controller *c, u32 m,
u32 context;
if (le32_to_cpu(msg->u.head[0]) & MSG_FAIL) {
+ struct i2o_message __iomem *pmsg;
+ u32 pm;
+
/*
* If Fail bit is set we must take the transaction context of
* the preserved message to find the right request again.
*/
- struct i2o_message __iomem *pmsg;
- u32 pm;
pm = le32_to_cpu(msg->body[3]);
-
pmsg = i2o_msg_in_to_virt(c, pm);
+ context = readl(&pmsg->u.s.tcntxt);
i2o_report_status(KERN_INFO, "i2o_core", msg);
- context = readl(&pmsg->u.s.tcntxt);
-
/* Release the preserved msg */
- i2o_msg_nop(c, pm);
+ i2o_msg_nop_mfa(c, pm);
} else
context = le32_to_cpu(msg->u.s.tcntxt);
@@ -462,25 +463,26 @@ static void i2o_exec_event(struct i2o_event *evt)
*/
int i2o_exec_lct_get(struct i2o_controller *c)
{
- struct i2o_message __iomem *msg;
- u32 m;
+ struct i2o_message *msg;
int i = 0;
int rc = -EAGAIN;
for (i = 1; i <= I2O_LCT_GET_TRIES; i++) {
- m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
- if (m == I2O_QUEUE_EMPTY)
- return -ETIMEDOUT;
-
- writel(EIGHT_WORD_MSG_SIZE | SGL_OFFSET_6, &msg->u.head[0]);
- writel(I2O_CMD_LCT_NOTIFY << 24 | HOST_TID << 12 | ADAPTER_TID,
- &msg->u.head[1]);
- writel(0xffffffff, &msg->body[0]);
- writel(0x00000000, &msg->body[1]);
- writel(0xd0000000 | c->dlct.len, &msg->body[2]);
- writel(c->dlct.phys, &msg->body[3]);
-
- rc = i2o_msg_post_wait(c, m, I2O_TIMEOUT_LCT_GET);
+ msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
+ if (IS_ERR(msg))
+ return PTR_ERR(msg);
+
+ msg->u.head[0] =
+ cpu_to_le32(EIGHT_WORD_MSG_SIZE | SGL_OFFSET_6);
+ msg->u.head[1] =
+ cpu_to_le32(I2O_CMD_LCT_NOTIFY << 24 | HOST_TID << 12 |
+ ADAPTER_TID);
+ msg->body[0] = cpu_to_le32(0xffffffff);
+ msg->body[1] = cpu_to_le32(0x00000000);
+ msg->body[2] = cpu_to_le32(0xd0000000 | c->dlct.len);
+ msg->body[3] = cpu_to_le32(c->dlct.phys);
+
+ rc = i2o_msg_post_wait(c, msg, I2O_TIMEOUT_LCT_GET);
if (rc < 0)
break;
@@ -506,29 +508,29 @@ static int i2o_exec_lct_notify(struct i2o_controller *c, u32 change_ind)
{
i2o_status_block *sb = c->status_block.virt;
struct device *dev;
- struct i2o_message __iomem *msg;
- u32 m;
+ struct i2o_message *msg;
dev = &c->pdev->dev;
- if (i2o_dma_realloc(dev, &c->dlct, sb->expected_lct_size, GFP_KERNEL))
+ if (i2o_dma_realloc
+ (dev, &c->dlct, le32_to_cpu(sb->expected_lct_size), GFP_KERNEL))
return -ENOMEM;
- m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
- if (m == I2O_QUEUE_EMPTY)
- return -ETIMEDOUT;
-
- writel(EIGHT_WORD_MSG_SIZE | SGL_OFFSET_6, &msg->u.head[0]);
- writel(I2O_CMD_LCT_NOTIFY << 24 | HOST_TID << 12 | ADAPTER_TID,
- &msg->u.head[1]);
- writel(i2o_exec_driver.context, &msg->u.s.icntxt);
- writel(0, &msg->u.s.tcntxt); /* FIXME */
- writel(0xffffffff, &msg->body[0]);
- writel(change_ind, &msg->body[1]);
- writel(0xd0000000 | c->dlct.len, &msg->body[2]);
- writel(c->dlct.phys, &msg->body[3]);
-
- i2o_msg_post(c, m);
+ msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
+ if (IS_ERR(msg))
+ return PTR_ERR(msg);
+
+ msg->u.head[0] = cpu_to_le32(EIGHT_WORD_MSG_SIZE | SGL_OFFSET_6);
+ msg->u.head[1] = cpu_to_le32(I2O_CMD_LCT_NOTIFY << 24 | HOST_TID << 12 |
+ ADAPTER_TID);
+ msg->u.s.icntxt = cpu_to_le32(i2o_exec_driver.context);
+ msg->u.s.tcntxt = cpu_to_le32(0x00000000);
+ msg->body[0] = cpu_to_le32(0xffffffff);
+ msg->body[1] = cpu_to_le32(change_ind);
+ msg->body[2] = cpu_to_le32(0xd0000000 | c->dlct.len);
+ msg->body[3] = cpu_to_le32(c->dlct.phys);
+
+ i2o_msg_post(c, msg);
return 0;
};
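Several hunks in this patch (i2o_device_alloc(), i2o_driver_init(), i2o_exec_wait_alloc(), i2o_block_device_alloc(), the passthru reply buffers) replace the kmalloc()-plus-memset() pair with kzalloc(), which allocates and zeroes in one call. The conversion is mechanical, as in the i2o_exec_wait_alloc() hunk above:

	/* before */
	wait = kmalloc(sizeof(*wait), GFP_KERNEL);
	if (!wait)
		return NULL;
	memset(wait, 0, sizeof(*wait));

	/* after */
	wait = kzalloc(sizeof(*wait), GFP_KERNEL);
	if (!wait)
		return NULL;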
diff --git a/drivers/message/i2o/i2o_block.c b/drivers/message/i2o/i2o_block.c
index f283b5bafdd..5b1febed313 100644
--- a/drivers/message/i2o/i2o_block.c
+++ b/drivers/message/i2o/i2o_block.c
@@ -59,10 +59,12 @@
#include <linux/blkdev.h>
#include <linux/hdreg.h>
+#include <scsi/scsi.h>
+
#include "i2o_block.h"
#define OSM_NAME "block-osm"
-#define OSM_VERSION "1.287"
+#define OSM_VERSION "1.325"
#define OSM_DESCRIPTION "I2O Block Device OSM"
static struct i2o_driver i2o_block_driver;
@@ -130,20 +132,20 @@ static int i2o_block_remove(struct device *dev)
*/
static int i2o_block_device_flush(struct i2o_device *dev)
{
- struct i2o_message __iomem *msg;
- u32 m;
+ struct i2o_message *msg;
- m = i2o_msg_get_wait(dev->iop, &msg, I2O_TIMEOUT_MESSAGE_GET);
- if (m == I2O_QUEUE_EMPTY)
- return -ETIMEDOUT;
+ msg = i2o_msg_get_wait(dev->iop, I2O_TIMEOUT_MESSAGE_GET);
+ if (IS_ERR(msg))
+ return PTR_ERR(msg);
- writel(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
- writel(I2O_CMD_BLOCK_CFLUSH << 24 | HOST_TID << 12 | dev->lct_data.tid,
- &msg->u.head[1]);
- writel(60 << 16, &msg->body[0]);
+ msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0);
+ msg->u.head[1] =
+ cpu_to_le32(I2O_CMD_BLOCK_CFLUSH << 24 | HOST_TID << 12 | dev->
+ lct_data.tid);
+ msg->body[0] = cpu_to_le32(60 << 16);
osm_debug("Flushing...\n");
- return i2o_msg_post_wait(dev->iop, m, 60);
+ return i2o_msg_post_wait(dev->iop, msg, 60);
};
/**
@@ -181,21 +183,21 @@ static int i2o_block_issue_flush(request_queue_t * queue, struct gendisk *disk,
*/
static int i2o_block_device_mount(struct i2o_device *dev, u32 media_id)
{
- struct i2o_message __iomem *msg;
- u32 m;
-
- m = i2o_msg_get_wait(dev->iop, &msg, I2O_TIMEOUT_MESSAGE_GET);
- if (m == I2O_QUEUE_EMPTY)
- return -ETIMEDOUT;
-
- writel(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
- writel(I2O_CMD_BLOCK_MMOUNT << 24 | HOST_TID << 12 | dev->lct_data.tid,
- &msg->u.head[1]);
- writel(-1, &msg->body[0]);
- writel(0, &msg->body[1]);
+ struct i2o_message *msg;
+
+ msg = i2o_msg_get_wait(dev->iop, I2O_TIMEOUT_MESSAGE_GET);
+ if (IS_ERR(msg))
+ return PTR_ERR(msg);
+
+ msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0);
+ msg->u.head[1] =
+ cpu_to_le32(I2O_CMD_BLOCK_MMOUNT << 24 | HOST_TID << 12 | dev->
+ lct_data.tid);
+ msg->body[0] = cpu_to_le32(-1);
+ msg->body[1] = cpu_to_le32(0x00000000);
osm_debug("Mounting...\n");
- return i2o_msg_post_wait(dev->iop, m, 2);
+ return i2o_msg_post_wait(dev->iop, msg, 2);
};
/**
@@ -210,20 +212,20 @@ static int i2o_block_device_mount(struct i2o_device *dev, u32 media_id)
*/
static int i2o_block_device_lock(struct i2o_device *dev, u32 media_id)
{
- struct i2o_message __iomem *msg;
- u32 m;
+ struct i2o_message *msg;
- m = i2o_msg_get_wait(dev->iop, &msg, I2O_TIMEOUT_MESSAGE_GET);
- if (m == I2O_QUEUE_EMPTY)
- return -ETIMEDOUT;
+ msg = i2o_msg_get_wait(dev->iop, I2O_TIMEOUT_MESSAGE_GET);
+	if (IS_ERR(msg))
+ return PTR_ERR(msg);
- writel(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
- writel(I2O_CMD_BLOCK_MLOCK << 24 | HOST_TID << 12 | dev->lct_data.tid,
- &msg->u.head[1]);
- writel(-1, &msg->body[0]);
+ msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0);
+ msg->u.head[1] =
+ cpu_to_le32(I2O_CMD_BLOCK_MLOCK << 24 | HOST_TID << 12 | dev->
+ lct_data.tid);
+ msg->body[0] = cpu_to_le32(-1);
osm_debug("Locking...\n");
- return i2o_msg_post_wait(dev->iop, m, 2);
+ return i2o_msg_post_wait(dev->iop, msg, 2);
};
/**
@@ -238,20 +240,20 @@ static int i2o_block_device_lock(struct i2o_device *dev, u32 media_id)
*/
static int i2o_block_device_unlock(struct i2o_device *dev, u32 media_id)
{
- struct i2o_message __iomem *msg;
- u32 m;
+ struct i2o_message *msg;
- m = i2o_msg_get_wait(dev->iop, &msg, I2O_TIMEOUT_MESSAGE_GET);
- if (m == I2O_QUEUE_EMPTY)
- return -ETIMEDOUT;
+ msg = i2o_msg_get_wait(dev->iop, I2O_TIMEOUT_MESSAGE_GET);
+ if (IS_ERR(msg))
+ return PTR_ERR(msg);
- writel(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
- writel(I2O_CMD_BLOCK_MUNLOCK << 24 | HOST_TID << 12 | dev->lct_data.tid,
- &msg->u.head[1]);
- writel(media_id, &msg->body[0]);
+ msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0);
+ msg->u.head[1] =
+ cpu_to_le32(I2O_CMD_BLOCK_MUNLOCK << 24 | HOST_TID << 12 | dev->
+ lct_data.tid);
+ msg->body[0] = cpu_to_le32(media_id);
osm_debug("Unlocking...\n");
- return i2o_msg_post_wait(dev->iop, m, 2);
+ return i2o_msg_post_wait(dev->iop, msg, 2);
};
/**
@@ -267,21 +269,21 @@ static int i2o_block_device_power(struct i2o_block_device *dev, u8 op)
{
struct i2o_device *i2o_dev = dev->i2o_dev;
struct i2o_controller *c = i2o_dev->iop;
- struct i2o_message __iomem *msg;
- u32 m;
+ struct i2o_message *msg;
int rc;
- m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
- if (m == I2O_QUEUE_EMPTY)
- return -ETIMEDOUT;
+ msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
+ if (IS_ERR(msg))
+ return PTR_ERR(msg);
- writel(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
- writel(I2O_CMD_BLOCK_POWER << 24 | HOST_TID << 12 | i2o_dev->lct_data.
- tid, &msg->u.head[1]);
- writel(op << 24, &msg->body[0]);
+ msg->u.head[0] = cpu_to_le32(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0);
+ msg->u.head[1] =
+ cpu_to_le32(I2O_CMD_BLOCK_POWER << 24 | HOST_TID << 12 | i2o_dev->
+ lct_data.tid);
+ msg->body[0] = cpu_to_le32(op << 24);
osm_debug("Power...\n");
- rc = i2o_msg_post_wait(c, m, 60);
+ rc = i2o_msg_post_wait(c, msg, 60);
if (!rc)
dev->power = op;
@@ -331,7 +333,7 @@ static inline void i2o_block_request_free(struct i2o_block_request *ireq)
*/
static inline int i2o_block_sglist_alloc(struct i2o_controller *c,
struct i2o_block_request *ireq,
- u32 __iomem ** mptr)
+ u32 ** mptr)
{
int nents;
enum dma_data_direction direction;
@@ -466,7 +468,7 @@ static void i2o_block_end_request(struct request *req, int uptodate,
spin_lock_irqsave(q->queue_lock, flags);
- end_that_request_last(req);
+ end_that_request_last(req, uptodate);
if (likely(dev)) {
dev->open_queue_depth--;
@@ -745,10 +747,9 @@ static int i2o_block_transfer(struct request *req)
struct i2o_block_device *dev = req->rq_disk->private_data;
struct i2o_controller *c;
int tid = dev->i2o_dev->lct_data.tid;
- struct i2o_message __iomem *msg;
- u32 __iomem *mptr;
+ struct i2o_message *msg;
+ u32 *mptr;
struct i2o_block_request *ireq = req->special;
- u32 m;
u32 tcntxt;
u32 sgl_offset = SGL_OFFSET_8;
u32 ctl_flags = 0x00000000;
@@ -763,9 +764,9 @@ static int i2o_block_transfer(struct request *req)
c = dev->i2o_dev->iop;
- m = i2o_msg_get(c, &msg);
- if (m == I2O_QUEUE_EMPTY) {
- rc = -EBUSY;
+ msg = i2o_msg_get(c);
+ if (IS_ERR(msg)) {
+ rc = PTR_ERR(msg);
goto exit;
}
@@ -775,8 +776,8 @@ static int i2o_block_transfer(struct request *req)
goto nop_msg;
}
- writel(i2o_block_driver.context, &msg->u.s.icntxt);
- writel(tcntxt, &msg->u.s.tcntxt);
+ msg->u.s.icntxt = cpu_to_le32(i2o_block_driver.context);
+ msg->u.s.tcntxt = cpu_to_le32(tcntxt);
mptr = &msg->body[0];
@@ -834,11 +835,11 @@ static int i2o_block_transfer(struct request *req)
sgl_offset = SGL_OFFSET_12;
- writel(I2O_CMD_PRIVATE << 24 | HOST_TID << 12 | tid,
- &msg->u.head[1]);
+ msg->u.head[1] =
+ cpu_to_le32(I2O_CMD_PRIVATE << 24 | HOST_TID << 12 | tid);
- writel(I2O_VENDOR_DPT << 16 | I2O_CMD_SCSI_EXEC, mptr++);
- writel(tid, mptr++);
+ *mptr++ = cpu_to_le32(I2O_VENDOR_DPT << 16 | I2O_CMD_SCSI_EXEC);
+ *mptr++ = cpu_to_le32(tid);
/*
* ENABLE_DISCONNECT
@@ -846,29 +847,31 @@ static int i2o_block_transfer(struct request *req)
* RETURN_SENSE_DATA_IN_REPLY_MESSAGE_FRAME
*/
if (rq_data_dir(req) == READ) {
- cmd[0] = 0x28;
+ cmd[0] = READ_10;
scsi_flags = 0x60a0000a;
} else {
- cmd[0] = 0x2A;
+ cmd[0] = WRITE_10;
scsi_flags = 0xa0a0000a;
}
- writel(scsi_flags, mptr++);
+ *mptr++ = cpu_to_le32(scsi_flags);
*((u32 *) & cmd[2]) = cpu_to_be32(req->sector * hwsec);
*((u16 *) & cmd[7]) = cpu_to_be16(req->nr_sectors * hwsec);
- memcpy_toio(mptr, cmd, 10);
+ memcpy(mptr, cmd, 10);
mptr += 4;
- writel(req->nr_sectors << KERNEL_SECTOR_SHIFT, mptr++);
+ *mptr++ = cpu_to_le32(req->nr_sectors << KERNEL_SECTOR_SHIFT);
} else
#endif
{
- writel(cmd | HOST_TID << 12 | tid, &msg->u.head[1]);
- writel(ctl_flags, mptr++);
- writel(req->nr_sectors << KERNEL_SECTOR_SHIFT, mptr++);
- writel((u32) (req->sector << KERNEL_SECTOR_SHIFT), mptr++);
- writel(req->sector >> (32 - KERNEL_SECTOR_SHIFT), mptr++);
+ msg->u.head[1] = cpu_to_le32(cmd | HOST_TID << 12 | tid);
+ *mptr++ = cpu_to_le32(ctl_flags);
+ *mptr++ = cpu_to_le32(req->nr_sectors << KERNEL_SECTOR_SHIFT);
+ *mptr++ =
+ cpu_to_le32((u32) (req->sector << KERNEL_SECTOR_SHIFT));
+ *mptr++ =
+ cpu_to_le32(req->sector >> (32 - KERNEL_SECTOR_SHIFT));
}
if (!i2o_block_sglist_alloc(c, ireq, &mptr)) {
@@ -876,13 +879,13 @@ static int i2o_block_transfer(struct request *req)
goto context_remove;
}
- writel(I2O_MESSAGE_SIZE(mptr - &msg->u.head[0]) |
- sgl_offset, &msg->u.head[0]);
+ msg->u.head[0] =
+ cpu_to_le32(I2O_MESSAGE_SIZE(mptr - &msg->u.head[0]) | sgl_offset);
list_add_tail(&ireq->queue, &dev->open_queue);
dev->open_queue_depth++;
- i2o_msg_post(c, m);
+ i2o_msg_post(c, msg);
return 0;
@@ -890,7 +893,7 @@ static int i2o_block_transfer(struct request *req)
i2o_cntxt_list_remove(c, req);
nop_msg:
- i2o_msg_nop(c, m);
+ i2o_msg_nop(c, msg);
exit:
return rc;
@@ -978,13 +981,12 @@ static struct i2o_block_device *i2o_block_device_alloc(void)
struct request_queue *queue;
int rc;
- dev = kmalloc(sizeof(*dev), GFP_KERNEL);
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (!dev) {
osm_err("Insufficient memory to allocate I2O Block disk.\n");
rc = -ENOMEM;
goto exit;
}
- memset(dev, 0, sizeof(*dev));
INIT_LIST_HEAD(&dev->open_queue);
spin_lock_init(&dev->lock);
@@ -1049,8 +1051,8 @@ static int i2o_block_probe(struct device *dev)
int rc;
u64 size;
u32 blocksize;
- u32 flags, status;
u16 body_size = 4;
+ u16 power;
unsigned short max_sectors;
#ifdef CONFIG_I2O_EXT_ADAPTEC
@@ -1108,22 +1110,20 @@ static int i2o_block_probe(struct device *dev)
* Ask for the current media data. If that isn't supported
* then we ask for the device capacity data
*/
- if (i2o_parm_field_get(i2o_dev, 0x0004, 1, &blocksize, 4) ||
- i2o_parm_field_get(i2o_dev, 0x0000, 3, &blocksize, 4)) {
- blk_queue_hardsect_size(queue, blocksize);
+ if (!i2o_parm_field_get(i2o_dev, 0x0004, 1, &blocksize, 4) ||
+ !i2o_parm_field_get(i2o_dev, 0x0000, 3, &blocksize, 4)) {
+ blk_queue_hardsect_size(queue, le32_to_cpu(blocksize));
} else
osm_warn("unable to get blocksize of %s\n", gd->disk_name);
- if (i2o_parm_field_get(i2o_dev, 0x0004, 0, &size, 8) ||
- i2o_parm_field_get(i2o_dev, 0x0000, 4, &size, 8)) {
- set_capacity(gd, size >> KERNEL_SECTOR_SHIFT);
+ if (!i2o_parm_field_get(i2o_dev, 0x0004, 0, &size, 8) ||
+ !i2o_parm_field_get(i2o_dev, 0x0000, 4, &size, 8)) {
+ set_capacity(gd, le64_to_cpu(size) >> KERNEL_SECTOR_SHIFT);
} else
osm_warn("could not get size of %s\n", gd->disk_name);
- if (!i2o_parm_field_get(i2o_dev, 0x0000, 2, &i2o_blk_dev->power, 2))
- i2o_blk_dev->power = 0;
- i2o_parm_field_get(i2o_dev, 0x0000, 5, &flags, 4);
- i2o_parm_field_get(i2o_dev, 0x0000, 6, &status, 4);
+ if (!i2o_parm_field_get(i2o_dev, 0x0000, 2, &power, 2))
+ i2o_blk_dev->power = power;
i2o_event_register(i2o_dev, &i2o_block_driver, 0, 0xffffffff);
diff --git a/drivers/message/i2o/i2o_config.c b/drivers/message/i2o/i2o_config.c
index 3c3a7abebb1..89daf67b764 100644
--- a/drivers/message/i2o/i2o_config.c
+++ b/drivers/message/i2o/i2o_config.c
@@ -36,12 +36,12 @@
#include <asm/uaccess.h>
-#include "core.h"
-
#define SG_TABLESIZE 30
-static int i2o_cfg_ioctl(struct inode *inode, struct file *fp, unsigned int cmd,
- unsigned long arg);
+extern int i2o_parm_issue(struct i2o_device *, int, void *, int, void *, int);
+
+static int i2o_cfg_ioctl(struct inode *, struct file *, unsigned int,
+ unsigned long);
static spinlock_t i2o_config_lock;
@@ -230,8 +230,7 @@ static int i2o_cfg_swdl(unsigned long arg)
struct i2o_sw_xfer __user *pxfer = (struct i2o_sw_xfer __user *)arg;
unsigned char maxfrag = 0, curfrag = 1;
struct i2o_dma buffer;
- struct i2o_message __iomem *msg;
- u32 m;
+ struct i2o_message *msg;
unsigned int status = 0, swlen = 0, fragsize = 8192;
struct i2o_controller *c;
@@ -257,31 +256,34 @@ static int i2o_cfg_swdl(unsigned long arg)
if (!c)
return -ENXIO;
- m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
- if (m == I2O_QUEUE_EMPTY)
- return -EBUSY;
+ msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
+ if (IS_ERR(msg))
+ return PTR_ERR(msg);
if (i2o_dma_alloc(&c->pdev->dev, &buffer, fragsize, GFP_KERNEL)) {
- i2o_msg_nop(c, m);
+ i2o_msg_nop(c, msg);
return -ENOMEM;
}
__copy_from_user(buffer.virt, kxfer.buf, fragsize);
- writel(NINE_WORD_MSG_SIZE | SGL_OFFSET_7, &msg->u.head[0]);
- writel(I2O_CMD_SW_DOWNLOAD << 24 | HOST_TID << 12 | ADAPTER_TID,
- &msg->u.head[1]);
- writel(i2o_config_driver.context, &msg->u.head[2]);
- writel(0, &msg->u.head[3]);
- writel((((u32) kxfer.flags) << 24) | (((u32) kxfer.sw_type) << 16) |
- (((u32) maxfrag) << 8) | (((u32) curfrag)), &msg->body[0]);
- writel(swlen, &msg->body[1]);
- writel(kxfer.sw_id, &msg->body[2]);
- writel(0xD0000000 | fragsize, &msg->body[3]);
- writel(buffer.phys, &msg->body[4]);
+ msg->u.head[0] = cpu_to_le32(NINE_WORD_MSG_SIZE | SGL_OFFSET_7);
+ msg->u.head[1] =
+ cpu_to_le32(I2O_CMD_SW_DOWNLOAD << 24 | HOST_TID << 12 |
+ ADAPTER_TID);
+ msg->u.head[2] = cpu_to_le32(i2o_config_driver.context);
+ msg->u.head[3] = cpu_to_le32(0);
+ msg->body[0] =
+ cpu_to_le32((((u32) kxfer.flags) << 24) | (((u32) kxfer.
+ sw_type) << 16) |
+ (((u32) maxfrag) << 8) | (((u32) curfrag)));
+ msg->body[1] = cpu_to_le32(swlen);
+ msg->body[2] = cpu_to_le32(kxfer.sw_id);
+ msg->body[3] = cpu_to_le32(0xD0000000 | fragsize);
+ msg->body[4] = cpu_to_le32(buffer.phys);
osm_debug("swdl frag %d/%d (size %d)\n", curfrag, maxfrag, fragsize);
- status = i2o_msg_post_wait_mem(c, m, 60, &buffer);
+ status = i2o_msg_post_wait_mem(c, msg, 60, &buffer);
if (status != -ETIMEDOUT)
i2o_dma_free(&c->pdev->dev, &buffer);
@@ -302,8 +304,7 @@ static int i2o_cfg_swul(unsigned long arg)
struct i2o_sw_xfer __user *pxfer = (struct i2o_sw_xfer __user *)arg;
unsigned char maxfrag = 0, curfrag = 1;
struct i2o_dma buffer;
- struct i2o_message __iomem *msg;
- u32 m;
+ struct i2o_message *msg;
unsigned int status = 0, swlen = 0, fragsize = 8192;
struct i2o_controller *c;
int ret = 0;
@@ -330,30 +331,30 @@ static int i2o_cfg_swul(unsigned long arg)
if (!c)
return -ENXIO;
- m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
- if (m == I2O_QUEUE_EMPTY)
- return -EBUSY;
+ msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
+ if (IS_ERR(msg))
+ return PTR_ERR(msg);
if (i2o_dma_alloc(&c->pdev->dev, &buffer, fragsize, GFP_KERNEL)) {
- i2o_msg_nop(c, m);
+ i2o_msg_nop(c, msg);
return -ENOMEM;
}
- writel(NINE_WORD_MSG_SIZE | SGL_OFFSET_7, &msg->u.head[0]);
- writel(I2O_CMD_SW_UPLOAD << 24 | HOST_TID << 12 | ADAPTER_TID,
- &msg->u.head[1]);
- writel(i2o_config_driver.context, &msg->u.head[2]);
- writel(0, &msg->u.head[3]);
- writel((u32) kxfer.flags << 24 | (u32) kxfer.
- sw_type << 16 | (u32) maxfrag << 8 | (u32) curfrag,
- &msg->body[0]);
- writel(swlen, &msg->body[1]);
- writel(kxfer.sw_id, &msg->body[2]);
- writel(0xD0000000 | fragsize, &msg->body[3]);
- writel(buffer.phys, &msg->body[4]);
+ msg->u.head[0] = cpu_to_le32(NINE_WORD_MSG_SIZE | SGL_OFFSET_7);
+ msg->u.head[1] =
+ cpu_to_le32(I2O_CMD_SW_UPLOAD << 24 | HOST_TID << 12 | ADAPTER_TID);
+ msg->u.head[2] = cpu_to_le32(i2o_config_driver.context);
+ msg->u.head[3] = cpu_to_le32(0);
+ msg->body[0] =
+ cpu_to_le32((u32) kxfer.flags << 24 | (u32) kxfer.
+ sw_type << 16 | (u32) maxfrag << 8 | (u32) curfrag);
+ msg->body[1] = cpu_to_le32(swlen);
+ msg->body[2] = cpu_to_le32(kxfer.sw_id);
+ msg->body[3] = cpu_to_le32(0xD0000000 | fragsize);
+ msg->body[4] = cpu_to_le32(buffer.phys);
osm_debug("swul frag %d/%d (size %d)\n", curfrag, maxfrag, fragsize);
- status = i2o_msg_post_wait_mem(c, m, 60, &buffer);
+ status = i2o_msg_post_wait_mem(c, msg, 60, &buffer);
if (status != I2O_POST_WAIT_OK) {
if (status != -ETIMEDOUT)
@@ -380,8 +381,7 @@ static int i2o_cfg_swdel(unsigned long arg)
struct i2o_controller *c;
struct i2o_sw_xfer kxfer;
struct i2o_sw_xfer __user *pxfer = (struct i2o_sw_xfer __user *)arg;
- struct i2o_message __iomem *msg;
- u32 m;
+ struct i2o_message *msg;
unsigned int swlen;
int token;
@@ -395,21 +395,21 @@ static int i2o_cfg_swdel(unsigned long arg)
if (!c)
return -ENXIO;
- m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
- if (m == I2O_QUEUE_EMPTY)
- return -EBUSY;
+ msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
+ if (IS_ERR(msg))
+ return PTR_ERR(msg);
- writel(SEVEN_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
- writel(I2O_CMD_SW_REMOVE << 24 | HOST_TID << 12 | ADAPTER_TID,
- &msg->u.head[1]);
- writel(i2o_config_driver.context, &msg->u.head[2]);
- writel(0, &msg->u.head[3]);
- writel((u32) kxfer.flags << 24 | (u32) kxfer.sw_type << 16,
- &msg->body[0]);
- writel(swlen, &msg->body[1]);
- writel(kxfer.sw_id, &msg->body[2]);
+ msg->u.head[0] = cpu_to_le32(SEVEN_WORD_MSG_SIZE | SGL_OFFSET_0);
+ msg->u.head[1] =
+ cpu_to_le32(I2O_CMD_SW_REMOVE << 24 | HOST_TID << 12 | ADAPTER_TID);
+ msg->u.head[2] = cpu_to_le32(i2o_config_driver.context);
+ msg->u.head[3] = cpu_to_le32(0);
+ msg->body[0] =
+ cpu_to_le32((u32) kxfer.flags << 24 | (u32) kxfer.sw_type << 16);
+ msg->body[1] = cpu_to_le32(swlen);
+ msg->body[2] = cpu_to_le32(kxfer.sw_id);
- token = i2o_msg_post_wait(c, m, 10);
+ token = i2o_msg_post_wait(c, msg, 10);
if (token != I2O_POST_WAIT_OK) {
osm_info("swdel failed, DetailedStatus = %d\n", token);
@@ -423,25 +423,24 @@ static int i2o_cfg_validate(unsigned long arg)
{
int token;
int iop = (int)arg;
- struct i2o_message __iomem *msg;
- u32 m;
+ struct i2o_message *msg;
struct i2o_controller *c;
c = i2o_find_iop(iop);
if (!c)
return -ENXIO;
- m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
- if (m == I2O_QUEUE_EMPTY)
- return -EBUSY;
+ msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
+ if (IS_ERR(msg))
+ return PTR_ERR(msg);
- writel(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
- writel(I2O_CMD_CONFIG_VALIDATE << 24 | HOST_TID << 12 | iop,
- &msg->u.head[1]);
- writel(i2o_config_driver.context, &msg->u.head[2]);
- writel(0, &msg->u.head[3]);
+ msg->u.head[0] = cpu_to_le32(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0);
+ msg->u.head[1] =
+ cpu_to_le32(I2O_CMD_CONFIG_VALIDATE << 24 | HOST_TID << 12 | iop);
+ msg->u.head[2] = cpu_to_le32(i2o_config_driver.context);
+ msg->u.head[3] = cpu_to_le32(0);
- token = i2o_msg_post_wait(c, m, 10);
+ token = i2o_msg_post_wait(c, msg, 10);
if (token != I2O_POST_WAIT_OK) {
osm_info("Can't validate configuration, ErrorStatus = %d\n",
@@ -454,8 +453,7 @@ static int i2o_cfg_validate(unsigned long arg)
static int i2o_cfg_evt_reg(unsigned long arg, struct file *fp)
{
- struct i2o_message __iomem *msg;
- u32 m;
+ struct i2o_message *msg;
struct i2o_evt_id __user *pdesc = (struct i2o_evt_id __user *)arg;
struct i2o_evt_id kdesc;
struct i2o_controller *c;
@@ -474,18 +472,19 @@ static int i2o_cfg_evt_reg(unsigned long arg, struct file *fp)
if (!d)
return -ENODEV;
- m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
- if (m == I2O_QUEUE_EMPTY)
- return -EBUSY;
+ msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
+ if (IS_ERR(msg))
+ return PTR_ERR(msg);
- writel(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
- writel(I2O_CMD_UTIL_EVT_REGISTER << 24 | HOST_TID << 12 | kdesc.tid,
- &msg->u.head[1]);
- writel(i2o_config_driver.context, &msg->u.head[2]);
- writel(i2o_cntxt_list_add(c, fp->private_data), &msg->u.head[3]);
- writel(kdesc.evt_mask, &msg->body[0]);
+ msg->u.head[0] = cpu_to_le32(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0);
+ msg->u.head[1] =
+ cpu_to_le32(I2O_CMD_UTIL_EVT_REGISTER << 24 | HOST_TID << 12 |
+ kdesc.tid);
+ msg->u.head[2] = cpu_to_le32(i2o_config_driver.context);
+ msg->u.head[3] = cpu_to_le32(i2o_cntxt_list_add(c, fp->private_data));
+ msg->body[0] = cpu_to_le32(kdesc.evt_mask);
- i2o_msg_post(c, m);
+ i2o_msg_post(c, msg);
return 0;
}
@@ -537,7 +536,6 @@ static int i2o_cfg_passthru32(struct file *file, unsigned cmnd,
u32 sg_index = 0;
i2o_status_block *sb;
struct i2o_message *msg;
- u32 m;
unsigned int iop;
cmd = (struct i2o_cmd_passthru32 __user *)arg;
@@ -553,7 +551,7 @@ static int i2o_cfg_passthru32(struct file *file, unsigned cmnd,
return -ENXIO;
}
- m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
+ msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
sb = c->status_block.virt;
@@ -585,19 +583,15 @@ static int i2o_cfg_passthru32(struct file *file, unsigned cmnd,
reply_size >>= 16;
reply_size <<= 2;
- reply = kmalloc(reply_size, GFP_KERNEL);
+ reply = kzalloc(reply_size, GFP_KERNEL);
if (!reply) {
printk(KERN_WARNING "%s: Could not allocate reply buffer\n",
c->name);
return -ENOMEM;
}
- memset(reply, 0, reply_size);
sg_offset = (msg->u.head[0] >> 4) & 0x0f;
- writel(i2o_config_driver.context, &msg->u.s.icntxt);
- writel(i2o_cntxt_list_add(c, reply), &msg->u.s.tcntxt);
-
memset(sg_list, 0, sizeof(sg_list[0]) * SG_TABLESIZE);
if (sg_offset) {
struct sg_simple_element *sg;
@@ -631,7 +625,7 @@ static int i2o_cfg_passthru32(struct file *file, unsigned cmnd,
goto cleanup;
}
sg_size = sg[i].flag_count & 0xffffff;
- p = &(sg_list[sg_index++]);
+ p = &(sg_list[sg_index]);
/* Allocate memory for the transfer */
if (i2o_dma_alloc
(&c->pdev->dev, p, sg_size,
@@ -642,6 +636,7 @@ static int i2o_cfg_passthru32(struct file *file, unsigned cmnd,
rcode = -ENOMEM;
goto sg_list_cleanup;
}
+ sg_index++;
/* Copy in the user's SG buffer if necessary */
if (sg[i].
flag_count & 0x04000000 /*I2O_SGL_FLAGS_DIR */ ) {
@@ -662,9 +657,11 @@ static int i2o_cfg_passthru32(struct file *file, unsigned cmnd,
}
}
- rcode = i2o_msg_post_wait(c, m, 60);
- if (rcode)
+ rcode = i2o_msg_post_wait(c, msg, 60);
+ if (rcode) {
+ reply[4] = ((u32) rcode) << 24;
goto sg_list_cleanup;
+ }
if (sg_offset) {
u32 msg[I2O_OUTBOUND_MSG_FRAME_SIZE];
@@ -714,6 +711,7 @@ static int i2o_cfg_passthru32(struct file *file, unsigned cmnd,
}
}
+ sg_list_cleanup:
/* Copy back the reply to user space */
if (reply_size) {
// we wrote our own values for context - now restore the user supplied ones
@@ -731,7 +729,6 @@ static int i2o_cfg_passthru32(struct file *file, unsigned cmnd,
}
}
- sg_list_cleanup:
for (i = 0; i < sg_index; i++)
i2o_dma_free(&c->pdev->dev, &sg_list[i]);
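Editor's note: the two passthru fixes above are related. sg_index is now incremented only after i2o_dma_alloc() succeeds, and the sg_list_cleanup label has moved before the reply copy-out, so the error path still returns the (now rcode-carrying) reply to user space and then frees exactly the entries that were really allocated. The allocation/cleanup idiom being enforced, reduced to a generic sketch (alloc_entry()/free_entry() are placeholders, not driver calls):

        int i, allocated = 0;

        for (i = 0; i < wanted; i++) {
                if (alloc_entry(&entries[allocated]) < 0)
                        goto cleanup;
                allocated++;            /* count an entry only once it really exists */
        }
        return 0;

    cleanup:
        while (allocated--)
                free_entry(&entries[allocated]);        /* frees exactly what was allocated */
        return -ENOMEM;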
@@ -780,8 +777,7 @@ static int i2o_cfg_passthru(unsigned long arg)
u32 i = 0;
void *p = NULL;
i2o_status_block *sb;
- struct i2o_message __iomem *msg;
- u32 m;
+ struct i2o_message *msg;
unsigned int iop;
if (get_user(iop, &cmd->iop) || get_user(user_msg, &cmd->msg))
@@ -793,7 +789,7 @@ static int i2o_cfg_passthru(unsigned long arg)
return -ENXIO;
}
- m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
+ msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
sb = c->status_block.virt;
@@ -820,19 +816,15 @@ static int i2o_cfg_passthru(unsigned long arg)
reply_size >>= 16;
reply_size <<= 2;
- reply = kmalloc(reply_size, GFP_KERNEL);
+ reply = kzalloc(reply_size, GFP_KERNEL);
if (!reply) {
printk(KERN_WARNING "%s: Could not allocate reply buffer\n",
c->name);
return -ENOMEM;
}
- memset(reply, 0, reply_size);
sg_offset = (msg->u.head[0] >> 4) & 0x0f;
- writel(i2o_config_driver.context, &msg->u.s.icntxt);
- writel(i2o_cntxt_list_add(c, reply), &msg->u.s.tcntxt);
-
memset(sg_list, 0, sizeof(sg_list[0]) * SG_TABLESIZE);
if (sg_offset) {
struct sg_simple_element *sg;
@@ -894,9 +886,11 @@ static int i2o_cfg_passthru(unsigned long arg)
}
}
- rcode = i2o_msg_post_wait(c, m, 60);
- if (rcode)
+ rcode = i2o_msg_post_wait(c, msg, 60);
+ if (rcode) {
+ reply[4] = ((u32) rcode) << 24;
goto sg_list_cleanup;
+ }
if (sg_offset) {
u32 msg[128];
@@ -946,6 +940,7 @@ static int i2o_cfg_passthru(unsigned long arg)
}
}
+ sg_list_cleanup:
/* Copy back the reply to user space */
if (reply_size) {
// we wrote our own values for context - now restore the user supplied ones
@@ -962,7 +957,6 @@ static int i2o_cfg_passthru(unsigned long arg)
}
}
- sg_list_cleanup:
for (i = 0; i < sg_index; i++)
kfree(sg_list[i]);
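Editor's note: taken together, the i2o_config hunks above convert every ioctl path from writel()-based programming of a mapped inbound frame to filling an ordinary struct i2o_message in memory and handing it to the core. A minimal sketch of the resulting pattern, modelled on the i2o_cfg_validate() hunk; the function name and the -EIO mapping are illustrative, everything else is taken from the hunk:

        static int cfg_validate_sketch(struct i2o_controller *c, int iop)
        {
                struct i2o_message *msg;
                int token;

                /* may sleep up to I2O_TIMEOUT_MESSAGE_GET seconds for a free frame */
                msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
                if (IS_ERR(msg))
                        return PTR_ERR(msg);

                /* header is built in CPU memory and byte-swapped explicitly */
                msg->u.head[0] = cpu_to_le32(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0);
                msg->u.head[1] = cpu_to_le32(I2O_CMD_CONFIG_VALIDATE << 24 |
                                             HOST_TID << 12 | iop);
                msg->u.head[2] = cpu_to_le32(i2o_config_driver.context);
                msg->u.head[3] = cpu_to_le32(0);

                /* posts the frame and waits up to 10 seconds for the reply */
                token = i2o_msg_post_wait(c, msg, 10);
                return token == I2O_POST_WAIT_OK ? 0 : -EIO;
        }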
diff --git a/drivers/message/i2o/i2o_lan.h b/drivers/message/i2o/i2o_lan.h
index 561d63304d7..6502b817df5 100644
--- a/drivers/message/i2o/i2o_lan.h
+++ b/drivers/message/i2o/i2o_lan.h
@@ -103,14 +103,14 @@
#define I2O_LAN_DSC_SUSPENDED 0x11
struct i2o_packet_info {
- u32 offset : 24;
- u32 flags : 8;
- u32 len : 24;
- u32 status : 8;
+ u32 offset:24;
+ u32 flags:8;
+ u32 len:24;
+ u32 status:8;
};
struct i2o_bucket_descriptor {
- u32 context; /* FIXME: 64bit support */
+ u32 context; /* FIXME: 64bit support */
struct i2o_packet_info packet_info[1];
};
@@ -127,14 +127,14 @@ struct i2o_lan_local {
u8 unit;
struct i2o_device *i2o_dev;
- struct fddi_statistics stats; /* see also struct net_device_stats */
- unsigned short (*type_trans)(struct sk_buff *, struct net_device *);
- atomic_t buckets_out; /* nbr of unused buckets on DDM */
- atomic_t tx_out; /* outstanding TXes */
- u8 tx_count; /* packets in one TX message frame */
- u16 tx_max_out; /* DDM's Tx queue len */
- u8 sgl_max; /* max SGLs in one message frame */
- u32 m; /* IOP address of the batch msg frame */
+ struct fddi_statistics stats; /* see also struct net_device_stats */
+ unsigned short (*type_trans) (struct sk_buff *, struct net_device *);
+ atomic_t buckets_out; /* nbr of unused buckets on DDM */
+ atomic_t tx_out; /* outstanding TXes */
+ u8 tx_count; /* packets in one TX message frame */
+ u16 tx_max_out; /* DDM's Tx queue len */
+ u8 sgl_max; /* max SGLs in one message frame */
+ u32 m; /* IOP address of the batch msg frame */
struct work_struct i2o_batch_send_task;
int send_active;
@@ -144,16 +144,16 @@ struct i2o_lan_local {
spinlock_t tx_lock;
- u32 max_size_mc_table; /* max number of multicast addresses */
+ u32 max_size_mc_table; /* max number of multicast addresses */
/* LAN OSM configurable parameters are here: */
- u16 max_buckets_out; /* max nbr of buckets to send to DDM */
- u16 bucket_thresh; /* send more when this many used */
+ u16 max_buckets_out; /* max nbr of buckets to send to DDM */
+ u16 bucket_thresh; /* send more when this many used */
u16 rx_copybreak;
- u8 tx_batch_mode; /* Set when using batch mode sends */
- u32 i2o_event_mask; /* To turn on interesting event flags */
+ u8 tx_batch_mode; /* Set when using batch mode sends */
+ u32 i2o_event_mask; /* To turn on interesting event flags */
};
-#endif /* _I2O_LAN_H */
+#endif /* _I2O_LAN_H */
diff --git a/drivers/message/i2o/i2o_proc.c b/drivers/message/i2o/i2o_proc.c
index d559a175836..2a0c42b8cda 100644
--- a/drivers/message/i2o/i2o_proc.c
+++ b/drivers/message/i2o/i2o_proc.c
@@ -28,7 +28,7 @@
*/
#define OSM_NAME "proc-osm"
-#define OSM_VERSION "1.145"
+#define OSM_VERSION "1.316"
#define OSM_DESCRIPTION "I2O ProcFS OSM"
#define I2O_MAX_MODULES 4
diff --git a/drivers/message/i2o/i2o_scsi.c b/drivers/message/i2o/i2o_scsi.c
index 9f1744c3933..f9e5a23697a 100644
--- a/drivers/message/i2o/i2o_scsi.c
+++ b/drivers/message/i2o/i2o_scsi.c
@@ -70,7 +70,7 @@
#include <scsi/sg_request.h>
#define OSM_NAME "scsi-osm"
-#define OSM_VERSION "1.282"
+#define OSM_VERSION "1.316"
#define OSM_DESCRIPTION "I2O SCSI Peripheral OSM"
static struct i2o_driver i2o_scsi_driver;
@@ -113,7 +113,7 @@ static struct i2o_scsi_host *i2o_scsi_host_alloc(struct i2o_controller *c)
list_for_each_entry(i2o_dev, &c->devices, list)
if (i2o_dev->lct_data.class_id == I2O_CLASS_BUS_ADAPTER) {
- if (i2o_parm_field_get(i2o_dev, 0x0000, 0, &type, 1)
+ if (!i2o_parm_field_get(i2o_dev, 0x0000, 0, &type, 1)
&& (type == 0x01)) /* SCSI bus */
max_channel++;
}
@@ -146,7 +146,7 @@ static struct i2o_scsi_host *i2o_scsi_host_alloc(struct i2o_controller *c)
i = 0;
list_for_each_entry(i2o_dev, &c->devices, list)
if (i2o_dev->lct_data.class_id == I2O_CLASS_BUS_ADAPTER) {
- if (i2o_parm_field_get(i2o_dev, 0x0000, 0, &type, 1)
+ if (!i2o_parm_field_get(i2o_dev, 0x0000, 0, &type, 1)
&& (type == 0x01)) /* only SCSI bus */
i2o_shost->channel[i++] = i2o_dev;
@@ -238,13 +238,15 @@ static int i2o_scsi_probe(struct device *dev)
u8 type;
struct i2o_device *d = i2o_shost->channel[0];
- if (i2o_parm_field_get(d, 0x0000, 0, &type, 1)
+ if (!i2o_parm_field_get(d, 0x0000, 0, &type, 1)
&& (type == 0x01)) /* SCSI bus */
- if (i2o_parm_field_get(d, 0x0200, 4, &id, 4)) {
+ if (!i2o_parm_field_get(d, 0x0200, 4, &id, 4)) {
channel = 0;
if (i2o_dev->lct_data.class_id ==
I2O_CLASS_RANDOM_BLOCK_STORAGE)
- lun = i2o_shost->lun++;
+ lun =
+ cpu_to_le64(i2o_shost->
+ lun++);
else
lun = 0;
}
@@ -253,10 +255,10 @@ static int i2o_scsi_probe(struct device *dev)
break;
case I2O_CLASS_SCSI_PERIPHERAL:
- if (i2o_parm_field_get(i2o_dev, 0x0000, 3, &id, 4) < 0)
+ if (i2o_parm_field_get(i2o_dev, 0x0000, 3, &id, 4))
return -EFAULT;
- if (i2o_parm_field_get(i2o_dev, 0x0000, 4, &lun, 8) < 0)
+ if (i2o_parm_field_get(i2o_dev, 0x0000, 4, &lun, 8))
return -EFAULT;
parent = i2o_iop_find_device(c, i2o_dev->lct_data.parent_tid);
@@ -281,20 +283,22 @@ static int i2o_scsi_probe(struct device *dev)
return -EFAULT;
}
- if (id >= scsi_host->max_id) {
- osm_warn("SCSI device id (%d) >= max_id of I2O host (%d)", id,
- scsi_host->max_id);
+ if (le32_to_cpu(id) >= scsi_host->max_id) {
+ osm_warn("SCSI device id (%d) >= max_id of I2O host (%d)",
+ le32_to_cpu(id), scsi_host->max_id);
return -EFAULT;
}
- if (lun >= scsi_host->max_lun) {
- osm_warn("SCSI device id (%d) >= max_lun of I2O host (%d)",
- (unsigned int)lun, scsi_host->max_lun);
+ if (le64_to_cpu(lun) >= scsi_host->max_lun) {
+ osm_warn("SCSI device lun (%lu) >= max_lun of I2O host (%d)",
+ (long unsigned int)le64_to_cpu(lun),
+ scsi_host->max_lun);
return -EFAULT;
}
scsi_dev =
- __scsi_add_device(i2o_shost->scsi_host, channel, id, lun, i2o_dev);
+ __scsi_add_device(i2o_shost->scsi_host, channel, le32_to_cpu(id),
+ le64_to_cpu(lun), i2o_dev);
if (IS_ERR(scsi_dev)) {
osm_warn("can not add SCSI device %03x\n",
@@ -305,8 +309,9 @@ static int i2o_scsi_probe(struct device *dev)
sysfs_create_link(&i2o_dev->device.kobj, &scsi_dev->sdev_gendev.kobj,
"scsi");
- osm_info("device added (TID: %03x) channel: %d, id: %d, lun: %d\n",
- i2o_dev->lct_data.tid, channel, id, (unsigned int)lun);
+ osm_info("device added (TID: %03x) channel: %d, id: %d, lun: %ld\n",
+ i2o_dev->lct_data.tid, channel, le32_to_cpu(id),
+ (long unsigned int)le64_to_cpu(lun));
return 0;
};
@@ -510,8 +515,7 @@ static int i2o_scsi_queuecommand(struct scsi_cmnd *SCpnt,
struct i2o_controller *c;
struct i2o_device *i2o_dev;
int tid;
- struct i2o_message __iomem *msg;
- u32 m;
+ struct i2o_message *msg;
/*
* ENABLE_DISCONNECT
* SIMPLE_TAG
@@ -519,7 +523,7 @@ static int i2o_scsi_queuecommand(struct scsi_cmnd *SCpnt,
*/
u32 scsi_flags = 0x20a00000;
u32 sgl_offset;
- u32 __iomem *mptr;
+ u32 *mptr;
u32 cmd = I2O_CMD_SCSI_EXEC << 24;
int rc = 0;
@@ -576,8 +580,8 @@ static int i2o_scsi_queuecommand(struct scsi_cmnd *SCpnt,
* throw it back to the scsi layer
*/
- m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
- if (m == I2O_QUEUE_EMPTY) {
+ msg = i2o_msg_get(c);
+ if (IS_ERR(msg)) {
rc = SCSI_MLQUEUE_HOST_BUSY;
goto exit;
}
@@ -617,16 +621,16 @@ static int i2o_scsi_queuecommand(struct scsi_cmnd *SCpnt,
if (sgl_offset == SGL_OFFSET_10)
sgl_offset = SGL_OFFSET_12;
cmd = I2O_CMD_PRIVATE << 24;
- writel(I2O_VENDOR_DPT << 16 | I2O_CMD_SCSI_EXEC, mptr++);
- writel(adpt_flags | tid, mptr++);
+ *mptr++ = cpu_to_le32(I2O_VENDOR_DPT << 16 | I2O_CMD_SCSI_EXEC);
+ *mptr++ = cpu_to_le32(adpt_flags | tid);
}
#endif
- writel(cmd | HOST_TID << 12 | tid, &msg->u.head[1]);
- writel(i2o_scsi_driver.context, &msg->u.s.icntxt);
+ msg->u.head[1] = cpu_to_le32(cmd | HOST_TID << 12 | tid);
+ msg->u.s.icntxt = cpu_to_le32(i2o_scsi_driver.context);
/* We want the SCSI control block back */
- writel(i2o_cntxt_list_add(c, SCpnt), &msg->u.s.tcntxt);
+ msg->u.s.tcntxt = cpu_to_le32(i2o_cntxt_list_add(c, SCpnt));
/* LSI_920_PCI_QUIRK
*
@@ -649,15 +653,15 @@ static int i2o_scsi_queuecommand(struct scsi_cmnd *SCpnt,
}
*/
- writel(scsi_flags | SCpnt->cmd_len, mptr++);
+ *mptr++ = cpu_to_le32(scsi_flags | SCpnt->cmd_len);
/* Write SCSI command into the message - always 16 byte block */
- memcpy_toio(mptr, SCpnt->cmnd, 16);
+ memcpy(mptr, SCpnt->cmnd, 16);
mptr += 4;
if (sgl_offset != SGL_OFFSET_0) {
/* write size of data addressed by SGL */
- writel(SCpnt->request_bufflen, mptr++);
+ *mptr++ = cpu_to_le32(SCpnt->request_bufflen);
/* Now fill in the SGList and command */
if (SCpnt->use_sg) {
@@ -676,11 +680,11 @@ static int i2o_scsi_queuecommand(struct scsi_cmnd *SCpnt,
}
/* Stick the headers on */
- writel(I2O_MESSAGE_SIZE(mptr - &msg->u.head[0]) | sgl_offset,
- &msg->u.head[0]);
+ msg->u.head[0] =
+ cpu_to_le32(I2O_MESSAGE_SIZE(mptr - &msg->u.head[0]) | sgl_offset);
/* Queue the message */
- i2o_msg_post(c, m);
+ i2o_msg_post(c, msg);
osm_debug("Issued %ld\n", SCpnt->serial_number);
@@ -688,7 +692,7 @@ static int i2o_scsi_queuecommand(struct scsi_cmnd *SCpnt,
nomem:
rc = -ENOMEM;
- i2o_msg_nop(c, m);
+ i2o_msg_nop(c, msg);
exit:
return rc;
@@ -709,8 +713,7 @@ static int i2o_scsi_abort(struct scsi_cmnd *SCpnt)
{
struct i2o_device *i2o_dev;
struct i2o_controller *c;
- struct i2o_message __iomem *msg;
- u32 m;
+ struct i2o_message *msg;
int tid;
int status = FAILED;
@@ -720,16 +723,16 @@ static int i2o_scsi_abort(struct scsi_cmnd *SCpnt)
c = i2o_dev->iop;
tid = i2o_dev->lct_data.tid;
- m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
- if (m == I2O_QUEUE_EMPTY)
+ msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
+ if (IS_ERR(msg))
return SCSI_MLQUEUE_HOST_BUSY;
- writel(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
- writel(I2O_CMD_SCSI_ABORT << 24 | HOST_TID << 12 | tid,
- &msg->u.head[1]);
- writel(i2o_cntxt_list_get_ptr(c, SCpnt), &msg->body[0]);
+ msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0);
+ msg->u.head[1] =
+ cpu_to_le32(I2O_CMD_SCSI_ABORT << 24 | HOST_TID << 12 | tid);
+ msg->body[0] = cpu_to_le32(i2o_cntxt_list_get_ptr(c, SCpnt));
- if (i2o_msg_post_wait(c, m, I2O_TIMEOUT_SCSI_SCB_ABORT))
+ if (i2o_msg_post_wait(c, msg, I2O_TIMEOUT_SCSI_SCB_ABORT))
status = SUCCESS;
return status;
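Editor's note: the i2o_scsi conversion follows the same scheme, but queuecommand runs in a context that must not sleep, so it calls i2o_msg_get() directly and bounces the command back to the midlayer when no frame is free; a frame that was obtained but cannot be completed is handed back with i2o_msg_nop(). A condensed sketch of that control flow, with a comment marking where the real code builds the flags, CDB and SG list:

        static int queuecommand_sketch(struct i2o_controller *c, struct scsi_cmnd *SCpnt,
                                       u32 cmd, int tid)
        {
                struct i2o_message *msg;

                msg = i2o_msg_get(c);                   /* non-blocking fast path */
                if (IS_ERR(msg))
                        return SCSI_MLQUEUE_HOST_BUSY;  /* midlayer will retry */

                msg->u.head[1] = cpu_to_le32(cmd | HOST_TID << 12 | tid);
                msg->u.s.icntxt = cpu_to_le32(i2o_scsi_driver.context);
                msg->u.s.tcntxt = cpu_to_le32(i2o_cntxt_list_add(c, SCpnt));

                /* ... scsi_flags, the 16-byte CDB and the SG list go here; if any of
                 * that fails the frame must be handed back with i2o_msg_nop(c, msg) ... */

                i2o_msg_post(c, msg);                   /* reply is matched via tcntxt */
                return 0;
        }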
diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c
index 4eb53258842..49216744693 100644
--- a/drivers/message/i2o/iop.c
+++ b/drivers/message/i2o/iop.c
@@ -32,7 +32,7 @@
#include "core.h"
#define OSM_NAME "i2o"
-#define OSM_VERSION "1.288"
+#define OSM_VERSION "1.325"
#define OSM_DESCRIPTION "I2O subsystem"
/* global I2O controller list */
@@ -47,27 +47,6 @@ static struct i2o_dma i2o_systab;
static int i2o_hrt_get(struct i2o_controller *c);
/**
- * i2o_msg_nop - Returns a message which is not used
- * @c: I2O controller from which the message was created
- * @m: message which should be returned
- *
- * If you fetch a message via i2o_msg_get, and can't use it, you must
- * return the message with this function. Otherwise the message frame
- * is lost.
- */
-void i2o_msg_nop(struct i2o_controller *c, u32 m)
-{
- struct i2o_message __iomem *msg = i2o_msg_in_to_virt(c, m);
-
- writel(THREE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
- writel(I2O_CMD_UTIL_NOP << 24 | HOST_TID << 12 | ADAPTER_TID,
- &msg->u.head[1]);
- writel(0, &msg->u.head[2]);
- writel(0, &msg->u.head[3]);
- i2o_msg_post(c, m);
-};
-
-/**
* i2o_msg_get_wait - obtain an I2O message from the IOP
* @c: I2O controller
* @msg: pointer to a I2O message pointer
@@ -81,22 +60,21 @@ void i2o_msg_nop(struct i2o_controller *c, u32 m)
* address from the read port (see the i2o spec). If no message is
* available within the timeout, an ERR_PTR() encoded error is returned.
*/
-u32 i2o_msg_get_wait(struct i2o_controller *c,
- struct i2o_message __iomem ** msg, int wait)
+struct i2o_message *i2o_msg_get_wait(struct i2o_controller *c, int wait)
{
unsigned long timeout = jiffies + wait * HZ;
- u32 m;
+ struct i2o_message *msg;
- while ((m = i2o_msg_get(c, msg)) == I2O_QUEUE_EMPTY) {
+ while (IS_ERR(msg = i2o_msg_get(c))) {
if (time_after(jiffies, timeout)) {
osm_debug("%s: Timeout waiting for message frame.\n",
c->name);
- return I2O_QUEUE_EMPTY;
+ return ERR_PTR(-ETIMEDOUT);
}
schedule_timeout_uninterruptible(1);
}
- return m;
+ return msg;
};
#if BITS_PER_LONG == 64
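Editor's note: the core change in this hunk is the calling convention. Instead of an opaque frame offset plus the I2O_QUEUE_EMPTY magic value, i2o_msg_get_wait() now returns either a message pointer or an errno wrapped with ERR_PTR(); callers test with IS_ERR() and unwrap with PTR_ERR(), as every converted site in this patch does. A minimal illustration, assuming only <linux/err.h>:

        struct i2o_message *msg;

        msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
        if (IS_ERR(msg))
                return PTR_ERR(msg);    /* e.g. -ETIMEDOUT from the wait loop above */

        /* ... fill in msg and post it ... */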
@@ -301,8 +279,7 @@ struct i2o_device *i2o_iop_find_device(struct i2o_controller *c, u16 tid)
*/
static int i2o_iop_quiesce(struct i2o_controller *c)
{
- struct i2o_message __iomem *msg;
- u32 m;
+ struct i2o_message *msg;
i2o_status_block *sb = c->status_block.virt;
int rc;
@@ -313,16 +290,17 @@ static int i2o_iop_quiesce(struct i2o_controller *c)
(sb->iop_state != ADAPTER_STATE_OPERATIONAL))
return 0;
- m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
- if (m == I2O_QUEUE_EMPTY)
- return -ETIMEDOUT;
+ msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
+ if (IS_ERR(msg))
+ return PTR_ERR(msg);
- writel(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
- writel(I2O_CMD_SYS_QUIESCE << 24 | HOST_TID << 12 | ADAPTER_TID,
- &msg->u.head[1]);
+ msg->u.head[0] = cpu_to_le32(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0);
+ msg->u.head[1] =
+ cpu_to_le32(I2O_CMD_SYS_QUIESCE << 24 | HOST_TID << 12 |
+ ADAPTER_TID);
/* Long timeout needed for quiesce if lots of devices */
- if ((rc = i2o_msg_post_wait(c, m, 240)))
+ if ((rc = i2o_msg_post_wait(c, msg, 240)))
osm_info("%s: Unable to quiesce (status=%#x).\n", c->name, -rc);
else
osm_debug("%s: Quiesced.\n", c->name);
@@ -342,8 +320,7 @@ static int i2o_iop_quiesce(struct i2o_controller *c)
*/
static int i2o_iop_enable(struct i2o_controller *c)
{
- struct i2o_message __iomem *msg;
- u32 m;
+ struct i2o_message *msg;
i2o_status_block *sb = c->status_block.virt;
int rc;
@@ -353,16 +330,17 @@ static int i2o_iop_enable(struct i2o_controller *c)
if (sb->iop_state != ADAPTER_STATE_READY)
return -EINVAL;
- m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
- if (m == I2O_QUEUE_EMPTY)
- return -ETIMEDOUT;
+ msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
+ if (IS_ERR(msg))
+ return PTR_ERR(msg);
- writel(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
- writel(I2O_CMD_SYS_ENABLE << 24 | HOST_TID << 12 | ADAPTER_TID,
- &msg->u.head[1]);
+ msg->u.head[0] = cpu_to_le32(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0);
+ msg->u.head[1] =
+ cpu_to_le32(I2O_CMD_SYS_ENABLE << 24 | HOST_TID << 12 |
+ ADAPTER_TID);
/* How long of a timeout do we need? */
- if ((rc = i2o_msg_post_wait(c, m, 240)))
+ if ((rc = i2o_msg_post_wait(c, msg, 240)))
osm_err("%s: Could not enable (status=%#x).\n", c->name, -rc);
else
osm_debug("%s: Enabled.\n", c->name);
@@ -413,22 +391,22 @@ static inline void i2o_iop_enable_all(void)
*/
static int i2o_iop_clear(struct i2o_controller *c)
{
- struct i2o_message __iomem *msg;
- u32 m;
+ struct i2o_message *msg;
int rc;
- m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
- if (m == I2O_QUEUE_EMPTY)
- return -ETIMEDOUT;
+ msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
+ if (IS_ERR(msg))
+ return PTR_ERR(msg);
/* Quiesce all IOPs first */
i2o_iop_quiesce_all();
- writel(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
- writel(I2O_CMD_ADAPTER_CLEAR << 24 | HOST_TID << 12 | ADAPTER_TID,
- &msg->u.head[1]);
+ msg->u.head[0] = cpu_to_le32(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0);
+ msg->u.head[1] =
+ cpu_to_le32(I2O_CMD_ADAPTER_CLEAR << 24 | HOST_TID << 12 |
+ ADAPTER_TID);
- if ((rc = i2o_msg_post_wait(c, m, 30)))
+ if ((rc = i2o_msg_post_wait(c, msg, 30)))
osm_info("%s: Unable to clear (status=%#x).\n", c->name, -rc);
else
osm_debug("%s: Cleared.\n", c->name);
@@ -446,13 +424,13 @@ static int i2o_iop_clear(struct i2o_controller *c)
* Clear and (re)initialize IOP's outbound queue and post the message
* frames to the IOP.
*
- * Returns 0 on success or a negative errno code on failure.
+ * Returns 0 on success or negative error code on failure.
*/
static int i2o_iop_init_outbound_queue(struct i2o_controller *c)
{
- volatile u8 *status = c->status.virt;
u32 m;
- struct i2o_message __iomem *msg;
+ volatile u8 *status = c->status.virt;
+ struct i2o_message *msg;
ulong timeout;
int i;
@@ -460,23 +438,24 @@ static int i2o_iop_init_outbound_queue(struct i2o_controller *c)
memset(c->status.virt, 0, 4);
- m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
- if (m == I2O_QUEUE_EMPTY)
- return -ETIMEDOUT;
-
- writel(EIGHT_WORD_MSG_SIZE | SGL_OFFSET_6, &msg->u.head[0]);
- writel(I2O_CMD_OUTBOUND_INIT << 24 | HOST_TID << 12 | ADAPTER_TID,
- &msg->u.head[1]);
- writel(i2o_exec_driver.context, &msg->u.s.icntxt);
- writel(0x00000000, &msg->u.s.tcntxt);
- writel(PAGE_SIZE, &msg->body[0]);
+ msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
+ if (IS_ERR(msg))
+ return PTR_ERR(msg);
+
+ msg->u.head[0] = cpu_to_le32(EIGHT_WORD_MSG_SIZE | SGL_OFFSET_6);
+ msg->u.head[1] =
+ cpu_to_le32(I2O_CMD_OUTBOUND_INIT << 24 | HOST_TID << 12 |
+ ADAPTER_TID);
+ msg->u.s.icntxt = cpu_to_le32(i2o_exec_driver.context);
+ msg->u.s.tcntxt = cpu_to_le32(0x00000000);
+ msg->body[0] = cpu_to_le32(PAGE_SIZE);
/* Outbound msg frame size in words and Initcode */
- writel(I2O_OUTBOUND_MSG_FRAME_SIZE << 16 | 0x80, &msg->body[1]);
- writel(0xd0000004, &msg->body[2]);
- writel(i2o_dma_low(c->status.phys), &msg->body[3]);
- writel(i2o_dma_high(c->status.phys), &msg->body[4]);
+ msg->body[1] = cpu_to_le32(I2O_OUTBOUND_MSG_FRAME_SIZE << 16 | 0x80);
+ msg->body[2] = cpu_to_le32(0xd0000004);
+ msg->body[3] = cpu_to_le32(i2o_dma_low(c->status.phys));
+ msg->body[4] = cpu_to_le32(i2o_dma_high(c->status.phys));
- i2o_msg_post(c, m);
+ i2o_msg_post(c, msg);
timeout = jiffies + I2O_TIMEOUT_INIT_OUTBOUND_QUEUE * HZ;
while (*status <= I2O_CMD_IN_PROGRESS) {
@@ -511,34 +490,34 @@ static int i2o_iop_init_outbound_queue(struct i2o_controller *c)
static int i2o_iop_reset(struct i2o_controller *c)
{
volatile u8 *status = c->status.virt;
- struct i2o_message __iomem *msg;
- u32 m;
+ struct i2o_message *msg;
unsigned long timeout;
i2o_status_block *sb = c->status_block.virt;
int rc = 0;
osm_debug("%s: Resetting controller\n", c->name);
- m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
- if (m == I2O_QUEUE_EMPTY)
- return -ETIMEDOUT;
+ msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
+ if (IS_ERR(msg))
+ return PTR_ERR(msg);
memset(c->status_block.virt, 0, 8);
/* Quiesce all IOPs first */
i2o_iop_quiesce_all();
- writel(EIGHT_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
- writel(I2O_CMD_ADAPTER_RESET << 24 | HOST_TID << 12 | ADAPTER_TID,
- &msg->u.head[1]);
- writel(i2o_exec_driver.context, &msg->u.s.icntxt);
- writel(0, &msg->u.s.tcntxt); //FIXME: use reasonable transaction context
- writel(0, &msg->body[0]);
- writel(0, &msg->body[1]);
- writel(i2o_dma_low(c->status.phys), &msg->body[2]);
- writel(i2o_dma_high(c->status.phys), &msg->body[3]);
+ msg->u.head[0] = cpu_to_le32(EIGHT_WORD_MSG_SIZE | SGL_OFFSET_0);
+ msg->u.head[1] =
+ cpu_to_le32(I2O_CMD_ADAPTER_RESET << 24 | HOST_TID << 12 |
+ ADAPTER_TID);
+ msg->u.s.icntxt = cpu_to_le32(i2o_exec_driver.context);
+ msg->u.s.tcntxt = cpu_to_le32(0x00000000);
+ msg->body[0] = cpu_to_le32(0x00000000);
+ msg->body[1] = cpu_to_le32(0x00000000);
+ msg->body[2] = cpu_to_le32(i2o_dma_low(c->status.phys));
+ msg->body[3] = cpu_to_le32(i2o_dma_high(c->status.phys));
- i2o_msg_post(c, m);
+ i2o_msg_post(c, msg);
/* Wait for a reply */
timeout = jiffies + I2O_TIMEOUT_RESET * HZ;
@@ -567,18 +546,15 @@ static int i2o_iop_reset(struct i2o_controller *c)
osm_debug("%s: Reset in progress, waiting for reboot...\n",
c->name);
- m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_RESET);
- while (m == I2O_QUEUE_EMPTY) {
+ while (IS_ERR(msg = i2o_msg_get_wait(c, I2O_TIMEOUT_RESET))) {
if (time_after(jiffies, timeout)) {
osm_err("%s: IOP reset timeout.\n", c->name);
- rc = -ETIMEDOUT;
+ rc = PTR_ERR(msg);
goto exit;
}
schedule_timeout_uninterruptible(1);
-
- m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_RESET);
}
- i2o_msg_nop(c, m);
+ i2o_msg_nop(c, msg);
/* from here all quiesce commands are safe */
c->no_quiesce = 0;
@@ -686,8 +662,7 @@ static int i2o_iop_activate(struct i2o_controller *c)
*/
static int i2o_iop_systab_set(struct i2o_controller *c)
{
- struct i2o_message __iomem *msg;
- u32 m;
+ struct i2o_message *msg;
i2o_status_block *sb = c->status_block.virt;
struct device *dev = &c->pdev->dev;
struct resource *root;
@@ -735,41 +710,38 @@ static int i2o_iop_systab_set(struct i2o_controller *c)
}
}
- m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
- if (m == I2O_QUEUE_EMPTY)
- return -ETIMEDOUT;
+ msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
+ if (IS_ERR(msg))
+ return PTR_ERR(msg);
i2o_systab.phys = dma_map_single(dev, i2o_systab.virt, i2o_systab.len,
PCI_DMA_TODEVICE);
if (!i2o_systab.phys) {
- i2o_msg_nop(c, m);
+ i2o_msg_nop(c, msg);
return -ENOMEM;
}
- writel(I2O_MESSAGE_SIZE(12) | SGL_OFFSET_6, &msg->u.head[0]);
- writel(I2O_CMD_SYS_TAB_SET << 24 | HOST_TID << 12 | ADAPTER_TID,
- &msg->u.head[1]);
+ msg->u.head[0] = cpu_to_le32(I2O_MESSAGE_SIZE(12) | SGL_OFFSET_6);
+ msg->u.head[1] =
+ cpu_to_le32(I2O_CMD_SYS_TAB_SET << 24 | HOST_TID << 12 |
+ ADAPTER_TID);
/*
* Provide three SGL-elements:
* System table (SysTab), Private memory space declaration and
* Private i/o space declaration
- *
- * FIXME: is this still true?
- * Nasty one here. We can't use dma_alloc_coherent to send the
- * same table to everyone. We have to go remap it for them all
*/
- writel(c->unit + 2, &msg->body[0]);
- writel(0, &msg->body[1]);
- writel(0x54000000 | i2o_systab.len, &msg->body[2]);
- writel(i2o_systab.phys, &msg->body[3]);
- writel(0x54000000 | sb->current_mem_size, &msg->body[4]);
- writel(sb->current_mem_base, &msg->body[5]);
- writel(0xd4000000 | sb->current_io_size, &msg->body[6]);
- writel(sb->current_io_base, &msg->body[6]);
+ msg->body[0] = cpu_to_le32(c->unit + 2);
+ msg->body[1] = cpu_to_le32(0x00000000);
+ msg->body[2] = cpu_to_le32(0x54000000 | i2o_systab.len);
+ msg->body[3] = cpu_to_le32(i2o_systab.phys);
+ msg->body[4] = cpu_to_le32(0x54000000 | sb->current_mem_size);
+ msg->body[5] = cpu_to_le32(sb->current_mem_base);
+ msg->body[6] = cpu_to_le32(0xd4000000 | sb->current_io_size);
+ msg->body[7] = cpu_to_le32(sb->current_io_base);
- rc = i2o_msg_post_wait(c, m, 120);
+ rc = i2o_msg_post_wait(c, msg, 120);
dma_unmap_single(dev, i2o_systab.phys, i2o_systab.len,
PCI_DMA_TODEVICE);
@@ -780,8 +752,6 @@ static int i2o_iop_systab_set(struct i2o_controller *c)
else
osm_debug("%s: SysTab set.\n", c->name);
- i2o_status_get(c); // Entered READY state
-
return rc;
}
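Editor's note: as the comment in the hunk says, the SYS_TAB_SET frame carries three scatter-gather elements after two parameter words; with the four-word header that is exactly the I2O_MESSAGE_SIZE(12) declared in head[0], so the eight body words pair up as below. This is also why the final write belongs in body[7] rather than overwriting body[6] (the field-name comments are descriptive, not taken from the spec):

        msg->body[0] = cpu_to_le32(c->unit + 2);                        /* parameter word 0 */
        msg->body[1] = cpu_to_le32(0x00000000);                         /* parameter word 1 */
        msg->body[2] = cpu_to_le32(0x54000000 | i2o_systab.len);        /* SGL 1: system table */
        msg->body[3] = cpu_to_le32(i2o_systab.phys);
        msg->body[4] = cpu_to_le32(0x54000000 | sb->current_mem_size);  /* SGL 2: private memory */
        msg->body[5] = cpu_to_le32(sb->current_mem_base);
        msg->body[6] = cpu_to_le32(0xd4000000 | sb->current_io_size);   /* SGL 3: private I/O */
        msg->body[7] = cpu_to_le32(sb->current_io_base);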
@@ -791,7 +761,7 @@ static int i2o_iop_systab_set(struct i2o_controller *c)
*
* Send the system table and enable the I2O controller.
*
- * Returns 0 on success or negativer error code on failure.
+ * Returns 0 on success or negative error code on failure.
*/
static int i2o_iop_online(struct i2o_controller *c)
{
@@ -830,7 +800,6 @@ void i2o_iop_remove(struct i2o_controller *c)
list_for_each_entry_safe(dev, tmp, &c->devices, list)
i2o_device_remove(dev);
- class_device_unregister(c->classdev);
device_del(&c->device);
/* Ask the IOP to switch to RESET state */
@@ -869,12 +838,11 @@ static int i2o_systab_build(void)
i2o_systab.len = sizeof(struct i2o_sys_tbl) + num_controllers *
sizeof(struct i2o_sys_tbl_entry);
- systab = i2o_systab.virt = kmalloc(i2o_systab.len, GFP_KERNEL);
+ systab = i2o_systab.virt = kzalloc(i2o_systab.len, GFP_KERNEL);
if (!systab) {
osm_err("unable to allocate memory for System Table\n");
return -ENOMEM;
}
- memset(systab, 0, i2o_systab.len);
systab->version = I2OVERSION;
systab->change_ind = change_ind + 1;
@@ -952,30 +920,30 @@ static int i2o_parse_hrt(struct i2o_controller *c)
*/
int i2o_status_get(struct i2o_controller *c)
{
- struct i2o_message __iomem *msg;
- u32 m;
+ struct i2o_message *msg;
volatile u8 *status_block;
unsigned long timeout;
status_block = (u8 *) c->status_block.virt;
memset(c->status_block.virt, 0, sizeof(i2o_status_block));
- m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
- if (m == I2O_QUEUE_EMPTY)
- return -ETIMEDOUT;
+ msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
+ if (IS_ERR(msg))
+ return PTR_ERR(msg);
- writel(NINE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
- writel(I2O_CMD_STATUS_GET << 24 | HOST_TID << 12 | ADAPTER_TID,
- &msg->u.head[1]);
- writel(i2o_exec_driver.context, &msg->u.s.icntxt);
- writel(0, &msg->u.s.tcntxt); // FIXME: use resonable transaction context
- writel(0, &msg->body[0]);
- writel(0, &msg->body[1]);
- writel(i2o_dma_low(c->status_block.phys), &msg->body[2]);
- writel(i2o_dma_high(c->status_block.phys), &msg->body[3]);
- writel(sizeof(i2o_status_block), &msg->body[4]); /* always 88 bytes */
+ msg->u.head[0] = cpu_to_le32(NINE_WORD_MSG_SIZE | SGL_OFFSET_0);
+ msg->u.head[1] =
+ cpu_to_le32(I2O_CMD_STATUS_GET << 24 | HOST_TID << 12 |
+ ADAPTER_TID);
+ msg->u.s.icntxt = cpu_to_le32(i2o_exec_driver.context);
+ msg->u.s.tcntxt = cpu_to_le32(0x00000000);
+ msg->body[0] = cpu_to_le32(0x00000000);
+ msg->body[1] = cpu_to_le32(0x00000000);
+ msg->body[2] = cpu_to_le32(i2o_dma_low(c->status_block.phys));
+ msg->body[3] = cpu_to_le32(i2o_dma_high(c->status_block.phys));
+ msg->body[4] = cpu_to_le32(sizeof(i2o_status_block)); /* always 88 bytes */
- i2o_msg_post(c, m);
+ i2o_msg_post(c, msg);
/* Wait for a reply */
timeout = jiffies + I2O_TIMEOUT_STATUS_GET * HZ;
@@ -1002,7 +970,7 @@ int i2o_status_get(struct i2o_controller *c)
* The HRT contains information about possible hidden devices but is
* mostly useless to us.
*
- * Returns 0 on success or negativer error code on failure.
+ * Returns 0 on success or negative error code on failure.
*/
static int i2o_hrt_get(struct i2o_controller *c)
{
@@ -1013,20 +981,20 @@ static int i2o_hrt_get(struct i2o_controller *c)
struct device *dev = &c->pdev->dev;
for (i = 0; i < I2O_HRT_GET_TRIES; i++) {
- struct i2o_message __iomem *msg;
- u32 m;
+ struct i2o_message *msg;
- m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
- if (m == I2O_QUEUE_EMPTY)
- return -ETIMEDOUT;
+ msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
+ if (IS_ERR(msg))
+ return PTR_ERR(msg);
- writel(SIX_WORD_MSG_SIZE | SGL_OFFSET_4, &msg->u.head[0]);
- writel(I2O_CMD_HRT_GET << 24 | HOST_TID << 12 | ADAPTER_TID,
- &msg->u.head[1]);
- writel(0xd0000000 | c->hrt.len, &msg->body[0]);
- writel(c->hrt.phys, &msg->body[1]);
+ msg->u.head[0] = cpu_to_le32(SIX_WORD_MSG_SIZE | SGL_OFFSET_4);
+ msg->u.head[1] =
+ cpu_to_le32(I2O_CMD_HRT_GET << 24 | HOST_TID << 12 |
+ ADAPTER_TID);
+ msg->body[0] = cpu_to_le32(0xd0000000 | c->hrt.len);
+ msg->body[1] = cpu_to_le32(c->hrt.phys);
- rc = i2o_msg_post_wait_mem(c, m, 20, &c->hrt);
+ rc = i2o_msg_post_wait_mem(c, msg, 20, &c->hrt);
if (rc < 0) {
osm_err("%s: Unable to get HRT (status=%#x)\n", c->name,
@@ -1051,15 +1019,6 @@ static int i2o_hrt_get(struct i2o_controller *c)
}
/**
- * i2o_iop_free - Free the i2o_controller struct
- * @c: I2O controller to free
- */
-void i2o_iop_free(struct i2o_controller *c)
-{
- kfree(c);
-};
-
-/**
* i2o_iop_release - release the memory for a I2O controller
* @dev: I2O controller which should be released
*
@@ -1073,14 +1032,11 @@ static void i2o_iop_release(struct device *dev)
i2o_iop_free(c);
};
-/* I2O controller class */
-static struct class *i2o_controller_class;
-
/**
* i2o_iop_alloc - Allocate and initialize a i2o_controller struct
*
* Allocate the necessary memory for an i2o_controller struct and
- * initialize the lists.
+ * initialize the lists and message mempool.
*
* Returns a pointer to the I2O controller or a negative error code on
* failure.
@@ -1089,20 +1045,29 @@ struct i2o_controller *i2o_iop_alloc(void)
{
static int unit = 0; /* 0 and 1 are NULL IOP and Local Host */
struct i2o_controller *c;
+ char poolname[32];
- c = kmalloc(sizeof(*c), GFP_KERNEL);
+ c = kzalloc(sizeof(*c), GFP_KERNEL);
if (!c) {
osm_err("i2o: Insufficient memory to allocate a I2O controller."
"\n");
return ERR_PTR(-ENOMEM);
}
- memset(c, 0, sizeof(*c));
+
+ c->unit = unit++;
+ sprintf(c->name, "iop%d", c->unit);
+
+ snprintf(poolname, sizeof(poolname), "i2o_%s_msg_inpool", c->name);
+ if (i2o_pool_alloc
+ (&c->in_msg, poolname, I2O_INBOUND_MSG_FRAME_SIZE * 4,
+ I2O_MSG_INPOOL_MIN)) {
+ kfree(c);
+ return ERR_PTR(-ENOMEM);
+ };
INIT_LIST_HEAD(&c->devices);
spin_lock_init(&c->lock);
init_MUTEX(&c->lct_lock);
- c->unit = unit++;
- sprintf(c->name, "iop%d", c->unit);
device_initialize(&c->device);
@@ -1137,36 +1102,29 @@ int i2o_iop_add(struct i2o_controller *c)
goto iop_reset;
}
- c->classdev = class_device_create(i2o_controller_class, NULL, MKDEV(0,0),
- &c->device, "iop%d", c->unit);
- if (IS_ERR(c->classdev)) {
- osm_err("%s: could not add controller class\n", c->name);
- goto device_del;
- }
-
osm_info("%s: Activating I2O controller...\n", c->name);
osm_info("%s: This may take a few minutes if there are many devices\n",
c->name);
if ((rc = i2o_iop_activate(c))) {
osm_err("%s: could not activate controller\n", c->name);
- goto class_del;
+ goto device_del;
}
osm_debug("%s: building sys table...\n", c->name);
if ((rc = i2o_systab_build()))
- goto class_del;
+ goto device_del;
osm_debug("%s: online controller...\n", c->name);
if ((rc = i2o_iop_online(c)))
- goto class_del;
+ goto device_del;
osm_debug("%s: getting LCT...\n", c->name);
if ((rc = i2o_exec_lct_get(c)))
- goto class_del;
+ goto device_del;
list_add(&c->list, &i2o_controllers);
@@ -1176,9 +1134,6 @@ int i2o_iop_add(struct i2o_controller *c)
return 0;
- class_del:
- class_device_unregister(c->classdev);
-
device_del:
device_del(&c->device);
@@ -1199,28 +1154,27 @@ int i2o_iop_add(struct i2o_controller *c)
* is waited for, or expected. If you do not want further notifications,
* call i2o_event_register() again with an evt_mask of 0.
*
- * Returns 0 on success or -ETIMEDOUT if no message could be fetched for
- * sending the request.
+ * Returns 0 on success or negative error code on failure.
*/
int i2o_event_register(struct i2o_device *dev, struct i2o_driver *drv,
int tcntxt, u32 evt_mask)
{
struct i2o_controller *c = dev->iop;
- struct i2o_message __iomem *msg;
- u32 m;
+ struct i2o_message *msg;
- m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
- if (m == I2O_QUEUE_EMPTY)
- return -ETIMEDOUT;
+ msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
+ if (IS_ERR(msg))
+ return PTR_ERR(msg);
- writel(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
- writel(I2O_CMD_UTIL_EVT_REGISTER << 24 | HOST_TID << 12 | dev->lct_data.
- tid, &msg->u.head[1]);
- writel(drv->context, &msg->u.s.icntxt);
- writel(tcntxt, &msg->u.s.tcntxt);
- writel(evt_mask, &msg->body[0]);
+ msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0);
+ msg->u.head[1] =
+ cpu_to_le32(I2O_CMD_UTIL_EVT_REGISTER << 24 | HOST_TID << 12 | dev->
+ lct_data.tid);
+ msg->u.s.icntxt = cpu_to_le32(drv->context);
+ msg->u.s.tcntxt = cpu_to_le32(tcntxt);
+ msg->body[0] = cpu_to_le32(evt_mask);
- i2o_msg_post(c, m);
+ i2o_msg_post(c, msg);
return 0;
};
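Editor's note: as the kerneldoc above says, an event registration stays active until it is explicitly cleared. A minimal usage sketch from an OSM's point of view, using only the i2o_event_register() signature shown in this hunk; the driver, context and mask values are illustrative:

        /* enable the events this OSM cares about */
        rc = i2o_event_register(i2o_dev, &my_osm_driver, tcntxt, evt_mask);
        if (rc)
                return rc;              /* no message frame could be obtained */

        /* ... notifications arrive through the registering driver ... */

        /* a later call with an evt_mask of 0 turns notifications off again */
        i2o_event_register(i2o_dev, &my_osm_driver, tcntxt, 0);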
@@ -1239,14 +1193,8 @@ static int __init i2o_iop_init(void)
printk(KERN_INFO OSM_DESCRIPTION " v" OSM_VERSION "\n");
- i2o_controller_class = class_create(THIS_MODULE, "i2o_controller");
- if (IS_ERR(i2o_controller_class)) {
- osm_err("can't register class i2o_controller\n");
- goto exit;
- }
-
if ((rc = i2o_driver_init()))
- goto class_exit;
+ goto exit;
if ((rc = i2o_exec_init()))
goto driver_exit;
@@ -1262,9 +1210,6 @@ static int __init i2o_iop_init(void)
driver_exit:
i2o_driver_exit();
- class_exit:
- class_destroy(i2o_controller_class);
-
exit:
return rc;
}
@@ -1279,7 +1224,6 @@ static void __exit i2o_iop_exit(void)
i2o_pci_exit();
i2o_exec_exit();
i2o_driver_exit();
- class_destroy(i2o_controller_class);
};
module_init(i2o_iop_init);
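Editor's note: i2o_iop_alloc() above now sets up the per-controller inbound message pool at allocation time and names it after the controller, presumably backing the in-memory frames that i2o_msg_get() hands out in the earlier hunks. The ordering keeps error handling trivial: the pool is created before anything else can fail, so the only cleanup needed is kfree(). Condensed from the hunk, with no calls beyond those it already uses:

        c = kzalloc(sizeof(*c), GFP_KERNEL);            /* zeroed, no memset() needed */
        if (!c)
                return ERR_PTR(-ENOMEM);

        c->unit = unit++;
        sprintf(c->name, "iop%d", c->unit);

        snprintf(poolname, sizeof(poolname), "i2o_%s_msg_inpool", c->name);
        if (i2o_pool_alloc(&c->in_msg, poolname,
                           I2O_INBOUND_MSG_FRAME_SIZE * 4, I2O_MSG_INPOOL_MIN)) {
                kfree(c);                               /* only the struct itself to undo */
                return ERR_PTR(-ENOMEM);
        }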
diff --git a/drivers/message/i2o/pci.c b/drivers/message/i2o/pci.c
index ee7075fa1ec..c5b656cdea7 100644
--- a/drivers/message/i2o/pci.c
+++ b/drivers/message/i2o/pci.c
@@ -339,7 +339,7 @@ static int __devinit i2o_pci_probe(struct pci_dev *pdev,
pci_name(pdev));
c->pdev = pdev;
- c->device.parent = get_device(&pdev->dev);
+ c->device.parent = &pdev->dev;
/* Cards that fall apart if you hit them with large I/O loads... */
if (pdev->vendor == PCI_VENDOR_ID_NCR && pdev->device == 0x0630) {
@@ -410,8 +410,6 @@ static int __devinit i2o_pci_probe(struct pci_dev *pdev,
if ((rc = i2o_iop_add(c)))
goto uninstall;
- get_device(&c->device);
-
if (i960)
pci_write_config_word(i960, 0x42, 0x03ff);
@@ -424,7 +422,6 @@ static int __devinit i2o_pci_probe(struct pci_dev *pdev,
i2o_pci_free(c);
free_controller:
- put_device(c->device.parent);
i2o_iop_free(c);
disable:
@@ -454,7 +451,6 @@ static void __devexit i2o_pci_remove(struct pci_dev *pdev)
printk(KERN_INFO "%s: Controller removed.\n", c->name);
- put_device(c->device.parent);
put_device(&c->device);
};
@@ -483,4 +479,5 @@ void __exit i2o_pci_exit(void)
{
pci_unregister_driver(&i2o_pci_driver);
};
+
MODULE_DEVICE_TABLE(pci, i2o_pci_ids);