Diffstat (limited to 'drivers/ieee1394/sbp2.c')
-rw-r--r--  drivers/ieee1394/sbp2.c | 108
1 file changed, 50 insertions(+), 58 deletions(-)
diff --git a/drivers/ieee1394/sbp2.c b/drivers/ieee1394/sbp2.c
index e68b80b7340..4edfff46b1e 100644
--- a/drivers/ieee1394/sbp2.c
+++ b/drivers/ieee1394/sbp2.c
@@ -51,7 +51,6 @@
* Grep for inline FIXME comments below.
*/
-#include <linux/blkdev.h>
#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/device.h>
@@ -304,10 +303,11 @@ static struct scsi_host_template sbp2_shost_template = {
.use_clustering = ENABLE_CLUSTERING,
.cmd_per_lun = SBP2_MAX_CMDS,
.can_queue = SBP2_MAX_CMDS,
- .emulated = 1,
.sdev_attrs = sbp2_sysfs_sdev_attrs,
};
+/* for match-all entries in sbp2_workarounds_table */
+#define SBP2_ROM_VALUE_WILDCARD 0x1000000
/*
* List of devices with known bugs.
@@ -329,22 +329,14 @@ static const struct {
},
/* Initio bridges, actually only needed for some older ones */ {
.firmware_revision = 0x000200,
+ .model_id = SBP2_ROM_VALUE_WILDCARD,
.workarounds = SBP2_WORKAROUND_INQUIRY_36,
},
/* Symbios bridge */ {
.firmware_revision = 0xa0b800,
+ .model_id = SBP2_ROM_VALUE_WILDCARD,
.workarounds = SBP2_WORKAROUND_128K_MAX_TRANS,
},
- /*
- * Note about the following Apple iPod blacklist entries:
- *
- * There are iPods (2nd gen, 3rd gen) with model_id==0. Since our
- * matching logic treats 0 as a wildcard, we cannot match this ID
- * without rewriting the matching routine. Fortunately these iPods
- * do not feature the read_capacity bug according to one report.
- * Read_capacity behaviour as well as model_id could change due to
- * Apple-supplied firmware updates though.
- */
/* iPod 4th generation */ {
.firmware_revision = 0x0a2700,
.model_id = 0x000021,
@@ -490,11 +482,11 @@ static int sbp2util_create_command_orb_pool(struct sbp2_lu *lu)
spin_unlock_irqrestore(&lu->cmd_orb_lock, flags);
return -ENOMEM;
}
- cmd->command_orb_dma = dma_map_single(&hi->host->device,
+ cmd->command_orb_dma = dma_map_single(hi->host->device.parent,
&cmd->command_orb,
sizeof(struct sbp2_command_orb),
DMA_TO_DEVICE);
- cmd->sge_dma = dma_map_single(&hi->host->device,
+ cmd->sge_dma = dma_map_single(hi->host->device.parent,
&cmd->scatter_gather_element,
sizeof(cmd->scatter_gather_element),
DMA_BIDIRECTIONAL);
@@ -516,10 +508,11 @@ static void sbp2util_remove_command_orb_pool(struct sbp2_lu *lu)
if (!list_empty(&lu->cmd_orb_completed))
list_for_each_safe(lh, next, &lu->cmd_orb_completed) {
cmd = list_entry(lh, struct sbp2_command_info, list);
- dma_unmap_single(&host->device, cmd->command_orb_dma,
+ dma_unmap_single(host->device.parent,
+ cmd->command_orb_dma,
sizeof(struct sbp2_command_orb),
DMA_TO_DEVICE);
- dma_unmap_single(&host->device, cmd->sge_dma,
+ dma_unmap_single(host->device.parent, cmd->sge_dma,
sizeof(cmd->scatter_gather_element),
DMA_BIDIRECTIONAL);
kfree(cmd);
@@ -601,17 +594,17 @@ static void sbp2util_mark_command_completed(struct sbp2_lu *lu,
if (cmd->cmd_dma) {
if (cmd->dma_type == CMD_DMA_SINGLE)
- dma_unmap_single(&host->device, cmd->cmd_dma,
+ dma_unmap_single(host->device.parent, cmd->cmd_dma,
cmd->dma_size, cmd->dma_dir);
else if (cmd->dma_type == CMD_DMA_PAGE)
- dma_unmap_page(&host->device, cmd->cmd_dma,
+ dma_unmap_page(host->device.parent, cmd->cmd_dma,
cmd->dma_size, cmd->dma_dir);
/* XXX: Check for CMD_DMA_NONE bug */
cmd->dma_type = CMD_DMA_NONE;
cmd->cmd_dma = 0;
}
if (cmd->sge_buffer) {
- dma_unmap_sg(&host->device, cmd->sge_buffer,
+ dma_unmap_sg(host->device.parent, cmd->sge_buffer,
cmd->dma_size, cmd->dma_dir);
cmd->sge_buffer = NULL;
}
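
The change repeated throughout these hunks is the struct device handed to the DMA API: buffers are now mapped, synced, and unmapped against hi->host->device.parent, i.e. the physical FireWire controller (typically a PCI device), instead of the ieee1394 host's own virtual struct device, which is not itself DMA-capable. A minimal sketch of the pattern; the local names dmadev and cmd are only illustrative shorthand, not new identifiers introduced by the patch:

/* Sketch only: map a command ORB against the physical controller.
 * 'host' is a struct hpsb_host; 'cmd' is a struct sbp2_command_info. */
struct device *dmadev = host->device.parent;    /* e.g. the PCI device */

cmd->command_orb_dma = dma_map_single(dmadev, &cmd->command_orb,
                                      sizeof(struct sbp2_command_orb),
                                      DMA_TO_DEVICE);
/* ... later, unmap through the same device ... */
dma_unmap_single(dmadev, cmd->command_orb_dma,
                 sizeof(struct sbp2_command_orb), DMA_TO_DEVICE);

The DMA API requires the same device pointer for map, sync, and unmap of a given buffer, which is why every dma_* call in the file changes in lockstep.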
@@ -836,37 +829,37 @@ static int sbp2_start_device(struct sbp2_lu *lu)
struct sbp2_fwhost_info *hi = lu->hi;
int error;
- lu->login_response = dma_alloc_coherent(&hi->host->device,
+ lu->login_response = dma_alloc_coherent(hi->host->device.parent,
sizeof(struct sbp2_login_response),
&lu->login_response_dma, GFP_KERNEL);
if (!lu->login_response)
goto alloc_fail;
- lu->query_logins_orb = dma_alloc_coherent(&hi->host->device,
+ lu->query_logins_orb = dma_alloc_coherent(hi->host->device.parent,
sizeof(struct sbp2_query_logins_orb),
&lu->query_logins_orb_dma, GFP_KERNEL);
if (!lu->query_logins_orb)
goto alloc_fail;
- lu->query_logins_response = dma_alloc_coherent(&hi->host->device,
+ lu->query_logins_response = dma_alloc_coherent(hi->host->device.parent,
sizeof(struct sbp2_query_logins_response),
&lu->query_logins_response_dma, GFP_KERNEL);
if (!lu->query_logins_response)
goto alloc_fail;
- lu->reconnect_orb = dma_alloc_coherent(&hi->host->device,
+ lu->reconnect_orb = dma_alloc_coherent(hi->host->device.parent,
sizeof(struct sbp2_reconnect_orb),
&lu->reconnect_orb_dma, GFP_KERNEL);
if (!lu->reconnect_orb)
goto alloc_fail;
- lu->logout_orb = dma_alloc_coherent(&hi->host->device,
+ lu->logout_orb = dma_alloc_coherent(hi->host->device.parent,
sizeof(struct sbp2_logout_orb),
&lu->logout_orb_dma, GFP_KERNEL);
if (!lu->logout_orb)
goto alloc_fail;
- lu->login_orb = dma_alloc_coherent(&hi->host->device,
+ lu->login_orb = dma_alloc_coherent(hi->host->device.parent,
sizeof(struct sbp2_login_orb),
&lu->login_orb_dma, GFP_KERNEL);
if (!lu->login_orb)
@@ -929,32 +922,32 @@ static void sbp2_remove_device(struct sbp2_lu *lu)
list_del(&lu->lu_list);
if (lu->login_response)
- dma_free_coherent(&hi->host->device,
+ dma_free_coherent(hi->host->device.parent,
sizeof(struct sbp2_login_response),
lu->login_response,
lu->login_response_dma);
if (lu->login_orb)
- dma_free_coherent(&hi->host->device,
+ dma_free_coherent(hi->host->device.parent,
sizeof(struct sbp2_login_orb),
lu->login_orb,
lu->login_orb_dma);
if (lu->reconnect_orb)
- dma_free_coherent(&hi->host->device,
+ dma_free_coherent(hi->host->device.parent,
sizeof(struct sbp2_reconnect_orb),
lu->reconnect_orb,
lu->reconnect_orb_dma);
if (lu->logout_orb)
- dma_free_coherent(&hi->host->device,
+ dma_free_coherent(hi->host->device.parent,
sizeof(struct sbp2_logout_orb),
lu->logout_orb,
lu->logout_orb_dma);
if (lu->query_logins_orb)
- dma_free_coherent(&hi->host->device,
+ dma_free_coherent(hi->host->device.parent,
sizeof(struct sbp2_query_logins_orb),
lu->query_logins_orb,
lu->query_logins_orb_dma);
if (lu->query_logins_response)
- dma_free_coherent(&hi->host->device,
+ dma_free_coherent(hi->host->device.parent,
sizeof(struct sbp2_query_logins_response),
lu->query_logins_response,
lu->query_logins_response_dma);
@@ -1306,11 +1299,13 @@ static void sbp2_parse_unit_directory(struct sbp2_lu *lu,
if (!(workarounds & SBP2_WORKAROUND_OVERRIDE))
for (i = 0; i < ARRAY_SIZE(sbp2_workarounds_table); i++) {
- if (sbp2_workarounds_table[i].firmware_revision &&
+ if (sbp2_workarounds_table[i].firmware_revision !=
+ SBP2_ROM_VALUE_WILDCARD &&
sbp2_workarounds_table[i].firmware_revision !=
(firmware_revision & 0xffff00))
continue;
- if (sbp2_workarounds_table[i].model_id &&
+ if (sbp2_workarounds_table[i].model_id !=
+ SBP2_ROM_VALUE_WILDCARD &&
sbp2_workarounds_table[i].model_id != ud->model_id)
continue;
workarounds |= sbp2_workarounds_table[i].workarounds;
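
The lookup above replaces the old implicit rule that a zero firmware_revision or model_id means "match anything". Configuration-ROM immediate entries are 24-bit values, so the new sentinel SBP2_ROM_VALUE_WILDCARD (0x1000000) lies outside the representable range and cannot collide with a real ID; devices whose genuine model_id is 0 (such as the 2nd/3rd generation iPods mentioned in the deleted comment) can now be expressed in the table if an entry is ever needed. A condensed sketch of the new predicate; sbp2_entry_matches() and the named struct are hypothetical, the field names and the 0xffff00 mask follow the driver:

/* Hypothetical helper illustrating the new matching rule. */
struct sbp2_workaround_entry {
        u32 firmware_revision;
        u32 model_id;
        unsigned workarounds;
};

static int sbp2_entry_matches(const struct sbp2_workaround_entry *e,
                              u32 firmware_revision, u32 model_id)
{
        if (e->firmware_revision != SBP2_ROM_VALUE_WILDCARD &&
            e->firmware_revision != (firmware_revision & 0xffff00))
                return 0;
        if (e->model_id != SBP2_ROM_VALUE_WILDCARD &&
            e->model_id != model_id)
                return 0;
        return 1;       /* both keys match or are explicit wildcards */
}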
@@ -1445,7 +1440,7 @@ static void sbp2_prep_command_orb_sg(struct sbp2_command_orb *orb,
cmd->dma_size = sgpnt[0].length;
cmd->dma_type = CMD_DMA_PAGE;
- cmd->cmd_dma = dma_map_page(&hi->host->device,
+ cmd->cmd_dma = dma_map_page(hi->host->device.parent,
sgpnt[0].page, sgpnt[0].offset,
cmd->dma_size, cmd->dma_dir);
@@ -1457,8 +1452,8 @@ static void sbp2_prep_command_orb_sg(struct sbp2_command_orb *orb,
&cmd->scatter_gather_element[0];
u32 sg_count, sg_len;
dma_addr_t sg_addr;
- int i, count = dma_map_sg(&hi->host->device, sgpnt, scsi_use_sg,
- dma_dir);
+ int i, count = dma_map_sg(hi->host->device.parent, sgpnt,
+ scsi_use_sg, dma_dir);
cmd->dma_size = scsi_use_sg;
cmd->sge_buffer = sgpnt;
@@ -1508,7 +1503,8 @@ static void sbp2_prep_command_orb_no_sg(struct sbp2_command_orb *orb,
cmd->dma_dir = dma_dir;
cmd->dma_size = scsi_request_bufflen;
cmd->dma_type = CMD_DMA_SINGLE;
- cmd->cmd_dma = dma_map_single(&hi->host->device, scsi_request_buffer,
+ cmd->cmd_dma = dma_map_single(hi->host->device.parent,
+ scsi_request_buffer,
cmd->dma_size, cmd->dma_dir);
orb->data_descriptor_hi = ORB_SET_NODE_ID(hi->host->node_id);
orb->misc |= ORB_SET_DIRECTION(orb_direction);
@@ -1626,10 +1622,11 @@ static void sbp2_link_orb_command(struct sbp2_lu *lu,
size_t length;
unsigned long flags;
- dma_sync_single_for_device(&hi->host->device, cmd->command_orb_dma,
+ dma_sync_single_for_device(hi->host->device.parent,
+ cmd->command_orb_dma,
sizeof(struct sbp2_command_orb),
DMA_TO_DEVICE);
- dma_sync_single_for_device(&hi->host->device, cmd->sge_dma,
+ dma_sync_single_for_device(hi->host->device.parent, cmd->sge_dma,
sizeof(cmd->scatter_gather_element),
DMA_BIDIRECTIONAL);
@@ -1655,14 +1652,15 @@ static void sbp2_link_orb_command(struct sbp2_lu *lu,
* The target's fetch agent may or may not have read this
* previous ORB yet.
*/
- dma_sync_single_for_cpu(&hi->host->device, last_orb_dma,
+ dma_sync_single_for_cpu(hi->host->device.parent, last_orb_dma,
sizeof(struct sbp2_command_orb),
DMA_TO_DEVICE);
last_orb->next_ORB_lo = cpu_to_be32(cmd->command_orb_dma);
wmb();
/* Tells hardware that this pointer is valid */
last_orb->next_ORB_hi = 0;
- dma_sync_single_for_device(&hi->host->device, last_orb_dma,
+ dma_sync_single_for_device(hi->host->device.parent,
+ last_orb_dma,
sizeof(struct sbp2_command_orb),
DMA_TO_DEVICE);
addr += SBP2_DOORBELL_OFFSET;
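
This hunk keeps the streaming-DMA ownership protocol intact while switching the device pointer: the previously queued ORB is handed back to the CPU before its next_ORB pointer is rewritten, the low word is made visible before the high word marks the pointer valid, and the buffer is handed back to the device before the fetch agent's DOORBELL register is written. Condensed, using the same illustrative dmadev shorthand as above:

dma_sync_single_for_cpu(dmadev, last_orb_dma,
                        sizeof(struct sbp2_command_orb), DMA_TO_DEVICE);
last_orb->next_ORB_lo = cpu_to_be32(cmd->command_orb_dma); /* link new ORB */
wmb();                                  /* order low word before validity */
last_orb->next_ORB_hi = 0;              /* 0 = next_ORB pointer is valid */
dma_sync_single_for_device(dmadev, last_orb_dma,
                           sizeof(struct sbp2_command_orb), DMA_TO_DEVICE);
/* then ring the doorbell at 'addr' (agent base + SBP2_DOORBELL_OFFSET) */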
@@ -1790,10 +1788,11 @@ static int sbp2_handle_status_write(struct hpsb_host *host, int nodeid,
else
cmd = sbp2util_find_command_for_orb(lu, sb->ORB_offset_lo);
if (cmd) {
- dma_sync_single_for_cpu(&hi->host->device, cmd->command_orb_dma,
+ dma_sync_single_for_cpu(hi->host->device.parent,
+ cmd->command_orb_dma,
sizeof(struct sbp2_command_orb),
DMA_TO_DEVICE);
- dma_sync_single_for_cpu(&hi->host->device, cmd->sge_dma,
+ dma_sync_single_for_cpu(hi->host->device.parent, cmd->sge_dma,
sizeof(cmd->scatter_gather_element),
DMA_BIDIRECTIONAL);
/* Grab SCSI command pointers and check status. */
@@ -1882,16 +1881,6 @@ static int sbp2scsi_queuecommand(struct scsi_cmnd *SCpnt,
if (unlikely(SCpnt->device->lun))
goto done;
- /* handle the request sense command here (auto-request sense) */
- if (SCpnt->cmnd[0] == REQUEST_SENSE) {
- memcpy(SCpnt->request_buffer, SCpnt->sense_buffer,
- SCpnt->request_bufflen);
- memset(SCpnt->sense_buffer, 0, sizeof(SCpnt->sense_buffer));
- sbp2scsi_complete_command(lu, SBP2_SCSI_STATUS_GOOD, SCpnt,
- done);
- return 0;
- }
-
if (unlikely(!hpsb_node_entry_valid(lu->ne))) {
SBP2_ERR("Bus reset in progress - rejecting command");
result = DID_BUS_BUSY << 16;
@@ -1931,10 +1920,11 @@ static void sbp2scsi_complete_all_commands(struct sbp2_lu *lu, u32 status)
while (!list_empty(&lu->cmd_orb_inuse)) {
lh = lu->cmd_orb_inuse.next;
cmd = list_entry(lh, struct sbp2_command_info, list);
- dma_sync_single_for_cpu(&hi->host->device, cmd->command_orb_dma,
+ dma_sync_single_for_cpu(hi->host->device.parent,
+ cmd->command_orb_dma,
sizeof(struct sbp2_command_orb),
DMA_TO_DEVICE);
- dma_sync_single_for_cpu(&hi->host->device, cmd->sge_dma,
+ dma_sync_single_for_cpu(hi->host->device.parent, cmd->sge_dma,
sizeof(cmd->scatter_gather_element),
DMA_BIDIRECTIONAL);
sbp2util_mark_command_completed(lu, cmd);
@@ -2021,9 +2011,10 @@ static int sbp2scsi_slave_configure(struct scsi_device *sdev)
{
struct sbp2_lu *lu = (struct sbp2_lu *)sdev->host->hostdata[0];
- blk_queue_dma_alignment(sdev->request_queue, (512 - 1));
sdev->use_10_for_rw = 1;
+ if (sdev->type == TYPE_ROM)
+ sdev->use_10_for_ms = 1;
if (sdev->type == TYPE_DISK &&
lu->workarounds & SBP2_WORKAROUND_MODE_SENSE_8)
sdev->skip_ms_page_8 = 1;
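
Dropping the blk_queue_dma_alignment() call is presumably what lets the <linux/blkdev.h> include go away in the first hunk; the remaining setup asks the SCSI upper layers for 10-byte READ/WRITE everywhere and, newly, 10-byte MODE SENSE on ROM-type devices. As far as it is visible in this hunk, the function now reads roughly:

static int sbp2scsi_slave_configure(struct scsi_device *sdev)
{
        struct sbp2_lu *lu = (struct sbp2_lu *)sdev->host->hostdata[0];

        sdev->use_10_for_rw = 1;

        if (sdev->type == TYPE_ROM)
                sdev->use_10_for_ms = 1;
        if (sdev->type == TYPE_DISK &&
            lu->workarounds & SBP2_WORKAROUND_MODE_SENSE_8)
                sdev->skip_ms_page_8 = 1;
        /* ... remainder of the function lies outside this hunk ... */
}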
@@ -2059,11 +2050,12 @@ static int sbp2scsi_abort(struct scsi_cmnd *SCpnt)
spin_lock_irqsave(&lu->cmd_orb_lock, flags);
cmd = sbp2util_find_command_for_SCpnt(lu, SCpnt);
if (cmd) {
- dma_sync_single_for_cpu(&hi->host->device,
+ dma_sync_single_for_cpu(hi->host->device.parent,
cmd->command_orb_dma,
sizeof(struct sbp2_command_orb),
DMA_TO_DEVICE);
- dma_sync_single_for_cpu(&hi->host->device, cmd->sge_dma,
+ dma_sync_single_for_cpu(hi->host->device.parent,
+ cmd->sge_dma,
sizeof(cmd->scatter_gather_element),
DMA_BIDIRECTIONAL);
sbp2util_mark_command_completed(lu, cmd);