Diffstat (limited to 'drivers')
38 files changed, 802 insertions, 624 deletions
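The bulk of the zfcp changes below replace the adapter's single fsf_req list (fsf_req_list_head / fsf_req_list_lock) with an array of REQUEST_LIST_SIZE list heads indexed by a new per-request req_id, so that completions arriving from qdio are looked up by ID rather than by raw pointer. The standalone userspace C sketch that follows only illustrates that bucketing idea; the kernel code in the diff uses struct list_head and the adapter's req_list_lock, and every sketch_* name here is invented for illustration.

/*
 * Userspace sketch (not the kernel code) of the request-ID hash table
 * this patch adds to zfcp: requests live in one of REQUEST_LIST_SIZE
 * buckets chosen by req_id % REQUEST_LIST_SIZE, so a lookup by req_id
 * no longer has to walk one global list.
 */
#include <stdio.h>

#define REQUEST_LIST_SIZE 128   /* same bucket count as in zfcp_def.h */

struct sketch_req {
	unsigned long req_id;
	struct sketch_req *next;
};

static struct sketch_req *req_list[REQUEST_LIST_SIZE];

static void sketch_reqlist_add(struct sketch_req *req)
{
	unsigned int i = req->req_id % REQUEST_LIST_SIZE;

	/* push onto the head of the bucket for this req_id */
	req->next = req_list[i];
	req_list[i] = req;
}

static struct sketch_req *sketch_reqlist_ismember(unsigned long req_id)
{
	struct sketch_req *req;

	/* only the matching bucket is searched */
	for (req = req_list[req_id % REQUEST_LIST_SIZE]; req; req = req->next)
		if (req->req_id == req_id)
			return req;
	return NULL;
}

int main(void)
{
	struct sketch_req a = { .req_id = 7 };
	struct sketch_req b = { .req_id = 7 + REQUEST_LIST_SIZE };

	sketch_reqlist_add(&a);
	sketch_reqlist_add(&b);   /* same bucket as 'a', different req_id */

	printf("lookup 7   -> %p\n", (void *) sketch_reqlist_ismember(7));
	printf("lookup 135 -> %p\n", (void *) sketch_reqlist_ismember(135));
	printf("lookup 8   -> %p\n", (void *) sketch_reqlist_ismember(8));
	return 0;
}
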
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c index 34b0da5cfa0..1437d7ee3b1 100644 --- a/drivers/infiniband/ulp/iser/iscsi_iser.c +++ b/drivers/infiniband/ulp/iser/iscsi_iser.c @@ -378,21 +378,6 @@ iscsi_iser_conn_start(struct iscsi_cls_conn *cls_conn) return iser_conn_set_full_featured_mode(conn); } -static void -iscsi_iser_conn_terminate(struct iscsi_conn *conn) -{ - struct iscsi_iser_conn *iser_conn = conn->dd_data; - struct iser_conn *ib_conn = iser_conn->ib_conn; - - BUG_ON(!ib_conn); - /* starts conn teardown process, waits until all previously * - * posted buffers get flushed, deallocates all conn resources */ - iser_conn_terminate(ib_conn); - iser_conn->ib_conn = NULL; - conn->recv_lock = NULL; -} - - static struct iscsi_transport iscsi_iser_transport; static struct iscsi_cls_session * @@ -555,13 +540,13 @@ iscsi_iser_ep_poll(__u64 ep_handle, int timeout_ms) static void iscsi_iser_ep_disconnect(__u64 ep_handle) { - struct iser_conn *ib_conn = iscsi_iser_ib_conn_lookup(ep_handle); + struct iser_conn *ib_conn; + ib_conn = iscsi_iser_ib_conn_lookup(ep_handle); if (!ib_conn) return; iser_err("ib conn %p state %d\n",ib_conn, ib_conn->state); - iser_conn_terminate(ib_conn); } @@ -614,9 +599,6 @@ static struct iscsi_transport iscsi_iser_transport = { .get_session_param = iscsi_session_get_param, .start_conn = iscsi_iser_conn_start, .stop_conn = iscsi_conn_stop, - /* these are called as part of conn recovery */ - .suspend_conn_recv = NULL, /* FIXME is/how this relvant to iser? */ - .terminate_conn = iscsi_iser_conn_terminate, /* IO */ .send_pdu = iscsi_conn_send_pdu, .get_stats = iscsi_iser_conn_get_stats, diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c index 9cd789b8acd..adc9d8f2c28 100644 --- a/drivers/s390/scsi/zfcp_aux.c +++ b/drivers/s390/scsi/zfcp_aux.c @@ -112,6 +112,105 @@ _zfcp_hex_dump(char *addr, int count) printk("\n"); } + +/****************************************************************/ +/****** Functions to handle the request ID hash table ********/ +/****************************************************************/ + +#define ZFCP_LOG_AREA ZFCP_LOG_AREA_FSF + +static int zfcp_reqlist_init(struct zfcp_adapter *adapter) +{ + int i; + + adapter->req_list = kcalloc(REQUEST_LIST_SIZE, sizeof(struct list_head), + GFP_KERNEL); + + if (!adapter->req_list) + return -ENOMEM; + + for (i=0; i<REQUEST_LIST_SIZE; i++) + INIT_LIST_HEAD(&adapter->req_list[i]); + + return 0; +} + +static void zfcp_reqlist_free(struct zfcp_adapter *adapter) +{ + struct zfcp_fsf_req *request, *tmp; + unsigned int i; + + for (i=0; i<REQUEST_LIST_SIZE; i++) { + if (list_empty(&adapter->req_list[i])) + continue; + + list_for_each_entry_safe(request, tmp, + &adapter->req_list[i], list) + list_del(&request->list); + } + + kfree(adapter->req_list); +} + +void zfcp_reqlist_add(struct zfcp_adapter *adapter, + struct zfcp_fsf_req *fsf_req) +{ + unsigned int i; + + i = fsf_req->req_id % REQUEST_LIST_SIZE; + list_add_tail(&fsf_req->list, &adapter->req_list[i]); +} + +void zfcp_reqlist_remove(struct zfcp_adapter *adapter, unsigned long req_id) +{ + struct zfcp_fsf_req *request, *tmp; + unsigned int i, counter; + u64 dbg_tmp[2]; + + i = req_id % REQUEST_LIST_SIZE; + BUG_ON(list_empty(&adapter->req_list[i])); + + counter = 0; + list_for_each_entry_safe(request, tmp, &adapter->req_list[i], list) { + if (request->req_id == req_id) { + dbg_tmp[0] = (u64) atomic_read(&adapter->reqs_active); + dbg_tmp[1] = (u64) counter; + 
debug_event(adapter->erp_dbf, 4, (void *) dbg_tmp, 16); + list_del(&request->list); + break; + } + counter++; + } +} + +struct zfcp_fsf_req *zfcp_reqlist_ismember(struct zfcp_adapter *adapter, + unsigned long req_id) +{ + struct zfcp_fsf_req *request, *tmp; + unsigned int i; + + i = req_id % REQUEST_LIST_SIZE; + + list_for_each_entry_safe(request, tmp, &adapter->req_list[i], list) + if (request->req_id == req_id) + return request; + + return NULL; +} + +int zfcp_reqlist_isempty(struct zfcp_adapter *adapter) +{ + unsigned int i; + + for (i=0; i<REQUEST_LIST_SIZE; i++) + if (!list_empty(&adapter->req_list[i])) + return 0; + + return 1; +} + +#undef ZFCP_LOG_AREA + /****************************************************************/ /************** Uncategorised Functions *************************/ /****************************************************************/ @@ -961,8 +1060,12 @@ zfcp_adapter_enqueue(struct ccw_device *ccw_device) INIT_LIST_HEAD(&adapter->port_remove_lh); /* initialize list of fsf requests */ - spin_lock_init(&adapter->fsf_req_list_lock); - INIT_LIST_HEAD(&adapter->fsf_req_list_head); + spin_lock_init(&adapter->req_list_lock); + retval = zfcp_reqlist_init(adapter); + if (retval) { + ZFCP_LOG_INFO("request list initialization failed\n"); + goto failed_low_mem_buffers; + } /* initialize debug locks */ @@ -1041,8 +1144,6 @@ zfcp_adapter_enqueue(struct ccw_device *ccw_device) * !0 - struct zfcp_adapter data structure could not be removed * (e.g. still used) * locks: adapter list write lock is assumed to be held by caller - * adapter->fsf_req_list_lock is taken and released within this - * function and must not be held on entry */ void zfcp_adapter_dequeue(struct zfcp_adapter *adapter) @@ -1054,14 +1155,14 @@ zfcp_adapter_dequeue(struct zfcp_adapter *adapter) zfcp_sysfs_adapter_remove_files(&adapter->ccw_device->dev); dev_set_drvdata(&adapter->ccw_device->dev, NULL); /* sanity check: no pending FSF requests */ - spin_lock_irqsave(&adapter->fsf_req_list_lock, flags); - retval = !list_empty(&adapter->fsf_req_list_head); - spin_unlock_irqrestore(&adapter->fsf_req_list_lock, flags); - if (retval) { + spin_lock_irqsave(&adapter->req_list_lock, flags); + retval = zfcp_reqlist_isempty(adapter); + spin_unlock_irqrestore(&adapter->req_list_lock, flags); + if (!retval) { ZFCP_LOG_NORMAL("bug: adapter %s (%p) still in use, " "%i requests outstanding\n", zfcp_get_busid_by_adapter(adapter), adapter, - atomic_read(&adapter->fsf_reqs_active)); + atomic_read(&adapter->reqs_active)); retval = -EBUSY; goto out; } @@ -1087,6 +1188,7 @@ zfcp_adapter_dequeue(struct zfcp_adapter *adapter) zfcp_free_low_mem_buffers(adapter); /* free memory of adapter data structure and queues */ zfcp_qdio_free_queues(adapter); + zfcp_reqlist_free(adapter); kfree(adapter->fc_stats); kfree(adapter->stats_reset_data); ZFCP_LOG_TRACE("freeing adapter structure\n"); diff --git a/drivers/s390/scsi/zfcp_ccw.c b/drivers/s390/scsi/zfcp_ccw.c index 57d8e4bfb8d..fdabadeaa9e 100644 --- a/drivers/s390/scsi/zfcp_ccw.c +++ b/drivers/s390/scsi/zfcp_ccw.c @@ -164,6 +164,11 @@ zfcp_ccw_set_online(struct ccw_device *ccw_device) retval = zfcp_adapter_scsi_register(adapter); if (retval) goto out_scsi_register; + + /* initialize request counter */ + BUG_ON(!zfcp_reqlist_isempty(adapter)); + adapter->req_no = 0; + zfcp_erp_modify_adapter_status(adapter, ZFCP_STATUS_COMMON_RUNNING, ZFCP_SET); zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED); diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h index 
2df512a18e2..94d1b74db35 100644 --- a/drivers/s390/scsi/zfcp_def.h +++ b/drivers/s390/scsi/zfcp_def.h @@ -52,7 +52,7 @@ /********************* GENERAL DEFINES *********************************/ /* zfcp version number, it consists of major, minor, and patch-level number */ -#define ZFCP_VERSION "4.7.0" +#define ZFCP_VERSION "4.8.0" /** * zfcp_sg_to_address - determine kernel address from struct scatterlist @@ -80,7 +80,7 @@ zfcp_address_to_sg(void *address, struct scatterlist *list) #define REQUEST_LIST_SIZE 128 /********************* SCSI SPECIFIC DEFINES *********************************/ -#define ZFCP_SCSI_ER_TIMEOUT (100*HZ) +#define ZFCP_SCSI_ER_TIMEOUT (10*HZ) /********************* CIO/QDIO SPECIFIC DEFINES *****************************/ @@ -886,11 +886,11 @@ struct zfcp_adapter { struct list_head port_remove_lh; /* head of ports to be removed */ u32 ports; /* number of remote ports */ - struct timer_list scsi_er_timer; /* SCSI err recovery watch */ - struct list_head fsf_req_list_head; /* head of FSF req list */ - spinlock_t fsf_req_list_lock; /* lock for ops on list of - FSF requests */ - atomic_t fsf_reqs_active; /* # active FSF reqs */ + struct timer_list scsi_er_timer; /* SCSI err recovery watch */ + atomic_t reqs_active; /* # active FSF reqs */ + unsigned long req_no; /* unique FSF req number */ + struct list_head *req_list; /* list of pending reqs */ + spinlock_t req_list_lock; /* request list lock */ struct zfcp_qdio_queue request_queue; /* request queue */ u32 fsf_req_seq_no; /* FSF cmnd seq number */ wait_queue_head_t request_wq; /* can be used to wait for @@ -986,6 +986,7 @@ struct zfcp_unit { /* FSF request */ struct zfcp_fsf_req { struct list_head list; /* list of FSF requests */ + unsigned long req_id; /* unique request ID */ struct zfcp_adapter *adapter; /* adapter request belongs to */ u8 sbal_number; /* nr of SBALs free for use */ u8 sbal_first; /* first SBAL for this request */ diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c index 8ec8da0beaa..7f60b6fdf72 100644 --- a/drivers/s390/scsi/zfcp_erp.c +++ b/drivers/s390/scsi/zfcp_erp.c @@ -64,8 +64,8 @@ static int zfcp_erp_strategy_check_action(struct zfcp_erp_action *, int); static int zfcp_erp_adapter_strategy(struct zfcp_erp_action *); static int zfcp_erp_adapter_strategy_generic(struct zfcp_erp_action *, int); static int zfcp_erp_adapter_strategy_close(struct zfcp_erp_action *); -static int zfcp_erp_adapter_strategy_close_qdio(struct zfcp_erp_action *); -static int zfcp_erp_adapter_strategy_close_fsf(struct zfcp_erp_action *); +static void zfcp_erp_adapter_strategy_close_qdio(struct zfcp_erp_action *); +static void zfcp_erp_adapter_strategy_close_fsf(struct zfcp_erp_action *); static int zfcp_erp_adapter_strategy_open(struct zfcp_erp_action *); static int zfcp_erp_adapter_strategy_open_qdio(struct zfcp_erp_action *); static int zfcp_erp_adapter_strategy_open_fsf(struct zfcp_erp_action *); @@ -93,10 +93,9 @@ static int zfcp_erp_unit_strategy_clearstati(struct zfcp_unit *); static int zfcp_erp_unit_strategy_close(struct zfcp_erp_action *); static int zfcp_erp_unit_strategy_open(struct zfcp_erp_action *); -static int zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *); -static int zfcp_erp_action_dismiss_port(struct zfcp_port *); -static int zfcp_erp_action_dismiss_unit(struct zfcp_unit *); -static int zfcp_erp_action_dismiss(struct zfcp_erp_action *); +static void zfcp_erp_action_dismiss_port(struct zfcp_port *); +static void zfcp_erp_action_dismiss_unit(struct zfcp_unit *); +static void 
zfcp_erp_action_dismiss(struct zfcp_erp_action *); static int zfcp_erp_action_enqueue(int, struct zfcp_adapter *, struct zfcp_port *, struct zfcp_unit *); @@ -135,29 +134,39 @@ zfcp_fsf_request_timeout_handler(unsigned long data) zfcp_erp_adapter_reopen(adapter, 0); } -/* - * function: zfcp_fsf_scsi_er_timeout_handler - * - * purpose: This function needs to be called whenever a SCSI error recovery - * action (abort/reset) does not return. - * Re-opening the adapter means that the command can be returned - * by zfcp (it is guarranteed that it does not return via the - * adapter anymore). The buffer can then be used again. - * - * returns: sod all +/** + * zfcp_fsf_scsi_er_timeout_handler - timeout handler for scsi eh tasks + * + * This function needs to be called whenever a SCSI error recovery + * action (abort/reset) does not return. Re-opening the adapter means + * that the abort/reset command can be returned by zfcp. It won't complete + * via the adapter anymore (because qdio queues are closed). If ERP is + * already running on this adapter it will be stopped. */ -void -zfcp_fsf_scsi_er_timeout_handler(unsigned long data) +void zfcp_fsf_scsi_er_timeout_handler(unsigned long data) { struct zfcp_adapter *adapter = (struct zfcp_adapter *) data; + unsigned long flags; ZFCP_LOG_NORMAL("warning: SCSI error recovery timed out. " "Restarting all operations on the adapter %s\n", zfcp_get_busid_by_adapter(adapter)); debug_text_event(adapter->erp_dbf, 1, "eh_lmem_tout"); - zfcp_erp_adapter_reopen(adapter, 0); - return; + write_lock_irqsave(&adapter->erp_lock, flags); + if (atomic_test_mask(ZFCP_STATUS_ADAPTER_ERP_PENDING, + &adapter->status)) { + zfcp_erp_modify_adapter_status(adapter, + ZFCP_STATUS_COMMON_UNBLOCKED|ZFCP_STATUS_COMMON_OPEN, + ZFCP_CLEAR); + zfcp_erp_action_dismiss_adapter(adapter); + write_unlock_irqrestore(&adapter->erp_lock, flags); + /* dismiss all pending requests including requests for ERP */ + zfcp_fsf_req_dismiss_all(adapter); + adapter->fsf_req_seq_no = 0; + } else + write_unlock_irqrestore(&adapter->erp_lock, flags); + zfcp_erp_adapter_reopen(adapter, 0); } /* @@ -670,17 +679,10 @@ zfcp_erp_unit_reopen(struct zfcp_unit *unit, int clear_mask) return retval; } -/* - * function: - * - * purpose: disable I/O, - * return any open requests and clean them up, - * aim: no pending and incoming I/O - * - * returns: +/** + * zfcp_erp_adapter_block - mark adapter as blocked, block scsi requests */ -static void -zfcp_erp_adapter_block(struct zfcp_adapter *adapter, int clear_mask) +static void zfcp_erp_adapter_block(struct zfcp_adapter *adapter, int clear_mask) { debug_text_event(adapter->erp_dbf, 6, "a_bl"); zfcp_erp_modify_adapter_status(adapter, @@ -688,15 +690,10 @@ zfcp_erp_adapter_block(struct zfcp_adapter *adapter, int clear_mask) clear_mask, ZFCP_CLEAR); } -/* - * function: - * - * purpose: enable I/O - * - * returns: +/** + * zfcp_erp_adapter_unblock - mark adapter as unblocked, allow scsi requests */ -static void -zfcp_erp_adapter_unblock(struct zfcp_adapter *adapter) +static void zfcp_erp_adapter_unblock(struct zfcp_adapter *adapter) { debug_text_event(adapter->erp_dbf, 6, "a_ubl"); atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &adapter->status); @@ -848,18 +845,16 @@ zfcp_erp_strategy_check_fsfreq(struct zfcp_erp_action *erp_action) struct zfcp_adapter *adapter = erp_action->adapter; if (erp_action->fsf_req) { - /* take lock to ensure that request is not being deleted meanwhile */ - spin_lock(&adapter->fsf_req_list_lock); - /* check whether fsf req does still exist */ - 
list_for_each_entry(fsf_req, &adapter->fsf_req_list_head, list) - if (fsf_req == erp_action->fsf_req) - break; - if (fsf_req && (fsf_req->erp_action == erp_action)) { + /* take lock to ensure that request is not deleted meanwhile */ + spin_lock(&adapter->req_list_lock); + if ((!zfcp_reqlist_ismember(adapter, + erp_action->fsf_req->req_id)) && + (fsf_req->erp_action == erp_action)) { /* fsf_req still exists */ debug_text_event(adapter->erp_dbf, 3, "a_ca_req"); debug_event(adapter->erp_dbf, 3, &fsf_req, sizeof (unsigned long)); - /* dismiss fsf_req of timed out or dismissed erp_action */ + /* dismiss fsf_req of timed out/dismissed erp_action */ if (erp_action->status & (ZFCP_STATUS_ERP_DISMISSED | ZFCP_STATUS_ERP_TIMEDOUT)) { debug_text_event(adapter->erp_dbf, 3, @@ -892,30 +887,22 @@ zfcp_erp_strategy_check_fsfreq(struct zfcp_erp_action *erp_action) */ erp_action->fsf_req = NULL; } - spin_unlock(&adapter->fsf_req_list_lock); + spin_unlock(&adapter->req_list_lock); } else debug_text_event(adapter->erp_dbf, 3, "a_ca_noreq"); return retval; } -/* - * purpose: generic handler for asynchronous events related to erp_action events - * (normal completion, time-out, dismissing, retry after - * low memory condition) - * - * note: deletion of timer is not required (e.g. in case of a time-out), - * but a second try does no harm, - * we leave it in here to allow for greater simplification +/** + * zfcp_erp_async_handler_nolock - complete erp_action * - * returns: 0 - there was an action to handle - * !0 - otherwise + * Used for normal completion, time-out, dismissal and failure after + * low memory condition. */ -static int -zfcp_erp_async_handler_nolock(struct zfcp_erp_action *erp_action, - unsigned long set_mask) +static void zfcp_erp_async_handler_nolock(struct zfcp_erp_action *erp_action, + unsigned long set_mask) { - int retval; struct zfcp_adapter *adapter = erp_action->adapter; if (zfcp_erp_action_exists(erp_action) == ZFCP_ERP_ACTION_RUNNING) { @@ -926,43 +913,26 @@ zfcp_erp_async_handler_nolock(struct zfcp_erp_action *erp_action, del_timer(&erp_action->timer); erp_action->status |= set_mask; zfcp_erp_action_ready(erp_action); - retval = 0; } else { /* action is ready or gone - nothing to do */ debug_text_event(adapter->erp_dbf, 3, "a_asyh_gone"); debug_event(adapter->erp_dbf, 3, &erp_action->action, sizeof (int)); - retval = 1; } - - return retval; } -/* - * purpose: generic handler for asynchronous events related to erp_action - * events (normal completion, time-out, dismissing, retry after - * low memory condition) - * - * note: deletion of timer is not required (e.g. 
in case of a time-out), - * but a second try does no harm, - * we leave it in here to allow for greater simplification - * - * returns: 0 - there was an action to handle - * !0 - otherwise +/** + * zfcp_erp_async_handler - wrapper for erp_async_handler_nolock w/ locking */ -int -zfcp_erp_async_handler(struct zfcp_erp_action *erp_action, - unsigned long set_mask) +void zfcp_erp_async_handler(struct zfcp_erp_action *erp_action, + unsigned long set_mask) { struct zfcp_adapter *adapter = erp_action->adapter; unsigned long flags; - int retval; write_lock_irqsave(&adapter->erp_lock, flags); - retval = zfcp_erp_async_handler_nolock(erp_action, set_mask); + zfcp_erp_async_handler_nolock(erp_action, set_mask); write_unlock_irqrestore(&adapter->erp_lock, flags); - - return retval; } /* @@ -999,17 +969,15 @@ zfcp_erp_timeout_handler(unsigned long data) zfcp_erp_async_handler(erp_action, ZFCP_STATUS_ERP_TIMEDOUT); } -/* - * purpose: is called for an erp_action which needs to be ended - * though not being done, - * this is usually required if an higher is generated, - * action gets an appropriate flag and will be processed - * accordingly +/** + * zfcp_erp_action_dismiss - dismiss an erp_action * - * locks: erp_lock held (thus we need to call another handler variant) + * adapter->erp_lock must be held + * + * Dismissal of an erp_action is usually required if an erp_action of + * higher priority is generated. */ -static int -zfcp_erp_action_dismiss(struct zfcp_erp_action *erp_action) +static void zfcp_erp_action_dismiss(struct zfcp_erp_action *erp_action) { struct zfcp_adapter *adapter = erp_action->adapter; @@ -1017,8 +985,6 @@ zfcp_erp_action_dismiss(struct zfcp_erp_action *erp_action) debug_event(adapter->erp_dbf, 2, &erp_action->action, sizeof (int)); zfcp_erp_async_handler_nolock(erp_action, ZFCP_STATUS_ERP_DISMISSED); - - return 0; } int @@ -2074,18 +2040,12 @@ zfcp_erp_adapter_strategy_open_qdio(struct zfcp_erp_action *erp_action) return retval; } -/* - * function: zfcp_qdio_cleanup - * - * purpose: cleans up QDIO operation for the specified adapter - * - * returns: 0 - successful cleanup - * !0 - failed cleanup +/** + * zfcp_erp_adapter_strategy_close_qdio - close qdio queues for an adapter */ -int +static void zfcp_erp_adapter_strategy_close_qdio(struct zfcp_erp_action *erp_action) { - int retval = ZFCP_ERP_SUCCEEDED; int first_used; int used_count; struct zfcp_adapter *adapter = erp_action->adapter; @@ -2094,15 +2054,13 @@ zfcp_erp_adapter_strategy_close_qdio(struct zfcp_erp_action *erp_action) ZFCP_LOG_DEBUG("error: attempt to shut down inactive QDIO " "queues on adapter %s\n", zfcp_get_busid_by_adapter(adapter)); - retval = ZFCP_ERP_FAILED; - goto out; + return; } /* * Get queue_lock and clear QDIOUP flag. Thus it's guaranteed that * do_QDIO won't be called while qdio_shutdown is in progress. 
*/ - write_lock_irq(&adapter->request_queue.queue_lock); atomic_clear_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status); write_unlock_irq(&adapter->request_queue.queue_lock); @@ -2134,8 +2092,6 @@ zfcp_erp_adapter_strategy_close_qdio(struct zfcp_erp_action *erp_action) adapter->request_queue.free_index = 0; atomic_set(&adapter->request_queue.free_count, 0); adapter->request_queue.distance_from_int = 0; - out: - return retval; } static int @@ -2258,11 +2214,11 @@ zfcp_erp_adapter_strategy_open_fsf_xport(struct zfcp_erp_action *erp_action) "%s)\n", zfcp_get_busid_by_adapter(adapter)); ret = ZFCP_ERP_FAILED; } - if (!atomic_test_mask(ZFCP_STATUS_ADAPTER_XPORT_OK, &adapter->status)) { - ZFCP_LOG_INFO("error: exchange port data failed (adapter " + + /* don't treat as error for the sake of compatibility */ + if (!atomic_test_mask(ZFCP_STATUS_ADAPTER_XPORT_OK, &adapter->status)) + ZFCP_LOG_INFO("warning: exchange port data failed (adapter " "%s\n", zfcp_get_busid_by_adapter(adapter)); - ret = ZFCP_ERP_FAILED; - } return ret; } @@ -2292,18 +2248,12 @@ zfcp_erp_adapter_strategy_open_fsf_statusread(struct zfcp_erp_action return retval; } -/* - * function: zfcp_fsf_cleanup - * - * purpose: cleanup FSF operation for specified adapter - * - * returns: 0 - FSF operation successfully cleaned up - * !0 - failed to cleanup FSF operation for this adapter +/** + * zfcp_erp_adapter_strategy_close_fsf - stop FSF operations for an adapter */ -static int +static void zfcp_erp_adapter_strategy_close_fsf(struct zfcp_erp_action *erp_action) { - int retval = ZFCP_ERP_SUCCEEDED; struct zfcp_adapter *adapter = erp_action->adapter; /* @@ -2317,8 +2267,6 @@ zfcp_erp_adapter_strategy_close_fsf(struct zfcp_erp_action *erp_action) /* all ports and units are closed */ zfcp_erp_modify_adapter_status(adapter, ZFCP_STATUS_COMMON_OPEN, ZFCP_CLEAR); - - return retval; } /* @@ -3293,10 +3241,8 @@ zfcp_erp_action_cleanup(int action, struct zfcp_adapter *adapter, } -static int -zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *adapter) +void zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *adapter) { - int retval = 0; struct zfcp_port *port; debug_text_event(adapter->erp_dbf, 5, "a_actab"); @@ -3305,14 +3251,10 @@ zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *adapter) else list_for_each_entry(port, &adapter->port_list_head, list) zfcp_erp_action_dismiss_port(port); - - return retval; } -static int -zfcp_erp_action_dismiss_port(struct zfcp_port *port) +static void zfcp_erp_action_dismiss_port(struct zfcp_port *port) { - int retval = 0; struct zfcp_unit *unit; struct zfcp_adapter *adapter = port->adapter; @@ -3323,22 +3265,16 @@ zfcp_erp_action_dismiss_port(struct zfcp_port *port) else list_for_each_entry(unit, &port->unit_list_head, list) zfcp_erp_action_dismiss_unit(unit); - - return retval; } -static int -zfcp_erp_action_dismiss_unit(struct zfcp_unit *unit) +static void zfcp_erp_action_dismiss_unit(struct zfcp_unit *unit) { - int retval = 0; struct zfcp_adapter *adapter = unit->port->adapter; debug_text_event(adapter->erp_dbf, 5, "u_actab"); debug_event(adapter->erp_dbf, 5, &unit->fcp_lun, sizeof (fcp_lun_t)); if (atomic_test_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &unit->status)) zfcp_erp_action_dismiss(&unit->erp_action); - - return retval; } static inline void diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h index d02366004cd..146d7a2b4c4 100644 --- a/drivers/s390/scsi/zfcp_ext.h +++ b/drivers/s390/scsi/zfcp_ext.h @@ -63,7 +63,6 @@ extern int zfcp_qdio_allocate_queues(struct zfcp_adapter *); 
extern void zfcp_qdio_free_queues(struct zfcp_adapter *); extern int zfcp_qdio_determine_pci(struct zfcp_qdio_queue *, struct zfcp_fsf_req *); -extern int zfcp_qdio_reqid_check(struct zfcp_adapter *, void *); extern volatile struct qdio_buffer_element *zfcp_qdio_sbale_req (struct zfcp_fsf_req *, int, int); @@ -140,6 +139,7 @@ extern void zfcp_erp_modify_adapter_status(struct zfcp_adapter *, u32, int); extern int zfcp_erp_adapter_reopen(struct zfcp_adapter *, int); extern int zfcp_erp_adapter_shutdown(struct zfcp_adapter *, int); extern void zfcp_erp_adapter_failed(struct zfcp_adapter *); +extern void zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *); extern void zfcp_erp_modify_port_status(struct zfcp_port *, u32, int); extern int zfcp_erp_port_reopen(struct zfcp_port *, int); @@ -156,7 +156,7 @@ extern void zfcp_erp_unit_failed(struct zfcp_unit *); extern int zfcp_erp_thread_setup(struct zfcp_adapter *); extern int zfcp_erp_thread_kill(struct zfcp_adapter *); extern int zfcp_erp_wait(struct zfcp_adapter *); -extern int zfcp_erp_async_handler(struct zfcp_erp_action *, unsigned long); +extern void zfcp_erp_async_handler(struct zfcp_erp_action *, unsigned long); extern int zfcp_test_link(struct zfcp_port *); @@ -190,5 +190,10 @@ extern void zfcp_scsi_dbf_event_abort(const char *, struct zfcp_adapter *, struct zfcp_fsf_req *); extern void zfcp_scsi_dbf_event_devreset(const char *, u8, struct zfcp_unit *, struct scsi_cmnd *); +extern void zfcp_reqlist_add(struct zfcp_adapter *, struct zfcp_fsf_req *); +extern void zfcp_reqlist_remove(struct zfcp_adapter *, unsigned long); +extern struct zfcp_fsf_req *zfcp_reqlist_ismember(struct zfcp_adapter *, + unsigned long); +extern int zfcp_reqlist_isempty(struct zfcp_adapter *); #endif /* ZFCP_EXT_H */ diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c index 31db2b06fab..ff2eacf5ec8 100644 --- a/drivers/s390/scsi/zfcp_fsf.c +++ b/drivers/s390/scsi/zfcp_fsf.c @@ -49,7 +49,6 @@ static int zfcp_fsf_fsfstatus_qual_eval(struct zfcp_fsf_req *); static void zfcp_fsf_link_down_info_eval(struct zfcp_adapter *, struct fsf_link_down_info *); static int zfcp_fsf_req_dispatch(struct zfcp_fsf_req *); -static void zfcp_fsf_req_dismiss(struct zfcp_fsf_req *); /* association between FSF command and FSF QTCB type */ static u32 fsf_qtcb_type[] = { @@ -146,47 +145,48 @@ zfcp_fsf_req_free(struct zfcp_fsf_req *fsf_req) kfree(fsf_req); } -/* - * function: - * - * purpose: - * - * returns: - * - * note: qdio queues shall be down (no ongoing inbound processing) +/** + * zfcp_fsf_req_dismiss - dismiss a single fsf request */ -int -zfcp_fsf_req_dismiss_all(struct zfcp_adapter *adapter) +static void zfcp_fsf_req_dismiss(struct zfcp_adapter *adapter, + struct zfcp_fsf_req *fsf_req, + unsigned int counter) { - struct zfcp_fsf_req *fsf_req, *tmp; - unsigned long flags; - LIST_HEAD(remove_queue); + u64 dbg_tmp[2]; - spin_lock_irqsave(&adapter->fsf_req_list_lock, flags); - list_splice_init(&adapter->fsf_req_list_head, &remove_queue); - atomic_set(&adapter->fsf_reqs_active, 0); - spin_unlock_irqrestore(&adapter->fsf_req_list_lock, flags); - - list_for_each_entry_safe(fsf_req, tmp, &remove_queue, list) { - list_del(&fsf_req->list); - zfcp_fsf_req_dismiss(fsf_req); - } - - return 0; + dbg_tmp[0] = (u64) atomic_read(&adapter->reqs_active); + dbg_tmp[1] = (u64) counter; + debug_event(adapter->erp_dbf, 4, (void *) dbg_tmp, 16); + list_del(&fsf_req->list); + fsf_req->status |= ZFCP_STATUS_FSFREQ_DISMISSED; + zfcp_fsf_req_complete(fsf_req); } -/* - * function: - * - * 
purpose: - * - * returns: +/** + * zfcp_fsf_req_dismiss_all - dismiss all remaining fsf requests */ -static void -zfcp_fsf_req_dismiss(struct zfcp_fsf_req *fsf_req) +int zfcp_fsf_req_dismiss_all(struct zfcp_adapter *adapter) { - fsf_req->status |= ZFCP_STATUS_FSFREQ_DISMISSED; - zfcp_fsf_req_complete(fsf_req); + struct zfcp_fsf_req *request, *tmp; + unsigned long flags; + unsigned int i, counter; + + spin_lock_irqsave(&adapter->req_list_lock, flags); + atomic_set(&adapter->reqs_active, 0); + for (i=0; i<REQUEST_LIST_SIZE; i++) { + if (list_empty(&adapter->req_list[i])) + continue; + + counter = 0; + list_for_each_entry_safe(request, tmp, + &adapter->req_list[i], list) { + zfcp_fsf_req_dismiss(adapter, request, counter); + counter++; + } + } + spin_unlock_irqrestore(&adapter->req_list_lock, flags); + + return 0; } /* @@ -4592,12 +4592,14 @@ static inline void zfcp_fsf_req_qtcb_init(struct zfcp_fsf_req *fsf_req) { if (likely(fsf_req->qtcb != NULL)) { - fsf_req->qtcb->prefix.req_seq_no = fsf_req->adapter->fsf_req_seq_no; - fsf_req->qtcb->prefix.req_id = (unsigned long)fsf_req; + fsf_req->qtcb->prefix.req_seq_no = + fsf_req->adapter->fsf_req_seq_no; + fsf_req->qtcb->prefix.req_id = fsf_req->req_id; fsf_req->qtcb->prefix.ulp_info = ZFCP_ULP_INFO_VERSION; - fsf_req->qtcb->prefix.qtcb_type = fsf_qtcb_type[fsf_req->fsf_command]; + fsf_req->qtcb->prefix.qtcb_type = + fsf_qtcb_type[fsf_req->fsf_command]; fsf_req->qtcb->prefix.qtcb_version = ZFCP_QTCB_VERSION; - fsf_req->qtcb->header.req_handle = (unsigned long)fsf_req; + fsf_req->qtcb->header.req_handle = fsf_req->req_id; fsf_req->qtcb->header.fsf_command = fsf_req->fsf_command; } } @@ -4654,6 +4656,7 @@ zfcp_fsf_req_create(struct zfcp_adapter *adapter, u32 fsf_cmd, int req_flags, { volatile struct qdio_buffer_element *sbale; struct zfcp_fsf_req *fsf_req = NULL; + unsigned long flags; int ret = 0; struct zfcp_qdio_queue *req_queue = &adapter->request_queue; @@ -4668,6 +4671,12 @@ zfcp_fsf_req_create(struct zfcp_adapter *adapter, u32 fsf_cmd, int req_flags, fsf_req->adapter = adapter; fsf_req->fsf_command = fsf_cmd; + INIT_LIST_HEAD(&fsf_req->list); + + /* unique request id */ + spin_lock_irqsave(&adapter->req_list_lock, flags); + fsf_req->req_id = adapter->req_no++; + spin_unlock_irqrestore(&adapter->req_list_lock, flags); zfcp_fsf_req_qtcb_init(fsf_req); @@ -4707,7 +4716,7 @@ zfcp_fsf_req_create(struct zfcp_adapter *adapter, u32 fsf_cmd, int req_flags, sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0); /* setup common SBALE fields */ - sbale[0].addr = fsf_req; + sbale[0].addr = (void *) fsf_req->req_id; sbale[0].flags |= SBAL_FLAGS0_COMMAND; if (likely(fsf_req->qtcb != NULL)) { sbale[1].addr = (void *) fsf_req->qtcb; @@ -4747,7 +4756,7 @@ zfcp_fsf_req_send(struct zfcp_fsf_req *fsf_req, struct timer_list *timer) volatile struct qdio_buffer_element *sbale; int inc_seq_no; int new_distance_from_int; - unsigned long flags; + u64 dbg_tmp[2]; int retval = 0; adapter = fsf_req->adapter; @@ -4761,10 +4770,10 @@ zfcp_fsf_req_send(struct zfcp_fsf_req *fsf_req, struct timer_list *timer) ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_TRACE, (char *) sbale[1].addr, sbale[1].length); - /* put allocated FSF request at list tail */ - spin_lock_irqsave(&adapter->fsf_req_list_lock, flags); - list_add_tail(&fsf_req->list, &adapter->fsf_req_list_head); - spin_unlock_irqrestore(&adapter->fsf_req_list_lock, flags); + /* put allocated FSF request into hash table */ + spin_lock(&adapter->req_list_lock); + zfcp_reqlist_add(adapter, fsf_req); + spin_unlock(&adapter->req_list_lock); 
inc_seq_no = (fsf_req->qtcb != NULL); @@ -4803,6 +4812,10 @@ zfcp_fsf_req_send(struct zfcp_fsf_req *fsf_req, struct timer_list *timer) QDIO_FLAG_SYNC_OUTPUT, 0, fsf_req->sbal_first, fsf_req->sbal_number, NULL); + dbg_tmp[0] = (unsigned long) sbale[0].addr; + dbg_tmp[1] = (u64) retval; + debug_event(adapter->erp_dbf, 4, (void *) dbg_tmp, 16); + if (unlikely(retval)) { /* Queues are down..... */ retval = -EIO; @@ -4812,22 +4825,17 @@ zfcp_fsf_req_send(struct zfcp_fsf_req *fsf_req, struct timer_list *timer) */ if (timer) del_timer(timer); - spin_lock_irqsave(&adapter->fsf_req_list_lock, flags); - list_del(&fsf_req->list); - spin_unlock_irqrestore(&adapter->fsf_req_list_lock, flags); - /* - * adjust the number of free SBALs in request queue as well as - * position of first one - */ + spin_lock(&adapter->req_list_lock); + zfcp_reqlist_remove(adapter, fsf_req->req_id); + spin_unlock(&adapter->req_list_lock); + /* undo changes in request queue made for this request */ zfcp_qdio_zero_sbals(req_queue->buffer, fsf_req->sbal_first, fsf_req->sbal_number); atomic_add(fsf_req->sbal_number, &req_queue->free_count); - req_queue->free_index -= fsf_req->sbal_number; /* increase */ + req_queue->free_index -= fsf_req->sbal_number; req_queue->free_index += QDIO_MAX_BUFFERS_PER_Q; req_queue->free_index %= QDIO_MAX_BUFFERS_PER_Q; /* wrap */ - ZFCP_LOG_DEBUG - ("error: do_QDIO failed. Buffers could not be enqueued " - "to request queue.\n"); + zfcp_erp_adapter_reopen(adapter, 0); } else { req_queue->distance_from_int = new_distance_from_int; /* @@ -4843,7 +4851,7 @@ zfcp_fsf_req_send(struct zfcp_fsf_req *fsf_req, struct timer_list *timer) adapter->fsf_req_seq_no++; /* count FSF requests pending */ - atomic_inc(&adapter->fsf_reqs_active); + atomic_inc(&adapter->reqs_active); } return retval; } diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c index 49ea5add4ab..dbd9f48e863 100644 --- a/drivers/s390/scsi/zfcp_qdio.c +++ b/drivers/s390/scsi/zfcp_qdio.c @@ -282,6 +282,37 @@ zfcp_qdio_request_handler(struct ccw_device *ccw_device, return; } +/** + * zfcp_qdio_reqid_check - checks for valid reqids or unsolicited status + */ +static int zfcp_qdio_reqid_check(struct zfcp_adapter *adapter, + unsigned long req_id) +{ + struct zfcp_fsf_req *fsf_req; + unsigned long flags; + + debug_long_event(adapter->erp_dbf, 4, req_id); + + spin_lock_irqsave(&adapter->req_list_lock, flags); + fsf_req = zfcp_reqlist_ismember(adapter, req_id); + + if (!fsf_req) { + spin_unlock_irqrestore(&adapter->req_list_lock, flags); + ZFCP_LOG_NORMAL("error: unknown request id (%ld).\n", req_id); + zfcp_erp_adapter_reopen(adapter, 0); + return -EINVAL; + } + + zfcp_reqlist_remove(adapter, req_id); + atomic_dec(&adapter->reqs_active); + spin_unlock_irqrestore(&adapter->req_list_lock, flags); + + /* finish the FSF request */ + zfcp_fsf_req_complete(fsf_req); + + return 0; +} + /* * function: zfcp_qdio_response_handler * @@ -344,7 +375,7 @@ zfcp_qdio_response_handler(struct ccw_device *ccw_device, /* look for QDIO request identifiers in SB */ buffere = &buffer->element[buffere_index]; retval = zfcp_qdio_reqid_check(adapter, - (void *) buffere->addr); + (unsigned long) buffere->addr); if (retval) { ZFCP_LOG_NORMAL("bug: unexpected inbound " @@ -415,52 +446,6 @@ zfcp_qdio_response_handler(struct ccw_device *ccw_device, return; } -/* - * function: zfcp_qdio_reqid_check - * - * purpose: checks for valid reqids or unsolicited status - * - * returns: 0 - valid request id or unsolicited status - * !0 - otherwise - */ -int 
-zfcp_qdio_reqid_check(struct zfcp_adapter *adapter, void *sbale_addr) -{ - struct zfcp_fsf_req *fsf_req; - unsigned long flags; - - /* invalid (per convention used in this driver) */ - if (unlikely(!sbale_addr)) { - ZFCP_LOG_NORMAL("bug: invalid reqid\n"); - return -EINVAL; - } - - /* valid request id and thus (hopefully :) valid fsf_req address */ - fsf_req = (struct zfcp_fsf_req *) sbale_addr; - - /* serialize with zfcp_fsf_req_dismiss_all */ - spin_lock_irqsave(&adapter->fsf_req_list_lock, flags); - if (list_empty(&adapter->fsf_req_list_head)) { - spin_unlock_irqrestore(&adapter->fsf_req_list_lock, flags); - return 0; - } - list_del(&fsf_req->list); - atomic_dec(&adapter->fsf_reqs_active); - spin_unlock_irqrestore(&adapter->fsf_req_list_lock, flags); - - if (unlikely(adapter != fsf_req->adapter)) { - ZFCP_LOG_NORMAL("bug: invalid reqid (fsf_req=%p, " - "fsf_req->adapter=%p, adapter=%p)\n", - fsf_req, fsf_req->adapter, adapter); - return -EINVAL; - } - - /* finish the FSF request */ - zfcp_fsf_req_complete(fsf_req); - - return 0; -} - /** * zfcp_qdio_sbale_get - return pointer to SBALE of qdio_queue * @queue: queue from which SBALE should be returned diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c index 671f4a6a5d1..1bb55086db9 100644 --- a/drivers/s390/scsi/zfcp_scsi.c +++ b/drivers/s390/scsi/zfcp_scsi.c @@ -30,7 +30,6 @@ static int zfcp_scsi_queuecommand(struct scsi_cmnd *, void (*done) (struct scsi_cmnd *)); static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *); static int zfcp_scsi_eh_device_reset_handler(struct scsi_cmnd *); -static int zfcp_scsi_eh_bus_reset_handler(struct scsi_cmnd *); static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *); static int zfcp_task_management_function(struct zfcp_unit *, u8, struct scsi_cmnd *); @@ -46,30 +45,22 @@ struct zfcp_data zfcp_data = { .scsi_host_template = { .name = ZFCP_NAME, .proc_name = "zfcp", - .proc_info = NULL, - .detect = NULL, .slave_alloc = zfcp_scsi_slave_alloc, .slave_configure = zfcp_scsi_slave_configure, .slave_destroy = zfcp_scsi_slave_destroy, .queuecommand = zfcp_scsi_queuecommand, .eh_abort_handler = zfcp_scsi_eh_abort_handler, .eh_device_reset_handler = zfcp_scsi_eh_device_reset_handler, - .eh_bus_reset_handler = zfcp_scsi_eh_bus_reset_handler, + .eh_bus_reset_handler = zfcp_scsi_eh_host_reset_handler, .eh_host_reset_handler = zfcp_scsi_eh_host_reset_handler, .can_queue = 4096, .this_id = -1, - /* - * FIXME: - * one less? can zfcp_create_sbale cope with it? - */ .sg_tablesize = ZFCP_MAX_SBALES_PER_REQ, .cmd_per_lun = 1, - .unchecked_isa_dma = 0, .use_clustering = 1, .sdev_attrs = zfcp_sysfs_sdev_attrs, }, .driver_version = ZFCP_VERSION, - /* rest initialised with zeros */ }; /* Find start of Response Information in FCP response unit*/ @@ -176,8 +167,14 @@ zfcp_scsi_slave_alloc(struct scsi_device *sdp) return retval; } -static void -zfcp_scsi_slave_destroy(struct scsi_device *sdpnt) +/** + * zfcp_scsi_slave_destroy - called when scsi device is removed + * + * Remove reference to associated scsi device for an zfcp_unit. + * Mark zfcp_unit as failed. The scsi device might be deleted via sysfs + * or a scan for this device might have failed. 
+ */ +static void zfcp_scsi_slave_destroy(struct scsi_device *sdpnt) { struct zfcp_unit *unit = (struct zfcp_unit *) sdpnt->hostdata; @@ -185,6 +182,7 @@ zfcp_scsi_slave_destroy(struct scsi_device *sdpnt) atomic_clear_mask(ZFCP_STATUS_UNIT_REGISTERED, &unit->status); sdpnt->hostdata = NULL; unit->device = NULL; + zfcp_erp_unit_failed(unit); zfcp_unit_put(unit); } else { ZFCP_LOG_NORMAL("bug: no unit associated with SCSI device at " @@ -549,35 +547,38 @@ zfcp_task_management_function(struct zfcp_unit *unit, u8 tm_flags, } /** - * zfcp_scsi_eh_bus_reset_handler - reset bus (reopen adapter) + * zfcp_scsi_eh_host_reset_handler - handler for host and bus reset + * + * If ERP is already running it will be stopped. */ -int -zfcp_scsi_eh_bus_reset_handler(struct scsi_cmnd *scpnt) +int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt) { - struct zfcp_unit *unit = (struct zfcp_unit*) scpnt->device->hostdata; - struct zfcp_adapter *adapter = unit->port->adapter; - - ZFCP_LOG_NORMAL("bus reset because of problems with " - "unit 0x%016Lx\n", unit->fcp_lun); - zfcp_erp_adapter_reopen(adapter, 0); - zfcp_erp_wait(adapter); - - return SUCCESS; -} + struct zfcp_unit *unit; + struct zfcp_adapter *adapter; + unsigned long flags; -/** - * zfcp_scsi_eh_host_reset_handler - reset host (reopen adapter) - */ -int -zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt) -{ - struct zfcp_unit *unit = (struct zfcp_unit*) scpnt->device->hostdata; - struct zfcp_adapter *adapter = unit->port->adapter; + unit = (struct zfcp_unit*) scpnt->device->hostdata; + adapter = unit->port->adapter; - ZFCP_LOG_NORMAL("host reset because of problems with " + ZFCP_LOG_NORMAL("host/bus reset because of problems with " "unit 0x%016Lx\n", unit->fcp_lun); - zfcp_erp_adapter_reopen(adapter, 0); - zfcp_erp_wait(adapter); + + write_lock_irqsave(&adapter->erp_lock, flags); + if (atomic_test_mask(ZFCP_STATUS_ADAPTER_ERP_PENDING, + &adapter->status)) { + zfcp_erp_modify_adapter_status(adapter, + ZFCP_STATUS_COMMON_UNBLOCKED|ZFCP_STATUS_COMMON_OPEN, + ZFCP_CLEAR); + zfcp_erp_action_dismiss_adapter(adapter); + write_unlock_irqrestore(&adapter->erp_lock, flags); + zfcp_fsf_req_dismiss_all(adapter); + adapter->fsf_req_seq_no = 0; + zfcp_erp_adapter_reopen(adapter, 0); + } else { + write_unlock_irqrestore(&adapter->erp_lock, flags); + zfcp_erp_adapter_reopen(adapter, 0); + zfcp_erp_wait(adapter); + } return SUCCESS; } diff --git a/drivers/scsi/hptiop.c b/drivers/scsi/hptiop.c index ab2f8b26790..74d4d22e5c0 100644 --- a/drivers/scsi/hptiop.c +++ b/drivers/scsi/hptiop.c @@ -577,7 +577,7 @@ static int hptiop_reset_hba(struct hptiop_hba *hba) if (atomic_xchg(&hba->resetting, 1) == 0) { atomic_inc(&hba->reset_count); writel(IOPMU_INBOUND_MSG0_RESET, - &hba->iop->outbound_msgaddr0); + &hba->iop->inbound_msgaddr0); hptiop_pci_posting_flush(hba->iop); } diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c index 848fb2aa4ca..058f094f945 100644 --- a/drivers/scsi/iscsi_tcp.c +++ b/drivers/scsi/iscsi_tcp.c @@ -43,13 +43,10 @@ #include "iscsi_tcp.h" -#define ISCSI_TCP_VERSION "1.0-595" - MODULE_AUTHOR("Dmitry Yusupov <dmitry_yus@yahoo.com>, " "Alex Aizman <itn780@yahoo.com>"); MODULE_DESCRIPTION("iSCSI/TCP data-path"); MODULE_LICENSE("GPL"); -MODULE_VERSION(ISCSI_TCP_VERSION); /* #define DEBUG_TCP */ #define DEBUG_ASSERT @@ -185,11 +182,19 @@ iscsi_hdr_extract(struct iscsi_tcp_conn *tcp_conn) * must be called with session lock */ static void -__iscsi_ctask_cleanup(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) 
+iscsi_tcp_cleanup_ctask(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) { struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; + struct iscsi_r2t_info *r2t; struct scsi_cmnd *sc; + /* flush ctask's r2t queues */ + while (__kfifo_get(tcp_ctask->r2tqueue, (void*)&r2t, sizeof(void*))) { + __kfifo_put(tcp_ctask->r2tpool.queue, (void*)&r2t, + sizeof(void*)); + debug_scsi("iscsi_tcp_cleanup_ctask pending r2t dropped\n"); + } + sc = ctask->sc; if (unlikely(!sc)) return; @@ -374,6 +379,7 @@ iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) spin_unlock(&session->lock); return 0; } + rc = __kfifo_get(tcp_ctask->r2tpool.queue, (void*)&r2t, sizeof(void*)); BUG_ON(!rc); @@ -399,7 +405,7 @@ iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) tcp_ctask->exp_r2tsn = r2tsn + 1; tcp_ctask->xmstate |= XMSTATE_SOL_HDR; __kfifo_put(tcp_ctask->r2tqueue, (void*)&r2t, sizeof(void*)); - __kfifo_put(conn->xmitqueue, (void*)&ctask, sizeof(void*)); + list_move_tail(&ctask->running, &conn->xmitqueue); scsi_queue_work(session->host, &conn->xmitwork); conn->r2t_pdus_cnt++; @@ -477,6 +483,8 @@ iscsi_tcp_hdr_recv(struct iscsi_conn *conn) case ISCSI_OP_SCSI_DATA_IN: tcp_conn->in.ctask = session->cmds[itt]; rc = iscsi_data_rsp(conn, tcp_conn->in.ctask); + if (rc) + return rc; /* fall through */ case ISCSI_OP_SCSI_CMD_RSP: tcp_conn->in.ctask = session->cmds[itt]; @@ -484,7 +492,7 @@ iscsi_tcp_hdr_recv(struct iscsi_conn *conn) goto copy_hdr; spin_lock(&session->lock); - __iscsi_ctask_cleanup(conn, tcp_conn->in.ctask); + iscsi_tcp_cleanup_ctask(conn, tcp_conn->in.ctask); rc = __iscsi_complete_pdu(conn, hdr, NULL, 0); spin_unlock(&session->lock); break; @@ -500,13 +508,28 @@ iscsi_tcp_hdr_recv(struct iscsi_conn *conn) break; case ISCSI_OP_LOGIN_RSP: case ISCSI_OP_TEXT_RSP: - case ISCSI_OP_LOGOUT_RSP: - case ISCSI_OP_NOOP_IN: case ISCSI_OP_REJECT: case ISCSI_OP_ASYNC_EVENT: + /* + * It is possible that we could get a PDU with a buffer larger + * than 8K, but there are no targets that currently do this. + * For now we fail until we find a vendor that needs it + */ + if (DEFAULT_MAX_RECV_DATA_SEGMENT_LENGTH < + tcp_conn->in.datalen) { + printk(KERN_ERR "iscsi_tcp: received buffer of len %u " + "but conn buffer is only %u (opcode %0x)\n", + tcp_conn->in.datalen, + DEFAULT_MAX_RECV_DATA_SEGMENT_LENGTH, opcode); + rc = ISCSI_ERR_PROTO; + break; + } + if (tcp_conn->in.datalen) goto copy_hdr; /* fall through */ + case ISCSI_OP_LOGOUT_RSP: + case ISCSI_OP_NOOP_IN: case ISCSI_OP_SCSI_TMFUNC_RSP: rc = iscsi_complete_pdu(conn, hdr, NULL, 0); break; @@ -523,7 +546,7 @@ copy_hdr: * skbs to complete the command then we have to copy the header * for later use */ - if (tcp_conn->in.zero_copy_hdr && tcp_conn->in.copy < + if (tcp_conn->in.zero_copy_hdr && tcp_conn->in.copy <= (tcp_conn->in.datalen + tcp_conn->in.padding + (conn->datadgst_en ? 4 : 0))) { debug_tcp("Copying header for later use. in.copy %d in.datalen" @@ -614,9 +637,9 @@ iscsi_ctask_copy(struct iscsi_tcp_conn *tcp_conn, struct iscsi_cmd_task *ctask, * byte counters. 
**/ static inline int -iscsi_tcp_copy(struct iscsi_tcp_conn *tcp_conn) +iscsi_tcp_copy(struct iscsi_conn *conn) { - void *buf = tcp_conn->data; + struct iscsi_tcp_conn *tcp_conn = conn->dd_data; int buf_size = tcp_conn->in.datalen; int buf_left = buf_size - tcp_conn->data_copied; int size = min(tcp_conn->in.copy, buf_left); @@ -627,7 +650,7 @@ iscsi_tcp_copy(struct iscsi_tcp_conn *tcp_conn) BUG_ON(size <= 0); rc = skb_copy_bits(tcp_conn->in.skb, tcp_conn->in.offset, - (char*)buf + tcp_conn->data_copied, size); + (char*)conn->data + tcp_conn->data_copied, size); BUG_ON(rc); tcp_conn->in.offset += size; @@ -745,10 +768,11 @@ static int iscsi_scsi_data_in(struct iscsi_conn *conn) done: /* check for non-exceptional status */ if (tcp_conn->in.hdr->flags & ISCSI_FLAG_DATA_STATUS) { - debug_scsi("done [sc %lx res %d itt 0x%x]\n", - (long)sc, sc->result, ctask->itt); + debug_scsi("done [sc %lx res %d itt 0x%x flags 0x%x]\n", + (long)sc, sc->result, ctask->itt, + tcp_conn->in.hdr->flags); spin_lock(&conn->session->lock); - __iscsi_ctask_cleanup(conn, ctask); + iscsi_tcp_cleanup_ctask(conn, ctask); __iscsi_complete_pdu(conn, tcp_conn->in.hdr, NULL, 0); spin_unlock(&conn->session->lock); } @@ -769,26 +793,25 @@ iscsi_data_recv(struct iscsi_conn *conn) break; case ISCSI_OP_SCSI_CMD_RSP: spin_lock(&conn->session->lock); - __iscsi_ctask_cleanup(conn, tcp_conn->in.ctask); + iscsi_tcp_cleanup_ctask(conn, tcp_conn->in.ctask); spin_unlock(&conn->session->lock); case ISCSI_OP_TEXT_RSP: case ISCSI_OP_LOGIN_RSP: - case ISCSI_OP_NOOP_IN: case ISCSI_OP_ASYNC_EVENT: case ISCSI_OP_REJECT: /* * Collect data segment to the connection's data * placeholder */ - if (iscsi_tcp_copy(tcp_conn)) { + if (iscsi_tcp_copy(conn)) { rc = -EAGAIN; goto exit; } - rc = iscsi_complete_pdu(conn, tcp_conn->in.hdr, tcp_conn->data, + rc = iscsi_complete_pdu(conn, tcp_conn->in.hdr, conn->data, tcp_conn->in.datalen); if (!rc && conn->datadgst_en && opcode != ISCSI_OP_LOGIN_RSP) - iscsi_recv_digest_update(tcp_conn, tcp_conn->data, + iscsi_recv_digest_update(tcp_conn, conn->data, tcp_conn->in.datalen); break; default: @@ -843,7 +866,7 @@ more: if (rc == -EAGAIN) goto nomore; else { - iscsi_conn_failure(conn, rc); + iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED); return 0; } } @@ -897,7 +920,7 @@ more: if (rc) { if (rc == -EAGAIN) goto again; - iscsi_conn_failure(conn, rc); + iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED); return 0; } tcp_conn->in.copy -= tcp_conn->in.padding; @@ -1028,9 +1051,8 @@ iscsi_conn_set_callbacks(struct iscsi_conn *conn) } static void -iscsi_conn_restore_callbacks(struct iscsi_conn *conn) +iscsi_conn_restore_callbacks(struct iscsi_tcp_conn *tcp_conn) { - struct iscsi_tcp_conn *tcp_conn = conn->dd_data; struct sock *sk = tcp_conn->sock->sk; /* restore socket callbacks, see also: iscsi_conn_set_callbacks() */ @@ -1308,7 +1330,7 @@ iscsi_tcp_cmd_init(struct iscsi_cmd_task *ctask) ctask->imm_count - ctask->unsol_count; - debug_scsi("cmd [itt %x total %d imm %d imm_data %d " + debug_scsi("cmd [itt 0x%x total %d imm %d imm_data %d " "r2t_data %d]\n", ctask->itt, ctask->total_length, ctask->imm_count, ctask->unsol_count, tcp_ctask->r2t_data_count); @@ -1636,7 +1658,7 @@ handle_xmstate_sol_data(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) } solicit_again: /* - * send Data-Out whitnin this R2T sequence. + * send Data-Out within this R2T sequence. 
*/ if (!r2t->data_count) goto data_out_done; @@ -1731,7 +1753,7 @@ handle_xmstate_w_pad(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; struct iscsi_tcp_conn *tcp_conn = conn->dd_data; struct iscsi_data_task *dtask = tcp_ctask->dtask; - int sent, rc; + int sent = 0, rc; tcp_ctask->xmstate &= ~XMSTATE_W_PAD; iscsi_buf_init_iov(&tcp_ctask->sendbuf, (char*)&tcp_ctask->pad, @@ -1900,27 +1922,32 @@ iscsi_tcp_conn_create(struct iscsi_cls_session *cls_session, uint32_t conn_idx) tcp_conn->in_progress = IN_PROGRESS_WAIT_HEADER; /* initial operational parameters */ tcp_conn->hdr_size = sizeof(struct iscsi_hdr); - tcp_conn->data_size = DEFAULT_MAX_RECV_DATA_SEGMENT_LENGTH; - - /* allocate initial PDU receive place holder */ - if (tcp_conn->data_size <= PAGE_SIZE) - tcp_conn->data = kmalloc(tcp_conn->data_size, GFP_KERNEL); - else - tcp_conn->data = (void*)__get_free_pages(GFP_KERNEL, - get_order(tcp_conn->data_size)); - if (!tcp_conn->data) - goto max_recv_dlenght_alloc_fail; return cls_conn; -max_recv_dlenght_alloc_fail: - kfree(tcp_conn); tcp_conn_alloc_fail: iscsi_conn_teardown(cls_conn); return NULL; } static void +iscsi_tcp_release_conn(struct iscsi_conn *conn) +{ + struct iscsi_tcp_conn *tcp_conn = conn->dd_data; + + if (!tcp_conn->sock) + return; + + sock_hold(tcp_conn->sock->sk); + iscsi_conn_restore_callbacks(tcp_conn); + sock_put(tcp_conn->sock->sk); + + sock_release(tcp_conn->sock); + tcp_conn->sock = NULL; + conn->recv_lock = NULL; +} + +static void iscsi_tcp_conn_destroy(struct iscsi_cls_conn *cls_conn) { struct iscsi_conn *conn = cls_conn->dd_data; @@ -1930,6 +1957,7 @@ iscsi_tcp_conn_destroy(struct iscsi_cls_conn *cls_conn) if (conn->hdrdgst_en || conn->datadgst_en) digest = 1; + iscsi_tcp_release_conn(conn); iscsi_conn_teardown(cls_conn); /* now free tcp_conn */ @@ -1944,15 +1972,18 @@ iscsi_tcp_conn_destroy(struct iscsi_cls_conn *cls_conn) crypto_free_tfm(tcp_conn->data_rx_tfm); } - /* free conn->data, size = MaxRecvDataSegmentLength */ - if (tcp_conn->data_size <= PAGE_SIZE) - kfree(tcp_conn->data); - else - free_pages((unsigned long)tcp_conn->data, - get_order(tcp_conn->data_size)); kfree(tcp_conn); } +static void +iscsi_tcp_conn_stop(struct iscsi_cls_conn *cls_conn, int flag) +{ + struct iscsi_conn *conn = cls_conn->dd_data; + + iscsi_conn_stop(cls_conn, flag); + iscsi_tcp_release_conn(conn); +} + static int iscsi_tcp_conn_bind(struct iscsi_cls_session *cls_session, struct iscsi_cls_conn *cls_conn, uint64_t transport_eph, @@ -2001,52 +2032,6 @@ iscsi_tcp_conn_bind(struct iscsi_cls_session *cls_session, return 0; } -static void -iscsi_tcp_cleanup_ctask(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) -{ - struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; - struct iscsi_r2t_info *r2t; - - /* flush ctask's r2t queues */ - while (__kfifo_get(tcp_ctask->r2tqueue, (void*)&r2t, sizeof(void*))) - __kfifo_put(tcp_ctask->r2tpool.queue, (void*)&r2t, - sizeof(void*)); - - __iscsi_ctask_cleanup(conn, ctask); -} - -static void -iscsi_tcp_suspend_conn_rx(struct iscsi_conn *conn) -{ - struct iscsi_tcp_conn *tcp_conn = conn->dd_data; - struct sock *sk; - - if (!tcp_conn->sock) - return; - - sk = tcp_conn->sock->sk; - write_lock_bh(&sk->sk_callback_lock); - set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx); - write_unlock_bh(&sk->sk_callback_lock); -} - -static void -iscsi_tcp_terminate_conn(struct iscsi_conn *conn) -{ - struct iscsi_tcp_conn *tcp_conn = conn->dd_data; - - if (!tcp_conn->sock) - return; - - 
sock_hold(tcp_conn->sock->sk); - iscsi_conn_restore_callbacks(conn); - sock_put(tcp_conn->sock->sk); - - sock_release(tcp_conn->sock); - tcp_conn->sock = NULL; - conn->recv_lock = NULL; -} - /* called with host lock */ static void iscsi_tcp_mgmt_init(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask, @@ -2057,6 +2042,7 @@ iscsi_tcp_mgmt_init(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask, iscsi_buf_init_iov(&tcp_mtask->headbuf, (char*)mtask->hdr, sizeof(struct iscsi_hdr)); tcp_mtask->xmstate = XMSTATE_IMM_HDR; + tcp_mtask->sent = 0; if (mtask->data_count) iscsi_buf_init_iov(&tcp_mtask->sendbuf, (char*)mtask->data, @@ -2138,39 +2124,6 @@ iscsi_conn_set_param(struct iscsi_cls_conn *cls_conn, enum iscsi_param param, int value; switch(param) { - case ISCSI_PARAM_MAX_RECV_DLENGTH: { - char *saveptr = tcp_conn->data; - gfp_t flags = GFP_KERNEL; - - sscanf(buf, "%d", &value); - if (tcp_conn->data_size >= value) { - iscsi_set_param(cls_conn, param, buf, buflen); - break; - } - - spin_lock_bh(&session->lock); - if (conn->stop_stage == STOP_CONN_RECOVER) - flags = GFP_ATOMIC; - spin_unlock_bh(&session->lock); - - if (value <= PAGE_SIZE) - tcp_conn->data = kmalloc(value, flags); - else - tcp_conn->data = (void*)__get_free_pages(flags, - get_order(value)); - if (tcp_conn->data == NULL) { - tcp_conn->data = saveptr; - return -ENOMEM; - } - if (tcp_conn->data_size <= PAGE_SIZE) - kfree(saveptr); - else - free_pages((unsigned long)saveptr, - get_order(tcp_conn->data_size)); - iscsi_set_param(cls_conn, param, buf, buflen); - tcp_conn->data_size = value; - break; - } case ISCSI_PARAM_HDRDGST_EN: iscsi_set_param(cls_conn, param, buf, buflen); tcp_conn->hdr_size = sizeof(struct iscsi_hdr); @@ -2361,8 +2314,7 @@ static void iscsi_tcp_session_destroy(struct iscsi_cls_session *cls_session) } static struct scsi_host_template iscsi_sht = { - .name = "iSCSI Initiator over TCP/IP, v" - ISCSI_TCP_VERSION, + .name = "iSCSI Initiator over TCP/IP", .queuecommand = iscsi_queuecommand, .change_queue_depth = iscsi_change_queue_depth, .can_queue = ISCSI_XMIT_CMDS_MAX - 1, @@ -2414,10 +2366,7 @@ static struct iscsi_transport iscsi_tcp_transport = { .get_conn_param = iscsi_tcp_conn_get_param, .get_session_param = iscsi_session_get_param, .start_conn = iscsi_conn_start, - .stop_conn = iscsi_conn_stop, - /* these are called as part of conn recovery */ - .suspend_conn_recv = iscsi_tcp_suspend_conn_rx, - .terminate_conn = iscsi_tcp_terminate_conn, + .stop_conn = iscsi_tcp_conn_stop, /* IO */ .send_pdu = iscsi_conn_send_pdu, .get_stats = iscsi_conn_get_stats, diff --git a/drivers/scsi/iscsi_tcp.h b/drivers/scsi/iscsi_tcp.h index 808302832e6..6a4ee704e46 100644 --- a/drivers/scsi/iscsi_tcp.h +++ b/drivers/scsi/iscsi_tcp.h @@ -78,8 +78,6 @@ struct iscsi_tcp_conn { char hdrext[4*sizeof(__u16) + sizeof(__u32)]; int data_copied; - char *data; /* data placeholder */ - int data_size; /* actual recv_dlength */ int stop_stage; /* conn_stop() flag: * * stop to recover, * * stop to terminate */ diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c index 7e6e031cc41..5884cd26d53 100644 --- a/drivers/scsi/libiscsi.c +++ b/drivers/scsi/libiscsi.c @@ -189,6 +189,7 @@ static void iscsi_complete_command(struct iscsi_session *session, { struct scsi_cmnd *sc = ctask->sc; + ctask->state = ISCSI_TASK_COMPLETED; ctask->sc = NULL; list_del_init(&ctask->running); __kfifo_put(session->cmdpool.queue, (void*)&ctask, sizeof(void*)); @@ -275,6 +276,25 @@ out: return rc; } +static void iscsi_tmf_rsp(struct iscsi_conn *conn, struct 
iscsi_hdr *hdr) +{ + struct iscsi_tm_rsp *tmf = (struct iscsi_tm_rsp *)hdr; + + conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1; + conn->tmfrsp_pdus_cnt++; + + if (conn->tmabort_state != TMABORT_INITIAL) + return; + + if (tmf->response == ISCSI_TMF_RSP_COMPLETE) + conn->tmabort_state = TMABORT_SUCCESS; + else if (tmf->response == ISCSI_TMF_RSP_NO_TASK) + conn->tmabort_state = TMABORT_NOT_FOUND; + else + conn->tmabort_state = TMABORT_FAILED; + wake_up(&conn->ehwait); +} + /** * __iscsi_complete_pdu - complete pdu * @conn: iscsi conn @@ -340,6 +360,10 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr, switch(opcode) { case ISCSI_OP_LOGOUT_RSP: + if (datalen) { + rc = ISCSI_ERR_PROTO; + break; + } conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1; /* fall through */ case ISCSI_OP_LOGIN_RSP: @@ -348,7 +372,8 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr, * login related PDU's exp_statsn is handled in * userspace */ - rc = iscsi_recv_pdu(conn->cls_conn, hdr, data, datalen); + if (iscsi_recv_pdu(conn->cls_conn, hdr, data, datalen)) + rc = ISCSI_ERR_CONN_FAILED; list_del(&mtask->running); if (conn->login_mtask != mtask) __kfifo_put(session->mgmtpool.queue, @@ -360,25 +385,17 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr, break; } - conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1; - conn->tmfrsp_pdus_cnt++; - if (conn->tmabort_state == TMABORT_INITIAL) { - conn->tmabort_state = - ((struct iscsi_tm_rsp *)hdr)-> - response == ISCSI_TMF_RSP_COMPLETE ? - TMABORT_SUCCESS:TMABORT_FAILED; - /* unblock eh_abort() */ - wake_up(&conn->ehwait); - } + iscsi_tmf_rsp(conn, hdr); break; case ISCSI_OP_NOOP_IN: - if (hdr->ttt != ISCSI_RESERVED_TAG) { + if (hdr->ttt != ISCSI_RESERVED_TAG || datalen) { rc = ISCSI_ERR_PROTO; break; } conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1; - rc = iscsi_recv_pdu(conn->cls_conn, hdr, data, datalen); + if (iscsi_recv_pdu(conn->cls_conn, hdr, data, datalen)) + rc = ISCSI_ERR_CONN_FAILED; list_del(&mtask->running); if (conn->login_mtask != mtask) __kfifo_put(session->mgmtpool.queue, @@ -391,14 +408,21 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr, } else if (itt == ISCSI_RESERVED_TAG) { switch(opcode) { case ISCSI_OP_NOOP_IN: - if (!datalen) { - rc = iscsi_check_assign_cmdsn(session, - (struct iscsi_nopin*)hdr); - if (!rc && hdr->ttt != ISCSI_RESERVED_TAG) - rc = iscsi_recv_pdu(conn->cls_conn, - hdr, NULL, 0); - } else + if (datalen) { rc = ISCSI_ERR_PROTO; + break; + } + + rc = iscsi_check_assign_cmdsn(session, + (struct iscsi_nopin*)hdr); + if (rc) + break; + + if (hdr->ttt == ISCSI_RESERVED_TAG) + break; + + if (iscsi_recv_pdu(conn->cls_conn, hdr, NULL, 0)) + rc = ISCSI_ERR_CONN_FAILED; break; case ISCSI_OP_REJECT: /* we need sth like iscsi_reject_rsp()*/ @@ -568,20 +592,24 @@ static int iscsi_data_xmit(struct iscsi_conn *conn) } /* process command queue */ - while (__kfifo_get(conn->xmitqueue, (void*)&conn->ctask, - sizeof(void*))) { + spin_lock_bh(&conn->session->lock); + while (!list_empty(&conn->xmitqueue)) { /* * iscsi tcp may readd the task to the xmitqueue to send * write data */ - spin_lock_bh(&conn->session->lock); - if (list_empty(&conn->ctask->running)) - list_add_tail(&conn->ctask->running, &conn->run_list); + conn->ctask = list_entry(conn->xmitqueue.next, + struct iscsi_cmd_task, running); + conn->ctask->state = ISCSI_TASK_RUNNING; + list_move_tail(conn->xmitqueue.next, &conn->run_list); spin_unlock_bh(&conn->session->lock); + rc = tt->xmit_cmd_task(conn, 
conn->ctask); if (rc) goto again; + spin_lock_bh(&conn->session->lock); } + spin_unlock_bh(&conn->session->lock); /* done with this ctask */ conn->ctask = NULL; @@ -691,6 +719,7 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *)) sc->SCp.phase = session->age; sc->SCp.ptr = (char *)ctask; + ctask->state = ISCSI_TASK_PENDING; ctask->mtask = NULL; ctask->conn = conn; ctask->sc = sc; @@ -700,7 +729,7 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *)) session->tt->init_cmd_task(ctask); - __kfifo_put(conn->xmitqueue, (void*)&ctask, sizeof(void*)); + list_add_tail(&ctask->running, &conn->xmitqueue); debug_scsi( "ctask enq [%s cid %d sc %lx itt 0x%x len %d cmdsn %d win %d]\n", sc->sc_data_direction == DMA_TO_DEVICE ? "write" : "read", @@ -977,31 +1006,27 @@ static int iscsi_exec_abort_task(struct scsi_cmnd *sc, /* * xmit mutex and session lock must be held */ -#define iscsi_remove_task(tasktype) \ -static struct iscsi_##tasktype * \ -iscsi_remove_##tasktype(struct kfifo *fifo, uint32_t itt) \ -{ \ - int i, nr_tasks = __kfifo_len(fifo) / sizeof(void*); \ - struct iscsi_##tasktype *task; \ - \ - debug_scsi("searching %d tasks\n", nr_tasks); \ - \ - for (i = 0; i < nr_tasks; i++) { \ - __kfifo_get(fifo, (void*)&task, sizeof(void*)); \ - debug_scsi("check task %u\n", task->itt); \ - \ - if (task->itt == itt) { \ - debug_scsi("matched task\n"); \ - return task; \ - } \ - \ - __kfifo_put(fifo, (void*)&task, sizeof(void*)); \ - } \ - return NULL; \ -} +static struct iscsi_mgmt_task * +iscsi_remove_mgmt_task(struct kfifo *fifo, uint32_t itt) +{ + int i, nr_tasks = __kfifo_len(fifo) / sizeof(void*); + struct iscsi_mgmt_task *task; -iscsi_remove_task(mgmt_task); -iscsi_remove_task(cmd_task); + debug_scsi("searching %d tasks\n", nr_tasks); + + for (i = 0; i < nr_tasks; i++) { + __kfifo_get(fifo, (void*)&task, sizeof(void*)); + debug_scsi("check task %u\n", task->itt); + + if (task->itt == itt) { + debug_scsi("matched task\n"); + return task; + } + + __kfifo_put(fifo, (void*)&task, sizeof(void*)); + } + return NULL; +} static int iscsi_ctask_mtask_cleanup(struct iscsi_cmd_task *ctask) { @@ -1027,12 +1052,13 @@ static void fail_command(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask, { struct scsi_cmnd *sc; - conn->session->tt->cleanup_cmd_task(conn, ctask); - iscsi_ctask_mtask_cleanup(ctask); - sc = ctask->sc; if (!sc) return; + + conn->session->tt->cleanup_cmd_task(conn, ctask); + iscsi_ctask_mtask_cleanup(ctask); + sc->result = err; sc->resid = sc->request_bufflen; iscsi_complete_command(conn->session, ctask); @@ -1043,7 +1069,6 @@ int iscsi_eh_abort(struct scsi_cmnd *sc) struct iscsi_cmd_task *ctask = (struct iscsi_cmd_task *)sc->SCp.ptr; struct iscsi_conn *conn = ctask->conn; struct iscsi_session *session = conn->session; - struct iscsi_cmd_task *pending_ctask; int rc; conn->eh_abort_cnt++; @@ -1061,8 +1086,11 @@ int iscsi_eh_abort(struct scsi_cmnd *sc) goto failed; /* ctask completed before time out */ - if (!ctask->sc) - goto success; + if (!ctask->sc) { + spin_unlock_bh(&session->lock); + debug_scsi("sc completed while abort in progress\n"); + goto success_rel_mutex; + } /* what should we do here ? 
*/ if (conn->ctask == ctask) { @@ -1071,17 +1099,8 @@ int iscsi_eh_abort(struct scsi_cmnd *sc) goto failed; } - /* check for the easy pending cmd abort */ - pending_ctask = iscsi_remove_cmd_task(conn->xmitqueue, ctask->itt); - if (pending_ctask) { - /* iscsi_tcp queues write transfers on the xmitqueue */ - if (list_empty(&pending_ctask->running)) { - debug_scsi("found pending task\n"); - goto success; - } else - __kfifo_put(conn->xmitqueue, (void*)&pending_ctask, - sizeof(void*)); - } + if (ctask->state == ISCSI_TASK_PENDING) + goto success_cleanup; conn->tmabort_state = TMABORT_INITIAL; @@ -1089,25 +1108,31 @@ int iscsi_eh_abort(struct scsi_cmnd *sc) rc = iscsi_exec_abort_task(sc, ctask); spin_lock_bh(&session->lock); - iscsi_ctask_mtask_cleanup(ctask); if (rc || sc->SCp.phase != session->age || session->state != ISCSI_STATE_LOGGED_IN) goto failed; + iscsi_ctask_mtask_cleanup(ctask); - /* ctask completed before tmf abort response */ - if (!ctask->sc) { - debug_scsi("sc completed while abort in progress\n"); - goto success; - } - - if (conn->tmabort_state != TMABORT_SUCCESS) { + switch (conn->tmabort_state) { + case TMABORT_SUCCESS: + goto success_cleanup; + case TMABORT_NOT_FOUND: + if (!ctask->sc) { + /* ctask completed before tmf abort response */ + spin_unlock_bh(&session->lock); + debug_scsi("sc completed while abort in progress\n"); + goto success_rel_mutex; + } + /* fall through */ + default: + /* timedout or failed */ spin_unlock_bh(&session->lock); iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED); spin_lock_bh(&session->lock); goto failed; } -success: +success_cleanup: debug_scsi("abort success [sc %lx itt 0x%x]\n", (long)sc, ctask->itt); spin_unlock_bh(&session->lock); @@ -1121,6 +1146,7 @@ success: spin_unlock(&session->lock); write_unlock_bh(conn->recv_lock); +success_rel_mutex: mutex_unlock(&conn->xmitmutex); return SUCCESS; @@ -1263,6 +1289,7 @@ iscsi_session_setup(struct iscsi_transport *iscsit, if (cmd_task_size) ctask->dd_data = &ctask[1]; ctask->itt = cmd_i; + INIT_LIST_HEAD(&ctask->running); } spin_lock_init(&session->lock); @@ -1282,6 +1309,7 @@ iscsi_session_setup(struct iscsi_transport *iscsit, if (mgmt_task_size) mtask->dd_data = &mtask[1]; mtask->itt = ISCSI_MGMT_ITT_OFFSET + cmd_i; + INIT_LIST_HEAD(&mtask->running); } if (scsi_add_host(shost, NULL)) @@ -1322,15 +1350,18 @@ void iscsi_session_teardown(struct iscsi_cls_session *cls_session) { struct Scsi_Host *shost = iscsi_session_to_shost(cls_session); struct iscsi_session *session = iscsi_hostdata(shost->hostdata); + struct module *owner = cls_session->transport->owner; scsi_remove_host(shost); iscsi_pool_free(&session->mgmtpool, (void**)session->mgmt_cmds); iscsi_pool_free(&session->cmdpool, (void**)session->cmds); + kfree(session->targetname); + iscsi_destroy_session(cls_session); scsi_host_put(shost); - module_put(cls_session->transport->owner); + module_put(owner); } EXPORT_SYMBOL_GPL(iscsi_session_teardown); @@ -1361,12 +1392,7 @@ iscsi_conn_setup(struct iscsi_cls_session *cls_session, uint32_t conn_idx) conn->tmabort_state = TMABORT_INITIAL; INIT_LIST_HEAD(&conn->run_list); INIT_LIST_HEAD(&conn->mgmt_run_list); - - /* initialize general xmit PDU commands queue */ - conn->xmitqueue = kfifo_alloc(session->cmds_max * sizeof(void*), - GFP_KERNEL, NULL); - if (conn->xmitqueue == ERR_PTR(-ENOMEM)) - goto xmitqueue_alloc_fail; + INIT_LIST_HEAD(&conn->xmitqueue); /* initialize general immediate & non-immediate PDU commands queue */ conn->immqueue = kfifo_alloc(session->mgmtpool_max * sizeof(void*), @@ -1394,7 +1420,7 
@@ iscsi_conn_setup(struct iscsi_cls_session *cls_session, uint32_t conn_idx) data = kmalloc(DEFAULT_MAX_RECV_DATA_SEGMENT_LENGTH, GFP_KERNEL); if (!data) goto login_mtask_data_alloc_fail; - conn->login_mtask->data = data; + conn->login_mtask->data = conn->data = data; init_timer(&conn->tmabort_timer); mutex_init(&conn->xmitmutex); @@ -1410,8 +1436,6 @@ login_mtask_alloc_fail: mgmtqueue_alloc_fail: kfifo_free(conn->immqueue); immqueue_alloc_fail: - kfifo_free(conn->xmitqueue); -xmitqueue_alloc_fail: iscsi_destroy_conn(cls_conn); return NULL; } @@ -1432,12 +1456,6 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn) set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx); mutex_lock(&conn->xmitmutex); - if (conn->c_stage == ISCSI_CONN_INITIAL_STAGE) { - if (session->tt->suspend_conn_recv) - session->tt->suspend_conn_recv(conn); - - session->tt->terminate_conn(conn); - } spin_lock_bh(&session->lock); conn->c_stage = ISCSI_CONN_CLEANUP_WAIT; @@ -1474,7 +1492,8 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn) } spin_lock_bh(&session->lock); - kfree(conn->login_mtask->data); + kfree(conn->data); + kfree(conn->persistent_address); __kfifo_put(session->mgmtpool.queue, (void*)&conn->login_mtask, sizeof(void*)); list_del(&conn->item); @@ -1489,7 +1508,6 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn) session->cmdsn = session->max_cmdsn = session->exp_cmdsn = 1; spin_unlock_bh(&session->lock); - kfifo_free(conn->xmitqueue); kfifo_free(conn->immqueue); kfifo_free(conn->mgmtqueue); @@ -1572,7 +1590,7 @@ static void fail_all_commands(struct iscsi_conn *conn) struct iscsi_cmd_task *ctask, *tmp; /* flush pending */ - while (__kfifo_get(conn->xmitqueue, (void*)&ctask, sizeof(void*))) { + list_for_each_entry_safe(ctask, tmp, &conn->xmitqueue, running) { debug_scsi("failing pending sc %p itt 0x%x\n", ctask->sc, ctask->itt); fail_command(conn, ctask, DID_BUS_BUSY << 16); @@ -1615,8 +1633,9 @@ static void iscsi_start_session_recovery(struct iscsi_session *session, set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx); spin_unlock_bh(&session->lock); - if (session->tt->suspend_conn_recv) - session->tt->suspend_conn_recv(conn); + write_lock_bh(conn->recv_lock); + set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx); + write_unlock_bh(conn->recv_lock); mutex_lock(&conn->xmitmutex); /* @@ -1635,7 +1654,6 @@ static void iscsi_start_session_recovery(struct iscsi_session *session, } } - session->tt->terminate_conn(conn); /* * flush queues. */ diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index 5c68cdd8736..76f8bd53e23 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c @@ -884,7 +884,7 @@ sysfs_mbox_write(struct kobject *kobj, char *buf, loff_t off, size_t count) phba->sysfs_mbox.mbox == NULL ) { sysfs_mbox_idle(phba); spin_unlock_irq(host->host_lock); - return -EINVAL; + return -EAGAIN; } } @@ -1000,14 +1000,15 @@ sysfs_mbox_read(struct kobject *kobj, char *buf, loff_t off, size_t count) spin_unlock_irq(phba->host->host_lock); rc = lpfc_sli_issue_mbox_wait (phba, phba->sysfs_mbox.mbox, - phba->fc_ratov * 2); + lpfc_mbox_tmo_val(phba, + phba->sysfs_mbox.mbox->mb.mbxCommand) * HZ); spin_lock_irq(phba->host->host_lock); } if (rc != MBX_SUCCESS) { sysfs_mbox_idle(phba); spin_unlock_irq(host->host_lock); - return -ENODEV; + return (rc == MBX_TIMEOUT) ? 
-ETIME : -ENODEV; } phba->sysfs_mbox.state = SMBOX_READING; } @@ -1016,7 +1017,7 @@ sysfs_mbox_read(struct kobject *kobj, char *buf, loff_t off, size_t count) printk(KERN_WARNING "mbox_read: Bad State\n"); sysfs_mbox_idle(phba); spin_unlock_irq(host->host_lock); - return -EINVAL; + return -EAGAIN; } memcpy(buf, (uint8_t *) & phba->sysfs_mbox.mbox->mb + off, count); @@ -1210,8 +1211,10 @@ lpfc_get_stats(struct Scsi_Host *shost) struct lpfc_hba *phba = (struct lpfc_hba *)shost->hostdata; struct lpfc_sli *psli = &phba->sli; struct fc_host_statistics *hs = &phba->link_stats; + struct lpfc_lnk_stat * lso = &psli->lnk_stat_offsets; LPFC_MBOXQ_t *pmboxq; MAILBOX_t *pmb; + unsigned long seconds; int rc = 0; pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); @@ -1272,22 +1275,103 @@ lpfc_get_stats(struct Scsi_Host *shost) hs->invalid_crc_count = pmb->un.varRdLnk.crcCnt; hs->error_frames = pmb->un.varRdLnk.crcCnt; + hs->link_failure_count -= lso->link_failure_count; + hs->loss_of_sync_count -= lso->loss_of_sync_count; + hs->loss_of_signal_count -= lso->loss_of_signal_count; + hs->prim_seq_protocol_err_count -= lso->prim_seq_protocol_err_count; + hs->invalid_tx_word_count -= lso->invalid_tx_word_count; + hs->invalid_crc_count -= lso->invalid_crc_count; + hs->error_frames -= lso->error_frames; + if (phba->fc_topology == TOPOLOGY_LOOP) { hs->lip_count = (phba->fc_eventTag >> 1); + hs->lip_count -= lso->link_events; hs->nos_count = -1; } else { hs->lip_count = -1; hs->nos_count = (phba->fc_eventTag >> 1); + hs->nos_count -= lso->link_events; } hs->dumped_frames = -1; -/* FIX ME */ - /*hs->SecondsSinceLastReset = (jiffies - lpfc_loadtime) / HZ;*/ + seconds = get_seconds(); + if (seconds < psli->stats_start) + hs->seconds_since_last_reset = seconds + + ((unsigned long)-1 - psli->stats_start); + else + hs->seconds_since_last_reset = seconds - psli->stats_start; return hs; } +static void +lpfc_reset_stats(struct Scsi_Host *shost) +{ + struct lpfc_hba *phba = (struct lpfc_hba *)shost->hostdata; + struct lpfc_sli *psli = &phba->sli; + struct lpfc_lnk_stat * lso = &psli->lnk_stat_offsets; + LPFC_MBOXQ_t *pmboxq; + MAILBOX_t *pmb; + int rc = 0; + + pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!pmboxq) + return; + memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t)); + + pmb = &pmboxq->mb; + pmb->mbxCommand = MBX_READ_STATUS; + pmb->mbxOwner = OWN_HOST; + pmb->un.varWords[0] = 0x1; /* reset request */ + pmboxq->context1 = NULL; + + if ((phba->fc_flag & FC_OFFLINE_MODE) || + (!(psli->sli_flag & LPFC_SLI2_ACTIVE))) + rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL); + else + rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2); + + if (rc != MBX_SUCCESS) { + if (rc == MBX_TIMEOUT) + pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl; + else + mempool_free(pmboxq, phba->mbox_mem_pool); + return; + } + + memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t)); + pmb->mbxCommand = MBX_READ_LNK_STAT; + pmb->mbxOwner = OWN_HOST; + pmboxq->context1 = NULL; + + if ((phba->fc_flag & FC_OFFLINE_MODE) || + (!(psli->sli_flag & LPFC_SLI2_ACTIVE))) + rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL); + else + rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2); + + if (rc != MBX_SUCCESS) { + if (rc == MBX_TIMEOUT) + pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl; + else + mempool_free( pmboxq, phba->mbox_mem_pool); + return; + } + + lso->link_failure_count = pmb->un.varRdLnk.linkFailureCnt; + lso->loss_of_sync_count = pmb->un.varRdLnk.lossSyncCnt; + lso->loss_of_signal_count = pmb->un.varRdLnk.lossSignalCnt; + 
lso->prim_seq_protocol_err_count = pmb->un.varRdLnk.primSeqErrCnt; + lso->invalid_tx_word_count = pmb->un.varRdLnk.invalidXmitWord; + lso->invalid_crc_count = pmb->un.varRdLnk.crcCnt; + lso->error_frames = pmb->un.varRdLnk.crcCnt; + lso->link_events = (phba->fc_eventTag >> 1); + + psli->stats_start = get_seconds(); + + return; +} /* * The LPFC driver treats linkdown handling as target loss events so there @@ -1431,8 +1515,7 @@ struct fc_function_template lpfc_transport_functions = { */ .get_fc_host_stats = lpfc_get_stats, - - /* the LPFC driver doesn't support resetting stats yet */ + .reset_fc_host_stats = lpfc_reset_stats, .dd_fcrport_size = sizeof(struct lpfc_rport_data), .show_rport_maxframe_size = 1, diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h index 517e9e4dd46..2a176467f71 100644 --- a/drivers/scsi/lpfc/lpfc_crtn.h +++ b/drivers/scsi/lpfc/lpfc_crtn.h @@ -127,6 +127,7 @@ void lpfc_config_port(struct lpfc_hba *, LPFC_MBOXQ_t *); void lpfc_kill_board(struct lpfc_hba *, LPFC_MBOXQ_t *); void lpfc_mbox_put(struct lpfc_hba *, LPFC_MBOXQ_t *); LPFC_MBOXQ_t *lpfc_mbox_get(struct lpfc_hba *); +int lpfc_mbox_tmo_val(struct lpfc_hba *, int); int lpfc_mem_alloc(struct lpfc_hba *); void lpfc_mem_free(struct lpfc_hba *); diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c index b65ee57af53..bbb7310210b 100644 --- a/drivers/scsi/lpfc/lpfc_ct.c +++ b/drivers/scsi/lpfc/lpfc_ct.c @@ -131,6 +131,7 @@ lpfc_ct_unsol_event(struct lpfc_hba * phba, } ct_unsol_event_exit_piocbq: + list_del(&head); if (pmbuf) { list_for_each_entry_safe(matp, next_matp, &pmbuf->list, list) { lpfc_mbuf_free(phba, matp->virt, matp->phys); @@ -481,7 +482,7 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb, if (CTrsp->CommandResponse.bits.CmdRsp == be16_to_cpu(SLI_CT_RESPONSE_FS_ACC)) { lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY, - "%d:0239 NameServer Rsp " + "%d:0208 NameServer Rsp " "Data: x%x\n", phba->brd_no, phba->fc_flag); @@ -588,13 +589,9 @@ lpfc_get_hba_sym_node_name(struct lpfc_hba * phba, uint8_t * symbp) lpfc_decode_firmware_rev(phba, fwrev, 0); - if (phba->Port[0]) { - sprintf(symbp, "Emulex %s Port %s FV%s DV%s", phba->ModelName, - phba->Port, fwrev, lpfc_release_version); - } else { - sprintf(symbp, "Emulex %s FV%s DV%s", phba->ModelName, - fwrev, lpfc_release_version); - } + sprintf(symbp, "Emulex %s FV%s DV%s", phba->ModelName, + fwrev, lpfc_release_version); + return; } /* diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index b89f6cb641e..60f5cca0abe 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c @@ -2839,7 +2839,7 @@ lpfc_els_rsp_rps_acc(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) /* Xmit ELS RPS ACC response tag <ulpIoTag> */ lpfc_printf_log(phba, KERN_INFO, LOG_ELS, - "%d:0128 Xmit ELS RPS ACC response tag x%x " + "%d:0118 Xmit ELS RPS ACC response tag x%x " "Data: x%x x%x x%x x%x x%x\n", phba->brd_no, elsiocb->iocb.ulpIoTag, @@ -2948,7 +2948,7 @@ lpfc_els_rsp_rpl_acc(struct lpfc_hba * phba, uint16_t cmdsize, /* Xmit ELS RPL ACC response tag <ulpIoTag> */ lpfc_printf_log(phba, KERN_INFO, LOG_ELS, - "%d:0128 Xmit ELS RPL ACC response tag x%x " + "%d:0120 Xmit ELS RPL ACC response tag x%x " "Data: x%x x%x x%x x%x x%x\n", phba->brd_no, elsiocb->iocb.ulpIoTag, @@ -3109,7 +3109,7 @@ lpfc_els_rcv_fan(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb, struct lpfc_nodelist *ndlp, *next_ndlp; /* FAN received */ - lpfc_printf_log(phba, KERN_INFO, LOG_ELS, "%d:265 FAN received\n", + 
lpfc_printf_log(phba, KERN_INFO, LOG_ELS, "%d:0265 FAN received\n", phba->brd_no); icmd = &cmdiocb->iocb; diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c index 4d6cf990c4f..1c3f2689155 100644 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c @@ -1782,7 +1782,7 @@ lpfc_findnode_did(struct lpfc_hba * phba, uint32_t order, uint32_t did) /* LOG change to REGLOGIN */ /* FIND node DID reglogin */ lpfc_printf_log(phba, KERN_INFO, LOG_NODE, - "%d:0931 FIND node DID reglogin" + "%d:0901 FIND node DID reglogin" " Data: x%p x%x x%x x%x\n", phba->brd_no, ndlp, ndlp->nlp_DID, @@ -1805,7 +1805,7 @@ lpfc_findnode_did(struct lpfc_hba * phba, uint32_t order, uint32_t did) /* LOG change to PRLI */ /* FIND node DID prli */ lpfc_printf_log(phba, KERN_INFO, LOG_NODE, - "%d:0931 FIND node DID prli " + "%d:0902 FIND node DID prli " "Data: x%p x%x x%x x%x\n", phba->brd_no, ndlp, ndlp->nlp_DID, @@ -1828,7 +1828,7 @@ lpfc_findnode_did(struct lpfc_hba * phba, uint32_t order, uint32_t did) /* LOG change to NPR */ /* FIND node DID npr */ lpfc_printf_log(phba, KERN_INFO, LOG_NODE, - "%d:0931 FIND node DID npr " + "%d:0903 FIND node DID npr " "Data: x%p x%x x%x x%x\n", phba->brd_no, ndlp, ndlp->nlp_DID, @@ -1851,7 +1851,7 @@ lpfc_findnode_did(struct lpfc_hba * phba, uint32_t order, uint32_t did) /* LOG change to UNUSED */ /* FIND node DID unused */ lpfc_printf_log(phba, KERN_INFO, LOG_NODE, - "%d:0931 FIND node DID unused " + "%d:0905 FIND node DID unused " "Data: x%p x%x x%x x%x\n", phba->brd_no, ndlp, ndlp->nlp_DID, @@ -2335,7 +2335,7 @@ lpfc_disc_timeout_handler(struct lpfc_hba *phba) initlinkmbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!initlinkmbox) { lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, - "%d:0226 Device Discovery " + "%d:0206 Device Discovery " "completion error\n", phba->brd_no); phba->hba_state = LPFC_HBA_ERROR; @@ -2365,7 +2365,7 @@ lpfc_disc_timeout_handler(struct lpfc_hba *phba) if (!clearlambox) { clrlaerr = 1; lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, - "%d:0226 Device Discovery " + "%d:0207 Device Discovery " "completion error\n", phba->brd_no); phba->hba_state = LPFC_HBA_ERROR; diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index ef47b824cbe..16dc8c82b5c 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -1616,7 +1616,11 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) goto out_free_iocbq; } - /* We can rely on a queue depth attribute only after SLI HBA setup */ + /* + * Set initial can_queue value since 0 is no longer supported and + * scsi_add_host will fail. This will be adjusted later based on the + * max xri value determined in hba setup. + */ host->can_queue = phba->cfg_hba_queue_depth - 10; /* Tell the midlayer we support 16 byte commands */ @@ -1656,6 +1660,12 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) goto out_free_irq; } + /* + * hba setup may have changed the hba_queue_depth so we need to adjust + * the value of can_queue. 
+ */ + host->can_queue = phba->cfg_hba_queue_depth - 10; + lpfc_discovery_wait(phba); if (phba->cfg_poll & DISABLE_FCP_RING_INT) { diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c index e42f22aaf71..4d016c2a1b2 100644 --- a/drivers/scsi/lpfc/lpfc_mbox.c +++ b/drivers/scsi/lpfc/lpfc_mbox.c @@ -651,3 +651,19 @@ lpfc_mbox_get(struct lpfc_hba * phba) return mbq; } + +int +lpfc_mbox_tmo_val(struct lpfc_hba *phba, int cmd) +{ + switch (cmd) { + case MBX_WRITE_NV: /* 0x03 */ + case MBX_UPDATE_CFG: /* 0x1B */ + case MBX_DOWN_LOAD: /* 0x1C */ + case MBX_DEL_LD_ENTRY: /* 0x1D */ + case MBX_LOAD_AREA: /* 0x81 */ + case MBX_FLASH_WR_ULA: /* 0x98 */ + case MBX_LOAD_EXP_ROM: /* 0x9C */ + return LPFC_MBOX_TMO_FLASH_CMD; + } + return LPFC_MBOX_TMO; +} diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c index bd0b0e293d6..b38021a32c9 100644 --- a/drivers/scsi/lpfc/lpfc_nportdisc.c +++ b/drivers/scsi/lpfc/lpfc_nportdisc.c @@ -179,7 +179,7 @@ lpfc_els_abort(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, /* Abort outstanding I/O on NPort <nlp_DID> */ lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY, - "%d:0201 Abort outstanding I/O on NPort x%x " + "%d:0205 Abort outstanding I/O on NPort x%x " "Data: x%x x%x x%x\n", phba->brd_no, ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi); diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c index a760a44173d..0811c824e0e 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.c +++ b/drivers/scsi/lpfc/lpfc_scsi.c @@ -1104,7 +1104,7 @@ lpfc_reset_bus_handler(struct scsi_cmnd *cmnd) ndlp->rport->dd_data); if (ret != SUCCESS) { lpfc_printf_log(phba, KERN_ERR, LOG_FCP, - "%d:0713 Bus Reset on target %d failed\n", + "%d:0700 Bus Reset on target %d failed\n", phba->brd_no, i); err_count++; } diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index 350a625fa22..70f4d5a1348 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -320,7 +320,8 @@ lpfc_sli_next_iotag(struct lpfc_hba * phba, struct lpfc_iocbq * iocbq) kfree(old_arr); return iotag; } - } + } else + spin_unlock_irq(phba->host->host_lock); lpfc_printf_log(phba, KERN_ERR,LOG_SLI, "%d:0318 Failed to allocate IOTAG.last IOTAG is %d\n", @@ -969,9 +970,11 @@ void lpfc_sli_poll_fcp_ring(struct lpfc_hba * phba) * resources need to be recovered. */ if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) { - printk(KERN_INFO "%s: IOCB cmd 0x%x processed." - " Skipping completion\n", __FUNCTION__, - irsp->ulpCommand); + lpfc_printf_log(phba, KERN_INFO, LOG_SLI, + "%d:0314 IOCB cmd 0x%x" + " processed. Skipping" + " completion", phba->brd_no, + irsp->ulpCommand); break; } @@ -1104,7 +1107,7 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba * phba, if (unlikely(irsp->ulpStatus)) { /* Rsp ring <ringno> error: IOCB */ lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, - "%d:0326 Rsp Ring %d error: IOCB Data: " + "%d:0336 Rsp Ring %d error: IOCB Data: " "x%x x%x x%x x%x x%x x%x x%x x%x\n", phba->brd_no, pring->ringno, irsp->un.ulpWord[0], irsp->un.ulpWord[1], @@ -1122,9 +1125,11 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba * phba, * resources need to be recovered. */ if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) { - printk(KERN_INFO "%s: IOCB cmd 0x%x processed. " - "Skipping completion\n", __FUNCTION__, - irsp->ulpCommand); + lpfc_printf_log(phba, KERN_INFO, LOG_SLI, + "%d:0333 IOCB cmd 0x%x" + " processed. 
Skipping" + " completion\n", phba->brd_no, + irsp->ulpCommand); break; } @@ -1155,7 +1160,7 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba * phba, } else { /* Unknown IOCB command */ lpfc_printf_log(phba, KERN_ERR, LOG_SLI, - "%d:0321 Unknown IOCB command " + "%d:0334 Unknown IOCB command " "Data: x%x, x%x x%x x%x x%x\n", phba->brd_no, type, irsp->ulpCommand, irsp->ulpStatus, irsp->ulpIoTag, @@ -1238,7 +1243,7 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba * phba, lpfc_printf_log(phba, KERN_ERR, LOG_SLI, - "%d:0312 Ring %d handler: portRspPut %d " + "%d:0303 Ring %d handler: portRspPut %d " "is bigger then rsp ring %d\n", phba->brd_no, pring->ringno, portRspPut, portRspMax); @@ -1383,7 +1388,7 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba * phba, lpfc_printf_log(phba, KERN_ERR, LOG_SLI, - "%d:0321 Unknown IOCB command " + "%d:0335 Unknown IOCB command " "Data: x%x x%x x%x x%x\n", phba->brd_no, irsp->ulpCommand, @@ -1399,11 +1404,11 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba * phba, next_iocb, &saveq->list, list) { + list_del(&rspiocbp->list); lpfc_sli_release_iocbq(phba, rspiocbp); } } - lpfc_sli_release_iocbq(phba, saveq); } } @@ -1711,15 +1716,13 @@ lpfc_sli_brdreset(struct lpfc_hba * phba) phba->fc_myDID = 0; phba->fc_prevDID = 0; - psli->sli_flag = 0; - /* Turn off parity checking and serr during the physical reset */ pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value); pci_write_config_word(phba->pcidev, PCI_COMMAND, (cfg_value & ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR))); - psli->sli_flag &= ~LPFC_SLI2_ACTIVE; + psli->sli_flag &= ~(LPFC_SLI2_ACTIVE | LPFC_PROCESS_LA); /* Now toggle INITFF bit in the Host Control Register */ writel(HC_INITFF, phba->HCregaddr); mdelay(1); @@ -1760,7 +1763,7 @@ lpfc_sli_brdrestart(struct lpfc_hba * phba) /* Restart HBA */ lpfc_printf_log(phba, KERN_INFO, LOG_SLI, - "%d:0328 Restart HBA Data: x%x x%x\n", phba->brd_no, + "%d:0337 Restart HBA Data: x%x x%x\n", phba->brd_no, phba->hba_state, psli->sli_flag); word0 = 0; @@ -1792,6 +1795,9 @@ lpfc_sli_brdrestart(struct lpfc_hba * phba) spin_unlock_irq(phba->host->host_lock); + memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets)); + psli->stats_start = get_seconds(); + if (skip_post) mdelay(100); else @@ -1902,6 +1908,9 @@ lpfc_sli_hba_setup(struct lpfc_hba * phba) } while (resetcount < 2 && !done) { + spin_lock_irq(phba->host->host_lock); + phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE; + spin_unlock_irq(phba->host->host_lock); phba->hba_state = LPFC_STATE_UNKNOWN; lpfc_sli_brdrestart(phba); msleep(2500); @@ -1909,6 +1918,9 @@ lpfc_sli_hba_setup(struct lpfc_hba * phba) if (rc) break; + spin_lock_irq(phba->host->host_lock); + phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; + spin_unlock_irq(phba->host->host_lock); resetcount++; /* Call pre CONFIG_PORT mailbox command initialization. 
A value of 0 @@ -2194,7 +2206,8 @@ lpfc_sli_issue_mbox(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmbox, uint32_t flag) return (MBX_NOT_FINISHED); } /* timeout active mbox command */ - mod_timer(&psli->mbox_tmo, jiffies + HZ * LPFC_MBOX_TMO); + mod_timer(&psli->mbox_tmo, (jiffies + + (HZ * lpfc_mbox_tmo_val(phba, mb->mbxCommand)))); } /* Mailbox cmd <cmd> issue */ @@ -2254,7 +2267,6 @@ lpfc_sli_issue_mbox(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmbox, uint32_t flag) break; case MBX_POLL: - i = 0; psli->mbox_active = NULL; if (psli->sli_flag & LPFC_SLI2_ACTIVE) { /* First read mbox status word */ @@ -2268,11 +2280,14 @@ lpfc_sli_issue_mbox(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmbox, uint32_t flag) /* Read the HBA Host Attention Register */ ha_copy = readl(phba->HAregaddr); + i = lpfc_mbox_tmo_val(phba, mb->mbxCommand); + i *= 1000; /* Convert to ms */ + /* Wait for command to complete */ while (((word0 & OWN_CHIP) == OWN_CHIP) || (!(ha_copy & HA_MBATT) && (phba->hba_state > LPFC_WARM_START))) { - if (i++ >= 100) { + if (i-- <= 0) { psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; spin_unlock_irqrestore(phba->host->host_lock, drvr_flag); @@ -2290,7 +2305,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmbox, uint32_t flag) /* Can be in interrupt context, do not sleep */ /* (or might be called with interrupts disabled) */ - mdelay(i); + mdelay(1); spin_lock_irqsave(phba->host->host_lock, drvr_flag); @@ -3005,7 +3020,7 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba * phba, if (timeleft == 0) { lpfc_printf_log(phba, KERN_ERR, LOG_SLI, - "%d:0329 IOCB wait timeout error - no " + "%d:0338 IOCB wait timeout error - no " "wake response Data x%x\n", phba->brd_no, timeout); retval = IOCB_TIMEDOUT; diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h index d8ef0d2894d..e26de680935 100644 --- a/drivers/scsi/lpfc/lpfc_sli.h +++ b/drivers/scsi/lpfc/lpfc_sli.h @@ -172,6 +172,18 @@ struct lpfc_sli_stat { uint32_t mbox_busy; /* Mailbox cmd busy */ }; +/* Structure to store link status values when port stats are reset */ +struct lpfc_lnk_stat { + uint32_t link_failure_count; + uint32_t loss_of_sync_count; + uint32_t loss_of_signal_count; + uint32_t prim_seq_protocol_err_count; + uint32_t invalid_tx_word_count; + uint32_t invalid_crc_count; + uint32_t error_frames; + uint32_t link_events; +}; + /* Structure used to hold SLI information */ struct lpfc_sli { uint32_t num_rings; @@ -201,6 +213,8 @@ struct lpfc_sli { struct lpfc_iocbq ** iocbq_lookup; /* array to lookup IOCB by IOTAG */ size_t iocbq_lookup_len; /* current lengs of the array */ uint16_t last_iotag; /* last allocated IOTAG */ + unsigned long stats_start; /* in seconds */ + struct lpfc_lnk_stat lnk_stat_offsets; }; /* Given a pointer to the start of the ring, and the slot number of @@ -211,3 +225,9 @@ struct lpfc_sli { #define LPFC_MBOX_TMO 30 /* Sec tmo for outstanding mbox command */ +#define LPFC_MBOX_TMO_FLASH_CMD 300 /* Sec tmo for outstanding FLASH write + * or erase cmds. This is especially + * long because of the potential of + * multiple flash erases that can be + * spawned. + */ diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h index 10e89c6ae82..c7963f91d0a 100644 --- a/drivers/scsi/lpfc/lpfc_version.h +++ b/drivers/scsi/lpfc/lpfc_version.h @@ -18,7 +18,7 @@ * included with this package. 
* *******************************************************************/ -#define LPFC_DRIVER_VERSION "8.1.7" +#define LPFC_DRIVER_VERSION "8.1.8" #define LPFC_DRIVER_NAME "lpfc" diff --git a/drivers/scsi/megaraid/mega_common.h b/drivers/scsi/megaraid/mega_common.h index 4675343228a..8cd0bd1d0f7 100644 --- a/drivers/scsi/megaraid/mega_common.h +++ b/drivers/scsi/megaraid/mega_common.h @@ -37,6 +37,12 @@ #define LSI_MAX_CHANNELS 16 #define LSI_MAX_LOGICAL_DRIVES_64LD (64+1) +#define HBA_SIGNATURE_64_BIT 0x299 +#define PCI_CONF_AMISIG64 0xa4 + +#define MEGA_SCSI_INQ_EVPD 1 +#define MEGA_INVALID_FIELD_IN_CDB 0x24 + /** * scb_t - scsi command control block diff --git a/drivers/scsi/megaraid/megaraid_ioctl.h b/drivers/scsi/megaraid/megaraid_ioctl.h index bdaee144a1c..b8aa34202ec 100644 --- a/drivers/scsi/megaraid/megaraid_ioctl.h +++ b/drivers/scsi/megaraid/megaraid_ioctl.h @@ -132,6 +132,10 @@ typedef struct uioc { /* Driver Data: */ void __user * user_data; uint32_t user_data_len; + + /* 64bit alignment */ + uint32_t pad_for_64bit_align; + mraid_passthru_t __user *user_pthru; mraid_passthru_t *pthru32; diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c index 92715130ac0..cd982c877da 100644 --- a/drivers/scsi/megaraid/megaraid_mbox.c +++ b/drivers/scsi/megaraid/megaraid_mbox.c @@ -10,7 +10,7 @@ * 2 of the License, or (at your option) any later version. * * FILE : megaraid_mbox.c - * Version : v2.20.4.8 (Apr 11 2006) + * Version : v2.20.4.9 (Jul 16 2006) * * Authors: * Atul Mukker <Atul.Mukker@lsil.com> @@ -720,6 +720,7 @@ megaraid_init_mbox(adapter_t *adapter) struct pci_dev *pdev; mraid_device_t *raid_dev; int i; + uint32_t magic64; adapter->ito = MBOX_TIMEOUT; @@ -863,12 +864,33 @@ megaraid_init_mbox(adapter_t *adapter) // Set the DMA mask to 64-bit. 
All supported controllers as capable of // DMA in this range - if (pci_set_dma_mask(adapter->pdev, DMA_64BIT_MASK) != 0) { - - con_log(CL_ANN, (KERN_WARNING - "megaraid: could not set DMA mask for 64-bit.\n")); + pci_read_config_dword(adapter->pdev, PCI_CONF_AMISIG64, &magic64); + + if (((magic64 == HBA_SIGNATURE_64_BIT) && + ((adapter->pdev->subsystem_device != + PCI_SUBSYS_ID_MEGARAID_SATA_150_6) || + (adapter->pdev->subsystem_device != + PCI_SUBSYS_ID_MEGARAID_SATA_150_4))) || + (adapter->pdev->vendor == PCI_VENDOR_ID_LSI_LOGIC && + adapter->pdev->device == PCI_DEVICE_ID_VERDE) || + (adapter->pdev->vendor == PCI_VENDOR_ID_LSI_LOGIC && + adapter->pdev->device == PCI_DEVICE_ID_DOBSON) || + (adapter->pdev->vendor == PCI_VENDOR_ID_LSI_LOGIC && + adapter->pdev->device == PCI_DEVICE_ID_LINDSAY) || + (adapter->pdev->vendor == PCI_VENDOR_ID_DELL && + adapter->pdev->device == PCI_DEVICE_ID_PERC4_DI_EVERGLADES) || + (adapter->pdev->vendor == PCI_VENDOR_ID_DELL && + adapter->pdev->device == PCI_DEVICE_ID_PERC4E_DI_KOBUK)) { + if (pci_set_dma_mask(adapter->pdev, DMA_64BIT_MASK)) { + con_log(CL_ANN, (KERN_WARNING + "megaraid: DMA mask for 64-bit failed\n")); - goto out_free_sysfs_res; + if (pci_set_dma_mask (adapter->pdev, DMA_32BIT_MASK)) { + con_log(CL_ANN, (KERN_WARNING + "megaraid: 32-bit DMA mask failed\n")); + goto out_free_sysfs_res; + } + } } // setup tasklet for DPC @@ -1622,6 +1644,14 @@ megaraid_mbox_build_cmd(adapter_t *adapter, struct scsi_cmnd *scp, int *busy) rdev->last_disp |= (1L << SCP2CHANNEL(scp)); } + if (scp->cmnd[1] & MEGA_SCSI_INQ_EVPD) { + scp->sense_buffer[0] = 0x70; + scp->sense_buffer[2] = ILLEGAL_REQUEST; + scp->sense_buffer[12] = MEGA_INVALID_FIELD_IN_CDB; + scp->result = CHECK_CONDITION << 1; + return NULL; + } + /* Fall through */ case READ_CAPACITY: diff --git a/drivers/scsi/megaraid/megaraid_mbox.h b/drivers/scsi/megaraid/megaraid_mbox.h index 868fb0ec93e..2b5a3285f79 100644 --- a/drivers/scsi/megaraid/megaraid_mbox.h +++ b/drivers/scsi/megaraid/megaraid_mbox.h @@ -21,8 +21,8 @@ #include "megaraid_ioctl.h" -#define MEGARAID_VERSION "2.20.4.8" -#define MEGARAID_EXT_VERSION "(Release Date: Mon Apr 11 12:27:22 EST 2006)" +#define MEGARAID_VERSION "2.20.4.9" +#define MEGARAID_EXT_VERSION "(Release Date: Sun Jul 16 12:27:22 EST 2006)" /* diff --git a/drivers/scsi/megaraid/megaraid_mm.c b/drivers/scsi/megaraid/megaraid_mm.c index e8f534fb336..d85b9a8f1b8 100644 --- a/drivers/scsi/megaraid/megaraid_mm.c +++ b/drivers/scsi/megaraid/megaraid_mm.c @@ -10,7 +10,7 @@ * 2 of the License, or (at your option) any later version. * * FILE : megaraid_mm.c - * Version : v2.20.2.6 (Mar 7 2005) + * Version : v2.20.2.7 (Jul 16 2006) * * Common management module */ diff --git a/drivers/scsi/megaraid/megaraid_mm.h b/drivers/scsi/megaraid/megaraid_mm.h index 3d9e67d6849..c8762b2b8ed 100644 --- a/drivers/scsi/megaraid/megaraid_mm.h +++ b/drivers/scsi/megaraid/megaraid_mm.h @@ -27,9 +27,9 @@ #include "megaraid_ioctl.h" -#define LSI_COMMON_MOD_VERSION "2.20.2.6" +#define LSI_COMMON_MOD_VERSION "2.20.2.7" #define LSI_COMMON_MOD_EXT_VERSION \ - "(Release Date: Mon Mar 7 00:01:03 EST 2005)" + "(Release Date: Sun Jul 16 00:01:03 EST 2006)" #define LSI_DBGLVL dbglevel diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index 139ea0e27fd..0930260aec2 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h @@ -487,6 +487,7 @@ typedef struct { #define MBA_IP_RCV_BUFFER_EMPTY 0x8026 /* IP receive buffer queue empty. 
*/ #define MBA_IP_HDR_DATA_SPLIT 0x8027 /* IP header/data splitting feature */ /* used. */ +#define MBA_TRACE_NOTIFICATION 0x8028 /* Trace/Diagnostic notification. */ #define MBA_POINT_TO_POINT 0x8030 /* Point to point mode. */ #define MBA_CMPLT_1_16BIT 0x8031 /* Completion 1 16bit IOSB. */ #define MBA_CMPLT_2_16BIT 0x8032 /* Completion 2 16bit IOSB. */ diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c index 2b60a27eff0..c5b3c610a32 100644 --- a/drivers/scsi/qla2xxx/qla_iocb.c +++ b/drivers/scsi/qla2xxx/qla_iocb.c @@ -471,6 +471,7 @@ __qla2x00_marker(scsi_qla_host_t *ha, uint16_t loop_id, uint16_t lun, mrk24->nport_handle = cpu_to_le16(loop_id); mrk24->lun[1] = LSB(lun); mrk24->lun[2] = MSB(lun); + host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun)); } else { SET_TARGET_ID(ha, mrk->target, loop_id); mrk->lun = cpu_to_le16(lun); diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index 795bf15b1b8..de0613135f7 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c @@ -587,6 +587,11 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb) DEBUG2(printk("scsi(%ld): Discard RND Frame -- %04x %04x " "%04x.\n", ha->host_no, mb[1], mb[2], mb[3])); break; + + case MBA_TRACE_NOTIFICATION: + DEBUG2(printk("scsi(%ld): Trace Notification -- %04x %04x.\n", + ha->host_no, mb[1], mb[2])); + break; } } diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index ec7ebb6037e..1e2b95bac83 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -744,7 +744,6 @@ qla2xxx_eh_device_reset(struct scsi_cmnd *cmd) { scsi_qla_host_t *ha = to_qla_host(cmd->device->host); fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata; - srb_t *sp; int ret; unsigned int id, lun; unsigned long serial; @@ -755,8 +754,7 @@ qla2xxx_eh_device_reset(struct scsi_cmnd *cmd) lun = cmd->device->lun; serial = cmd->serial_number; - sp = (srb_t *) CMD_SP(cmd); - if (!sp || !fcport) + if (!fcport) return ret; qla_printk(KERN_INFO, ha, @@ -875,7 +873,6 @@ qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd) { scsi_qla_host_t *ha = to_qla_host(cmd->device->host); fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata; - srb_t *sp; int ret; unsigned int id, lun; unsigned long serial; @@ -886,8 +883,7 @@ qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd) lun = cmd->device->lun; serial = cmd->serial_number; - sp = (srb_t *) CMD_SP(cmd); - if (!sp || !fcport) + if (!fcport) return ret; qla_printk(KERN_INFO, ha, @@ -936,7 +932,6 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd) { scsi_qla_host_t *ha = to_qla_host(cmd->device->host); fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata; - srb_t *sp; int ret; unsigned int id, lun; unsigned long serial; @@ -947,8 +942,7 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd) lun = cmd->device->lun; serial = cmd->serial_number; - sp = (srb_t *) CMD_SP(cmd); - if (!sp || !fcport) + if (!fcport) return ret; qla_printk(KERN_INFO, ha, diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h index d2d68344065..f5826bf04a2 100644 --- a/drivers/scsi/qla2xxx/qla_version.h +++ b/drivers/scsi/qla2xxx/qla_version.h @@ -7,7 +7,7 @@ /* * Driver version */ -#define QLA2XXX_VERSION "8.01.05-k3" +#define QLA2XXX_VERSION "8.01.05-k4" #define QLA_DRIVER_MAJOR_VER 8 #define QLA_DRIVER_MINOR_VER 1 diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c index 7b9e8fa1a4e..2ecd1418857 100644 --- a/drivers/scsi/scsi_transport_iscsi.c +++ 
b/drivers/scsi/scsi_transport_iscsi.c @@ -34,6 +34,7 @@ #define ISCSI_SESSION_ATTRS 11 #define ISCSI_CONN_ATTRS 11 #define ISCSI_HOST_ATTRS 0 +#define ISCSI_TRANSPORT_VERSION "1.1-646" struct iscsi_internal { int daemon_pid; @@ -634,13 +635,13 @@ mempool_zone_get_skb(struct mempool_zone *zone) } static int -iscsi_broadcast_skb(struct mempool_zone *zone, struct sk_buff *skb) +iscsi_broadcast_skb(struct mempool_zone *zone, struct sk_buff *skb, gfp_t gfp) { unsigned long flags; int rc; skb_get(skb); - rc = netlink_broadcast(nls, skb, 0, 1, GFP_KERNEL); + rc = netlink_broadcast(nls, skb, 0, 1, gfp); if (rc < 0) { mempool_free(skb, zone->pool); printk(KERN_ERR "iscsi: can not broadcast skb (%d)\n", rc); @@ -749,7 +750,7 @@ void iscsi_conn_error(struct iscsi_cls_conn *conn, enum iscsi_err error) ev->r.connerror.cid = conn->cid; ev->r.connerror.sid = iscsi_conn_get_sid(conn); - iscsi_broadcast_skb(conn->z_error, skb); + iscsi_broadcast_skb(conn->z_error, skb, GFP_ATOMIC); dev_printk(KERN_INFO, &conn->dev, "iscsi: detected conn error (%d)\n", error); @@ -895,7 +896,7 @@ int iscsi_if_destroy_session_done(struct iscsi_cls_conn *conn) * this will occur if the daemon is not up, so we just warn * the user and when the daemon is restarted it will handle it */ - rc = iscsi_broadcast_skb(conn->z_pdu, skb); + rc = iscsi_broadcast_skb(conn->z_pdu, skb, GFP_KERNEL); if (rc < 0) dev_printk(KERN_ERR, &conn->dev, "Cannot notify userspace of " "session destruction event. Check iscsi daemon\n"); @@ -958,7 +959,7 @@ int iscsi_if_create_session_done(struct iscsi_cls_conn *conn) * this will occur if the daemon is not up, so we just warn * the user and when the daemon is restarted it will handle it */ - rc = iscsi_broadcast_skb(conn->z_pdu, skb); + rc = iscsi_broadcast_skb(conn->z_pdu, skb, GFP_KERNEL); if (rc < 0) dev_printk(KERN_ERR, &conn->dev, "Cannot notify userspace of " "session creation event. Check iscsi daemon\n"); @@ -1613,6 +1614,9 @@ static __init int iscsi_transport_init(void) { int err; + printk(KERN_INFO "Loading iSCSI transport class v%s.", + ISCSI_TRANSPORT_VERSION); + err = class_register(&iscsi_transport_class); if (err) return err; @@ -1678,3 +1682,4 @@ MODULE_AUTHOR("Mike Christie <michaelc@cs.wisc.edu>, " "Alex Aizman <itn780@yahoo.com>"); MODULE_DESCRIPTION("iSCSI Transport Interface"); MODULE_LICENSE("GPL"); +MODULE_VERSION(ISCSI_TRANSPORT_VERSION); diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c index 8c505076c0e..739d3ef46a4 100644 --- a/drivers/scsi/sym53c8xx_2/sym_glue.c +++ b/drivers/scsi/sym53c8xx_2/sym_glue.c @@ -2084,7 +2084,7 @@ static struct pci_device_id sym2_id_table[] __devinitdata = { { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C860, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_53C1510, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, + PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_STORAGE_SCSI<<8, 0xffff00, 0UL }, { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C896, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C895, |