[SCSI] zfcp: Cleanup qdio code

Clean up the interface code between zfcp and qdio. Also move code that
belongs to the qdio interface from the erp code into the qdio file.

Signed-off-by: Swen Schillig <swen@vnet.ibm.com>
Signed-off-by: Christof Schmitt <christof.schmitt@de.ibm.com>
Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
Swen Schillig 2008-06-10 18:20:57 +02:00 committed by James Bottomley
parent fa04c28168
commit 00bab91066
7 changed files with 431 additions and 806 deletions


@@ -606,7 +606,6 @@ static void _zfcp_status_read_scheduler(struct work_struct *work)
struct zfcp_adapter *
zfcp_adapter_enqueue(struct ccw_device *ccw_device)
{
int retval = 0;
struct zfcp_adapter *adapter;
/*
@@ -627,19 +626,11 @@ zfcp_adapter_enqueue(struct ccw_device *ccw_device)
/* save ccw_device pointer */
adapter->ccw_device = ccw_device;
retval = zfcp_qdio_allocate_queues(adapter);
if (retval)
goto queues_alloc_failed;
retval = zfcp_qdio_allocate(adapter);
if (retval)
if (zfcp_qdio_allocate(adapter))
goto qdio_allocate_failed;
retval = zfcp_allocate_low_mem_buffers(adapter);
if (retval) {
ZFCP_LOG_INFO("error: pool allocation failed\n");
if (zfcp_allocate_low_mem_buffers(adapter))
goto failed_low_mem_buffers;
}
/* initialise reference count stuff */
atomic_set(&adapter->refcount, 0);
@@ -653,11 +644,8 @@ zfcp_adapter_enqueue(struct ccw_device *ccw_device)
/* initialize list of fsf requests */
spin_lock_init(&adapter->req_list_lock);
retval = zfcp_reqlist_alloc(adapter);
if (retval) {
ZFCP_LOG_INFO("request list initialization failed\n");
if (zfcp_reqlist_alloc(adapter))
goto failed_low_mem_buffers;
}
/* initialize debug locks */
@@ -666,8 +654,7 @@ zfcp_adapter_enqueue(struct ccw_device *ccw_device)
spin_lock_init(&adapter->scsi_dbf_lock);
spin_lock_init(&adapter->rec_dbf_lock);
retval = zfcp_adapter_debug_register(adapter);
if (retval)
if (zfcp_adapter_debug_register(adapter))
goto debug_register_failed;
/* initialize error recovery stuff */
@@ -685,7 +672,7 @@ zfcp_adapter_enqueue(struct ccw_device *ccw_device)
init_waitqueue_head(&adapter->erp_done_wqh);
/* initialize lock of associated request queue */
rwlock_init(&adapter->request_queue.queue_lock);
rwlock_init(&adapter->req_q.lock);
INIT_WORK(&adapter->stat_work, _zfcp_status_read_scheduler);
/* mark adapter unusable as long as sysfs registration is not complete */
@@ -723,12 +710,8 @@ zfcp_adapter_enqueue(struct ccw_device *ccw_device)
zfcp_reqlist_free(adapter);
failed_low_mem_buffers:
zfcp_free_low_mem_buffers(adapter);
if (qdio_free(ccw_device) != 0)
ZFCP_LOG_NORMAL("bug: qdio_free for adapter %s failed\n",
zfcp_get_busid_by_adapter(adapter));
qdio_allocate_failed:
zfcp_qdio_free_queues(adapter);
queues_alloc_failed:
zfcp_qdio_free(adapter);
kfree(adapter);
adapter = NULL;
out:
@@ -757,10 +740,6 @@ zfcp_adapter_dequeue(struct zfcp_adapter *adapter)
retval = zfcp_reqlist_isempty(adapter);
spin_unlock_irqrestore(&adapter->req_list_lock, flags);
if (!retval) {
ZFCP_LOG_NORMAL("bug: adapter %s (%p) still in use, "
"%i requests outstanding\n",
zfcp_get_busid_by_adapter(adapter), adapter,
atomic_read(&adapter->reqs_active));
retval = -EBUSY;
goto out;
}
@@ -775,19 +754,9 @@ zfcp_adapter_dequeue(struct zfcp_adapter *adapter)
/* decrease number of adapters in list */
zfcp_data.adapters--;
ZFCP_LOG_TRACE("adapter %s (%p) removed from list, "
"%i adapters still in list\n",
zfcp_get_busid_by_adapter(adapter),
adapter, zfcp_data.adapters);
retval = qdio_free(adapter->ccw_device);
if (retval)
ZFCP_LOG_NORMAL("bug: qdio_free for adapter %s failed\n",
zfcp_get_busid_by_adapter(adapter));
zfcp_qdio_free(adapter);
zfcp_free_low_mem_buffers(adapter);
/* free memory of adapter data structure and queues */
zfcp_qdio_free_queues(adapter);
zfcp_reqlist_free(adapter);
kfree(adapter->fc_stats);
kfree(adapter->stats_reset_data);
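The qdio teardown in zfcp_adapter_dequeue() (and in the enqueue error path above) shrinks from two calls — qdio_free() on the ccw device plus zfcp_qdio_free_queues() — to a single zfcp_qdio_free(). The new function lives in the qdio file, whose diff is suppressed at the bottom of this page, so the following is only a sketch of what it presumably does, reconstructed from the calls it replaces; QBUFF_PER_PAGE is assumed to have moved into that file along with the queue allocation code:

void zfcp_qdio_free(struct zfcp_adapter *adapter)
{
	int p;

	if (adapter->ccw_device)
		qdio_free(adapter->ccw_device);

	/* SBALs were allocated page-wise, so only every QBUFF_PER_PAGE-th
	 * pointer starts a page of its own and needs to be freed */
	for (p = 0; p < QDIO_MAX_BUFFERS_PER_Q; p += QBUFF_PER_PAGE) {
		free_page((unsigned long) adapter->req_q.sbal[p]);
		free_page((unsigned long) adapter->resp_q.sbal[p]);
	}
}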


@@ -603,13 +603,14 @@ static const char *zfcp_rec_dbf_ids[] = {
[137] = "hbaapi port open",
[138] = "hbaapi unit open",
[139] = "hbaapi unit shutdown",
[140] = "qdio error",
[140] = "qdio error outbound",
[141] = "scsi host reset",
[142] = "dismissing fsf request for recovery action",
[143] = "recovery action timed out",
[144] = "recovery action gone",
[145] = "recovery action being processed",
[146] = "recovery action ready for next step",
[147] = "qdio error inbound",
};
static int zfcp_rec_dbf_view_format(debug_info_t *id, struct debug_view *view,


@@ -112,21 +112,10 @@ zfcp_address_to_sg(void *address, struct scatterlist *list, unsigned int size)
/* max. number of (data buffer) SBALEs in largest SBAL chain
multiplied with number of sectors per 4k block */
/* FIXME(tune): free space should be one max. SBAL chain plus what? */
#define ZFCP_QDIO_PCI_INTERVAL (QDIO_MAX_BUFFERS_PER_Q \
- (ZFCP_MAX_SBALS_PER_REQ + 4))
#define ZFCP_SBAL_TIMEOUT (5*HZ)
#define ZFCP_TYPE2_RECOVERY_TIME 8 /* seconds */
/* queue polling (values in microseconds) */
#define ZFCP_MAX_INPUT_THRESHOLD 5000 /* FIXME: tune */
#define ZFCP_MAX_OUTPUT_THRESHOLD 1000 /* FIXME: tune */
#define ZFCP_MIN_INPUT_THRESHOLD 1 /* ignored by QDIO layer */
#define ZFCP_MIN_OUTPUT_THRESHOLD 1 /* ignored by QDIO layer */
#define QDIO_SCSI_QFMT 1 /* 1 for FSF */
#define QBUFF_PER_PAGE (PAGE_SIZE / sizeof(struct qdio_buffer))
/********************* FSF SPECIFIC DEFINES *********************************/
@@ -649,13 +638,13 @@ struct zfcp_send_els {
};
struct zfcp_qdio_queue {
struct qdio_buffer *buffer[QDIO_MAX_BUFFERS_PER_Q]; /* SBALs */
u8 free_index; /* index of next free bfr
struct qdio_buffer *sbal[QDIO_MAX_BUFFERS_PER_Q]; /* SBALs */
u8 first; /* index of next free bfr
in queue (free_count>0) */
atomic_t free_count; /* number of free buffers
atomic_t count; /* number of free buffers
in queue */
rwlock_t queue_lock; /* lock for operations on queue */
int distance_from_int; /* SBALs used since PCI indication
rwlock_t lock; /* lock for operations on queue */
int pci_batch; /* SBALs since PCI indication
was last set */
};
@@ -711,15 +700,14 @@ struct zfcp_adapter {
struct list_head port_remove_lh; /* head of ports to be
removed */
u32 ports; /* number of remote ports */
atomic_t reqs_active; /* # active FSF reqs */
unsigned long req_no; /* unique FSF req number */
struct list_head *req_list; /* list of pending reqs */
spinlock_t req_list_lock; /* request list lock */
struct zfcp_qdio_queue request_queue; /* request queue */
struct zfcp_qdio_queue req_q; /* request queue */
u32 fsf_req_seq_no; /* FSF cmnd seq number */
wait_queue_head_t request_wq; /* can be used to wait for
more avaliable SBALs */
struct zfcp_qdio_queue response_queue; /* response queue */
struct zfcp_qdio_queue resp_q; /* response queue */
rwlock_t abort_lock; /* Protects against SCSI
stack abort/command
completion races */


@@ -113,41 +113,6 @@ static void zfcp_erp_action_to_running(struct zfcp_erp_action *);
static void zfcp_erp_memwait_handler(unsigned long);
/**
* zfcp_close_qdio - close qdio queues for an adapter
*/
static void zfcp_close_qdio(struct zfcp_adapter *adapter)
{
struct zfcp_qdio_queue *req_queue;
int first, count;
if (!atomic_test_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status))
return;
/* clear QDIOUP flag, thus do_QDIO is not called during qdio_shutdown */
req_queue = &adapter->request_queue;
write_lock_irq(&req_queue->queue_lock);
atomic_clear_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status);
write_unlock_irq(&req_queue->queue_lock);
while (qdio_shutdown(adapter->ccw_device,
QDIO_FLAG_CLEANUP_USING_CLEAR) == -EINPROGRESS)
ssleep(1);
/* cleanup used outbound sbals */
count = atomic_read(&req_queue->free_count);
if (count < QDIO_MAX_BUFFERS_PER_Q) {
first = (req_queue->free_index+count) % QDIO_MAX_BUFFERS_PER_Q;
count = QDIO_MAX_BUFFERS_PER_Q - count;
zfcp_qdio_zero_sbals(req_queue->buffer, first, count);
}
req_queue->free_index = 0;
atomic_set(&req_queue->free_count, 0);
req_queue->distance_from_int = 0;
adapter->response_queue.free_index = 0;
atomic_set(&adapter->response_queue.free_count, 0);
}
/**
* zfcp_close_fsf - stop FSF operations for an adapter
*
@@ -158,7 +123,7 @@ static void zfcp_close_qdio(struct zfcp_adapter *adapter)
static void zfcp_close_fsf(struct zfcp_adapter *adapter)
{
/* close queues to ensure that buffers are not accessed by adapter */
zfcp_close_qdio(adapter);
zfcp_qdio_close(adapter);
zfcp_fsf_req_dismiss_all(adapter);
/* reset FSF request sequence number */
adapter->fsf_req_seq_no = 0;
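zfcp_close_qdio() above is removed from the erp code; its replacement zfcp_qdio_close() lives in the qdio file, whose diff is suppressed at the bottom of this page. Reconstructed from the removed body and the renamed queue members (req_q, sbal, first, count, pci_batch), it presumably looks close to this sketch:

void zfcp_qdio_close(struct zfcp_adapter *adapter)
{
	struct zfcp_qdio_queue *req_q;
	int first, count;

	if (!atomic_test_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status))
		return;

	/* clear QDIOUP flag, thus do_QDIO is not called during qdio_shutdown */
	req_q = &adapter->req_q;
	write_lock_irq(&req_q->lock);
	atomic_clear_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status);
	write_unlock_irq(&req_q->lock);

	while (qdio_shutdown(adapter->ccw_device,
			     QDIO_FLAG_CLEANUP_USING_CLEAR) == -EINPROGRESS)
		ssleep(1);

	/* wipe the outbound SBALs that were still in flight */
	count = atomic_read(&req_q->count);
	if (count < QDIO_MAX_BUFFERS_PER_Q) {
		first = (req_q->first + count) % QDIO_MAX_BUFFERS_PER_Q;
		count = QDIO_MAX_BUFFERS_PER_Q - count;
		zfcp_qdio_zero_sbals(req_q->sbal, first, count);
	}
	req_q->first = 0;
	atomic_set(&req_q->count, 0);
	req_q->pci_batch = 0;
	adapter->resp_q.first = 0;
	atomic_set(&adapter->resp_q.count, 0);
}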
@@ -1735,88 +1700,17 @@ zfcp_erp_adapter_strategy_generic(struct zfcp_erp_action *erp_action, int close)
static int
zfcp_erp_adapter_strategy_open_qdio(struct zfcp_erp_action *erp_action)
{
int retval;
int i;
volatile struct qdio_buffer_element *sbale;
struct zfcp_adapter *adapter = erp_action->adapter;
if (atomic_test_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status)) {
ZFCP_LOG_NORMAL("bug: second attempt to set up QDIO on "
"adapter %s\n",
zfcp_get_busid_by_adapter(adapter));
goto failed_sanity;
}
if (qdio_establish(&adapter->qdio_init_data) != 0) {
ZFCP_LOG_INFO("error: establishment of QDIO queues failed "
"on adapter %s\n",
zfcp_get_busid_by_adapter(adapter));
goto failed_qdio_establish;
}
if (qdio_activate(adapter->ccw_device, 0) != 0) {
ZFCP_LOG_INFO("error: activation of QDIO queues failed "
"on adapter %s\n",
zfcp_get_busid_by_adapter(adapter));
goto failed_qdio_activate;
}
/*
* put buffers into response queue,
*/
for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; i++) {
sbale = &(adapter->response_queue.buffer[i]->element[0]);
sbale->length = 0;
sbale->flags = SBAL_FLAGS_LAST_ENTRY;
sbale->addr = NULL;
}
ZFCP_LOG_TRACE("calling do_QDIO on adapter %s (flags=0x%x, "
"queue_no=%i, index_in_queue=%i, count=%i)\n",
zfcp_get_busid_by_adapter(adapter),
QDIO_FLAG_SYNC_INPUT, 0, 0, QDIO_MAX_BUFFERS_PER_Q);
retval = do_QDIO(adapter->ccw_device,
QDIO_FLAG_SYNC_INPUT,
0, 0, QDIO_MAX_BUFFERS_PER_Q, NULL);
if (retval) {
ZFCP_LOG_NORMAL("bug: setup of QDIO failed (retval=%d)\n",
retval);
goto failed_do_qdio;
} else {
adapter->response_queue.free_index = 0;
atomic_set(&adapter->response_queue.free_count, 0);
ZFCP_LOG_DEBUG("%i buffers successfully enqueued to "
"response queue\n", QDIO_MAX_BUFFERS_PER_Q);
}
/* set index of first avalable SBALS / number of available SBALS */
adapter->request_queue.free_index = 0;
atomic_set(&adapter->request_queue.free_count, QDIO_MAX_BUFFERS_PER_Q);
adapter->request_queue.distance_from_int = 0;
if (zfcp_qdio_open(adapter))
return ZFCP_ERP_FAILED;
/* initialize waitqueue used to wait for free SBALs in requests queue */
init_waitqueue_head(&adapter->request_wq);
/* ok, we did it - skip all cleanups for different failures */
atomic_set_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status);
retval = ZFCP_ERP_SUCCEEDED;
goto out;
failed_do_qdio:
/* NOP */
failed_qdio_activate:
while (qdio_shutdown(adapter->ccw_device,
QDIO_FLAG_CLEANUP_USING_CLEAR) == -EINPROGRESS)
ssleep(1);
failed_qdio_establish:
failed_sanity:
retval = ZFCP_ERP_FAILED;
out:
return retval;
return ZFCP_ERP_SUCCEEDED;
}
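The whole QDIO bring-up that used to be open-coded here — qdio_establish(), qdio_activate(), priming the response queue via do_QDIO(), and resetting the request queue counters — now hides behind zfcp_qdio_open(), including (judging by the removed atomic_set_mask) setting the QDIOUP flag. Since the qdio file's diff is suppressed below, here is a sketch of what that function presumably contains, derived from the removed block; the exact error handling is an assumption:

int zfcp_qdio_open(struct zfcp_adapter *adapter)
{
	volatile struct qdio_buffer_element *sbale;
	int i;

	if (atomic_test_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status))
		return -EIO;

	if (qdio_establish(&adapter->qdio_init_data))
		return -EIO;

	if (qdio_activate(adapter->ccw_device, 0))
		goto failed_qdio;

	/* hand all SBALs of the response queue to the adapter */
	for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; i++) {
		sbale = &(adapter->resp_q.sbal[i]->element[0]);
		sbale->length = 0;
		sbale->flags = SBAL_FLAGS_LAST_ENTRY;
		sbale->addr = NULL;
	}

	if (do_QDIO(adapter->ccw_device, QDIO_FLAG_SYNC_INPUT, 0, 0,
		    QDIO_MAX_BUFFERS_PER_Q, NULL))
		goto failed_qdio;

	/* set index of first available SBAL / number of available SBALs */
	adapter->req_q.first = 0;
	atomic_set(&adapter->req_q.count, QDIO_MAX_BUFFERS_PER_Q);
	adapter->req_q.pci_batch = 0;
	adapter->resp_q.first = 0;
	atomic_set(&adapter->resp_q.count, 0);

	atomic_set_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status);
	return 0;

failed_qdio:
	while (qdio_shutdown(adapter->ccw_device,
			     QDIO_FLAG_CLEANUP_USING_CLEAR) == -EINPROGRESS)
		ssleep(1);
	return -EIO;
}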


@@ -57,21 +57,17 @@ extern int zfcp_ccw_register(void);
extern void zfcp_qdio_zero_sbals(struct qdio_buffer **, int, int);
extern int zfcp_qdio_allocate(struct zfcp_adapter *);
extern int zfcp_qdio_allocate_queues(struct zfcp_adapter *);
extern void zfcp_qdio_free_queues(struct zfcp_adapter *);
extern int zfcp_qdio_determine_pci(struct zfcp_qdio_queue *,
struct zfcp_fsf_req *);
extern void zfcp_qdio_free(struct zfcp_adapter *);
extern int zfcp_qdio_send(struct zfcp_fsf_req *fsf_req);
extern volatile struct qdio_buffer_element *zfcp_qdio_sbale_req
(struct zfcp_fsf_req *, int, int);
(struct zfcp_fsf_req *);
extern volatile struct qdio_buffer_element *zfcp_qdio_sbale_curr
(struct zfcp_fsf_req *);
extern int zfcp_qdio_sbals_from_sg
(struct zfcp_fsf_req *, unsigned long, struct scatterlist *, int, int);
extern int zfcp_qdio_sbals_from_scsicmnd
(struct zfcp_fsf_req *, unsigned long, struct scsi_cmnd *);
(struct zfcp_fsf_req *, unsigned long, struct scatterlist *, int);
extern int zfcp_qdio_open(struct zfcp_adapter *adapter);
extern void zfcp_qdio_close(struct zfcp_adapter *adapter);
/******************************** FSF ****************************************/
extern int zfcp_fsf_open_port(struct zfcp_erp_action *);
extern int zfcp_fsf_close_port(struct zfcp_erp_action *);
@@ -95,7 +91,7 @@ extern int zfcp_fsf_status_read(struct zfcp_adapter *, int);
extern int zfcp_status_read_refill(struct zfcp_adapter *adapter);
extern int zfcp_fsf_req_create(struct zfcp_adapter *, u32, int, mempool_t *,
unsigned long *, struct zfcp_fsf_req **)
__acquires(adapter->request_queue.queue_lock);
__acquires(adapter->req_q.lock);
extern int zfcp_fsf_send_ct(struct zfcp_send_ct *, mempool_t *,
struct zfcp_erp_action *);
extern int zfcp_fsf_send_els(struct zfcp_send_els *);
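Note the slimmer zfcp_qdio_sbale_req(): every caller in the fsf code below now passes just the request, and the SBAL/SBALE indices that used to be supplied explicitly (fsf_req->sbal_last and 0) are presumably derived inside the qdio file, roughly like this sketch (the zfcp_qdio_sbale() helper name is an assumption):

/* return a specific SBALE of a specific SBAL of a queue */
static volatile struct qdio_buffer_element *
zfcp_qdio_sbale(struct zfcp_qdio_queue *q, int sbal_idx, int sbale_idx)
{
	return &q->sbal[sbal_idx]->element[sbale_idx];
}

/* first SBALE of the SBAL the request is currently filling */
volatile struct qdio_buffer_element *
zfcp_qdio_sbale_req(struct zfcp_fsf_req *req)
{
	return zfcp_qdio_sbale(&req->adapter->req_q, req->sbal_last, 0);
}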


@@ -171,7 +171,6 @@ void zfcp_fsf_req_dismiss_all(struct zfcp_adapter *adapter)
BUG_ON(atomic_test_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status));
spin_lock_irqsave(&adapter->req_list_lock, flags);
atomic_set(&adapter->reqs_active, 0);
for (i = 0; i < REQUEST_LIST_SIZE; i++)
list_splice_init(&adapter->req_list[i], &remove_queue);
spin_unlock_irqrestore(&adapter->req_list_lock, flags);
@@ -726,7 +725,7 @@ zfcp_fsf_status_read(struct zfcp_adapter *adapter, int req_flags)
goto failed_req_create;
}
sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_last, 0);
sbale = zfcp_qdio_sbale_req(fsf_req);
sbale[0].flags |= SBAL_FLAGS0_TYPE_STATUS;
sbale[2].flags |= SBAL_FLAGS_LAST_ENTRY;
fsf_req->sbale_curr = 2;
@@ -763,7 +762,7 @@ zfcp_fsf_status_read(struct zfcp_adapter *adapter, int req_flags)
failed_req_create:
zfcp_hba_dbf_event_fsf_unsol("fail", adapter, NULL);
out:
write_unlock_irqrestore(&adapter->request_queue.queue_lock, lock_flags);
write_unlock_irqrestore(&adapter->req_q.lock, lock_flags);
return retval;
}
@@ -1075,7 +1074,7 @@ zfcp_fsf_abort_fcp_command(unsigned long old_req_id,
&unit->status)))
goto unit_blocked;
sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_last, 0);
sbale = zfcp_qdio_sbale_req(fsf_req);
sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
@@ -1098,7 +1097,7 @@ zfcp_fsf_abort_fcp_command(unsigned long old_req_id,
fsf_req = NULL;
out:
write_unlock_irqrestore(&adapter->request_queue.queue_lock, lock_flags);
write_unlock_irqrestore(&adapter->req_q.lock, lock_flags);
return fsf_req;
}
@@ -1295,7 +1294,7 @@ zfcp_fsf_send_ct(struct zfcp_send_ct *ct, mempool_t *pool,
goto failed_req;
}
sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_last, 0);
sbale = zfcp_qdio_sbale_req(fsf_req);
if (zfcp_use_one_sbal(ct->req, ct->req_count,
ct->resp, ct->resp_count)){
/* both request buffer and response buffer
@@ -1311,7 +1310,7 @@ zfcp_fsf_send_ct(struct zfcp_send_ct *ct, mempool_t *pool,
/* try to use chained SBALs */
bytes = zfcp_qdio_sbals_from_sg(fsf_req,
SBAL_FLAGS0_TYPE_WRITE_READ,
ct->req, ct->req_count,
ct->req,
ZFCP_MAX_SBALS_PER_CT_REQ);
if (bytes <= 0) {
ZFCP_LOG_INFO("error: creation of CT request failed "
@@ -1328,7 +1327,7 @@ zfcp_fsf_send_ct(struct zfcp_send_ct *ct, mempool_t *pool,
fsf_req->sbale_curr = ZFCP_LAST_SBALE_PER_SBAL;
bytes = zfcp_qdio_sbals_from_sg(fsf_req,
SBAL_FLAGS0_TYPE_WRITE_READ,
ct->resp, ct->resp_count,
ct->resp,
ZFCP_MAX_SBALS_PER_CT_REQ);
if (bytes <= 0) {
ZFCP_LOG_INFO("error: creation of CT request failed "
@@ -1387,8 +1386,7 @@ zfcp_fsf_send_ct(struct zfcp_send_ct *ct, mempool_t *pool,
}
failed_req:
out:
write_unlock_irqrestore(&adapter->request_queue.queue_lock,
lock_flags);
write_unlock_irqrestore(&adapter->req_q.lock, lock_flags);
return ret;
}
@@ -1593,7 +1591,7 @@ zfcp_fsf_send_els(struct zfcp_send_els *els)
goto port_blocked;
}
sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_last, 0);
sbale = zfcp_qdio_sbale_req(fsf_req);
if (zfcp_use_one_sbal(els->req, els->req_count,
els->resp, els->resp_count)){
/* both request buffer and response buffer
@@ -1609,7 +1607,7 @@ zfcp_fsf_send_els(struct zfcp_send_els *els)
/* try to use chained SBALs */
bytes = zfcp_qdio_sbals_from_sg(fsf_req,
SBAL_FLAGS0_TYPE_WRITE_READ,
els->req, els->req_count,
els->req,
ZFCP_MAX_SBALS_PER_ELS_REQ);
if (bytes <= 0) {
ZFCP_LOG_INFO("error: creation of ELS request failed "
@@ -1626,7 +1624,7 @@ zfcp_fsf_send_els(struct zfcp_send_els *els)
fsf_req->sbale_curr = ZFCP_LAST_SBALE_PER_SBAL;
bytes = zfcp_qdio_sbals_from_sg(fsf_req,
SBAL_FLAGS0_TYPE_WRITE_READ,
els->resp, els->resp_count,
els->resp,
ZFCP_MAX_SBALS_PER_ELS_REQ);
if (bytes <= 0) {
ZFCP_LOG_INFO("error: creation of ELS request failed "
@@ -1657,7 +1655,7 @@ zfcp_fsf_send_els(struct zfcp_send_els *els)
fsf_req->qtcb->bottom.support.timeout = ZFCP_ELS_TIMEOUT;
fsf_req->data = (unsigned long) els;
sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_last, 0);
sbale = zfcp_qdio_sbale_req(fsf_req);
zfcp_san_dbf_event_els_request(fsf_req);
@@ -1680,8 +1678,7 @@ zfcp_fsf_send_els(struct zfcp_send_els *els)
failed_req:
out:
write_unlock_irqrestore(&adapter->request_queue.queue_lock,
lock_flags);
write_unlock_irqrestore(&adapter->req_q.lock, lock_flags);
return ret;
}
@@ -1863,12 +1860,11 @@ zfcp_fsf_exchange_config_data(struct zfcp_erp_action *erp_action)
ZFCP_LOG_INFO("error: Could not create exchange configuration "
"data request for adapter %s.\n",
zfcp_get_busid_by_adapter(adapter));
write_unlock_irqrestore(&adapter->request_queue.queue_lock,
lock_flags);
write_unlock_irqrestore(&adapter->req_q.lock, lock_flags);
return retval;
}
sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_last, 0);
sbale = zfcp_qdio_sbale_req(fsf_req);
sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
@@ -1882,8 +1878,7 @@ zfcp_fsf_exchange_config_data(struct zfcp_erp_action *erp_action)
zfcp_erp_start_timer(fsf_req);
retval = zfcp_fsf_req_send(fsf_req);
write_unlock_irqrestore(&adapter->request_queue.queue_lock,
lock_flags);
write_unlock_irqrestore(&adapter->req_q.lock, lock_flags);
if (retval) {
ZFCP_LOG_INFO("error: Could not send exchange configuration "
"data command on the adapter %s\n",
@@ -1916,12 +1911,11 @@ zfcp_fsf_exchange_config_data_sync(struct zfcp_adapter *adapter,
ZFCP_LOG_INFO("error: Could not create exchange configuration "
"data request for adapter %s.\n",
zfcp_get_busid_by_adapter(adapter));
write_unlock_irqrestore(&adapter->request_queue.queue_lock,
lock_flags);
write_unlock_irqrestore(&adapter->req_q.lock, lock_flags);
return retval;
}
sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_last, 0);
sbale = zfcp_qdio_sbale_req(fsf_req);
sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
@@ -1936,8 +1930,7 @@ zfcp_fsf_exchange_config_data_sync(struct zfcp_adapter *adapter,
zfcp_fsf_start_timer(fsf_req, ZFCP_FSF_REQUEST_TIMEOUT);
retval = zfcp_fsf_req_send(fsf_req);
write_unlock_irqrestore(&adapter->request_queue.queue_lock,
lock_flags);
write_unlock_irqrestore(&adapter->req_q.lock, lock_flags);
if (retval)
ZFCP_LOG_INFO("error: Could not send exchange configuration "
"data command on the adapter %s\n",
@@ -2178,12 +2171,11 @@ zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action)
"exchange port data request for "
"the adapter %s.\n",
zfcp_get_busid_by_adapter(adapter));
write_unlock_irqrestore(&adapter->request_queue.queue_lock,
lock_flags);
write_unlock_irqrestore(&adapter->req_q.lock, lock_flags);
return retval;
}
sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_last, 0);
sbale = zfcp_qdio_sbale_req(fsf_req);
sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
@@ -2192,7 +2184,7 @@ zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action)
zfcp_erp_start_timer(fsf_req);
retval = zfcp_fsf_req_send(fsf_req);
write_unlock_irqrestore(&adapter->request_queue.queue_lock, lock_flags);
write_unlock_irqrestore(&adapter->req_q.lock, lock_flags);
if (retval) {
ZFCP_LOG_INFO("error: Could not send an exchange port data "
@@ -2237,21 +2229,20 @@ zfcp_fsf_exchange_port_data_sync(struct zfcp_adapter *adapter,
"exchange port data request for "
"the adapter %s.\n",
zfcp_get_busid_by_adapter(adapter));
write_unlock_irqrestore(&adapter->request_queue.queue_lock,
lock_flags);
write_unlock_irqrestore(&adapter->req_q.lock, lock_flags);
return retval;
}
if (data)
fsf_req->data = (unsigned long) data;
sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_last, 0);
sbale = zfcp_qdio_sbale_req(fsf_req);
sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
zfcp_fsf_start_timer(fsf_req, ZFCP_FSF_REQUEST_TIMEOUT);
retval = zfcp_fsf_req_send(fsf_req);
write_unlock_irqrestore(&adapter->request_queue.queue_lock, lock_flags);
write_unlock_irqrestore(&adapter->req_q.lock, lock_flags);
if (retval)
ZFCP_LOG_INFO("error: Could not send an exchange port data "
@@ -2355,7 +2346,7 @@ zfcp_fsf_open_port(struct zfcp_erp_action *erp_action)
goto out;
}
sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_last, 0);
sbale = zfcp_qdio_sbale_req(fsf_req);
sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
@@ -2382,8 +2373,7 @@ zfcp_fsf_open_port(struct zfcp_erp_action *erp_action)
zfcp_get_busid_by_adapter(erp_action->adapter),
erp_action->port->wwpn);
out:
write_unlock_irqrestore(&erp_action->adapter->request_queue.queue_lock,
lock_flags);
write_unlock_irqrestore(&erp_action->adapter->req_q.lock, lock_flags);
return retval;
}
@@ -2587,7 +2577,7 @@ zfcp_fsf_close_port(struct zfcp_erp_action *erp_action)
goto out;
}
sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_last, 0);
sbale = zfcp_qdio_sbale_req(fsf_req);
sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
@@ -2615,8 +2605,7 @@ zfcp_fsf_close_port(struct zfcp_erp_action *erp_action)
zfcp_get_busid_by_adapter(erp_action->adapter),
erp_action->port->wwpn);
out:
write_unlock_irqrestore(&erp_action->adapter->request_queue.queue_lock,
lock_flags);
write_unlock_irqrestore(&erp_action->adapter->req_q.lock, lock_flags);
return retval;
}
@@ -2716,7 +2705,7 @@ zfcp_fsf_close_physical_port(struct zfcp_erp_action *erp_action)
goto out;
}
sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_last, 0);
sbale = zfcp_qdio_sbale_req(fsf_req);
sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
@@ -2746,8 +2735,7 @@ zfcp_fsf_close_physical_port(struct zfcp_erp_action *erp_action)
zfcp_get_busid_by_adapter(erp_action->adapter),
erp_action->port->wwpn);
out:
write_unlock_irqrestore(&erp_action->adapter->request_queue.queue_lock,
lock_flags);
write_unlock_irqrestore(&erp_action->adapter->req_q.lock, lock_flags);
return retval;
}
@@ -2911,7 +2899,7 @@ zfcp_fsf_open_unit(struct zfcp_erp_action *erp_action)
goto out;
}
sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_last, 0);
sbale = zfcp_qdio_sbale_req(fsf_req);
sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
@@ -2944,8 +2932,7 @@ zfcp_fsf_open_unit(struct zfcp_erp_action *erp_action)
zfcp_get_busid_by_adapter(erp_action->adapter),
erp_action->port->wwpn, erp_action->unit->fcp_lun);
out:
write_unlock_irqrestore(&erp_action->adapter->request_queue.queue_lock,
lock_flags);
write_unlock_irqrestore(&erp_action->adapter->req_q.lock, lock_flags);
return retval;
}
@@ -3226,7 +3213,7 @@ zfcp_fsf_close_unit(struct zfcp_erp_action *erp_action)
goto out;
}
sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_last, 0);
sbale = zfcp_qdio_sbale_req(fsf_req);
sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
@@ -3255,8 +3242,7 @@ zfcp_fsf_close_unit(struct zfcp_erp_action *erp_action)
zfcp_get_busid_by_adapter(erp_action->adapter),
erp_action->port->wwpn, erp_action->unit->fcp_lun);
out:
write_unlock_irqrestore(&erp_action->adapter->request_queue.queue_lock,
lock_flags);
write_unlock_irqrestore(&erp_action->adapter->req_q.lock, lock_flags);
return retval;
}
@@ -3498,7 +3484,9 @@ zfcp_fsf_send_fcp_command_task(struct zfcp_adapter *adapter,
fcp_cmnd_iu->add_fcp_cdb_length + sizeof (fcp_dl_t);
/* generate SBALEs from data buffer */
real_bytes = zfcp_qdio_sbals_from_scsicmnd(fsf_req, sbtype, scsi_cmnd);
real_bytes = zfcp_qdio_sbals_from_sg(fsf_req, sbtype,
scsi_sglist(scsi_cmnd),
ZFCP_MAX_SBALS_PER_REQ);
if (unlikely(real_bytes < 0)) {
if (fsf_req->sbal_number < ZFCP_MAX_SBALS_PER_REQ) {
ZFCP_LOG_DEBUG(
@@ -3556,7 +3544,7 @@ zfcp_fsf_send_fcp_command_task(struct zfcp_adapter *adapter,
scsi_cmnd->host_scribble = NULL;
success:
failed_req_create:
write_unlock_irqrestore(&adapter->request_queue.queue_lock, lock_flags);
write_unlock_irqrestore(&adapter->req_q.lock, lock_flags);
return retval;
}
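With zfcp_qdio_sbals_from_scsicmnd() gone, the SCSI path feeds scsi_sglist(scsi_cmnd) straight into zfcp_qdio_sbals_from_sg(), and the explicit element count disappears from the signature: a scatterlist is terminated, so it can be walked with sg_next() until it ends. A skeleton of how the helper can iterate under the new signature; zfcp_qdio_sbal_limit() and zfcp_qdio_sbale_next() are hypothetical names, and the chaining/error details are assumptions, not taken from this diff:

int zfcp_qdio_sbals_from_sg(struct zfcp_fsf_req *fsf_req, unsigned long sbtype,
			    struct scatterlist *sg, int max_sbals)
{
	volatile struct qdio_buffer_element *sbale;
	int bytes = 0;

	/* cap how far the SBAL chain may grow (hypothetical helper) */
	zfcp_qdio_sbal_limit(fsf_req, max_sbals);

	/* mark the storage-block type in the current SBAL */
	sbale = zfcp_qdio_sbale_req(fsf_req);
	sbale->flags |= sbtype;

	/* no count argument needed: the scatterlist terminates itself */
	for (; sg; sg = sg_next(sg)) {
		sbale = zfcp_qdio_sbale_next(fsf_req, sbtype); /* hypothetical */
		if (!sbale)
			return -EINVAL;	/* ran out of SBALs within the limit */
		sbale->addr = sg_virt(sg);
		sbale->length = sg->length;
		bytes += sg->length;
	}
	return bytes;
}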
@@ -3609,7 +3597,7 @@ zfcp_fsf_send_fcp_command_task_management(struct zfcp_adapter *adapter,
fsf_req->qtcb->bottom.io.fcp_cmnd_length =
sizeof (struct fcp_cmnd_iu) + sizeof (fcp_dl_t);
sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_last, 0);
sbale = zfcp_qdio_sbale_req(fsf_req);
sbale[0].flags |= SBAL_FLAGS0_TYPE_WRITE;
sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
@@ -3629,7 +3617,7 @@ zfcp_fsf_send_fcp_command_task_management(struct zfcp_adapter *adapter,
fsf_req = NULL;
out:
write_unlock_irqrestore(&adapter->request_queue.queue_lock, lock_flags);
write_unlock_irqrestore(&adapter->req_q.lock, lock_flags);
return fsf_req;
}
@@ -4216,7 +4204,7 @@ struct zfcp_fsf_req *zfcp_fsf_control_file(struct zfcp_adapter *adapter,
goto unlock_queue_lock;
}
sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_last, 0);
sbale = zfcp_qdio_sbale_req(fsf_req);
sbale[0].flags |= direction;
bottom = &fsf_req->qtcb->bottom.support;
@@ -4224,7 +4212,7 @@ struct zfcp_fsf_req *zfcp_fsf_control_file(struct zfcp_adapter *adapter,
bottom->option = fsf_cfdc->option;
bytes = zfcp_qdio_sbals_from_sg(fsf_req, direction,
fsf_cfdc->sg, ZFCP_CFDC_PAGES,
fsf_cfdc->sg,
ZFCP_MAX_SBALS_PER_REQ);
if (bytes != ZFCP_CFDC_MAX_SIZE) {
retval = -ENOMEM;
@@ -4237,7 +4225,7 @@ struct zfcp_fsf_req *zfcp_fsf_control_file(struct zfcp_adapter *adapter,
retval = -EPERM;
goto free_fsf_req;
}
write_unlock_irqrestore(&adapter->request_queue.queue_lock, lock_flags);
write_unlock_irqrestore(&adapter->req_q.lock, lock_flags);
wait_event(fsf_req->completion_wq,
fsf_req->status & ZFCP_STATUS_FSFREQ_COMPLETED);
@@ -4247,7 +4235,7 @@ struct zfcp_fsf_req *zfcp_fsf_control_file(struct zfcp_adapter *adapter,
free_fsf_req:
zfcp_fsf_req_free(fsf_req);
unlock_queue_lock:
write_unlock_irqrestore(&adapter->request_queue.queue_lock, lock_flags);
write_unlock_irqrestore(&adapter->req_q.lock, lock_flags);
return ERR_PTR(retval);
}
@@ -4261,10 +4249,10 @@ static inline int
zfcp_fsf_req_sbal_check(unsigned long *flags,
struct zfcp_qdio_queue *queue, int needed)
{
write_lock_irqsave(&queue->queue_lock, *flags);
if (likely(atomic_read(&queue->free_count) >= needed))
write_lock_irqsave(&queue->lock, *flags);
if (likely(atomic_read(&queue->count) >= needed))
return 1;
write_unlock_irqrestore(&queue->queue_lock, *flags);
write_unlock_irqrestore(&queue->lock, *flags);
return 0;
}
@@ -4293,24 +4281,24 @@ zfcp_fsf_req_qtcb_init(struct zfcp_fsf_req *fsf_req)
* @req_flags: flags indicating whether to wait for needed SBAL or not
* @lock_flags: lock_flags if queue_lock is taken
* Return: 0 on success, otherwise -EIO, or -ERESTARTSYS
* Locks: lock adapter->request_queue->queue_lock on success
* Locks: lock adapter->req_q->lock on success
*/
static int
zfcp_fsf_req_sbal_get(struct zfcp_adapter *adapter, int req_flags,
unsigned long *lock_flags)
{
long ret;
struct zfcp_qdio_queue *req_queue = &adapter->request_queue;
struct zfcp_qdio_queue *req_q = &adapter->req_q;
if (unlikely(req_flags & ZFCP_WAIT_FOR_SBAL)) {
ret = wait_event_interruptible_timeout(adapter->request_wq,
zfcp_fsf_req_sbal_check(lock_flags, req_queue, 1),
zfcp_fsf_req_sbal_check(lock_flags, req_q, 1),
ZFCP_SBAL_TIMEOUT);
if (ret < 0)
return ret;
if (!ret)
return -EIO;
} else if (!zfcp_fsf_req_sbal_check(lock_flags, req_queue, 1))
} else if (!zfcp_fsf_req_sbal_check(lock_flags, req_q, 1))
return -EIO;
return 0;
@@ -4340,7 +4328,7 @@ zfcp_fsf_req_create(struct zfcp_adapter *adapter, u32 fsf_cmd, int req_flags,
volatile struct qdio_buffer_element *sbale;
struct zfcp_fsf_req *fsf_req = NULL;
int ret = 0;
struct zfcp_qdio_queue *req_queue = &adapter->request_queue;
struct zfcp_qdio_queue *req_q = &adapter->req_q;
/* allocate new FSF request */
fsf_req = zfcp_fsf_req_alloc(pool, req_flags);
@@ -4377,7 +4365,7 @@ zfcp_fsf_req_create(struct zfcp_adapter *adapter, u32 fsf_cmd, int req_flags,
*/
if (!atomic_test_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status)) {
write_unlock_irqrestore(&req_queue->queue_lock, *lock_flags);
write_unlock_irqrestore(&req_q->lock, *lock_flags);
ret = -EIO;
goto failed_sbals;
}
@@ -4387,15 +4375,15 @@ zfcp_fsf_req_create(struct zfcp_adapter *adapter, u32 fsf_cmd, int req_flags,
fsf_req->qtcb->prefix.req_seq_no = adapter->fsf_req_seq_no;
}
fsf_req->sbal_number = 1;
fsf_req->sbal_first = req_queue->free_index;
fsf_req->sbal_last = req_queue->free_index;
fsf_req->sbal_first = req_q->first;
fsf_req->sbal_last = req_q->first;
fsf_req->sbale_curr = 1;
if (likely(req_flags & ZFCP_REQ_AUTO_CLEANUP)) {
fsf_req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
}
sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_last, 0);
sbale = zfcp_qdio_sbale_req(fsf_req);
/* setup common SBALE fields */
sbale[0].addr = (void *) fsf_req->req_id;
@@ -4416,7 +4404,7 @@ zfcp_fsf_req_create(struct zfcp_adapter *adapter, u32 fsf_cmd, int req_flags,
fsf_req = NULL;
failed_fsf_req:
write_lock_irqsave(&req_queue->queue_lock, *lock_flags);
write_lock_irqsave(&req_q->lock, *lock_flags);
success:
*fsf_req_p = fsf_req;
return ret;
@@ -4433,18 +4421,17 @@ zfcp_fsf_req_create(struct zfcp_adapter *adapter, u32 fsf_cmd, int req_flags,
static int zfcp_fsf_req_send(struct zfcp_fsf_req *fsf_req)
{
struct zfcp_adapter *adapter;
struct zfcp_qdio_queue *req_queue;
struct zfcp_qdio_queue *req_q;
volatile struct qdio_buffer_element *sbale;
int inc_seq_no;
int new_distance_from_int;
int retval = 0;
adapter = fsf_req->adapter;
req_queue = &adapter->request_queue,
req_q = &adapter->req_q;
/* FIXME(debug): remove it later */
sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_first, 0);
sbale = zfcp_qdio_sbale_req(fsf_req);
ZFCP_LOG_DEBUG("SBALE0 flags=0x%x\n", sbale[0].flags);
ZFCP_LOG_TRACE("HEX DUMP OF SBALE1 PAYLOAD:\n");
ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_TRACE, (char *) sbale[1].addr,
@@ -4457,52 +4444,24 @@ static int zfcp_fsf_req_send(struct zfcp_fsf_req *fsf_req)
inc_seq_no = (fsf_req->qtcb != NULL);
ZFCP_LOG_TRACE("request queue of adapter %s: "
"next free SBAL is %i, %i free SBALs\n",
zfcp_get_busid_by_adapter(adapter),
req_queue->free_index,
atomic_read(&req_queue->free_count));
ZFCP_LOG_DEBUG("calling do_QDIO adapter %s, flags=0x%x, queue_no=%i, "
"index_in_queue=%i, count=%i, buffers=%p\n",
zfcp_get_busid_by_adapter(adapter),
QDIO_FLAG_SYNC_OUTPUT,
0, fsf_req->sbal_first, fsf_req->sbal_number,
&req_queue->buffer[fsf_req->sbal_first]);
/*
* adjust the number of free SBALs in request queue as well as
* position of first one
*/
atomic_sub(fsf_req->sbal_number, &req_queue->free_count);
ZFCP_LOG_TRACE("free_count=%d\n", atomic_read(&req_queue->free_count));
req_queue->free_index += fsf_req->sbal_number; /* increase */
req_queue->free_index %= QDIO_MAX_BUFFERS_PER_Q; /* wrap if needed */
new_distance_from_int = zfcp_qdio_determine_pci(req_queue, fsf_req);
fsf_req->issued = get_clock();
retval = do_QDIO(adapter->ccw_device,
QDIO_FLAG_SYNC_OUTPUT,
0, fsf_req->sbal_first, fsf_req->sbal_number, NULL);
retval = zfcp_qdio_send(fsf_req);
if (unlikely(retval)) {
/* Queues are down..... */
retval = -EIO;
del_timer(&fsf_req->timer);
spin_lock(&adapter->req_list_lock);
zfcp_reqlist_remove(adapter, fsf_req);
spin_unlock(&adapter->req_list_lock);
/* undo changes in request queue made for this request */
zfcp_qdio_zero_sbals(req_queue->buffer,
fsf_req->sbal_first, fsf_req->sbal_number);
atomic_add(fsf_req->sbal_number, &req_queue->free_count);
req_queue->free_index -= fsf_req->sbal_number;
req_queue->free_index += QDIO_MAX_BUFFERS_PER_Q;
req_queue->free_index %= QDIO_MAX_BUFFERS_PER_Q; /* wrap */
atomic_add(fsf_req->sbal_number, &req_q->count);
req_q->first -= fsf_req->sbal_number;
req_q->first += QDIO_MAX_BUFFERS_PER_Q;
req_q->first %= QDIO_MAX_BUFFERS_PER_Q;
zfcp_erp_adapter_reopen(adapter, 0, 116, fsf_req);
retval = -EIO;
} else {
req_queue->distance_from_int = new_distance_from_int;
/*
* increase FSF sequence counter -
* this must only be done for request successfully enqueued to
@@ -4514,9 +4473,6 @@ static int zfcp_fsf_req_send(struct zfcp_fsf_req *fsf_req)
/* Don't increase for unsolicited status */
if (inc_seq_no)
adapter->fsf_req_seq_no++;
/* count FSF requests pending */
atomic_inc(&adapter->reqs_active);
}
return retval;
}
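The queue accounting and the do_QDIO() call that vanished from zfcp_fsf_req_send() above now sit behind zfcp_qdio_send() in the qdio file. Judging from the removed code (and from the rollback of req_q->count and req_q->first the error path still performs), the new function presumably works roughly like this sketch; ZFCP_QDIO_PCI_INTERVAL is assumed to have moved into the qdio file together with the old zfcp_qdio_determine_pci() logic:

int zfcp_qdio_send(struct zfcp_fsf_req *fsf_req)
{
	struct zfcp_adapter *adapter = fsf_req->adapter;
	struct zfcp_qdio_queue *req_q = &adapter->req_q;
	volatile struct qdio_buffer_element *sbale;
	int first = fsf_req->sbal_first;
	int count = fsf_req->sbal_number;
	int retval, pci, pci_batch;

	/* consume the SBALs up front; on error the caller rolls this back */
	atomic_sub(count, &req_q->count);
	req_q->first += count;
	req_q->first %= QDIO_MAX_BUFFERS_PER_Q;

	/* request a PCI interrupt once enough SBALs went out without one */
	pci_batch = req_q->pci_batch + count;
	if (pci_batch >= ZFCP_QDIO_PCI_INTERVAL) {
		pci_batch %= ZFCP_QDIO_PCI_INTERVAL;
		pci = (first + count - pci_batch - 1) % QDIO_MAX_BUFFERS_PER_Q;
		sbale = &req_q->sbal[pci]->element[0];
		sbale->flags |= SBAL_FLAGS0_PCI;
	}

	retval = do_QDIO(adapter->ccw_device, QDIO_FLAG_SYNC_OUTPUT, 0,
			 first, count, NULL);
	if (retval) {
		/* wipe the prepared SBALs; the queue counters are restored
		 * by the caller's error path shown above */
		zfcp_qdio_zero_sbals(req_q->sbal, first, count);
		return retval;
	}

	req_q->pci_batch = pci_batch;
	return 0;
}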

File diff suppressed because it is too large.