1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * zfcp device driver
4  *
5  * Implementation of FSF commands.
6  *
7  * Copyright IBM Corp. 2002, 2020
8  */
9 
10 #define KMSG_COMPONENT "zfcp"
11 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
12 
13 #include <linux/blktrace_api.h>
14 #include <linux/jiffies.h>
15 #include <linux/types.h>
16 #include <linux/slab.h>
17 #include <scsi/fc/fc_els.h>
18 #include "zfcp_ext.h"
19 #include "zfcp_fc.h"
20 #include "zfcp_dbf.h"
21 #include "zfcp_qdio.h"
22 #include "zfcp_reqlist.h"
23 #include "zfcp_diag.h"
24 
/* timeout for FSF requests sent during scsi_eh: abort or FCP TMF */
#define ZFCP_FSF_SCSI_ER_TIMEOUT (10*HZ)
/* timeout for: exchange config/port data outside ERP, or open/close WKA port */
#define ZFCP_FSF_REQUEST_TIMEOUT (60*HZ)

/* slab cache for QTCBs allocated outside the per-adapter mempools */
struct kmem_cache *zfcp_fsf_qtcb_cache;

/* when true (the default), shut the FCP device down once the channel reports
 * bit errors in excess of its threshold; tunable at runtime (mode 0600) */
static bool ber_stop = true;
module_param(ber_stop, bool, 0600);
MODULE_PARM_DESC(ber_stop,
		 "Shuts down FCP devices for FCP channels that report a bit-error count in excess of its threshold (default on)");
36 
zfcp_fsf_request_timeout_handler(struct timer_list * t)37 static void zfcp_fsf_request_timeout_handler(struct timer_list *t)
38 {
39 	struct zfcp_fsf_req *fsf_req = from_timer(fsf_req, t, timer);
40 	struct zfcp_adapter *adapter = fsf_req->adapter;
41 
42 	zfcp_qdio_siosl(adapter);
43 	zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED,
44 				"fsrth_1");
45 }
46 
zfcp_fsf_start_timer(struct zfcp_fsf_req * fsf_req,unsigned long timeout)47 static void zfcp_fsf_start_timer(struct zfcp_fsf_req *fsf_req,
48 				 unsigned long timeout)
49 {
50 	fsf_req->timer.function = zfcp_fsf_request_timeout_handler;
51 	fsf_req->timer.expires = jiffies + timeout;
52 	add_timer(&fsf_req->timer);
53 }
54 
zfcp_fsf_start_erp_timer(struct zfcp_fsf_req * fsf_req)55 static void zfcp_fsf_start_erp_timer(struct zfcp_fsf_req *fsf_req)
56 {
57 	BUG_ON(!fsf_req->erp_action);
58 	fsf_req->timer.function = zfcp_erp_timeout_handler;
59 	fsf_req->timer.expires = jiffies + 30 * HZ;
60 	add_timer(&fsf_req->timer);
61 }
62 
/* association between FSF command and FSF QTCB type;
 * indexed by FSF_QTCB_* command code, the value is written into
 * qtcb->prefix.qtcb_type when the request is created */
static u32 fsf_qtcb_type[] = {
	[FSF_QTCB_FCP_CMND] =             FSF_IO_COMMAND,
	[FSF_QTCB_ABORT_FCP_CMND] =       FSF_SUPPORT_COMMAND,
	[FSF_QTCB_OPEN_PORT_WITH_DID] =   FSF_SUPPORT_COMMAND,
	[FSF_QTCB_OPEN_LUN] =             FSF_SUPPORT_COMMAND,
	[FSF_QTCB_CLOSE_LUN] =            FSF_SUPPORT_COMMAND,
	[FSF_QTCB_CLOSE_PORT] =           FSF_SUPPORT_COMMAND,
	[FSF_QTCB_CLOSE_PHYSICAL_PORT] =  FSF_SUPPORT_COMMAND,
	[FSF_QTCB_SEND_ELS] =             FSF_SUPPORT_COMMAND,
	[FSF_QTCB_SEND_GENERIC] =         FSF_SUPPORT_COMMAND,
	[FSF_QTCB_EXCHANGE_CONFIG_DATA] = FSF_CONFIG_COMMAND,
	[FSF_QTCB_EXCHANGE_PORT_DATA] =   FSF_PORT_COMMAND,
	[FSF_QTCB_DOWNLOAD_CONTROL_FILE] = FSF_SUPPORT_COMMAND,
	[FSF_QTCB_UPLOAD_CONTROL_FILE] =  FSF_SUPPORT_COMMAND
};
79 
zfcp_fsf_class_not_supp(struct zfcp_fsf_req * req)80 static void zfcp_fsf_class_not_supp(struct zfcp_fsf_req *req)
81 {
82 	dev_err(&req->adapter->ccw_device->dev, "FCP device not "
83 		"operational because of an unsupported FC class\n");
84 	zfcp_erp_adapter_shutdown(req->adapter, 0, "fscns_1");
85 	req->status |= ZFCP_STATUS_FSFREQ_ERROR;
86 }
87 
88 /**
89  * zfcp_fsf_req_free - free memory used by fsf request
90  * @req: pointer to struct zfcp_fsf_req
91  */
zfcp_fsf_req_free(struct zfcp_fsf_req * req)92 void zfcp_fsf_req_free(struct zfcp_fsf_req *req)
93 {
94 	if (likely(req->pool)) {
95 		if (likely(!zfcp_fsf_req_is_status_read_buffer(req)))
96 			mempool_free(req->qtcb, req->adapter->pool.qtcb_pool);
97 		mempool_free(req, req->pool);
98 		return;
99 	}
100 
101 	if (likely(!zfcp_fsf_req_is_status_read_buffer(req)))
102 		kmem_cache_free(zfcp_fsf_qtcb_cache, req->qtcb);
103 	kfree(req);
104 }
105 
zfcp_fsf_status_read_port_closed(struct zfcp_fsf_req * req)106 static void zfcp_fsf_status_read_port_closed(struct zfcp_fsf_req *req)
107 {
108 	unsigned long flags;
109 	struct fsf_status_read_buffer *sr_buf = req->data;
110 	struct zfcp_adapter *adapter = req->adapter;
111 	struct zfcp_port *port;
112 	int d_id = ntoh24(sr_buf->d_id);
113 
114 	read_lock_irqsave(&adapter->port_list_lock, flags);
115 	list_for_each_entry(port, &adapter->port_list, list)
116 		if (port->d_id == d_id) {
117 			zfcp_erp_port_reopen(port, 0, "fssrpc1");
118 			break;
119 		}
120 	read_unlock_irqrestore(&adapter->port_list_lock, flags);
121 }
122 
zfcp_fsf_fc_host_link_down(struct zfcp_adapter * adapter)123 void zfcp_fsf_fc_host_link_down(struct zfcp_adapter *adapter)
124 {
125 	struct Scsi_Host *shost = adapter->scsi_host;
126 
127 	adapter->hydra_version = 0;
128 	adapter->peer_wwpn = 0;
129 	adapter->peer_wwnn = 0;
130 	adapter->peer_d_id = 0;
131 
132 	/* if there is no shost yet, we have nothing to zero-out */
133 	if (shost == NULL)
134 		return;
135 
136 	fc_host_port_id(shost) = 0;
137 	fc_host_fabric_name(shost) = 0;
138 	fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
139 	fc_host_port_type(shost) = FC_PORTTYPE_UNKNOWN;
140 	snprintf(fc_host_model(shost), FC_SYMBOLIC_NAME_SIZE, "0x%04x", 0);
141 	memset(fc_host_active_fc4s(shost), 0, FC_FC4_LIST_SIZE);
142 }
143 
/*
 * zfcp_fsf_link_down_info_eval - handle a reported link-down condition
 * @req: FSF request that carried the link-down indication
 * @link_down: optional error details from the FCP channel, may be NULL
 *
 * Only the first indication is evaluated: once the adapter is marked
 * LINK_UNPLUGGED, further calls return immediately. Otherwise the rports
 * are scheduled for blocking, the fc_host attributes are cleared, a
 * message matching the reported error code is logged (if details were
 * supplied), and the adapter is finally marked ERP_FAILED.
 */
static void zfcp_fsf_link_down_info_eval(struct zfcp_fsf_req *req,
					 struct fsf_link_down_info *link_down)
{
	struct zfcp_adapter *adapter = req->adapter;

	/* only the first link-down event per unplugged phase is evaluated */
	if (atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED)
		return;

	atomic_or(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, &adapter->status);

	zfcp_scsi_schedule_rports_block(adapter);

	zfcp_fsf_fc_host_link_down(adapter);

	/* no details available: skip the per-error-code messages */
	if (!link_down)
		goto out;

	switch (link_down->error_code) {
	case FSF_PSQ_LINK_NO_LIGHT:
		dev_warn(&req->adapter->ccw_device->dev,
			 "There is no light signal from the local "
			 "fibre channel cable\n");
		break;
	case FSF_PSQ_LINK_WRAP_PLUG:
		dev_warn(&req->adapter->ccw_device->dev,
			 "There is a wrap plug instead of a fibre "
			 "channel cable\n");
		break;
	case FSF_PSQ_LINK_NO_FCP:
		dev_warn(&req->adapter->ccw_device->dev,
			 "The adjacent fibre channel node does not "
			 "support FCP\n");
		break;
	case FSF_PSQ_LINK_FIRMWARE_UPDATE:
		dev_warn(&req->adapter->ccw_device->dev,
			 "The FCP device is suspended because of a "
			 "firmware update\n");
		break;
	case FSF_PSQ_LINK_INVALID_WWPN:
		dev_warn(&req->adapter->ccw_device->dev,
			 "The FCP device detected a WWPN that is "
			 "duplicate or not valid\n");
		break;
	case FSF_PSQ_LINK_NO_NPIV_SUPPORT:
		dev_warn(&req->adapter->ccw_device->dev,
			 "The fibre channel fabric does not support NPIV\n");
		break;
	case FSF_PSQ_LINK_NO_FCP_RESOURCES:
		dev_warn(&req->adapter->ccw_device->dev,
			 "The FCP adapter cannot support more NPIV ports\n");
		break;
	case FSF_PSQ_LINK_NO_FABRIC_RESOURCES:
		dev_warn(&req->adapter->ccw_device->dev,
			 "The adjacent switch cannot support "
			 "more NPIV ports\n");
		break;
	case FSF_PSQ_LINK_FABRIC_LOGIN_UNABLE:
		dev_warn(&req->adapter->ccw_device->dev,
			 "The FCP adapter could not log in to the "
			 "fibre channel fabric\n");
		break;
	case FSF_PSQ_LINK_WWPN_ASSIGNMENT_CORRUPTED:
		dev_warn(&req->adapter->ccw_device->dev,
			 "The WWPN assignment file on the FCP adapter "
			 "has been damaged\n");
		break;
	case FSF_PSQ_LINK_MODE_TABLE_CURRUPTED:
		dev_warn(&req->adapter->ccw_device->dev,
			 "The mode table on the FCP adapter "
			 "has been damaged\n");
		break;
	case FSF_PSQ_LINK_NO_WWPN_ASSIGNMENT:
		dev_warn(&req->adapter->ccw_device->dev,
			 "All NPIV ports on the FCP adapter have "
			 "been assigned\n");
		break;
	default:
		dev_warn(&req->adapter->ccw_device->dev,
			 "The link between the FCP adapter and "
			 "the FC fabric is down\n");
	}
out:
	zfcp_erp_set_adapter_status(adapter, ZFCP_STATUS_COMMON_ERP_FAILED);
}
228 
zfcp_fsf_status_read_link_down(struct zfcp_fsf_req * req)229 static void zfcp_fsf_status_read_link_down(struct zfcp_fsf_req *req)
230 {
231 	struct fsf_status_read_buffer *sr_buf = req->data;
232 	struct fsf_link_down_info *ldi =
233 		(struct fsf_link_down_info *) &sr_buf->payload;
234 
235 	switch (sr_buf->status_subtype) {
236 	case FSF_STATUS_READ_SUB_NO_PHYSICAL_LINK:
237 	case FSF_STATUS_READ_SUB_FDISC_FAILED:
238 		zfcp_fsf_link_down_info_eval(req, ldi);
239 		break;
240 	case FSF_STATUS_READ_SUB_FIRMWARE_UPDATE:
241 		zfcp_fsf_link_down_info_eval(req, NULL);
242 	}
243 }
244 
245 static void
zfcp_fsf_status_read_version_change(struct zfcp_adapter * adapter,struct fsf_status_read_buffer * sr_buf)246 zfcp_fsf_status_read_version_change(struct zfcp_adapter *adapter,
247 				    struct fsf_status_read_buffer *sr_buf)
248 {
249 	if (sr_buf->status_subtype == FSF_STATUS_READ_SUB_LIC_CHANGE) {
250 		u32 version = sr_buf->payload.version_change.current_version;
251 
252 		WRITE_ONCE(adapter->fsf_lic_version, version);
253 		snprintf(fc_host_firmware_version(adapter->scsi_host),
254 			 FC_VERSION_STRING_SIZE, "%#08x", version);
255 	}
256 }
257 
/*
 * Completion handler for unsolicited status read buffers: dispatch on the
 * status type, then release the buffer page and the request. Dismissed
 * requests are only traced and freed.
 */
static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req)
{
	struct zfcp_adapter *adapter = req->adapter;
	struct fsf_status_read_buffer *sr_buf = req->data;

	if (req->status & ZFCP_STATUS_FSFREQ_DISMISSED) {
		/* dismissed: trace, free the buffer page and the request */
		zfcp_dbf_hba_fsf_uss("fssrh_1", req);
		mempool_free(virt_to_page(sr_buf), adapter->pool.sr_data);
		zfcp_fsf_req_free(req);
		return;
	}

	zfcp_dbf_hba_fsf_uss("fssrh_4", req);

	switch (sr_buf->status_type) {
	case FSF_STATUS_READ_PORT_CLOSED:
		zfcp_fsf_status_read_port_closed(req);
		break;
	case FSF_STATUS_READ_INCOMING_ELS:
		zfcp_fc_incoming_els(req);
		break;
	case FSF_STATUS_READ_SENSE_DATA_AVAIL:
		/* nothing to do beyond the trace above */
		break;
	case FSF_STATUS_READ_BIT_ERROR_THRESHOLD:
		zfcp_dbf_hba_bit_err("fssrh_3", req);
		/* ber_stop module parameter decides shutdown vs. warn-only */
		if (ber_stop) {
			dev_warn(&adapter->ccw_device->dev,
				 "All paths over this FCP device are disused because of excessive bit errors\n");
			zfcp_erp_adapter_shutdown(adapter, 0, "fssrh_b");
		} else {
			dev_warn(&adapter->ccw_device->dev,
				 "The error threshold for checksum statistics has been exceeded\n");
		}
		break;
	case FSF_STATUS_READ_LINK_DOWN:
		zfcp_fsf_status_read_link_down(req);
		zfcp_fc_enqueue_event(adapter, FCH_EVT_LINKDOWN, 0);
		break;
	case FSF_STATUS_READ_LINK_UP:
		dev_info(&adapter->ccw_device->dev,
			 "The local link has been restored\n");
		/* All ports should be marked as ready to run again */
		zfcp_erp_set_adapter_status(adapter,
					    ZFCP_STATUS_COMMON_RUNNING);
		zfcp_erp_adapter_reopen(adapter,
					ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
					ZFCP_STATUS_COMMON_ERP_FAILED,
					"fssrh_2");
		zfcp_fc_enqueue_event(adapter, FCH_EVT_LINKUP, 0);

		break;
	case FSF_STATUS_READ_NOTIFICATION_LOST:
		if (sr_buf->status_subtype & FSF_STATUS_READ_SUB_INCOMING_ELS)
			zfcp_fc_conditional_port_scan(adapter);
		if (sr_buf->status_subtype & FSF_STATUS_READ_SUB_VERSION_CHANGE)
			queue_work(adapter->work_queue,
				   &adapter->version_change_lost_work);
		break;
	case FSF_STATUS_READ_FEATURE_UPDATE_ALERT:
		adapter->adapter_features = sr_buf->payload.word[0];
		break;
	case FSF_STATUS_READ_VERSION_CHANGE:
		zfcp_fsf_status_read_version_change(adapter, sr_buf);
		break;
	}

	mempool_free(virt_to_page(sr_buf), adapter->pool.sr_data);
	zfcp_fsf_req_free(req);

	/* one status read buffer consumed; stat_work presumably re-issues
	 * status reads to replenish - confirm against zfcp_aux/stat_work */
	atomic_inc(&adapter->stat_miss);
	queue_work(adapter->work_queue, &adapter->stat_work);
}
330 
/*
 * Evaluate the FSF status qualifier of a completed request.
 * The qualifiers in the first group return early and leave the request
 * unflagged; every other path falls through to mark the request failed.
 */
static void zfcp_fsf_fsfstatus_qual_eval(struct zfcp_fsf_req *req)
{
	switch (req->qtcb->header.fsf_status_qual.word[0]) {
	case FSF_SQ_FCP_RSP_AVAILABLE:
	case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
	case FSF_SQ_NO_RETRY_POSSIBLE:
	case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
		return;
	case FSF_SQ_COMMAND_ABORTED:
		break;
	case FSF_SQ_NO_RECOM:
		/* unrecoverable condition: log state and shut the adapter down */
		dev_err(&req->adapter->ccw_device->dev,
			"The FCP adapter reported a problem "
			"that cannot be recovered\n");
		zfcp_qdio_siosl(req->adapter);
		zfcp_erp_adapter_shutdown(req->adapter, 0, "fsfsqe1");
		break;
	}
	/* all non-return stats set FSFREQ_ERROR*/
	req->status |= ZFCP_STATUS_FSFREQ_ERROR;
}
352 
/*
 * Evaluate the FSF status of a completed request, unless it is already
 * flagged as failed. An unknown command shuts the adapter down; an
 * available adapter status is evaluated via its status qualifier.
 */
static void zfcp_fsf_fsfstatus_eval(struct zfcp_fsf_req *req)
{
	if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR))
		return;

	switch (req->qtcb->header.fsf_status) {
	case FSF_UNKNOWN_COMMAND:
		dev_err(&req->adapter->ccw_device->dev,
			"The FCP adapter does not recognize the command 0x%x\n",
			req->qtcb->header.fsf_command);
		zfcp_erp_adapter_shutdown(req->adapter, 0, "fsfse_1");
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	case FSF_ADAPTER_STATUS_AVAILABLE:
		zfcp_fsf_fsfstatus_qual_eval(req);
		break;
	}
}
371 
/*
 * Evaluate the QTCB protocol status of a completed request. GOOD and
 * FSF_STATUS_PRESENTED return without flagging an error; every other
 * case triggers the matching recovery action and falls through to mark
 * the request failed at the bottom.
 */
static void zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *req)
{
	struct zfcp_adapter *adapter = req->adapter;
	struct fsf_qtcb *qtcb = req->qtcb;
	union fsf_prot_status_qual *psq = &qtcb->prefix.prot_status_qual;

	zfcp_dbf_hba_fsf_response(req);

	if (req->status & ZFCP_STATUS_FSFREQ_DISMISSED) {
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		return;
	}

	switch (qtcb->prefix.prot_status) {
	case FSF_PROT_GOOD:
	case FSF_PROT_FSF_STATUS_PRESENTED:
		return;
	case FSF_PROT_QTCB_VERSION_ERROR:
		dev_err(&adapter->ccw_device->dev,
			"QTCB version 0x%x not supported by FCP adapter "
			"(0x%x to 0x%x)\n", FSF_QTCB_CURRENT_VERSION,
			psq->word[0], psq->word[1]);
		zfcp_erp_adapter_shutdown(adapter, 0, "fspse_1");
		break;
	case FSF_PROT_ERROR_STATE:
	case FSF_PROT_SEQ_NUMB_ERROR:
		zfcp_erp_adapter_reopen(adapter, 0, "fspse_2");
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	case FSF_PROT_UNSUPP_QTCB_TYPE:
		dev_err(&adapter->ccw_device->dev,
			"The QTCB type is not supported by the FCP adapter\n");
		zfcp_erp_adapter_shutdown(adapter, 0, "fspse_3");
		break;
	case FSF_PROT_HOST_CONNECTION_INITIALIZING:
		atomic_or(ZFCP_STATUS_ADAPTER_HOST_CON_INIT,
				&adapter->status);
		break;
	case FSF_PROT_DUPLICATE_REQUEST_ID:
		dev_err(&adapter->ccw_device->dev,
			"0x%Lx is an ambiguous request identifier\n",
			(unsigned long long)qtcb->bottom.support.req_handle);
		zfcp_erp_adapter_shutdown(adapter, 0, "fspse_4");
		break;
	case FSF_PROT_LINK_DOWN:
		zfcp_fsf_link_down_info_eval(req, &psq->link_down_info);
		/* go through reopen to flush pending requests */
		zfcp_erp_adapter_reopen(adapter, 0, "fspse_6");
		break;
	case FSF_PROT_REEST_QUEUE:
		/* All ports should be marked as ready to run again */
		zfcp_erp_set_adapter_status(adapter,
					    ZFCP_STATUS_COMMON_RUNNING);
		zfcp_erp_adapter_reopen(adapter,
					ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
					ZFCP_STATUS_COMMON_ERP_FAILED,
					"fspse_8");
		break;
	default:
		dev_err(&adapter->ccw_device->dev,
			"0x%x is not a valid transfer protocol status\n",
			qtcb->prefix.prot_status);
		zfcp_qdio_siosl(adapter);
		zfcp_erp_adapter_shutdown(adapter, 0, "fspse_9");
	}
	/* every case that did not return above flags the request as failed */
	req->status |= ZFCP_STATUS_FSFREQ_ERROR;
}
439 
/**
 * zfcp_fsf_req_complete - process completion of a FSF request
 * @req: The FSF request that has been completed.
 *
 * When a request has been completed either from the FCP adapter,
 * or it has been dismissed due to a queue shutdown, this function
 * is called to process the completion status and trigger further
 * events related to the FSF request.
 * Caller must ensure that the request has been removed from
 * adapter->req_list, to protect against concurrent modification
 * by zfcp_erp_strategy_check_fsfreq().
 */
static void zfcp_fsf_req_complete(struct zfcp_fsf_req *req)
{
	struct zfcp_erp_action *erp_action;

	/* status read buffers take a separate path and are handled there */
	if (unlikely(zfcp_fsf_req_is_status_read_buffer(req))) {
		zfcp_fsf_status_read_handler(req);
		return;
	}

	/* make sure the request timeout can no longer fire */
	del_timer_sync(&req->timer);
	zfcp_fsf_protstatus_eval(req);
	zfcp_fsf_fsfstatus_eval(req);
	req->handler(req);

	/* wake an ERP action waiting on this request, if any */
	erp_action = req->erp_action;
	if (erp_action)
		zfcp_erp_notify(erp_action, 0);

	/* CLEANUP requests free themselves; otherwise signal the waiter */
	if (likely(req->status & ZFCP_STATUS_FSFREQ_CLEANUP))
		zfcp_fsf_req_free(req);
	else
		complete(&req->completion);
}
475 
/**
 * zfcp_fsf_req_dismiss_all - dismiss all fsf requests
 * @adapter: pointer to struct zfcp_adapter
 *
 * Never ever call this without shutting down the adapter first.
 * Otherwise the adapter would continue using and corrupting s390 storage.
 * Included BUG_ON() call to ensure this is done.
 * ERP is supposed to be the only user of this function.
 */
void zfcp_fsf_req_dismiss_all(struct zfcp_adapter *adapter)
{
	struct zfcp_fsf_req *req, *tmp;
	LIST_HEAD(remove_queue);

	BUG_ON(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP);
	/* atomically take over all pending requests from the request list */
	zfcp_reqlist_move(adapter->req_list, &remove_queue);

	list_for_each_entry_safe(req, tmp, &remove_queue, list) {
		list_del(&req->list);
		/* mark as dismissed before completing so evaluation sees it */
		req->status |= ZFCP_STATUS_FSFREQ_DISMISSED;
		zfcp_fsf_req_complete(req);
	}
}
499 
/* port speed capability bits as reported by the FCP channel; translated
 * into FC transport class FC_PORTSPEED_* bits by
 * zfcp_fsf_convert_portspeed() */
#define ZFCP_FSF_PORTSPEED_1GBIT	(1 <<  0)
#define ZFCP_FSF_PORTSPEED_2GBIT	(1 <<  1)
#define ZFCP_FSF_PORTSPEED_4GBIT	(1 <<  2)
#define ZFCP_FSF_PORTSPEED_10GBIT	(1 <<  3)
#define ZFCP_FSF_PORTSPEED_8GBIT	(1 <<  4)
#define ZFCP_FSF_PORTSPEED_16GBIT	(1 <<  5)
#define ZFCP_FSF_PORTSPEED_32GBIT	(1 <<  6)
#define ZFCP_FSF_PORTSPEED_64GBIT	(1 <<  7)
#define ZFCP_FSF_PORTSPEED_128GBIT	(1 <<  8)
#define ZFCP_FSF_PORTSPEED_NOT_NEGOTIATED (1 << 15)
510 
/*
 * zfcp_fsf_convert_portspeed - map ZFCP_FSF_PORTSPEED_* capability bits
 * to the equivalent FC transport class FC_PORTSPEED_* bits
 * @fsf_speed: speed bit mask as reported by the FCP channel
 *
 * Bits without a known mapping are dropped.
 */
u32 zfcp_fsf_convert_portspeed(u32 fsf_speed)
{
	static const struct {
		u32 fsf;
		u32 fdmi;
	} speed_map[] = {
		{ ZFCP_FSF_PORTSPEED_1GBIT,	FC_PORTSPEED_1GBIT },
		{ ZFCP_FSF_PORTSPEED_2GBIT,	FC_PORTSPEED_2GBIT },
		{ ZFCP_FSF_PORTSPEED_4GBIT,	FC_PORTSPEED_4GBIT },
		{ ZFCP_FSF_PORTSPEED_10GBIT,	FC_PORTSPEED_10GBIT },
		{ ZFCP_FSF_PORTSPEED_8GBIT,	FC_PORTSPEED_8GBIT },
		{ ZFCP_FSF_PORTSPEED_16GBIT,	FC_PORTSPEED_16GBIT },
		{ ZFCP_FSF_PORTSPEED_32GBIT,	FC_PORTSPEED_32GBIT },
		{ ZFCP_FSF_PORTSPEED_64GBIT,	FC_PORTSPEED_64GBIT },
		{ ZFCP_FSF_PORTSPEED_128GBIT,	FC_PORTSPEED_128GBIT },
		{ ZFCP_FSF_PORTSPEED_NOT_NEGOTIATED,
						FC_PORTSPEED_NOT_NEGOTIATED },
	};
	u32 fdmi_speed = 0;
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(speed_map); i++)
		if (fsf_speed & speed_map[i].fsf)
			fdmi_speed |= speed_map[i].fdmi;

	return fdmi_speed;
}
536 
/*
 * Evaluate the config-data bottom of an exchange config response: copy it
 * to the requester's buffer (if any), derive timer tick and status read
 * buffer settings, and - only if the data is complete - record the FC
 * topology. Returns 0 on success, -EIO for an unsupported topology
 * (after shutting the adapter down).
 */
static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req)
{
	struct fsf_qtcb_bottom_config *bottom = &req->qtcb->bottom.config;
	struct zfcp_adapter *adapter = req->adapter;
	struct fc_els_flogi *plogi;

	/* adjust pointers for missing command code */
	plogi = (struct fc_els_flogi *) ((u8 *)&bottom->plogi_payload
					- sizeof(u32));

	if (req->data)
		memcpy(req->data, bottom, sizeof(*bottom));

	adapter->timer_ticks = bottom->timer_interval & ZFCP_FSF_TIMER_INT_MASK;
	/* enforce the recommended minimum number of status read buffers */
	adapter->stat_read_buf_num = max(bottom->status_read_buf_num,
					 (u16)FSF_STATUS_READS_RECOM);

	/* no error return above here, otherwise must fix call chains */
	/* do not evaluate invalid fields */
	if (req->qtcb->header.fsf_status == FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE)
		return 0;

	adapter->hydra_version = bottom->adapter_type;

	switch (bottom->fc_topology) {
	case FSF_TOPO_P2P:
		/* point-to-point: peer identity comes from the PLOGI payload */
		adapter->peer_d_id = ntoh24(bottom->peer_d_id);
		adapter->peer_wwpn = be64_to_cpu(plogi->fl_wwpn);
		adapter->peer_wwnn = be64_to_cpu(plogi->fl_wwnn);
		break;
	case FSF_TOPO_FABRIC:
		break;
	case FSF_TOPO_AL:
	default:
		dev_err(&adapter->ccw_device->dev,
			"Unknown or unsupported arbitrated loop "
			"fibre channel topology detected\n");
		zfcp_erp_adapter_shutdown(adapter, 0, "fsece_1");
		return -EIO;
	}

	return 0;
}
580 
/*
 * Completion handler for exchange config data: cache adapter features,
 * update diagnostics and the Scsi_Host attributes, evaluate the bottom
 * payload, and verify the QTCB version/size against the channel's limits.
 */
static void zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *req)
{
	struct zfcp_adapter *adapter = req->adapter;
	struct zfcp_diag_header *const diag_hdr =
		&adapter->diagnostics->config_data.header;
	struct fsf_qtcb *qtcb = req->qtcb;
	struct fsf_qtcb_bottom_config *bottom = &qtcb->bottom.config;

	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
		return;

	adapter->fsf_lic_version = bottom->lic_version;
	adapter->adapter_features = bottom->adapter_features;
	adapter->connection_features = bottom->connection_features;
	/* peer identity is (re-)derived below for complete P2P data only */
	adapter->peer_wwpn = 0;
	adapter->peer_wwnn = 0;
	adapter->peer_d_id = 0;

	switch (qtcb->header.fsf_status) {
	case FSF_GOOD:
		/*
		 * usually we wait with an update till the cache is too old,
		 * but because we have the data available, update it anyway
		 */
		zfcp_diag_update_xdata(diag_hdr, bottom, false);

		zfcp_scsi_shost_update_config_data(adapter, bottom, false);
		if (zfcp_fsf_exchange_config_evaluate(req))
			return;

		if (bottom->max_qtcb_size < sizeof(struct fsf_qtcb)) {
			dev_err(&adapter->ccw_device->dev,
				"FCP adapter maximum QTCB size (%d bytes) "
				"is too small\n",
				bottom->max_qtcb_size);
			zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh1");
			return;
		}
		atomic_or(ZFCP_STATUS_ADAPTER_XCONFIG_OK,
				&adapter->status);
		break;
	case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE:
		zfcp_diag_update_xdata(diag_hdr, bottom, true);
		req->status |= ZFCP_STATUS_FSFREQ_XDATAINCOMPLETE;

		/* avoids adapter shutdown to be able to recognize
		 * events such as LINK UP */
		atomic_or(ZFCP_STATUS_ADAPTER_XCONFIG_OK,
				&adapter->status);
		zfcp_fsf_link_down_info_eval(req,
			&qtcb->header.fsf_status_qual.link_down_info);

		zfcp_scsi_shost_update_config_data(adapter, bottom, true);
		if (zfcp_fsf_exchange_config_evaluate(req))
			return;
		break;
	default:
		zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh3");
		return;
	}

	if (adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT)
		adapter->hardware_version = bottom->hardware_version;

	/* the supported QTCB version range must include our version */
	if (FSF_QTCB_CURRENT_VERSION < bottom->low_qtcb_version) {
		dev_err(&adapter->ccw_device->dev,
			"The FCP adapter only supports newer "
			"control block versions\n");
		zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh4");
		return;
	}
	if (FSF_QTCB_CURRENT_VERSION > bottom->high_qtcb_version) {
		dev_err(&adapter->ccw_device->dev,
			"The FCP adapter only supports older "
			"control block versions\n");
		zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh5");
	}
}
659 
660 /*
661  * Mapping of FC Endpoint Security flag masks to mnemonics
662  *
663  * NOTE: Update macro ZFCP_FSF_MAX_FC_SECURITY_MNEMONIC_LENGTH when making any
664  *       changes.
665  */
static const struct {
	u32	mask;	/* one or more FSF_FC_SECURITY_* flag bits */
	char	*name;	/* mnemonic printed for any of those bits */
} zfcp_fsf_fc_security_mnemonics[] = {
	{ FSF_FC_SECURITY_AUTH,		"Authentication" },
	/* both encryption flavors share the single "Encryption" mnemonic */
	{ FSF_FC_SECURITY_ENC_FCSP2 |
	  FSF_FC_SECURITY_ENC_ERAS,	"Encryption" },
};

/* maximum strlen(zfcp_fsf_fc_security_mnemonics[...].name) + 1 */
#define ZFCP_FSF_MAX_FC_SECURITY_MNEMONIC_LENGTH 15
677 
678 /**
679  * zfcp_fsf_scnprint_fc_security() - translate FC Endpoint Security flags into
680  *                                   mnemonics and place in a buffer
681  * @buf        : the buffer to place the translated FC Endpoint Security flag(s)
682  *               into
683  * @size       : the size of the buffer, including the trailing null space
684  * @fc_security: one or more FC Endpoint Security flags, or zero
685  * @fmt        : specifies whether a list or a single item is to be put into the
686  *               buffer
687  *
688  * The Fibre Channel (FC) Endpoint Security flags are translated into mnemonics.
689  * If the FC Endpoint Security flags are zero "none" is placed into the buffer.
690  *
691  * With ZFCP_FSF_PRINT_FMT_LIST the mnemonics are placed as a list separated by
692  * a comma followed by a space into the buffer. If one or more FC Endpoint
693  * Security flags cannot be translated into a mnemonic, as they are undefined
694  * in zfcp_fsf_fc_security_mnemonics, their bitwise ORed value in hexadecimal
695  * representation is placed into the buffer.
696  *
697  * With ZFCP_FSF_PRINT_FMT_SINGLEITEM only one single mnemonic is placed into
698  * the buffer. If the FC Endpoint Security flag cannot be translated, as it is
699  * undefined in zfcp_fsf_fc_security_mnemonics, its value in hexadecimal
700  * representation is placed into the buffer. If more than one FC Endpoint
701  * Security flag was specified, their value in hexadecimal representation is
702  * placed into the buffer. The macro ZFCP_FSF_MAX_FC_SECURITY_MNEMONIC_LENGTH
703  * can be used to define a buffer that is large enough to hold one mnemonic.
704  *
705  * Return: The number of characters written into buf not including the trailing
706  *         '\0'. If size is == 0 the function returns 0.
707  */
zfcp_fsf_scnprint_fc_security(char * buf,size_t size,u32 fc_security,enum zfcp_fsf_print_fmt fmt)708 ssize_t zfcp_fsf_scnprint_fc_security(char *buf, size_t size, u32 fc_security,
709 				      enum zfcp_fsf_print_fmt fmt)
710 {
711 	const char *prefix = "";
712 	ssize_t len = 0;
713 	int i;
714 
715 	if (fc_security == 0)
716 		return scnprintf(buf, size, "none");
717 	if (fmt == ZFCP_FSF_PRINT_FMT_SINGLEITEM && hweight32(fc_security) != 1)
718 		return scnprintf(buf, size, "0x%08x", fc_security);
719 
720 	for (i = 0; i < ARRAY_SIZE(zfcp_fsf_fc_security_mnemonics); i++) {
721 		if (!(fc_security & zfcp_fsf_fc_security_mnemonics[i].mask))
722 			continue;
723 
724 		len += scnprintf(buf + len, size - len, "%s%s", prefix,
725 				 zfcp_fsf_fc_security_mnemonics[i].name);
726 		prefix = ", ";
727 		fc_security &= ~zfcp_fsf_fc_security_mnemonics[i].mask;
728 	}
729 
730 	if (fc_security != 0)
731 		len += scnprintf(buf + len, size - len, "%s0x%08x",
732 				 prefix, fc_security);
733 
734 	return len;
735 }
736 
zfcp_fsf_dbf_adapter_fc_security(struct zfcp_adapter * adapter,struct zfcp_fsf_req * req)737 static void zfcp_fsf_dbf_adapter_fc_security(struct zfcp_adapter *adapter,
738 					     struct zfcp_fsf_req *req)
739 {
740 	if (adapter->fc_security_algorithms ==
741 	    adapter->fc_security_algorithms_old) {
742 		/* no change, no trace */
743 		return;
744 	}
745 
746 	zfcp_dbf_hba_fsf_fces("fsfcesa", req, ZFCP_DBF_INVALID_WWPN,
747 			      adapter->fc_security_algorithms_old,
748 			      adapter->fc_security_algorithms);
749 
750 	adapter->fc_security_algorithms_old = adapter->fc_security_algorithms;
751 }
752 
zfcp_fsf_exchange_port_evaluate(struct zfcp_fsf_req * req)753 static void zfcp_fsf_exchange_port_evaluate(struct zfcp_fsf_req *req)
754 {
755 	struct zfcp_adapter *adapter = req->adapter;
756 	struct fsf_qtcb_bottom_port *bottom = &req->qtcb->bottom.port;
757 
758 	if (req->data)
759 		memcpy(req->data, bottom, sizeof(*bottom));
760 
761 	if (adapter->adapter_features & FSF_FEATURE_FC_SECURITY)
762 		adapter->fc_security_algorithms =
763 			bottom->fc_security_algorithms;
764 	else
765 		adapter->fc_security_algorithms = 0;
766 	zfcp_fsf_dbf_adapter_fc_security(adapter, req);
767 }
768 
/*
 * Completion handler for exchange port data: update diagnostics and the
 * Scsi_Host attributes, then evaluate the bottom payload. Incomplete data
 * additionally flags the request and evaluates the link-down details.
 */
static void zfcp_fsf_exchange_port_data_handler(struct zfcp_fsf_req *req)
{
	struct zfcp_diag_header *const diag_hdr =
		&req->adapter->diagnostics->port_data.header;
	struct fsf_qtcb *qtcb = req->qtcb;
	struct fsf_qtcb_bottom_port *bottom = &qtcb->bottom.port;

	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
		return;

	switch (qtcb->header.fsf_status) {
	case FSF_GOOD:
		/*
		 * usually we wait with an update till the cache is too old,
		 * but because we have the data available, update it anyway
		 */
		zfcp_diag_update_xdata(diag_hdr, bottom, false);

		zfcp_scsi_shost_update_port_data(req->adapter, bottom);
		zfcp_fsf_exchange_port_evaluate(req);
		break;
	case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE:
		zfcp_diag_update_xdata(diag_hdr, bottom, true);
		req->status |= ZFCP_STATUS_FSFREQ_XDATAINCOMPLETE;

		zfcp_fsf_link_down_info_eval(req,
			&qtcb->header.fsf_status_qual.link_down_info);

		zfcp_scsi_shost_update_port_data(req->adapter, bottom);
		zfcp_fsf_exchange_port_evaluate(req);
		break;
	}
}
802 
/* allocate a zeroed FSF request, from @pool when given, otherwise from
 * the generic allocator; returns NULL on allocation failure */
static struct zfcp_fsf_req *zfcp_fsf_alloc(mempool_t *pool)
{
	struct zfcp_fsf_req *req;

	req = pool ? mempool_alloc(pool, GFP_ATOMIC)
		   : kmalloc(sizeof(*req), GFP_ATOMIC);
	if (unlikely(!req))
		return NULL;

	memset(req, 0, sizeof(*req));
	req->pool = pool;	/* remembered so the free path matches */
	return req;
}
819 
/* allocate a zeroed QTCB, from @pool when given, otherwise from the
 * dedicated slab cache; returns NULL on allocation failure */
static struct fsf_qtcb *zfcp_fsf_qtcb_alloc(mempool_t *pool)
{
	struct fsf_qtcb *qtcb;

	qtcb = pool ? mempool_alloc(pool, GFP_ATOMIC)
		    : kmem_cache_alloc(zfcp_fsf_qtcb_cache, GFP_ATOMIC);
	if (unlikely(!qtcb))
		return NULL;

	memset(qtcb, 0, sizeof(*qtcb));
	return qtcb;
}
835 
/*
 * Create an FSF request for @fsf_cmd: allocate the request (from @pool if
 * given) and - except for unsolicited status reads - its QTCB, fill in the
 * QTCB prefix/header, and initialize the associated QDIO request.
 *
 * The request id is taken from adapter->req_no (which never hands out 0);
 * the counter itself is only advanced later in zfcp_fsf_req_send().
 *
 * Return: the new request, or ERR_PTR(-ENOMEM) on allocation failure.
 */
static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_qdio *qdio,
						u32 fsf_cmd, u8 sbtype,
						mempool_t *pool)
{
	struct zfcp_adapter *adapter = qdio->adapter;
	struct zfcp_fsf_req *req = zfcp_fsf_alloc(pool);

	if (unlikely(!req))
		return ERR_PTR(-ENOMEM);

	/* skip request id 0 */
	if (adapter->req_no == 0)
		adapter->req_no++;

	timer_setup(&req->timer, NULL, 0);
	init_completion(&req->completion);

	req->adapter = adapter;
	req->req_id = adapter->req_no;

	if (likely(fsf_cmd != FSF_QTCB_UNSOLICITED_STATUS)) {
		/* pooled requests draw their QTCB from the adapter's
		 * qtcb mempool, unpooled ones from the slab cache */
		if (likely(pool))
			req->qtcb = zfcp_fsf_qtcb_alloc(
				adapter->pool.qtcb_pool);
		else
			req->qtcb = zfcp_fsf_qtcb_alloc(NULL);

		if (unlikely(!req->qtcb)) {
			zfcp_fsf_req_free(req);
			return ERR_PTR(-ENOMEM);
		}

		req->qtcb->prefix.req_seq_no = adapter->fsf_req_seq_no;
		req->qtcb->prefix.req_id = req->req_id;
		req->qtcb->prefix.ulp_info = 26;
		req->qtcb->prefix.qtcb_type = fsf_qtcb_type[fsf_cmd];
		req->qtcb->prefix.qtcb_version = FSF_QTCB_CURRENT_VERSION;
		req->qtcb->header.req_handle = req->req_id;
		req->qtcb->header.fsf_command = fsf_cmd;
	}

	zfcp_qdio_req_init(adapter->qdio, &req->qdio_req, req->req_id, sbtype,
			   req->qtcb, sizeof(struct fsf_qtcb));

	return req;
}
881 
/**
 * zfcp_fsf_req_send() - hand a prepared FSF request over to QDIO
 * @req: the fully prepared request to send
 *
 * Adds @req to the adapter's request list and pushes its SBALs onto the
 * QDIO outbound queue.  On QDIO failure the request is removed from the
 * list again, its timer is stopped, and adapter recovery is triggered.
 *
 * Return: 0 on success, -EIO if zfcp_qdio_send() failed.
 */
static int zfcp_fsf_req_send(struct zfcp_fsf_req *req)
{
	const bool is_srb = zfcp_fsf_req_is_status_read_buffer(req);
	struct zfcp_adapter *adapter = req->adapter;
	struct zfcp_qdio *qdio = adapter->qdio;
	int req_id = req->req_id;	/* copied: req may vanish after send */

	zfcp_reqlist_add(adapter->req_list, req);

	req->qdio_req.qdio_outb_usage = atomic_read(&qdio->req_q_free);
	req->issued = get_tod_clock();
	if (zfcp_qdio_send(qdio, &req->qdio_req)) {
		del_timer_sync(&req->timer);
		/* lookup request again, list might have changed */
		zfcp_reqlist_find_rm(adapter->req_list, req_id);
		zfcp_erp_adapter_reopen(adapter, 0, "fsrs__1");
		return -EIO;
	}

	/*
	 * NOTE: DO NOT TOUCH ASYNC req PAST THIS POINT.
	 *	 ONLY TOUCH SYNC req AGAIN ON req->completion.
	 *
	 * The request might complete and be freed concurrently at any point
	 * now. This is not protected by the QDIO-lock (req_q_lock). So any
	 * uncontrolled access after this might result in an use-after-free bug.
	 * Only if the request doesn't have ZFCP_STATUS_FSFREQ_CLEANUP set, and
	 * when it is completed via req->completion, is it safe to use req
	 * again.
	 */

	/* Don't increase for unsolicited status */
	if (!is_srb)
		adapter->fsf_req_seq_no++;
	adapter->req_no++;

	return 0;
}
920 
921 /**
922  * zfcp_fsf_status_read - send status read request
923  * @qdio: pointer to struct zfcp_qdio
924  * Returns: 0 on success, ERROR otherwise
925  */
int zfcp_fsf_status_read(struct zfcp_qdio *qdio)
{
	struct zfcp_adapter *adapter = qdio->adapter;
	struct zfcp_fsf_req *req;
	struct fsf_status_read_buffer *sr_buf;
	struct page *page;
	int retval = -EIO;

	spin_lock_irq(&qdio->req_q_lock);
	if (zfcp_qdio_sbal_get(qdio))
		goto out;

	/* status read requests carry no QTCB (unsolicited status) */
	req = zfcp_fsf_req_create(qdio, FSF_QTCB_UNSOLICITED_STATUS,
				  SBAL_SFLAGS0_TYPE_STATUS,
				  adapter->pool.status_read_req);
	if (IS_ERR(req)) {
		retval = PTR_ERR(req);
		goto out;
	}

	/* buffer the channel fills in with the unsolicited status */
	page = mempool_alloc(adapter->pool.sr_data, GFP_ATOMIC);
	if (!page) {
		retval = -ENOMEM;
		goto failed_buf;
	}
	sr_buf = page_address(page);
	memset(sr_buf, 0, sizeof(*sr_buf));
	req->data = sr_buf;

	zfcp_qdio_fill_next(qdio, &req->qdio_req, sr_buf, sizeof(*sr_buf));
	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);

	retval = zfcp_fsf_req_send(req);
	if (retval)
		goto failed_req_send;
	/* NOTE: DO NOT TOUCH req PAST THIS POINT! */

	goto out;

failed_req_send:
	req->data = NULL;	/* buffer freed below; drop stale pointer */
	mempool_free(virt_to_page(sr_buf), adapter->pool.sr_data);
failed_buf:
	zfcp_dbf_hba_fsf_uss("fssr__1", req);
	zfcp_fsf_req_free(req);
out:
	spin_unlock_irq(&qdio->req_q_lock);
	return retval;
}
975 
zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req * req)976 static void zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *req)
977 {
978 	struct scsi_device *sdev = req->data;
979 	struct zfcp_scsi_dev *zfcp_sdev;
980 	union fsf_status_qual *fsq = &req->qtcb->header.fsf_status_qual;
981 
982 	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
983 		return;
984 
985 	zfcp_sdev = sdev_to_zfcp(sdev);
986 
987 	switch (req->qtcb->header.fsf_status) {
988 	case FSF_PORT_HANDLE_NOT_VALID:
989 		if (fsq->word[0] == fsq->word[1]) {
990 			zfcp_erp_adapter_reopen(zfcp_sdev->port->adapter, 0,
991 						"fsafch1");
992 			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
993 		}
994 		break;
995 	case FSF_LUN_HANDLE_NOT_VALID:
996 		if (fsq->word[0] == fsq->word[1]) {
997 			zfcp_erp_port_reopen(zfcp_sdev->port, 0, "fsafch2");
998 			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
999 		}
1000 		break;
1001 	case FSF_FCP_COMMAND_DOES_NOT_EXIST:
1002 		req->status |= ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED;
1003 		break;
1004 	case FSF_PORT_BOXED:
1005 		zfcp_erp_set_port_status(zfcp_sdev->port,
1006 					 ZFCP_STATUS_COMMON_ACCESS_BOXED);
1007 		zfcp_erp_port_reopen(zfcp_sdev->port,
1008 				     ZFCP_STATUS_COMMON_ERP_FAILED, "fsafch3");
1009 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1010 		break;
1011 	case FSF_LUN_BOXED:
1012 		zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_ACCESS_BOXED);
1013 		zfcp_erp_lun_reopen(sdev, ZFCP_STATUS_COMMON_ERP_FAILED,
1014 				    "fsafch4");
1015 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1016                 break;
1017 	case FSF_ADAPTER_STATUS_AVAILABLE:
1018 		switch (fsq->word[0]) {
1019 		case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
1020 			zfcp_fc_test_link(zfcp_sdev->port);
1021 			fallthrough;
1022 		case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
1023 			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1024 			break;
1025 		}
1026 		break;
1027 	case FSF_GOOD:
1028 		req->status |= ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED;
1029 		break;
1030 	}
1031 }
1032 
1033 /**
1034  * zfcp_fsf_abort_fcp_cmnd - abort running SCSI command
1035  * @scmnd: The SCSI command to abort
1036  * Returns: pointer to struct zfcp_fsf_req
1037  */
1038 
struct zfcp_fsf_req *zfcp_fsf_abort_fcp_cmnd(struct scsi_cmnd *scmnd)
{
	struct zfcp_fsf_req *req = NULL;
	struct scsi_device *sdev = scmnd->device;
	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
	struct zfcp_qdio *qdio = zfcp_sdev->port->adapter->qdio;
	/* id of the request to abort was stashed in host_scribble on send */
	unsigned long old_req_id = (unsigned long) scmnd->host_scribble;

	spin_lock_irq(&qdio->req_q_lock);
	if (zfcp_qdio_sbal_get(qdio))
		goto out;
	req = zfcp_fsf_req_create(qdio, FSF_QTCB_ABORT_FCP_CMND,
				  SBAL_SFLAGS0_TYPE_READ,
				  qdio->adapter->pool.scsi_abort);
	if (IS_ERR(req)) {
		req = NULL;	/* callers only check for NULL, not ERR_PTR */
		goto out;
	}

	/* don't send the abort while the LUN is blocked by recovery */
	if (unlikely(!(atomic_read(&zfcp_sdev->status) &
		       ZFCP_STATUS_COMMON_UNBLOCKED)))
		goto out_error_free;

	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);

	req->data = sdev;
	req->handler = zfcp_fsf_abort_fcp_command_handler;
	req->qtcb->header.lun_handle = zfcp_sdev->lun_handle;
	req->qtcb->header.port_handle = zfcp_sdev->port->handle;
	req->qtcb->bottom.support.req_handle = (u64) old_req_id;

	zfcp_fsf_start_timer(req, ZFCP_FSF_SCSI_ER_TIMEOUT);
	if (!zfcp_fsf_req_send(req)) {
		/* NOTE: DO NOT TOUCH req, UNTIL IT COMPLETES! */
		goto out;
	}

out_error_free:
	zfcp_fsf_req_free(req);
	req = NULL;
out:
	spin_unlock_irq(&qdio->req_q_lock);
	return req;
}
1083 
zfcp_fsf_send_ct_handler(struct zfcp_fsf_req * req)1084 static void zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *req)
1085 {
1086 	struct zfcp_adapter *adapter = req->adapter;
1087 	struct zfcp_fsf_ct_els *ct = req->data;
1088 	struct fsf_qtcb_header *header = &req->qtcb->header;
1089 
1090 	ct->status = -EINVAL;
1091 
1092 	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1093 		goto skip_fsfstatus;
1094 
1095 	switch (header->fsf_status) {
1096         case FSF_GOOD:
1097 		ct->status = 0;
1098 		zfcp_dbf_san_res("fsscth2", req);
1099 		break;
1100         case FSF_SERVICE_CLASS_NOT_SUPPORTED:
1101 		zfcp_fsf_class_not_supp(req);
1102 		break;
1103         case FSF_ADAPTER_STATUS_AVAILABLE:
1104                 switch (header->fsf_status_qual.word[0]){
1105                 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
1106                 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
1107 			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1108 			break;
1109                 }
1110                 break;
1111         case FSF_PORT_BOXED:
1112 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1113 		break;
1114 	case FSF_PORT_HANDLE_NOT_VALID:
1115 		zfcp_erp_adapter_reopen(adapter, 0, "fsscth1");
1116 		fallthrough;
1117 	case FSF_GENERIC_COMMAND_REJECTED:
1118 	case FSF_PAYLOAD_SIZE_MISMATCH:
1119 	case FSF_REQUEST_SIZE_TOO_LARGE:
1120 	case FSF_RESPONSE_SIZE_TOO_LARGE:
1121 	case FSF_SBAL_MISMATCH:
1122 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1123 		break;
1124 	}
1125 
1126 skip_fsfstatus:
1127 	if (ct->handler)
1128 		ct->handler(ct->handler_data);
1129 }
1130 
/*
 * Place a CT/ELS request and its response buffer into a single,
 * unchained SBAL (request entry first, then response entry) and mark
 * the last SBALE.  Callers must have verified both scatterlists fit
 * one SBALE each (see zfcp_qdio_sg_one_sbale() in the caller).
 */
static void zfcp_fsf_setup_ct_els_unchained(struct zfcp_qdio *qdio,
					    struct zfcp_qdio_req *q_req,
					    struct scatterlist *sg_req,
					    struct scatterlist *sg_resp)
{
	zfcp_qdio_fill_next(qdio, q_req, sg_virt(sg_req), sg_req->length);
	zfcp_qdio_fill_next(qdio, q_req, sg_virt(sg_resp), sg_resp->length);
	zfcp_qdio_set_sbale_last(qdio, q_req);
}
1140 
/*
 * Map the request and response scatterlists of a CT/ELS request onto
 * SBALs, choosing between multi-buffer mode, a single unchained SBAL,
 * or chained SBALs (if the adapter supports them), and record the
 * resulting buffer lengths in the QTCB.
 *
 * Returns 0 on success, -EIO if mapping fails, or -EOPNOTSUPP when
 * chained SBALs would be needed but the adapter lacks the feature.
 */
static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req,
				       struct scatterlist *sg_req,
				       struct scatterlist *sg_resp)
{
	struct zfcp_adapter *adapter = req->adapter;
	struct zfcp_qdio *qdio = adapter->qdio;
	struct fsf_qtcb *qtcb = req->qtcb;
	u32 feat = adapter->adapter_features;

	if (zfcp_adapter_multi_buffer_active(adapter)) {
		if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_req))
			return -EIO;
		qtcb->bottom.support.req_buf_length =
			zfcp_qdio_real_bytes(sg_req);
		if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_resp))
			return -EIO;
		qtcb->bottom.support.resp_buf_length =
			zfcp_qdio_real_bytes(sg_resp);

		zfcp_qdio_set_data_div(qdio, &req->qdio_req, sg_nents(sg_req));
		zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
		zfcp_qdio_set_scount(qdio, &req->qdio_req);
		return 0;
	}

	/* use single, unchained SBAL if it can hold the request */
	if (zfcp_qdio_sg_one_sbale(sg_req) && zfcp_qdio_sg_one_sbale(sg_resp)) {
		zfcp_fsf_setup_ct_els_unchained(qdio, &req->qdio_req,
						sg_req, sg_resp);
		return 0;
	}

	if (!(feat & FSF_FEATURE_ELS_CT_CHAINED_SBALS))
		return -EOPNOTSUPP;

	if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_req))
		return -EIO;

	qtcb->bottom.support.req_buf_length = zfcp_qdio_real_bytes(sg_req);

	/* terminate the request part, response starts in a fresh SBAL */
	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
	zfcp_qdio_skip_to_last_sbale(qdio, &req->qdio_req);

	if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_resp))
		return -EIO;

	qtcb->bottom.support.resp_buf_length = zfcp_qdio_real_bytes(sg_resp);

	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);

	return 0;
}
1193 
zfcp_fsf_setup_ct_els(struct zfcp_fsf_req * req,struct scatterlist * sg_req,struct scatterlist * sg_resp,unsigned int timeout)1194 static int zfcp_fsf_setup_ct_els(struct zfcp_fsf_req *req,
1195 				 struct scatterlist *sg_req,
1196 				 struct scatterlist *sg_resp,
1197 				 unsigned int timeout)
1198 {
1199 	int ret;
1200 
1201 	ret = zfcp_fsf_setup_ct_els_sbals(req, sg_req, sg_resp);
1202 	if (ret)
1203 		return ret;
1204 
1205 	/* common settings for ct/gs and els requests */
1206 	if (timeout > 255)
1207 		timeout = 255; /* max value accepted by hardware */
1208 	req->qtcb->bottom.support.service_class = FSF_CLASS_3;
1209 	req->qtcb->bottom.support.timeout = timeout;
1210 	zfcp_fsf_start_timer(req, (timeout + 10) * HZ);
1211 
1212 	return 0;
1213 }
1214 
1215 /**
1216  * zfcp_fsf_send_ct - initiate a Generic Service request (FC-GS)
1217  * @wka_port: pointer to zfcp WKA port to send CT/GS to
1218  * @ct: pointer to struct zfcp_send_ct with data for request
1219  * @pool: if non-null this mempool is used to allocate struct zfcp_fsf_req
1220  * @timeout: timeout that hardware should use, and a later software timeout
1221  */
int zfcp_fsf_send_ct(struct zfcp_fc_wka_port *wka_port,
		     struct zfcp_fsf_ct_els *ct, mempool_t *pool,
		     unsigned int timeout)
{
	struct zfcp_qdio *qdio = wka_port->adapter->qdio;
	struct zfcp_fsf_req *req;
	int ret = -EIO;

	spin_lock_irq(&qdio->req_q_lock);
	if (zfcp_qdio_sbal_get(qdio))
		goto out;

	req = zfcp_fsf_req_create(qdio, FSF_QTCB_SEND_GENERIC,
				  SBAL_SFLAGS0_TYPE_WRITE_READ, pool);

	if (IS_ERR(req)) {
		ret = PTR_ERR(req);
		goto out;
	}

	/* request is freed by the response path, not by the caller */
	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
	ret = zfcp_fsf_setup_ct_els(req, ct->req, ct->resp, timeout);
	if (ret)
		goto failed_send;

	req->handler = zfcp_fsf_send_ct_handler;
	req->qtcb->header.port_handle = wka_port->handle;
	ct->d_id = wka_port->d_id;
	req->data = ct;

	zfcp_dbf_san_req("fssct_1", req, wka_port->d_id);

	ret = zfcp_fsf_req_send(req);
	if (ret)
		goto failed_send;
	/* NOTE: DO NOT TOUCH req PAST THIS POINT! */

	goto out;

failed_send:
	zfcp_fsf_req_free(req);
out:
	spin_unlock_irq(&qdio->req_q_lock);
	return ret;
}
1267 
zfcp_fsf_send_els_handler(struct zfcp_fsf_req * req)1268 static void zfcp_fsf_send_els_handler(struct zfcp_fsf_req *req)
1269 {
1270 	struct zfcp_fsf_ct_els *send_els = req->data;
1271 	struct fsf_qtcb_header *header = &req->qtcb->header;
1272 
1273 	send_els->status = -EINVAL;
1274 
1275 	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1276 		goto skip_fsfstatus;
1277 
1278 	switch (header->fsf_status) {
1279 	case FSF_GOOD:
1280 		send_els->status = 0;
1281 		zfcp_dbf_san_res("fsselh1", req);
1282 		break;
1283 	case FSF_SERVICE_CLASS_NOT_SUPPORTED:
1284 		zfcp_fsf_class_not_supp(req);
1285 		break;
1286 	case FSF_ADAPTER_STATUS_AVAILABLE:
1287 		switch (header->fsf_status_qual.word[0]){
1288 		case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
1289 		case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
1290 		case FSF_SQ_RETRY_IF_POSSIBLE:
1291 			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1292 			break;
1293 		}
1294 		break;
1295 	case FSF_ELS_COMMAND_REJECTED:
1296 	case FSF_PAYLOAD_SIZE_MISMATCH:
1297 	case FSF_REQUEST_SIZE_TOO_LARGE:
1298 	case FSF_RESPONSE_SIZE_TOO_LARGE:
1299 		break;
1300 	case FSF_SBAL_MISMATCH:
1301 		/* should never occur, avoided in zfcp_fsf_send_els */
1302 		fallthrough;
1303 	default:
1304 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1305 		break;
1306 	}
1307 skip_fsfstatus:
1308 	if (send_els->handler)
1309 		send_els->handler(send_els->handler_data);
1310 }
1311 
1312 /**
1313  * zfcp_fsf_send_els - initiate an ELS command (FC-FS)
1314  * @adapter: pointer to zfcp adapter
1315  * @d_id: N_Port_ID to send ELS to
1316  * @els: pointer to struct zfcp_send_els with data for the command
1317  * @timeout: timeout that hardware should use, and a later software timeout
1318  */
int zfcp_fsf_send_els(struct zfcp_adapter *adapter, u32 d_id,
		      struct zfcp_fsf_ct_els *els, unsigned int timeout)
{
	struct zfcp_fsf_req *req;
	struct zfcp_qdio *qdio = adapter->qdio;
	int ret = -EIO;

	spin_lock_irq(&qdio->req_q_lock);
	if (zfcp_qdio_sbal_get(qdio))
		goto out;

	req = zfcp_fsf_req_create(qdio, FSF_QTCB_SEND_ELS,
				  SBAL_SFLAGS0_TYPE_WRITE_READ, NULL);

	if (IS_ERR(req)) {
		ret = PTR_ERR(req);
		goto out;
	}

	/* request is freed by the response path, not by the caller */
	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;

	/* limit SBAL usage so FSF_SBAL_MISMATCH cannot occur (see handler) */
	if (!zfcp_adapter_multi_buffer_active(adapter))
		zfcp_qdio_sbal_limit(qdio, &req->qdio_req, 2);

	ret = zfcp_fsf_setup_ct_els(req, els->req, els->resp, timeout);

	if (ret)
		goto failed_send;

	hton24(req->qtcb->bottom.support.d_id, d_id);
	req->handler = zfcp_fsf_send_els_handler;
	els->d_id = d_id;
	req->data = els;

	zfcp_dbf_san_req("fssels1", req, d_id);

	ret = zfcp_fsf_req_send(req);
	if (ret)
		goto failed_send;
	/* NOTE: DO NOT TOUCH req PAST THIS POINT! */

	goto out;

failed_send:
	zfcp_fsf_req_free(req);
out:
	spin_unlock_irq(&qdio->req_q_lock);
	return ret;
}
1368 
/**
 * zfcp_fsf_exchange_config_data - request FCP channel configuration (via ERP)
 * @erp_action: ERP action on whose behalf the request is sent
 * Returns: 0 on success, error otherwise
 */
int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *erp_action)
{
	struct zfcp_fsf_req *req;
	struct zfcp_qdio *qdio = erp_action->adapter->qdio;
	int retval = -EIO;

	spin_lock_irq(&qdio->req_q_lock);
	if (zfcp_qdio_sbal_get(qdio))
		goto out;

	req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_CONFIG_DATA,
				  SBAL_SFLAGS0_TYPE_READ,
				  qdio->adapter->pool.erp_req);

	if (IS_ERR(req)) {
		retval = PTR_ERR(req);
		goto out;
	}

	/* request is freed by the response path, not by the caller */
	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);

	/* advertise the optional features this driver can handle */
	req->qtcb->bottom.config.feature_selection =
			FSF_FEATURE_NOTIFICATION_LOST |
			FSF_FEATURE_UPDATE_ALERT |
			FSF_FEATURE_REQUEST_SFP_DATA |
			FSF_FEATURE_FC_SECURITY;
	req->erp_action = erp_action;
	req->handler = zfcp_fsf_exchange_config_data_handler;
	erp_action->fsf_req_id = req->req_id;

	zfcp_fsf_start_erp_timer(req);
	retval = zfcp_fsf_req_send(req);
	if (retval) {
		zfcp_fsf_req_free(req);
		erp_action->fsf_req_id = 0;
	}
	/* NOTE: DO NOT TOUCH req PAST THIS POINT! */
out:
	spin_unlock_irq(&qdio->req_q_lock);
	return retval;
}
1411 
1412 
1413 /**
1414  * zfcp_fsf_exchange_config_data_sync() - Request information about FCP channel.
1415  * @qdio: pointer to the QDIO-Queue to use for sending the command.
1416  * @data: pointer to the QTCB-Bottom for storing the result of the command,
1417  *	  might be %NULL.
1418  *
1419  * Returns:
1420  * * 0		- Exchange Config Data was successful, @data is complete
1421  * * -EIO	- Exchange Config Data was not successful, @data is invalid
1422  * * -EAGAIN	- @data contains incomplete data
1423  * * -ENOMEM	- Some memory allocation failed along the way
1424  */
int zfcp_fsf_exchange_config_data_sync(struct zfcp_qdio *qdio,
				       struct fsf_qtcb_bottom_config *data)
{
	struct zfcp_fsf_req *req = NULL;
	int retval = -EIO;

	spin_lock_irq(&qdio->req_q_lock);
	if (zfcp_qdio_sbal_get(qdio))
		goto out_unlock;

	req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_CONFIG_DATA,
				  SBAL_SFLAGS0_TYPE_READ, NULL);

	if (IS_ERR(req)) {
		retval = PTR_ERR(req);
		goto out_unlock;
	}

	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
	req->handler = zfcp_fsf_exchange_config_data_handler;

	/* advertise the optional features this driver can handle */
	req->qtcb->bottom.config.feature_selection =
			FSF_FEATURE_NOTIFICATION_LOST |
			FSF_FEATURE_UPDATE_ALERT |
			FSF_FEATURE_REQUEST_SFP_DATA |
			FSF_FEATURE_FC_SECURITY;

	if (data)
		req->data = data;	/* handler copies the result here */

	zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
	retval = zfcp_fsf_req_send(req);
	spin_unlock_irq(&qdio->req_q_lock);

	if (!retval) {
		/* NOTE: ONLY TOUCH SYNC req AGAIN ON req->completion. */
		wait_for_completion(&req->completion);

		if (req->status &
		    (ZFCP_STATUS_FSFREQ_ERROR | ZFCP_STATUS_FSFREQ_DISMISSED))
			retval = -EIO;
		else if (req->status & ZFCP_STATUS_FSFREQ_XDATAINCOMPLETE)
			retval = -EAGAIN;
	}

	zfcp_fsf_req_free(req);
	return retval;

out_unlock:
	spin_unlock_irq(&qdio->req_q_lock);
	return retval;
}
1477 
1478 /**
1479  * zfcp_fsf_exchange_port_data - request information about local port
1480  * @erp_action: ERP action for the adapter for which port data is requested
1481  * Returns: 0 on success, error otherwise
1482  */
int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action)
{
	struct zfcp_qdio *qdio = erp_action->adapter->qdio;
	struct zfcp_fsf_req *req;
	int retval = -EIO;

	/* command only exists on adapters with HBA-API management support */
	if (!(qdio->adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT))
		return -EOPNOTSUPP;

	spin_lock_irq(&qdio->req_q_lock);
	if (zfcp_qdio_sbal_get(qdio))
		goto out;

	req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_PORT_DATA,
				  SBAL_SFLAGS0_TYPE_READ,
				  qdio->adapter->pool.erp_req);

	if (IS_ERR(req)) {
		retval = PTR_ERR(req);
		goto out;
	}

	/* request is freed by the response path, not by the caller */
	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);

	req->handler = zfcp_fsf_exchange_port_data_handler;
	req->erp_action = erp_action;
	erp_action->fsf_req_id = req->req_id;

	zfcp_fsf_start_erp_timer(req);
	retval = zfcp_fsf_req_send(req);
	if (retval) {
		zfcp_fsf_req_free(req);
		erp_action->fsf_req_id = 0;
	}
	/* NOTE: DO NOT TOUCH req PAST THIS POINT! */
out:
	spin_unlock_irq(&qdio->req_q_lock);
	return retval;
}
1523 
1524 /**
1525  * zfcp_fsf_exchange_port_data_sync() - Request information about local port.
1526  * @qdio: pointer to the QDIO-Queue to use for sending the command.
1527  * @data: pointer to the QTCB-Bottom for storing the result of the command,
1528  *	  might be %NULL.
1529  *
1530  * Returns:
1531  * * 0		- Exchange Port Data was successful, @data is complete
1532  * * -EIO	- Exchange Port Data was not successful, @data is invalid
1533  * * -EAGAIN	- @data contains incomplete data
1534  * * -ENOMEM	- Some memory allocation failed along the way
1535  * * -EOPNOTSUPP	- This operation is not supported
1536  */
int zfcp_fsf_exchange_port_data_sync(struct zfcp_qdio *qdio,
				     struct fsf_qtcb_bottom_port *data)
{
	struct zfcp_fsf_req *req = NULL;
	int retval = -EIO;

	/* command only exists on adapters with HBA-API management support */
	if (!(qdio->adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT))
		return -EOPNOTSUPP;

	spin_lock_irq(&qdio->req_q_lock);
	if (zfcp_qdio_sbal_get(qdio))
		goto out_unlock;

	req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_PORT_DATA,
				  SBAL_SFLAGS0_TYPE_READ, NULL);

	if (IS_ERR(req)) {
		retval = PTR_ERR(req);
		goto out_unlock;
	}

	if (data)
		req->data = data;	/* handler copies the result here */

	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);

	req->handler = zfcp_fsf_exchange_port_data_handler;
	zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
	retval = zfcp_fsf_req_send(req);
	spin_unlock_irq(&qdio->req_q_lock);

	if (!retval) {
		/* NOTE: ONLY TOUCH SYNC req AGAIN ON req->completion. */
		wait_for_completion(&req->completion);

		if (req->status &
		    (ZFCP_STATUS_FSFREQ_ERROR | ZFCP_STATUS_FSFREQ_DISMISSED))
			retval = -EIO;
		else if (req->status & ZFCP_STATUS_FSFREQ_XDATAINCOMPLETE)
			retval = -EAGAIN;
	}

	zfcp_fsf_req_free(req);
	return retval;

out_unlock:
	spin_unlock_irq(&qdio->req_q_lock);
	return retval;
}
1586 
zfcp_fsf_log_port_fc_security(struct zfcp_port * port,struct zfcp_fsf_req * req)1587 static void zfcp_fsf_log_port_fc_security(struct zfcp_port *port,
1588 					  struct zfcp_fsf_req *req)
1589 {
1590 	char mnemonic_old[ZFCP_FSF_MAX_FC_SECURITY_MNEMONIC_LENGTH];
1591 	char mnemonic_new[ZFCP_FSF_MAX_FC_SECURITY_MNEMONIC_LENGTH];
1592 
1593 	if (port->connection_info == port->connection_info_old) {
1594 		/* no change, no log nor trace */
1595 		return;
1596 	}
1597 
1598 	zfcp_dbf_hba_fsf_fces("fsfcesp", req, port->wwpn,
1599 			      port->connection_info_old,
1600 			      port->connection_info);
1601 
1602 	zfcp_fsf_scnprint_fc_security(mnemonic_old, sizeof(mnemonic_old),
1603 				      port->connection_info_old,
1604 				      ZFCP_FSF_PRINT_FMT_SINGLEITEM);
1605 	zfcp_fsf_scnprint_fc_security(mnemonic_new, sizeof(mnemonic_new),
1606 				      port->connection_info,
1607 				      ZFCP_FSF_PRINT_FMT_SINGLEITEM);
1608 
1609 	if (strncmp(mnemonic_old, mnemonic_new,
1610 		    ZFCP_FSF_MAX_FC_SECURITY_MNEMONIC_LENGTH) == 0) {
1611 		/* no change in string representation, no log */
1612 		goto out;
1613 	}
1614 
1615 	if (port->connection_info_old == 0) {
1616 		/* activation */
1617 		dev_info(&port->adapter->ccw_device->dev,
1618 			 "FC Endpoint Security of connection to remote port 0x%16llx enabled: %s\n",
1619 			 port->wwpn, mnemonic_new);
1620 	} else if (port->connection_info == 0) {
1621 		/* deactivation */
1622 		dev_warn(&port->adapter->ccw_device->dev,
1623 			 "FC Endpoint Security of connection to remote port 0x%16llx disabled: was %s\n",
1624 			 port->wwpn, mnemonic_old);
1625 	} else {
1626 		/* change */
1627 		dev_warn(&port->adapter->ccw_device->dev,
1628 			 "FC Endpoint Security of connection to remote port 0x%16llx changed: from %s to %s\n",
1629 			 port->wwpn, mnemonic_old, mnemonic_new);
1630 	}
1631 
1632 out:
1633 	port->connection_info_old = port->connection_info;
1634 }
1635 
/*
 * Log a rate-limited warning for an FC Endpoint Security error reported
 * in FSF status qualifier word 0 (@fsf_sqw0) for the remote port @wwpn.
 * Known codes come from Open Port and Send FCP command responses; any
 * unknown code is logged verbatim.
 */
static void zfcp_fsf_log_security_error(const struct device *dev, u32 fsf_sqw0,
					u64 wwpn)
{
	switch (fsf_sqw0) {

	/*
	 * Open Port command error codes
	 */

	case FSF_SQ_SECURITY_REQUIRED:
		dev_warn_ratelimited(dev,
				     "FC Endpoint Security error: FC security is required but not supported or configured on remote port 0x%016llx\n",
				     wwpn);
		break;
	case FSF_SQ_SECURITY_TIMEOUT:
		dev_warn_ratelimited(dev,
				     "FC Endpoint Security error: a timeout prevented opening remote port 0x%016llx\n",
				     wwpn);
		break;
	case FSF_SQ_SECURITY_KM_UNAVAILABLE:
		dev_warn_ratelimited(dev,
				     "FC Endpoint Security error: opening remote port 0x%016llx failed because local and external key manager cannot communicate\n",
				     wwpn);
		break;
	case FSF_SQ_SECURITY_RKM_UNAVAILABLE:
		dev_warn_ratelimited(dev,
				     "FC Endpoint Security error: opening remote port 0x%016llx failed because it cannot communicate with the external key manager\n",
				     wwpn);
		break;
	case FSF_SQ_SECURITY_AUTH_FAILURE:
		dev_warn_ratelimited(dev,
				     "FC Endpoint Security error: the device could not verify the identity of remote port 0x%016llx\n",
				     wwpn);
		break;

	/*
	 * Send FCP command error codes
	 */

	case FSF_SQ_SECURITY_ENC_FAILURE:
		dev_warn_ratelimited(dev,
				     "FC Endpoint Security error: FC connection to remote port 0x%016llx closed because encryption broke down\n",
				     wwpn);
		break;

	/*
	 * Unknown error codes
	 */

	default:
		dev_warn_ratelimited(dev,
				     "FC Endpoint Security error: the device issued an unknown error code 0x%08x related to the FC connection to remote port 0x%016llx\n",
				     fsf_sqw0, wwpn);
	}
}
1691 
/*
 * Completion handler for an open port request: on success store the
 * port handle, update port status flags, log FC security transitions
 * and evaluate the PLOGI payload; on failure trigger the matching
 * recovery and flag the request.  Always drops the port reference
 * taken in zfcp_fsf_open_port().
 */
static void zfcp_fsf_open_port_handler(struct zfcp_fsf_req *req)
{
	struct zfcp_adapter *adapter = req->adapter;
	struct zfcp_port *port = req->data;
	struct fsf_qtcb_header *header = &req->qtcb->header;
	struct fsf_qtcb_bottom_support *bottom = &req->qtcb->bottom.support;
	struct fc_els_flogi *plogi;

	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
		goto out;

	switch (header->fsf_status) {
	case FSF_PORT_ALREADY_OPEN:
		break;
	case FSF_MAXIMUM_NUMBER_OF_PORTS_EXCEEDED:
		dev_warn(&adapter->ccw_device->dev,
			 "Not enough FCP adapter resources to open "
			 "remote port 0x%016Lx\n",
			 (unsigned long long)port->wwpn);
		zfcp_erp_set_port_status(port,
					 ZFCP_STATUS_COMMON_ERP_FAILED);
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	case FSF_SECURITY_ERROR:
		zfcp_fsf_log_security_error(&req->adapter->ccw_device->dev,
					    header->fsf_status_qual.word[0],
					    port->wwpn);
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	case FSF_ADAPTER_STATUS_AVAILABLE:
		switch (header->fsf_status_qual.word[0]) {
		case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
			/* no zfcp_fc_test_link() with failed open port */
			fallthrough;
		case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
		case FSF_SQ_NO_RETRY_POSSIBLE:
			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
			break;
		}
		break;
	case FSF_GOOD:
		port->handle = header->port_handle;
		if (adapter->adapter_features & FSF_FEATURE_FC_SECURITY)
			port->connection_info = bottom->connection_info;
		else
			port->connection_info = 0;
		zfcp_fsf_log_port_fc_security(port, req);
		atomic_or(ZFCP_STATUS_COMMON_OPEN |
				ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
		atomic_andnot(ZFCP_STATUS_COMMON_ACCESS_BOXED,
		                  &port->status);
		/* check whether D_ID has changed during open */
		/*
		 * FIXME: This check is not airtight, as the FCP channel does
		 * not monitor closures of target port connections caused on
		 * the remote side. Thus, they might miss out on invalidating
		 * locally cached WWPNs (and other N_Port parameters) of gone
		 * target ports. So, our heroic attempt to make things safe
		 * could be undermined by 'open port' response data tagged with
		 * obsolete WWPNs. Another reason to monitor potential
		 * connection closures ourself at least (by interpreting
		 * incoming ELS' and unsolicited status). It just crosses my
		 * mind that one should be able to cross-check by means of
		 * another GID_PN straight after a port has been opened.
		 * Alternately, an ADISC/PDISC ELS should suffice, as well.
		 */
		plogi = (struct fc_els_flogi *) bottom->els;
		if (bottom->els1_length >= FSF_PLOGI_MIN_LEN)
			zfcp_fc_plogi_evaluate(port, plogi);
		break;
	case FSF_UNKNOWN_OP_SUBTYPE:
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	}

out:
	put_device(&port->dev);	/* balances get_device() in open_port */
}
1770 
1771 /**
1772  * zfcp_fsf_open_port - create and send open port request
1773  * @erp_action: pointer to struct zfcp_erp_action
1774  * Returns: 0 on success, error otherwise
1775  */
int zfcp_fsf_open_port(struct zfcp_erp_action *erp_action)
{
	struct zfcp_qdio *qdio = erp_action->adapter->qdio;
	struct zfcp_port *port = erp_action->port;
	struct zfcp_fsf_req *req;
	int retval = -EIO;

	spin_lock_irq(&qdio->req_q_lock);
	if (zfcp_qdio_sbal_get(qdio))
		goto out;

	req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_PORT_WITH_DID,
				  SBAL_SFLAGS0_TYPE_READ,
				  qdio->adapter->pool.erp_req);

	if (IS_ERR(req)) {
		retval = PTR_ERR(req);
		goto out;
	}

	/* request is freed by the response path, not by the caller */
	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);

	req->handler = zfcp_fsf_open_port_handler;
	hton24(req->qtcb->bottom.support.d_id, port->d_id);
	req->data = port;
	req->erp_action = erp_action;
	erp_action->fsf_req_id = req->req_id;
	/* hold the port until the handler runs; released there */
	get_device(&port->dev);

	zfcp_fsf_start_erp_timer(req);
	retval = zfcp_fsf_req_send(req);
	if (retval) {
		zfcp_fsf_req_free(req);
		erp_action->fsf_req_id = 0;
		put_device(&port->dev);	/* handler won't run; drop ref here */
	}
	/* NOTE: DO NOT TOUCH req PAST THIS POINT! */
out:
	spin_unlock_irq(&qdio->req_q_lock);
	return retval;
}
1818 
/* Evaluate the FSF response of a "close port" request. */
static void zfcp_fsf_close_port_handler(struct zfcp_fsf_req *req)
{
	struct zfcp_port *port = req->data;

	/* request already failed at a lower layer; nothing to evaluate */
	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
		return;

	switch (req->qtcb->header.fsf_status) {
	case FSF_PORT_HANDLE_NOT_VALID:
		/* stale port handle: recover by reopening the adapter */
		zfcp_erp_adapter_reopen(port->adapter, 0, "fscph_1");
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	case FSF_ADAPTER_STATUS_AVAILABLE:
		break;
	case FSF_GOOD:
		/* port successfully closed: clear its OPEN status bit */
		zfcp_erp_clear_port_status(port, ZFCP_STATUS_COMMON_OPEN);
		break;
	}
}
1838 
1839 /**
1840  * zfcp_fsf_close_port - create and send close port request
1841  * @erp_action: pointer to struct zfcp_erp_action
1842  * Returns: 0 on success, error otherwise
1843  */
int zfcp_fsf_close_port(struct zfcp_erp_action *erp_action)
{
	struct zfcp_qdio *qdio = erp_action->adapter->qdio;
	struct zfcp_fsf_req *req;
	int retval = -EIO;

	spin_lock_irq(&qdio->req_q_lock);
	/* need a free SBAL in the request queue before building the request */
	if (zfcp_qdio_sbal_get(qdio))
		goto out;

	req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PORT,
				  SBAL_SFLAGS0_TYPE_READ,
				  qdio->adapter->pool.erp_req);

	if (IS_ERR(req)) {
		retval = PTR_ERR(req);
		goto out;
	}

	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);

	req->handler = zfcp_fsf_close_port_handler;
	req->data = erp_action->port;
	req->erp_action = erp_action;
	/* identify the port to close via the handle from "open port" */
	req->qtcb->header.port_handle = erp_action->port->handle;
	erp_action->fsf_req_id = req->req_id;

	zfcp_fsf_start_erp_timer(req);
	retval = zfcp_fsf_req_send(req);
	if (retval) {
		/* send failed: release the request and reset the ERP link */
		zfcp_fsf_req_free(req);
		erp_action->fsf_req_id = 0;
	}
	/* NOTE: DO NOT TOUCH req PAST THIS POINT! */
out:
	spin_unlock_irq(&qdio->req_q_lock);
	return retval;
}
1883 
/*
 * Evaluate the FSF response of an "open WKA (well-known address) port"
 * request and update the WKA port state accordingly.
 */
static void zfcp_fsf_open_wka_port_handler(struct zfcp_fsf_req *req)
{
	struct zfcp_fc_wka_port *wka_port = req->data;
	struct fsf_qtcb_header *header = &req->qtcb->header;

	if (req->status & ZFCP_STATUS_FSFREQ_ERROR) {
		wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
		goto out;
	}

	switch (header->fsf_status) {
	case FSF_MAXIMUM_NUMBER_OF_PORTS_EXCEEDED:
		dev_warn(&req->adapter->ccw_device->dev,
			 "Opening WKA port 0x%x failed\n", wka_port->d_id);
		fallthrough;
	case FSF_ADAPTER_STATUS_AVAILABLE:
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
		break;
	case FSF_GOOD:
		/* remember the handle for subsequent requests to this port */
		wka_port->handle = header->port_handle;
		fallthrough;
	case FSF_PORT_ALREADY_OPEN:
		wka_port->status = ZFCP_FC_WKA_PORT_ONLINE;
	}
out:
	/* wake anyone waiting for the WKA port state change */
	wake_up(&wka_port->completion_wq);
}
1912 
1913 /**
1914  * zfcp_fsf_open_wka_port - create and send open wka-port request
1915  * @wka_port: pointer to struct zfcp_fc_wka_port
1916  * Returns: 0 on success, error otherwise
1917  */
int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port)
{
	struct zfcp_qdio *qdio = wka_port->adapter->qdio;
	struct zfcp_fsf_req *req;
	unsigned long req_id = 0;
	int retval = -EIO;

	spin_lock_irq(&qdio->req_q_lock);
	/* need a free SBAL in the request queue before building the request */
	if (zfcp_qdio_sbal_get(qdio))
		goto out;

	req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_PORT_WITH_DID,
				  SBAL_SFLAGS0_TYPE_READ,
				  qdio->adapter->pool.erp_req);

	if (IS_ERR(req)) {
		retval = PTR_ERR(req);
		goto out;
	}

	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);

	req->handler = zfcp_fsf_open_wka_port_handler;
	/* destination id is transmitted in big-endian 24-bit format */
	hton24(req->qtcb->bottom.support.d_id, wka_port->d_id);
	req->data = wka_port;

	/* copy the id now: req must not be touched after a successful send */
	req_id = req->req_id;

	zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
	retval = zfcp_fsf_req_send(req);
	if (retval)
		zfcp_fsf_req_free(req);
	/* NOTE: DO NOT TOUCH req PAST THIS POINT! */
out:
	spin_unlock_irq(&qdio->req_q_lock);
	if (!retval)
		zfcp_dbf_rec_run_wka("fsowp_1", wka_port, req_id);
	return retval;
}
1958 
/* Evaluate the FSF response of a "close WKA port" request. */
static void zfcp_fsf_close_wka_port_handler(struct zfcp_fsf_req *req)
{
	struct zfcp_fc_wka_port *wka_port = req->data;

	if (req->qtcb->header.fsf_status == FSF_PORT_HANDLE_NOT_VALID) {
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		/* stale handle: recover by reopening the adapter */
		zfcp_erp_adapter_reopen(wka_port->adapter, 0, "fscwph1");
	}

	/* mark the WKA port offline regardless of the individual fsf_status */
	wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
	wake_up(&wka_port->completion_wq);
}
1971 
1972 /**
1973  * zfcp_fsf_close_wka_port - create and send close wka port request
 * @wka_port: WKA port to close
1975  * Returns: 0 on success, error otherwise
1976  */
int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port)
{
	struct zfcp_qdio *qdio = wka_port->adapter->qdio;
	struct zfcp_fsf_req *req;
	unsigned long req_id = 0;
	int retval = -EIO;

	spin_lock_irq(&qdio->req_q_lock);
	/* need a free SBAL in the request queue before building the request */
	if (zfcp_qdio_sbal_get(qdio))
		goto out;

	req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PORT,
				  SBAL_SFLAGS0_TYPE_READ,
				  qdio->adapter->pool.erp_req);

	if (IS_ERR(req)) {
		retval = PTR_ERR(req);
		goto out;
	}

	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);

	req->handler = zfcp_fsf_close_wka_port_handler;
	req->data = wka_port;
	/* identify the WKA port via the handle stored at open time */
	req->qtcb->header.port_handle = wka_port->handle;

	/* copy the id now: req must not be touched after a successful send */
	req_id = req->req_id;

	zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
	retval = zfcp_fsf_req_send(req);
	if (retval)
		zfcp_fsf_req_free(req);
	/* NOTE: DO NOT TOUCH req PAST THIS POINT! */
out:
	spin_unlock_irq(&qdio->req_q_lock);
	if (!retval)
		zfcp_dbf_rec_run_wka("fscwp_1", wka_port, req_id);
	return retval;
}
2017 
/*
 * Evaluate the FSF response of a "close physical port" request and
 * update port and attached-LUN status bits accordingly.
 */
static void zfcp_fsf_close_physical_port_handler(struct zfcp_fsf_req *req)
{
	struct zfcp_port *port = req->data;
	struct fsf_qtcb_header *header = &req->qtcb->header;
	struct scsi_device *sdev;

	/* request already failed at a lower layer; nothing to evaluate */
	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
		return;

	switch (header->fsf_status) {
	case FSF_PORT_HANDLE_NOT_VALID:
		/* stale port handle: recover by reopening the adapter */
		zfcp_erp_adapter_reopen(port->adapter, 0, "fscpph1");
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	case FSF_PORT_BOXED:
		/* can't use generic zfcp_erp_modify_port_status because
		 * ZFCP_STATUS_COMMON_OPEN must not be reset for the port */
		atomic_andnot(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
		/* every LUN behind this port has lost its open state */
		shost_for_each_device(sdev, port->adapter->scsi_host)
			if (sdev_to_zfcp(sdev)->port == port)
				atomic_andnot(ZFCP_STATUS_COMMON_OPEN,
						  &sdev_to_zfcp(sdev)->status);
		zfcp_erp_set_port_status(port, ZFCP_STATUS_COMMON_ACCESS_BOXED);
		zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED,
				     "fscpph2");
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	case FSF_ADAPTER_STATUS_AVAILABLE:
		switch (header->fsf_status_qual.word[0]) {
		case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
		case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
			break;
		}
		break;
	case FSF_GOOD:
		/* can't use generic zfcp_erp_modify_port_status because
		 * ZFCP_STATUS_COMMON_OPEN must not be reset for the port
		 */
		atomic_andnot(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
		/* clear OPEN for each LUN attached to this port */
		shost_for_each_device(sdev, port->adapter->scsi_host)
			if (sdev_to_zfcp(sdev)->port == port)
				atomic_andnot(ZFCP_STATUS_COMMON_OPEN,
						  &sdev_to_zfcp(sdev)->status);
		break;
	}
}
2065 
2066 /**
2067  * zfcp_fsf_close_physical_port - close physical port
2068  * @erp_action: pointer to struct zfcp_erp_action
2069  * Returns: 0 on success
2070  */
int zfcp_fsf_close_physical_port(struct zfcp_erp_action *erp_action)
{
	struct zfcp_qdio *qdio = erp_action->adapter->qdio;
	struct zfcp_fsf_req *req;
	int retval = -EIO;

	spin_lock_irq(&qdio->req_q_lock);
	/* need a free SBAL in the request queue before building the request */
	if (zfcp_qdio_sbal_get(qdio))
		goto out;

	req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PHYSICAL_PORT,
				  SBAL_SFLAGS0_TYPE_READ,
				  qdio->adapter->pool.erp_req);

	if (IS_ERR(req)) {
		retval = PTR_ERR(req);
		goto out;
	}

	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);

	req->data = erp_action->port;
	/* identify the port via the handle from "open port" */
	req->qtcb->header.port_handle = erp_action->port->handle;
	req->erp_action = erp_action;
	req->handler = zfcp_fsf_close_physical_port_handler;
	erp_action->fsf_req_id = req->req_id;

	zfcp_fsf_start_erp_timer(req);
	retval = zfcp_fsf_req_send(req);
	if (retval) {
		/* send failed: release the request and reset the ERP link */
		zfcp_fsf_req_free(req);
		erp_action->fsf_req_id = 0;
	}
	/* NOTE: DO NOT TOUCH req PAST THIS POINT! */
out:
	spin_unlock_irq(&qdio->req_q_lock);
	return retval;
}
2110 
/*
 * Evaluate the FSF response of an "open LUN" request: store the LUN
 * handle on success, trigger the appropriate recovery otherwise.
 */
static void zfcp_fsf_open_lun_handler(struct zfcp_fsf_req *req)
{
	struct zfcp_adapter *adapter = req->adapter;
	struct scsi_device *sdev = req->data;
	struct zfcp_scsi_dev *zfcp_sdev;
	struct fsf_qtcb_header *header = &req->qtcb->header;
	union fsf_status_qual *qual = &header->fsf_status_qual;

	/* request already failed at a lower layer; nothing to evaluate */
	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
		return;

	zfcp_sdev = sdev_to_zfcp(sdev);

	/* a fresh open attempt invalidates earlier access restrictions */
	atomic_andnot(ZFCP_STATUS_COMMON_ACCESS_DENIED |
			  ZFCP_STATUS_COMMON_ACCESS_BOXED,
			  &zfcp_sdev->status);

	switch (header->fsf_status) {

	case FSF_PORT_HANDLE_NOT_VALID:
		/* stale port handle: recover by reopening the adapter */
		zfcp_erp_adapter_reopen(adapter, 0, "fsouh_1");
		fallthrough;
	case FSF_LUN_ALREADY_OPEN:
		break;
	case FSF_PORT_BOXED:
		zfcp_erp_set_port_status(zfcp_sdev->port,
					 ZFCP_STATUS_COMMON_ACCESS_BOXED);
		zfcp_erp_port_reopen(zfcp_sdev->port,
				     ZFCP_STATUS_COMMON_ERP_FAILED, "fsouh_2");
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	case FSF_LUN_SHARING_VIOLATION:
		/* another LPAR holds the LUN; the qualifier names the owner */
		if (qual->word[0])
			dev_warn(&zfcp_sdev->port->adapter->ccw_device->dev,
				 "LUN 0x%016Lx on port 0x%016Lx is already in "
				 "use by CSS%d, MIF Image ID %x\n",
				 zfcp_scsi_dev_lun(sdev),
				 (unsigned long long)zfcp_sdev->port->wwpn,
				 qual->fsf_queue_designator.cssid,
				 qual->fsf_queue_designator.hla);
		zfcp_erp_set_lun_status(sdev,
					ZFCP_STATUS_COMMON_ERP_FAILED |
					ZFCP_STATUS_COMMON_ACCESS_DENIED);
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	case FSF_MAXIMUM_NUMBER_OF_LUNS_EXCEEDED:
		dev_warn(&adapter->ccw_device->dev,
			 "No handle is available for LUN "
			 "0x%016Lx on port 0x%016Lx\n",
			 (unsigned long long)zfcp_scsi_dev_lun(sdev),
			 (unsigned long long)zfcp_sdev->port->wwpn);
		zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_ERP_FAILED);
		fallthrough;
	case FSF_INVALID_COMMAND_OPTION:
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	case FSF_ADAPTER_STATUS_AVAILABLE:
		switch (header->fsf_status_qual.word[0]) {
		case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
			zfcp_fc_test_link(zfcp_sdev->port);
			fallthrough;
		case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
			break;
		}
		break;

	case FSF_GOOD:
		/* keep the handle for later FCP commands on this LUN */
		zfcp_sdev->lun_handle = header->lun_handle;
		atomic_or(ZFCP_STATUS_COMMON_OPEN, &zfcp_sdev->status);
		break;
	}
}
2184 
2185 /**
2186  * zfcp_fsf_open_lun - open LUN
2187  * @erp_action: pointer to struct zfcp_erp_action
2188  * Returns: 0 on success, error otherwise
2189  */
int zfcp_fsf_open_lun(struct zfcp_erp_action *erp_action)
{
	struct zfcp_adapter *adapter = erp_action->adapter;
	struct zfcp_qdio *qdio = adapter->qdio;
	struct zfcp_fsf_req *req;
	int retval = -EIO;

	spin_lock_irq(&qdio->req_q_lock);
	/* need a free SBAL in the request queue before building the request */
	if (zfcp_qdio_sbal_get(qdio))
		goto out;

	req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_LUN,
				  SBAL_SFLAGS0_TYPE_READ,
				  adapter->pool.erp_req);

	if (IS_ERR(req)) {
		retval = PTR_ERR(req);
		goto out;
	}

	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);

	req->qtcb->header.port_handle = erp_action->port->handle;
	req->qtcb->bottom.support.fcp_lun = zfcp_scsi_dev_lun(erp_action->sdev);
	req->handler = zfcp_fsf_open_lun_handler;
	req->data = erp_action->sdev;
	req->erp_action = erp_action;
	erp_action->fsf_req_id = req->req_id;

	/* without NPIV, ask the channel not to box the LUN on conflicts */
	if (!(adapter->connection_features & FSF_FEATURE_NPIV_MODE))
		req->qtcb->bottom.support.option = FSF_OPEN_LUN_SUPPRESS_BOXING;

	zfcp_fsf_start_erp_timer(req);
	retval = zfcp_fsf_req_send(req);
	if (retval) {
		/* send failed: release the request and reset the ERP link */
		zfcp_fsf_req_free(req);
		erp_action->fsf_req_id = 0;
	}
	/* NOTE: DO NOT TOUCH req PAST THIS POINT! */
out:
	spin_unlock_irq(&qdio->req_q_lock);
	return retval;
}
2234 
/* Evaluate the FSF response of a "close LUN" request. */
static void zfcp_fsf_close_lun_handler(struct zfcp_fsf_req *req)
{
	struct scsi_device *sdev = req->data;
	struct zfcp_scsi_dev *zfcp_sdev;

	/* request already failed at a lower layer; nothing to evaluate */
	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
		return;

	zfcp_sdev = sdev_to_zfcp(sdev);

	switch (req->qtcb->header.fsf_status) {
	case FSF_PORT_HANDLE_NOT_VALID:
		/* stale port handle: recover by reopening the adapter */
		zfcp_erp_adapter_reopen(zfcp_sdev->port->adapter, 0, "fscuh_1");
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	case FSF_LUN_HANDLE_NOT_VALID:
		/* stale LUN handle: reopening the port refreshes it */
		zfcp_erp_port_reopen(zfcp_sdev->port, 0, "fscuh_2");
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	case FSF_PORT_BOXED:
		zfcp_erp_set_port_status(zfcp_sdev->port,
					 ZFCP_STATUS_COMMON_ACCESS_BOXED);
		zfcp_erp_port_reopen(zfcp_sdev->port,
				     ZFCP_STATUS_COMMON_ERP_FAILED, "fscuh_3");
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	case FSF_ADAPTER_STATUS_AVAILABLE:
		switch (req->qtcb->header.fsf_status_qual.word[0]) {
		case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
			zfcp_fc_test_link(zfcp_sdev->port);
			fallthrough;
		case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
			break;
		}
		break;
	case FSF_GOOD:
		/* LUN successfully closed: clear its OPEN status bit */
		atomic_andnot(ZFCP_STATUS_COMMON_OPEN, &zfcp_sdev->status);
		break;
	}
}
2276 
2277 /**
2278  * zfcp_fsf_close_lun - close LUN
2279  * @erp_action: pointer to erp_action triggering the "close LUN"
2280  * Returns: 0 on success, error otherwise
2281  */
int zfcp_fsf_close_lun(struct zfcp_erp_action *erp_action)
{
	struct zfcp_qdio *qdio = erp_action->adapter->qdio;
	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(erp_action->sdev);
	struct zfcp_fsf_req *req;
	int retval = -EIO;

	spin_lock_irq(&qdio->req_q_lock);
	/* need a free SBAL in the request queue before building the request */
	if (zfcp_qdio_sbal_get(qdio))
		goto out;

	req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_LUN,
				  SBAL_SFLAGS0_TYPE_READ,
				  qdio->adapter->pool.erp_req);

	if (IS_ERR(req)) {
		retval = PTR_ERR(req);
		goto out;
	}

	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);

	/* identify the LUN by both port and LUN handles */
	req->qtcb->header.port_handle = erp_action->port->handle;
	req->qtcb->header.lun_handle = zfcp_sdev->lun_handle;
	req->handler = zfcp_fsf_close_lun_handler;
	req->data = erp_action->sdev;
	req->erp_action = erp_action;
	erp_action->fsf_req_id = req->req_id;

	zfcp_fsf_start_erp_timer(req);
	retval = zfcp_fsf_req_send(req);
	if (retval) {
		/* send failed: release the request and reset the ERP link */
		zfcp_fsf_req_free(req);
		erp_action->fsf_req_id = 0;
	}
	/* NOTE: DO NOT TOUCH req PAST THIS POINT! */
out:
	spin_unlock_irq(&qdio->req_q_lock);
	return retval;
}
2323 
/* Fold one latency sample into the running sum/min/max record. */
static void zfcp_fsf_update_lat(struct zfcp_latency_record *lat_rec, u32 lat)
{
	if (lat < lat_rec->min)
		lat_rec->min = lat;
	if (lat > lat_rec->max)
		lat_rec->max = lat;
	lat_rec->sum += lat;
}
2330 
/*
 * Attach a zfcp_blk_drv_data record to the completed SCSI command's
 * block request and, if the adapter delivers measurement data, fold
 * the channel/fabric latencies into the per-device statistics.
 */
static void zfcp_fsf_req_trace(struct zfcp_fsf_req *req, struct scsi_cmnd *scsi)
{
	struct fsf_qual_latency_info *lat_in;
	struct zfcp_latency_cont *lat = NULL;
	struct zfcp_scsi_dev *zfcp_sdev;
	struct zfcp_blk_drv_data blktrc;
	int ticks = req->adapter->timer_ticks;

	lat_in = &req->qtcb->prefix.prot_status_qual.latency_info;

	blktrc.flags = 0;
	blktrc.magic = ZFCP_BLK_DRV_DATA_MAGIC;
	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
		blktrc.flags |= ZFCP_BLK_REQ_ERROR;
	blktrc.inb_usage = 0;
	blktrc.outb_usage = req->qdio_req.qdio_outb_usage;

	/* latency data is only valid for successful requests on adapters
	 * with the measurement feature */
	if (req->adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA &&
	    !(req->status & ZFCP_STATUS_FSFREQ_ERROR)) {
		zfcp_sdev = sdev_to_zfcp(scsi->device);
		blktrc.flags |= ZFCP_BLK_LAT_VALID;
		/* raw values are scaled by the adapter's timer tick factor */
		blktrc.channel_lat = lat_in->channel_lat * ticks;
		blktrc.fabric_lat = lat_in->fabric_lat * ticks;

		/* choose the read/write/cmd accumulator by data direction */
		switch (req->qtcb->bottom.io.data_direction) {
		case FSF_DATADIR_DIF_READ_STRIP:
		case FSF_DATADIR_DIF_READ_CONVERT:
		case FSF_DATADIR_READ:
			lat = &zfcp_sdev->latencies.read;
			break;
		case FSF_DATADIR_DIF_WRITE_INSERT:
		case FSF_DATADIR_DIF_WRITE_CONVERT:
		case FSF_DATADIR_WRITE:
			lat = &zfcp_sdev->latencies.write;
			break;
		case FSF_DATADIR_CMND:
			lat = &zfcp_sdev->latencies.cmd;
			break;
		}

		if (lat) {
			spin_lock(&zfcp_sdev->latencies.lock);
			zfcp_fsf_update_lat(&lat->channel, lat_in->channel_lat);
			zfcp_fsf_update_lat(&lat->fabric, lat_in->fabric_lat);
			lat->counter++;
			spin_unlock(&zfcp_sdev->latencies.lock);
		}
	}

	blk_add_driver_data(scsi_cmd_to_rq(scsi), &blktrc, sizeof(blktrc));
}
2382 
2383 /**
2384  * zfcp_fsf_fcp_handler_common() - FCP response handler common to I/O and TMF.
2385  * @req: Pointer to FSF request.
2386  * @sdev: Pointer to SCSI device as request context.
2387  */
static void zfcp_fsf_fcp_handler_common(struct zfcp_fsf_req *req,
					struct scsi_device *sdev)
{
	struct zfcp_scsi_dev *zfcp_sdev;
	struct fsf_qtcb_header *header = &req->qtcb->header;

	/* request already failed at a lower layer; nothing to evaluate */
	if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR))
		return;

	zfcp_sdev = sdev_to_zfcp(sdev);

	switch (header->fsf_status) {
	case FSF_HANDLE_MISMATCH:
	case FSF_PORT_HANDLE_NOT_VALID:
		/* stale port handle: recover by reopening the adapter */
		zfcp_erp_adapter_reopen(req->adapter, 0, "fssfch1");
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	case FSF_FCPLUN_NOT_VALID:
	case FSF_LUN_HANDLE_NOT_VALID:
		/* stale LUN handle: reopening the port refreshes it */
		zfcp_erp_port_reopen(zfcp_sdev->port, 0, "fssfch2");
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	case FSF_SERVICE_CLASS_NOT_SUPPORTED:
		zfcp_fsf_class_not_supp(req);
		break;
	case FSF_DIRECTION_INDICATOR_NOT_VALID:
		/* driver bug or corruption: shut the adapter down */
		dev_err(&req->adapter->ccw_device->dev,
			"Incorrect direction %d, LUN 0x%016Lx on port "
			"0x%016Lx closed\n",
			req->qtcb->bottom.io.data_direction,
			(unsigned long long)zfcp_scsi_dev_lun(sdev),
			(unsigned long long)zfcp_sdev->port->wwpn);
		zfcp_erp_adapter_shutdown(req->adapter, 0, "fssfch3");
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	case FSF_CMND_LENGTH_NOT_VALID:
		dev_err(&req->adapter->ccw_device->dev,
			"Incorrect FCP_CMND length %d, FCP device closed\n",
			req->qtcb->bottom.io.fcp_cmnd_length);
		zfcp_erp_adapter_shutdown(req->adapter, 0, "fssfch4");
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	case FSF_PORT_BOXED:
		zfcp_erp_set_port_status(zfcp_sdev->port,
					 ZFCP_STATUS_COMMON_ACCESS_BOXED);
		zfcp_erp_port_reopen(zfcp_sdev->port,
				     ZFCP_STATUS_COMMON_ERP_FAILED, "fssfch5");
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	case FSF_LUN_BOXED:
		zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_ACCESS_BOXED);
		zfcp_erp_lun_reopen(sdev, ZFCP_STATUS_COMMON_ERP_FAILED,
				    "fssfch6");
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	case FSF_ADAPTER_STATUS_AVAILABLE:
		if (header->fsf_status_qual.word[0] ==
		    FSF_SQ_INVOKE_LINK_TEST_PROCEDURE)
			zfcp_fc_test_link(zfcp_sdev->port);
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	case FSF_SECURITY_ERROR:
		zfcp_fsf_log_security_error(&req->adapter->ccw_device->dev,
					    header->fsf_status_qual.word[0],
					    zfcp_sdev->port->wwpn);
		zfcp_erp_port_forced_reopen(zfcp_sdev->port, 0, "fssfch7");
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	}
}
2458 
/*
 * Completion handler for an FCP command carrying a SCSI command:
 * map the FSF/FCP result onto the scsi_cmnd and complete it.
 */
static void zfcp_fsf_fcp_cmnd_handler(struct zfcp_fsf_req *req)
{
	struct scsi_cmnd *scpnt;
	struct fcp_resp_with_ext *fcp_rsp;
	unsigned long flags;

	/* abort_lock serializes completion against command abort */
	read_lock_irqsave(&req->adapter->abort_lock, flags);

	scpnt = req->data;
	if (unlikely(!scpnt)) {
		/* command was already taken away (e.g. by abort) */
		read_unlock_irqrestore(&req->adapter->abort_lock, flags);
		return;
	}

	zfcp_fsf_fcp_handler_common(req, scpnt->device);

	if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR)) {
		set_host_byte(scpnt, DID_TRANSPORT_DISRUPTED);
		goto skip_fsfstatus;
	}

	switch (req->qtcb->header.fsf_status) {
	case FSF_INCONSISTENT_PROT_DATA:
	case FSF_INVALID_PROT_PARM:
		set_host_byte(scpnt, DID_ERROR);
		goto skip_fsfstatus;
	/* map DIF protection check failures to synthetic sense data */
	case FSF_BLOCK_GUARD_CHECK_FAILURE:
		zfcp_scsi_dif_sense_error(scpnt, 0x1);
		goto skip_fsfstatus;
	case FSF_APP_TAG_CHECK_FAILURE:
		zfcp_scsi_dif_sense_error(scpnt, 0x2);
		goto skip_fsfstatus;
	case FSF_REF_TAG_CHECK_FAILURE:
		zfcp_scsi_dif_sense_error(scpnt, 0x3);
		goto skip_fsfstatus;
	}
	BUILD_BUG_ON(sizeof(struct fcp_resp_with_ext) > FSF_FCP_RSP_SIZE);
	fcp_rsp = &req->qtcb->bottom.io.fcp_rsp.iu;
	zfcp_fc_eval_fcp_rsp(fcp_rsp, scpnt);

skip_fsfstatus:
	zfcp_fsf_req_trace(req, scpnt);
	zfcp_dbf_scsi_result(scpnt, req);

	scpnt->host_scribble = NULL;
	scsi_done(scpnt);
	/*
	 * We must hold this lock until scsi_done has been called.
	 * Otherwise we may call scsi_done after abort regarding this
	 * command has completed.
	 * Note: scsi_done must not block!
	 */
	read_unlock_irqrestore(&req->adapter->abort_lock, flags);
}
2513 
/*
 * Derive the FSF data direction code from the SCSI command's
 * protection operation and DMA direction.  Returns 0 on success or
 * -EINVAL for combinations the FCP channel cannot express.
 */
static int zfcp_fsf_set_data_dir(struct scsi_cmnd *scsi_cmnd, u32 *data_dir)
{
	const int dma_dir = scsi_cmnd->sc_data_direction;

	switch (scsi_get_prot_op(scsi_cmnd)) {
	case SCSI_PROT_NORMAL:
		/* no DIF/DIX: direction follows the DMA direction */
		if (dma_dir == DMA_NONE)
			*data_dir = FSF_DATADIR_CMND;
		else if (dma_dir == DMA_FROM_DEVICE)
			*data_dir = FSF_DATADIR_READ;
		else if (dma_dir == DMA_TO_DEVICE)
			*data_dir = FSF_DATADIR_WRITE;
		else if (dma_dir == DMA_BIDIRECTIONAL)
			return -EINVAL;
		break;
	case SCSI_PROT_READ_STRIP:
		*data_dir = FSF_DATADIR_DIF_READ_STRIP;
		break;
	case SCSI_PROT_WRITE_INSERT:
		*data_dir = FSF_DATADIR_DIF_WRITE_INSERT;
		break;
	case SCSI_PROT_READ_PASS:
		*data_dir = FSF_DATADIR_DIF_READ_CONVERT;
		break;
	case SCSI_PROT_WRITE_PASS:
		*data_dir = FSF_DATADIR_DIF_WRITE_CONVERT;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
2551 
2552 /**
2553  * zfcp_fsf_fcp_cmnd - initiate an FCP command (for a SCSI command)
2554  * @scsi_cmnd: scsi command to be sent
2555  */
int zfcp_fsf_fcp_cmnd(struct scsi_cmnd *scsi_cmnd)
{
	struct zfcp_fsf_req *req;
	struct fcp_cmnd *fcp_cmnd;
	u8 sbtype = SBAL_SFLAGS0_TYPE_READ;
	int retval = -EIO;
	struct scsi_device *sdev = scsi_cmnd->device;
	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
	struct zfcp_adapter *adapter = zfcp_sdev->port->adapter;
	struct zfcp_qdio *qdio = adapter->qdio;
	struct fsf_qtcb_bottom_io *io;
	unsigned long flags;

	/* refuse new commands while the device is blocked for recovery */
	if (unlikely(!(atomic_read(&zfcp_sdev->status) &
		       ZFCP_STATUS_COMMON_UNBLOCKED)))
		return -EBUSY;

	spin_lock_irqsave(&qdio->req_q_lock, flags);
	if (atomic_read(&qdio->req_q_free) <= 0) {
		/* request queue full: count it and back off with -EIO */
		atomic_inc(&qdio->req_q_full);
		goto out;
	}

	if (scsi_cmnd->sc_data_direction == DMA_TO_DEVICE)
		sbtype = SBAL_SFLAGS0_TYPE_WRITE;

	req = zfcp_fsf_req_create(qdio, FSF_QTCB_FCP_CMND,
				  sbtype, adapter->pool.scsi_req);

	if (IS_ERR(req)) {
		retval = PTR_ERR(req);
		goto out;
	}

	/* stash the request id on the command for abort handling */
	scsi_cmnd->host_scribble = (unsigned char *) req->req_id;

	io = &req->qtcb->bottom.io;
	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
	req->data = scsi_cmnd;
	req->handler = zfcp_fsf_fcp_cmnd_handler;
	req->qtcb->header.lun_handle = zfcp_sdev->lun_handle;
	req->qtcb->header.port_handle = zfcp_sdev->port->handle;
	io->service_class = FSF_CLASS_3;
	io->fcp_cmnd_length = FCP_CMND_LEN;

	/* DIF/DIX: pass protection interval and reference tag along */
	if (scsi_get_prot_op(scsi_cmnd) != SCSI_PROT_NORMAL) {
		io->data_block_length = scsi_prot_interval(scsi_cmnd);
		io->ref_tag_value = scsi_prot_ref_tag(scsi_cmnd);
	}

	if (zfcp_fsf_set_data_dir(scsi_cmnd, &io->data_direction))
		goto failed_scsi_cmnd;

	BUILD_BUG_ON(sizeof(struct fcp_cmnd) > FSF_FCP_CMND_SIZE);
	fcp_cmnd = &req->qtcb->bottom.io.fcp_cmnd.iu;
	zfcp_fc_scsi_to_fcp(fcp_cmnd, scsi_cmnd);

	/* map the protection scatter-gather list, if any, first */
	if ((scsi_get_prot_op(scsi_cmnd) != SCSI_PROT_NORMAL) &&
	    scsi_prot_sg_count(scsi_cmnd)) {
		zfcp_qdio_set_data_div(qdio, &req->qdio_req,
				       scsi_prot_sg_count(scsi_cmnd));
		retval = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req,
						 scsi_prot_sglist(scsi_cmnd));
		if (retval)
			goto failed_scsi_cmnd;
		io->prot_data_length = zfcp_qdio_real_bytes(
						scsi_prot_sglist(scsi_cmnd));
	}

	/* then the data scatter-gather list */
	retval = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req,
					 scsi_sglist(scsi_cmnd));
	if (unlikely(retval))
		goto failed_scsi_cmnd;

	zfcp_qdio_set_sbale_last(adapter->qdio, &req->qdio_req);
	if (zfcp_adapter_multi_buffer_active(adapter))
		zfcp_qdio_set_scount(qdio, &req->qdio_req);

	retval = zfcp_fsf_req_send(req);
	if (unlikely(retval))
		goto failed_scsi_cmnd;
	/* NOTE: DO NOT TOUCH req PAST THIS POINT! */

	goto out;

failed_scsi_cmnd:
	zfcp_fsf_req_free(req);
	scsi_cmnd->host_scribble = NULL;
out:
	spin_unlock_irqrestore(&qdio->req_q_lock, flags);
	return retval;
}
2648 
/* Evaluate the FCP response of a task management function (TMF). */
static void zfcp_fsf_fcp_task_mgmt_handler(struct zfcp_fsf_req *req)
{
	struct scsi_device *sdev = req->data;
	struct fcp_resp_with_ext *fcp_rsp;
	struct fcp_resp_rsp_info *rsp_info;

	zfcp_fsf_fcp_handler_common(req, sdev);

	fcp_rsp = &req->qtcb->bottom.io.fcp_rsp.iu;
	/* the response info follows directly after the extended response */
	rsp_info = (struct fcp_resp_rsp_info *) &fcp_rsp[1];

	/* mark the TMF failed unless it completed without any error */
	if ((rsp_info->rsp_code != FCP_TMF_CMPL) ||
	     (req->status & ZFCP_STATUS_FSFREQ_ERROR))
		req->status |= ZFCP_STATUS_FSFREQ_TMFUNCFAILED;
}
2664 
2665 /**
2666  * zfcp_fsf_fcp_task_mgmt() - Send SCSI task management command (TMF).
2667  * @sdev: Pointer to SCSI device to send the task management command to.
2668  * @tm_flags: Unsigned byte for task management flags.
2669  *
2670  * Return: On success pointer to struct zfcp_fsf_req, %NULL otherwise.
2671  */
struct zfcp_fsf_req *zfcp_fsf_fcp_task_mgmt(struct scsi_device *sdev,
					    u8 tm_flags)
{
	struct zfcp_fsf_req *req = NULL;
	struct fcp_cmnd *fcp_cmnd;
	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
	struct zfcp_qdio *qdio = zfcp_sdev->port->adapter->qdio;

	/* refuse the TMF while the device is blocked for recovery */
	if (unlikely(!(atomic_read(&zfcp_sdev->status) &
		       ZFCP_STATUS_COMMON_UNBLOCKED)))
		return NULL;

	spin_lock_irq(&qdio->req_q_lock);
	if (zfcp_qdio_sbal_get(qdio))
		goto out;

	req = zfcp_fsf_req_create(qdio, FSF_QTCB_FCP_CMND,
				  SBAL_SFLAGS0_TYPE_WRITE,
				  qdio->adapter->pool.scsi_req);

	if (IS_ERR(req)) {
		req = NULL;
		goto out;
	}

	req->data = sdev;

	req->handler = zfcp_fsf_fcp_task_mgmt_handler;
	req->qtcb->header.lun_handle = zfcp_sdev->lun_handle;
	req->qtcb->header.port_handle = zfcp_sdev->port->handle;
	/* TMFs carry no data payload */
	req->qtcb->bottom.io.data_direction = FSF_DATADIR_CMND;
	req->qtcb->bottom.io.service_class = FSF_CLASS_3;
	req->qtcb->bottom.io.fcp_cmnd_length = FCP_CMND_LEN;

	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);

	fcp_cmnd = &req->qtcb->bottom.io.fcp_cmnd.iu;
	zfcp_fc_fcp_tm(fcp_cmnd, sdev, tm_flags);

	/* TMFs sent on behalf of scsi_eh get the short timeout */
	zfcp_fsf_start_timer(req, ZFCP_FSF_SCSI_ER_TIMEOUT);
	if (!zfcp_fsf_req_send(req)) {
		/* NOTE: DO NOT TOUCH req, UNTIL IT COMPLETES! */
		goto out;
	}

	zfcp_fsf_req_free(req);
	req = NULL;
out:
	spin_unlock_irq(&qdio->req_q_lock);
	return req;
}
2723 
2724 /**
2725  * zfcp_fsf_reqid_check - validate req_id contained in SBAL returned by QDIO
2726  * @qdio: pointer to struct zfcp_qdio
2727  * @sbal_idx: response queue index of SBAL to be processed
2728  */
void zfcp_fsf_reqid_check(struct zfcp_qdio *qdio, int sbal_idx)
{
	struct zfcp_adapter *adapter = qdio->adapter;
	struct qdio_buffer *sbal = qdio->res_q[sbal_idx];
	struct qdio_buffer_element *sbale;
	struct zfcp_fsf_req *fsf_req;
	unsigned long req_id;
	int idx;

	/* walk the SBAL elements until the one flagged as last */
	for (idx = 0; idx < QDIO_MAX_ELEMENTS_PER_BUFFER; idx++) {

		sbale = &sbal->element[idx];
		/* the element's address field carries our request id */
		req_id = sbale->addr;
		fsf_req = zfcp_reqlist_find_rm(adapter->req_list, req_id);

		if (!fsf_req) {
			/*
			 * Unknown request means that we have potentially memory
			 * corruption and must stop the machine immediately.
			 */
			zfcp_qdio_siosl(adapter);
			panic("error: unknown req_id (%lx) on adapter %s.\n",
			      req_id, dev_name(&adapter->ccw_device->dev));
		}

		zfcp_fsf_req_complete(fsf_req);

		if (likely(sbale->eflags & SBAL_EFLAGS_LAST_ENTRY))
			break;
	}
}
2760