Line data Source code
1 : /* SPDX-License-Identifier: BSD-3-Clause
2 : * Copyright (C) 2015 Intel Corporation. All rights reserved.
3 : * Copyright (c) 2021 Mellanox Technologies LTD. All rights reserved.
4 : */
5 :
6 : #include "nvme_internal.h"
7 : #include "spdk/nvme.h"
8 :
9 : int
10 1 : spdk_nvme_ctrlr_io_cmd_raw_no_payload_build(struct spdk_nvme_ctrlr *ctrlr,
11 : struct spdk_nvme_qpair *qpair,
12 : struct spdk_nvme_cmd *cmd,
13 : spdk_nvme_cmd_cb cb_fn, void *cb_arg)
14 : {
15 : struct nvme_request *req;
16 : struct nvme_payload payload;
17 :
18 1 : if (ctrlr->trid.trtype != SPDK_NVME_TRANSPORT_PCIE) {
19 1 : return -EINVAL;
20 : }
21 :
22 0 : memset(&payload, 0, sizeof(payload));
23 0 : req = nvme_allocate_request(qpair, &payload, 0, 0, cb_fn, cb_arg);
24 :
25 0 : if (req == NULL) {
26 0 : return -ENOMEM;
27 : }
28 :
29 0 : memcpy(&req->cmd, cmd, sizeof(req->cmd));
30 :
31 0 : return nvme_qpair_submit_request(qpair, req);
32 1 : }
33 :
34 : int
35 1 : spdk_nvme_ctrlr_cmd_io_raw(struct spdk_nvme_ctrlr *ctrlr,
36 : struct spdk_nvme_qpair *qpair,
37 : struct spdk_nvme_cmd *cmd,
38 : void *buf, uint32_t len,
39 : spdk_nvme_cmd_cb cb_fn, void *cb_arg)
40 : {
41 : struct nvme_request *req;
42 :
43 1 : req = nvme_allocate_request_contig(qpair, buf, len, cb_fn, cb_arg);
44 :
45 1 : if (req == NULL) {
46 1 : return -ENOMEM;
47 : }
48 :
49 0 : memcpy(&req->cmd, cmd, sizeof(req->cmd));
50 :
51 0 : return nvme_qpair_submit_request(qpair, req);
52 1 : }
53 :
54 : int
55 1 : spdk_nvme_ctrlr_cmd_io_raw_with_md(struct spdk_nvme_ctrlr *ctrlr,
56 : struct spdk_nvme_qpair *qpair,
57 : struct spdk_nvme_cmd *cmd,
58 : void *buf, uint32_t len, void *md_buf,
59 : spdk_nvme_cmd_cb cb_fn, void *cb_arg)
60 : {
61 : struct nvme_request *req;
62 : struct nvme_payload payload;
63 1 : uint32_t md_len = 0;
64 :
65 1 : payload = NVME_PAYLOAD_CONTIG(buf, md_buf);
66 :
67 : /* Calculate metadata length */
68 1 : if (md_buf) {
69 0 : struct spdk_nvme_ns *ns = spdk_nvme_ctrlr_get_ns(ctrlr, cmd->nsid);
70 :
71 0 : assert(ns != NULL);
72 0 : assert(ns->sector_size != 0);
73 0 : md_len = len / ns->sector_size * ns->md_size;
74 0 : }
75 :
76 1 : req = nvme_allocate_request(qpair, &payload, len, md_len, cb_fn, cb_arg);
77 1 : if (req == NULL) {
78 1 : return -ENOMEM;
79 : }
80 :
81 0 : memcpy(&req->cmd, cmd, sizeof(req->cmd));
82 :
83 0 : return nvme_qpair_submit_request(qpair, req);
84 1 : }
85 :
86 : int
87 0 : spdk_nvme_ctrlr_cmd_iov_raw_with_md(struct spdk_nvme_ctrlr *ctrlr,
88 : struct spdk_nvme_qpair *qpair,
89 : struct spdk_nvme_cmd *cmd,
90 : uint32_t len, void *md_buf,
91 : spdk_nvme_cmd_cb cb_fn, void *cb_arg,
92 : spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
93 : spdk_nvme_req_next_sge_cb next_sge_fn)
94 : {
95 : struct nvme_request *req;
96 : struct nvme_payload payload;
97 0 : uint32_t md_len = 0;
98 :
99 0 : if (reset_sgl_fn == NULL || next_sge_fn == NULL) {
100 0 : return -EINVAL;
101 : }
102 :
103 0 : payload = NVME_PAYLOAD_SGL(reset_sgl_fn, next_sge_fn, cb_arg, md_buf);
104 :
105 : /* Calculate metadata length */
106 0 : if (md_buf) {
107 0 : struct spdk_nvme_ns *ns = spdk_nvme_ctrlr_get_ns(ctrlr, cmd->nsid);
108 :
109 0 : assert(ns != NULL);
110 0 : assert(ns->sector_size != 0);
111 0 : md_len = len / ns->sector_size * ns->md_size;
112 0 : }
113 :
114 0 : req = nvme_allocate_request(qpair, &payload, len, md_len, cb_fn, cb_arg);
115 0 : if (req == NULL) {
116 0 : return -ENOMEM;
117 : }
118 :
119 0 : memcpy(&req->cmd, cmd, sizeof(req->cmd));
120 :
121 0 : return nvme_qpair_submit_request(qpair, req);
122 0 : }
123 :
124 : int
125 0 : spdk_nvme_ctrlr_cmd_admin_raw(struct spdk_nvme_ctrlr *ctrlr,
126 : struct spdk_nvme_cmd *cmd,
127 : void *buf, uint32_t len,
128 : spdk_nvme_cmd_cb cb_fn, void *cb_arg)
129 : {
130 : struct nvme_request *req;
131 : int rc;
132 :
133 0 : nvme_ctrlr_lock(ctrlr);
134 0 : req = nvme_allocate_request_contig(ctrlr->adminq, buf, len, cb_fn, cb_arg);
135 0 : if (req == NULL) {
136 0 : nvme_ctrlr_unlock(ctrlr);
137 0 : return -ENOMEM;
138 : }
139 :
140 0 : memcpy(&req->cmd, cmd, sizeof(req->cmd));
141 :
142 0 : rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
143 :
144 0 : nvme_ctrlr_unlock(ctrlr);
145 0 : return rc;
146 0 : }
147 :
148 : int
149 1 : nvme_ctrlr_cmd_identify(struct spdk_nvme_ctrlr *ctrlr, uint8_t cns, uint16_t cntid, uint32_t nsid,
150 : uint8_t csi, void *payload, size_t payload_size,
151 : spdk_nvme_cmd_cb cb_fn, void *cb_arg)
152 : {
153 : struct nvme_request *req;
154 : struct spdk_nvme_cmd *cmd;
155 : int rc;
156 :
157 1 : nvme_ctrlr_lock(ctrlr);
158 2 : req = nvme_allocate_request_user_copy(ctrlr->adminq,
159 1 : payload, payload_size,
160 1 : cb_fn, cb_arg, false);
161 1 : if (req == NULL) {
162 0 : nvme_ctrlr_unlock(ctrlr);
163 0 : return -ENOMEM;
164 : }
165 :
166 1 : cmd = &req->cmd;
167 1 : cmd->opc = SPDK_NVME_OPC_IDENTIFY;
168 1 : cmd->cdw10_bits.identify.cns = cns;
169 1 : cmd->cdw10_bits.identify.cntid = cntid;
170 1 : cmd->cdw11_bits.identify.csi = csi;
171 1 : cmd->nsid = nsid;
172 :
173 1 : rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
174 :
175 1 : nvme_ctrlr_unlock(ctrlr);
176 1 : return rc;
177 1 : }
178 :
179 : int
180 1 : nvme_ctrlr_cmd_attach_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
181 : struct spdk_nvme_ctrlr_list *payload, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
182 : {
183 : struct nvme_request *req;
184 : struct spdk_nvme_cmd *cmd;
185 : int rc;
186 :
187 1 : nvme_ctrlr_lock(ctrlr);
188 2 : req = nvme_allocate_request_user_copy(ctrlr->adminq,
189 1 : payload, sizeof(struct spdk_nvme_ctrlr_list),
190 1 : cb_fn, cb_arg, true);
191 1 : if (req == NULL) {
192 0 : nvme_ctrlr_unlock(ctrlr);
193 0 : return -ENOMEM;
194 : }
195 :
196 1 : cmd = &req->cmd;
197 1 : cmd->opc = SPDK_NVME_OPC_NS_ATTACHMENT;
198 1 : cmd->nsid = nsid;
199 1 : cmd->cdw10_bits.ns_attach.sel = SPDK_NVME_NS_CTRLR_ATTACH;
200 :
201 1 : rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
202 :
203 1 : nvme_ctrlr_unlock(ctrlr);
204 1 : return rc;
205 1 : }
206 :
207 : int
208 1 : nvme_ctrlr_cmd_detach_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
209 : struct spdk_nvme_ctrlr_list *payload, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
210 : {
211 : struct nvme_request *req;
212 : struct spdk_nvme_cmd *cmd;
213 : int rc;
214 :
215 1 : nvme_ctrlr_lock(ctrlr);
216 2 : req = nvme_allocate_request_user_copy(ctrlr->adminq,
217 1 : payload, sizeof(struct spdk_nvme_ctrlr_list),
218 1 : cb_fn, cb_arg, true);
219 1 : if (req == NULL) {
220 0 : nvme_ctrlr_unlock(ctrlr);
221 0 : return -ENOMEM;
222 : }
223 :
224 1 : cmd = &req->cmd;
225 1 : cmd->opc = SPDK_NVME_OPC_NS_ATTACHMENT;
226 1 : cmd->nsid = nsid;
227 1 : cmd->cdw10_bits.ns_attach.sel = SPDK_NVME_NS_CTRLR_DETACH;
228 :
229 1 : rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
230 :
231 1 : nvme_ctrlr_unlock(ctrlr);
232 1 : return rc;
233 1 : }
234 :
235 : int
236 1 : nvme_ctrlr_cmd_create_ns(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_ns_data *payload,
237 : spdk_nvme_cmd_cb cb_fn, void *cb_arg)
238 : {
239 : struct nvme_request *req;
240 : struct spdk_nvme_cmd *cmd;
241 : int rc;
242 :
243 1 : nvme_ctrlr_lock(ctrlr);
244 2 : req = nvme_allocate_request_user_copy(ctrlr->adminq,
245 1 : payload, sizeof(struct spdk_nvme_ns_data),
246 1 : cb_fn, cb_arg, true);
247 1 : if (req == NULL) {
248 0 : nvme_ctrlr_unlock(ctrlr);
249 0 : return -ENOMEM;
250 : }
251 :
252 1 : cmd = &req->cmd;
253 1 : cmd->opc = SPDK_NVME_OPC_NS_MANAGEMENT;
254 1 : cmd->cdw10_bits.ns_manage.sel = SPDK_NVME_NS_MANAGEMENT_CREATE;
255 :
256 1 : rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
257 :
258 1 : nvme_ctrlr_unlock(ctrlr);
259 1 : return rc;
260 1 : }
261 :
262 : int
263 1 : nvme_ctrlr_cmd_delete_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid, spdk_nvme_cmd_cb cb_fn,
264 : void *cb_arg)
265 : {
266 : struct nvme_request *req;
267 : struct spdk_nvme_cmd *cmd;
268 : int rc;
269 :
270 1 : nvme_ctrlr_lock(ctrlr);
271 1 : req = nvme_allocate_request_null(ctrlr->adminq, cb_fn, cb_arg);
272 1 : if (req == NULL) {
273 0 : nvme_ctrlr_unlock(ctrlr);
274 0 : return -ENOMEM;
275 : }
276 :
277 1 : cmd = &req->cmd;
278 1 : cmd->opc = SPDK_NVME_OPC_NS_MANAGEMENT;
279 1 : cmd->cdw10_bits.ns_manage.sel = SPDK_NVME_NS_MANAGEMENT_DELETE;
280 1 : cmd->nsid = nsid;
281 :
282 1 : rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
283 :
284 1 : nvme_ctrlr_unlock(ctrlr);
285 1 : return rc;
286 1 : }
287 :
288 : int
289 1 : nvme_ctrlr_cmd_doorbell_buffer_config(struct spdk_nvme_ctrlr *ctrlr, uint64_t prp1, uint64_t prp2,
290 : spdk_nvme_cmd_cb cb_fn, void *cb_arg)
291 : {
292 : struct nvme_request *req;
293 : struct spdk_nvme_cmd *cmd;
294 : int rc;
295 :
296 1 : nvme_ctrlr_lock(ctrlr);
297 1 : req = nvme_allocate_request_null(ctrlr->adminq, cb_fn, cb_arg);
298 1 : if (req == NULL) {
299 0 : nvme_ctrlr_unlock(ctrlr);
300 0 : return -ENOMEM;
301 : }
302 :
303 1 : cmd = &req->cmd;
304 1 : cmd->opc = SPDK_NVME_OPC_DOORBELL_BUFFER_CONFIG;
305 1 : cmd->dptr.prp.prp1 = prp1;
306 1 : cmd->dptr.prp.prp2 = prp2;
307 :
308 1 : rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
309 :
310 1 : nvme_ctrlr_unlock(ctrlr);
311 1 : return rc;
312 1 : }
313 :
314 : int
315 1 : nvme_ctrlr_cmd_format(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid, struct spdk_nvme_format *format,
316 : spdk_nvme_cmd_cb cb_fn, void *cb_arg)
317 : {
318 : struct nvme_request *req;
319 : struct spdk_nvme_cmd *cmd;
320 : int rc;
321 :
322 1 : nvme_ctrlr_lock(ctrlr);
323 1 : req = nvme_allocate_request_null(ctrlr->adminq, cb_fn, cb_arg);
324 1 : if (req == NULL) {
325 0 : nvme_ctrlr_unlock(ctrlr);
326 0 : return -ENOMEM;
327 : }
328 :
329 1 : cmd = &req->cmd;
330 1 : cmd->opc = SPDK_NVME_OPC_FORMAT_NVM;
331 1 : cmd->nsid = nsid;
332 1 : memcpy(&cmd->cdw10, format, sizeof(uint32_t));
333 :
334 1 : rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
335 1 : nvme_ctrlr_unlock(ctrlr);
336 :
337 1 : return rc;
338 1 : }
339 :
340 : int
341 3 : spdk_nvme_ctrlr_cmd_set_feature(struct spdk_nvme_ctrlr *ctrlr, uint8_t feature,
342 : uint32_t cdw11, uint32_t cdw12, void *payload, uint32_t payload_size,
343 : spdk_nvme_cmd_cb cb_fn, void *cb_arg)
344 : {
345 : struct nvme_request *req;
346 : struct spdk_nvme_cmd *cmd;
347 : int rc;
348 :
349 3 : nvme_ctrlr_lock(ctrlr);
350 3 : req = nvme_allocate_request_user_copy(ctrlr->adminq, payload, payload_size, cb_fn, cb_arg,
351 : true);
352 3 : if (req == NULL) {
353 0 : nvme_ctrlr_unlock(ctrlr);
354 0 : return -ENOMEM;
355 : }
356 :
357 3 : cmd = &req->cmd;
358 3 : cmd->opc = SPDK_NVME_OPC_SET_FEATURES;
359 3 : cmd->cdw10_bits.set_features.fid = feature;
360 3 : cmd->cdw11 = cdw11;
361 3 : cmd->cdw12 = cdw12;
362 :
363 3 : rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
364 3 : nvme_ctrlr_unlock(ctrlr);
365 :
366 3 : return rc;
367 3 : }
368 :
369 : int
370 1 : spdk_nvme_ctrlr_cmd_get_feature(struct spdk_nvme_ctrlr *ctrlr, uint8_t feature,
371 : uint32_t cdw11, void *payload, uint32_t payload_size,
372 : spdk_nvme_cmd_cb cb_fn, void *cb_arg)
373 : {
374 : struct nvme_request *req;
375 : struct spdk_nvme_cmd *cmd;
376 : int rc;
377 :
378 1 : nvme_ctrlr_lock(ctrlr);
379 1 : req = nvme_allocate_request_user_copy(ctrlr->adminq, payload, payload_size, cb_fn, cb_arg,
380 : false);
381 1 : if (req == NULL) {
382 0 : nvme_ctrlr_unlock(ctrlr);
383 0 : return -ENOMEM;
384 : }
385 :
386 1 : cmd = &req->cmd;
387 1 : cmd->opc = SPDK_NVME_OPC_GET_FEATURES;
388 1 : cmd->cdw10_bits.get_features.fid = feature;
389 1 : cmd->cdw11 = cdw11;
390 :
391 1 : rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
392 1 : nvme_ctrlr_unlock(ctrlr);
393 :
394 1 : return rc;
395 1 : }
396 :
397 : int
398 1 : spdk_nvme_ctrlr_cmd_get_feature_ns(struct spdk_nvme_ctrlr *ctrlr, uint8_t feature,
399 : uint32_t cdw11, void *payload,
400 : uint32_t payload_size, spdk_nvme_cmd_cb cb_fn,
401 : void *cb_arg, uint32_t ns_id)
402 : {
403 : struct nvme_request *req;
404 : struct spdk_nvme_cmd *cmd;
405 : int rc;
406 :
407 1 : nvme_ctrlr_lock(ctrlr);
408 1 : req = nvme_allocate_request_user_copy(ctrlr->adminq, payload, payload_size, cb_fn, cb_arg,
409 : false);
410 1 : if (req == NULL) {
411 0 : nvme_ctrlr_unlock(ctrlr);
412 0 : return -ENOMEM;
413 : }
414 :
415 1 : cmd = &req->cmd;
416 1 : cmd->opc = SPDK_NVME_OPC_GET_FEATURES;
417 1 : cmd->cdw10_bits.get_features.fid = feature;
418 1 : cmd->cdw11 = cdw11;
419 1 : cmd->nsid = ns_id;
420 :
421 1 : rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
422 1 : nvme_ctrlr_unlock(ctrlr);
423 :
424 1 : return rc;
425 1 : }
426 :
427 : int
428 1 : spdk_nvme_ctrlr_cmd_set_feature_ns(struct spdk_nvme_ctrlr *ctrlr, uint8_t feature,
429 : uint32_t cdw11, uint32_t cdw12, void *payload,
430 : uint32_t payload_size, spdk_nvme_cmd_cb cb_fn,
431 : void *cb_arg, uint32_t ns_id)
432 : {
433 : struct nvme_request *req;
434 : struct spdk_nvme_cmd *cmd;
435 : int rc;
436 :
437 1 : nvme_ctrlr_lock(ctrlr);
438 1 : req = nvme_allocate_request_user_copy(ctrlr->adminq, payload, payload_size, cb_fn, cb_arg,
439 : true);
440 1 : if (req == NULL) {
441 0 : nvme_ctrlr_unlock(ctrlr);
442 0 : return -ENOMEM;
443 : }
444 :
445 1 : cmd = &req->cmd;
446 1 : cmd->opc = SPDK_NVME_OPC_SET_FEATURES;
447 1 : cmd->cdw10_bits.set_features.fid = feature;
448 1 : cmd->cdw11 = cdw11;
449 1 : cmd->cdw12 = cdw12;
450 1 : cmd->nsid = ns_id;
451 :
452 1 : rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
453 1 : nvme_ctrlr_unlock(ctrlr);
454 :
455 1 : return rc;
456 1 : }
457 :
458 : int
459 0 : nvme_ctrlr_cmd_set_num_queues(struct spdk_nvme_ctrlr *ctrlr,
460 : uint32_t num_queues, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
461 : {
462 : union spdk_nvme_feat_number_of_queues feat_num_queues;
463 :
464 0 : feat_num_queues.raw = 0;
465 0 : feat_num_queues.bits.nsqr = num_queues - 1;
466 0 : feat_num_queues.bits.ncqr = num_queues - 1;
467 :
468 0 : return spdk_nvme_ctrlr_cmd_set_feature(ctrlr, SPDK_NVME_FEAT_NUMBER_OF_QUEUES, feat_num_queues.raw,
469 : 0,
470 0 : NULL, 0, cb_fn, cb_arg);
471 : }
472 :
473 : int
474 0 : nvme_ctrlr_cmd_get_num_queues(struct spdk_nvme_ctrlr *ctrlr,
475 : spdk_nvme_cmd_cb cb_fn, void *cb_arg)
476 : {
477 0 : return spdk_nvme_ctrlr_cmd_get_feature(ctrlr, SPDK_NVME_FEAT_NUMBER_OF_QUEUES, 0, NULL, 0,
478 0 : cb_fn, cb_arg);
479 : }
480 :
481 : int
482 0 : nvme_ctrlr_cmd_set_async_event_config(struct spdk_nvme_ctrlr *ctrlr,
483 : union spdk_nvme_feat_async_event_configuration config, spdk_nvme_cmd_cb cb_fn,
484 : void *cb_arg)
485 : {
486 : uint32_t cdw11;
487 :
488 0 : cdw11 = config.raw;
489 0 : return spdk_nvme_ctrlr_cmd_set_feature(ctrlr, SPDK_NVME_FEAT_ASYNC_EVENT_CONFIGURATION, cdw11, 0,
490 : NULL, 0,
491 0 : cb_fn, cb_arg);
492 : }
493 :
494 : int
495 3 : nvme_ctrlr_cmd_set_host_id(struct spdk_nvme_ctrlr *ctrlr, void *host_id, uint32_t host_id_size,
496 : spdk_nvme_cmd_cb cb_fn, void *cb_arg)
497 : {
498 : union spdk_nvme_feat_host_identifier feat_host_identifier;
499 :
500 3 : feat_host_identifier.raw = 0;
501 3 : if (host_id_size == 16) {
502 : /* 128-bit extended host identifier */
503 1 : feat_host_identifier.bits.exhid = 1;
504 3 : } else if (host_id_size == 8) {
505 : /* 64-bit host identifier */
506 1 : feat_host_identifier.bits.exhid = 0;
507 1 : } else {
508 1 : SPDK_ERRLOG("Invalid host ID size %u\n", host_id_size);
509 1 : return -EINVAL;
510 : }
511 :
512 4 : return spdk_nvme_ctrlr_cmd_set_feature(ctrlr, SPDK_NVME_FEAT_HOST_IDENTIFIER,
513 2 : feat_host_identifier.raw, 0,
514 2 : host_id, host_id_size, cb_fn, cb_arg);
515 3 : }
516 :
/* Issue a Get Log Page admin command with full control over the raw cdw10,
 * cdw11, and cdw14 values in addition to the standard parameters.
 *
 * Returns -EINVAL for a zero-length payload, a non-dword-aligned offset, or
 * a non-zero offset when the controller does not support extended log page
 * offsets (LPA.EDLP); -ENOMEM if a request cannot be allocated; otherwise
 * the result of submitting the admin request.
 */
int
spdk_nvme_ctrlr_cmd_get_log_page_ext(struct spdk_nvme_ctrlr *ctrlr, uint8_t log_page,
				     uint32_t nsid, void *payload, uint32_t payload_size,
				     uint64_t offset, uint32_t cdw10,
				     uint32_t cdw11, uint32_t cdw14,
				     spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;
	uint32_t numd, numdl, numdu;
	uint32_t lpol, lpou;
	int rc;

	if (payload_size == 0) {
		return -EINVAL;
	}

	/* Log page offsets must be dword aligned. */
	if (offset & 3) {
		return -EINVAL;
	}

	/* NUMD is a 0's based dword count, split into lower/upper 16-bit halves. */
	numd = spdk_nvme_bytes_to_numd(payload_size);
	numdl = numd & 0xFFFFu;
	numdu = (numd >> 16) & 0xFFFFu;

	lpol = (uint32_t)offset;
	lpou = (uint32_t)(offset >> 32);

	nvme_ctrlr_lock(ctrlr);

	/* A non-zero offset requires extended log page offset support (LPA.EDLP). */
	if (offset && !ctrlr->cdata.lpa.edlp) {
		nvme_ctrlr_unlock(ctrlr);
		return -EINVAL;
	}

	req = nvme_allocate_request_user_copy(ctrlr->adminq,
					      payload, payload_size, cb_fn, cb_arg, false);
	if (req == NULL) {
		nvme_ctrlr_unlock(ctrlr);
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_GET_LOG_PAGE;
	cmd->nsid = nsid;
	/* The raw caller-supplied cdw10/cdw11 are written first, then the
	 * NUMD/LID fields are overlaid on top of them via the bitfields. */
	cmd->cdw10 = cdw10;
	cmd->cdw10_bits.get_log_page.numdl = numdl;
	cmd->cdw10_bits.get_log_page.lid = log_page;

	cmd->cdw11 = cdw11;
	cmd->cdw11_bits.get_log_page.numdu = numdu;
	cmd->cdw12 = lpol;
	cmd->cdw13 = lpou;
	cmd->cdw14 = cdw14;

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
	nvme_ctrlr_unlock(ctrlr);

	return rc;
}
577 :
578 : int
579 9 : spdk_nvme_ctrlr_cmd_get_log_page(struct spdk_nvme_ctrlr *ctrlr, uint8_t log_page,
580 : uint32_t nsid, void *payload, uint32_t payload_size,
581 : uint64_t offset, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
582 : {
583 18 : return spdk_nvme_ctrlr_cmd_get_log_page_ext(ctrlr, log_page, nsid, payload,
584 9 : payload_size, offset, 0, 0, 0, cb_fn, cb_arg);
585 : }
586 :
/* Try to resubmit one abort that was queued because the controller's Abort
 * Command Limit was reached.  Called after an outstanding abort completes
 * and frees a slot.  On submission failure the queued abort is completed
 * with a DNR internal-device-error status instead.
 */
static void
nvme_ctrlr_retry_queued_abort(struct spdk_nvme_ctrlr *ctrlr)
{
	struct nvme_request *next, *tmp;
	int rc;

	if (ctrlr->is_resetting || ctrlr->is_destructed || ctrlr->is_failed) {
		/* Don't resubmit aborts if ctrlr is failing */
		return;
	}

	if (spdk_nvme_ctrlr_get_admin_qp_failure_reason(ctrlr) != SPDK_NVME_QPAIR_FAILURE_NONE) {
		/* Don't resubmit aborts if admin qpair is failed */
		return;
	}

	/* Each iteration removes the head (which is `next` on the first pass);
	 * the loop only continues past one element if submission fails. */
	STAILQ_FOREACH_SAFE(next, &ctrlr->queued_aborts, stailq, tmp) {
		STAILQ_REMOVE_HEAD(&ctrlr->queued_aborts, stailq);
		ctrlr->outstanding_aborts++;
		rc = nvme_ctrlr_submit_admin_request(ctrlr, next);
		if (rc < 0) {
			SPDK_ERRLOG("Failed to submit queued abort.\n");
			/* Complete the failed abort with a DNR internal error. */
			memset(&next->cpl, 0, sizeof(next->cpl));
			next->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
			next->cpl.status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
			next->cpl.status.dnr = 1;
			nvme_complete_request(next->cb_fn, next->cb_arg, next->qpair, next, &next->cpl);
		} else {
			/* If the first abort succeeds, stop iterating. */
			break;
		}
	}
}
620 :
621 : static int
622 2 : _nvme_ctrlr_submit_abort_request(struct spdk_nvme_ctrlr *ctrlr,
623 : struct nvme_request *req)
624 : {
625 : /* ACL is a 0's based value. */
626 2 : if (ctrlr->outstanding_aborts >= ctrlr->cdata.acl + 1U) {
627 0 : STAILQ_INSERT_TAIL(&ctrlr->queued_aborts, req, stailq);
628 0 : return 0;
629 : } else {
630 2 : ctrlr->outstanding_aborts++;
631 2 : return nvme_ctrlr_submit_admin_request(ctrlr, req);
632 : }
633 2 : }
634 :
635 : static void
636 0 : nvme_ctrlr_cmd_abort_cpl(void *ctx, const struct spdk_nvme_cpl *cpl)
637 : {
638 0 : struct nvme_request *req = ctx;
639 : struct spdk_nvme_ctrlr *ctrlr;
640 :
641 0 : ctrlr = req->qpair->ctrlr;
642 :
643 0 : assert(ctrlr->outstanding_aborts > 0);
644 0 : ctrlr->outstanding_aborts--;
645 0 : nvme_ctrlr_retry_queued_abort(ctrlr);
646 :
647 0 : req->user_cb_fn(req->user_cb_arg, cpl);
648 0 : }
649 :
650 : int
651 3 : spdk_nvme_ctrlr_cmd_abort(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair,
652 : uint16_t cid, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
653 : {
654 : int rc;
655 : struct nvme_request *req;
656 : struct spdk_nvme_cmd *cmd;
657 :
658 3 : if (qpair == NULL) {
659 2 : qpair = ctrlr->adminq;
660 2 : }
661 :
662 3 : nvme_ctrlr_lock(ctrlr);
663 3 : req = nvme_allocate_request_null(ctrlr->adminq, nvme_ctrlr_cmd_abort_cpl, NULL);
664 3 : if (req == NULL) {
665 1 : nvme_ctrlr_unlock(ctrlr);
666 1 : return -ENOMEM;
667 : }
668 2 : req->cb_arg = req;
669 2 : req->user_cb_fn = cb_fn;
670 2 : req->user_cb_arg = cb_arg;
671 :
672 2 : cmd = &req->cmd;
673 2 : cmd->opc = SPDK_NVME_OPC_ABORT;
674 2 : cmd->cdw10_bits.abort.sqid = qpair->id;
675 2 : cmd->cdw10_bits.abort.cid = cid;
676 :
677 2 : rc = _nvme_ctrlr_submit_abort_request(ctrlr, req);
678 :
679 2 : nvme_ctrlr_unlock(ctrlr);
680 2 : return rc;
681 3 : }
682 :
/* Completion handler for each child abort created by
 * spdk_nvme_ctrlr_cmd_abort_ext().  Releases the abort slot, detaches the
 * child from its parent, records any abort failure in the parent's status,
 * and completes the parent once the last child has finished.
 */
static void
nvme_complete_abort_request(void *ctx, const struct spdk_nvme_cpl *cpl)
{
	struct nvme_request *req = ctx;
	struct nvme_request *parent = req->parent;
	struct spdk_nvme_ctrlr *ctrlr;

	ctrlr = req->qpair->ctrlr;

	assert(ctrlr->outstanding_aborts > 0);
	ctrlr->outstanding_aborts--;
	/* A slot freed up; try to resubmit a queued abort. */
	nvme_ctrlr_retry_queued_abort(ctrlr);

	nvme_request_remove_child(parent, req);

	if (!spdk_nvme_cpl_is_abort_success(cpl)) {
		/* Record in cdw0 bit 0 that at least one abort did not succeed. */
		parent->parent_status.cdw0 |= 1U;
	}

	if (parent->num_children == 0) {
		nvme_complete_request(parent->cb_fn, parent->cb_arg, parent->qpair,
				      parent, &parent->parent_status);
	}
}
707 :
/* Iterator callback for spdk_nvme_ctrlr_cmd_abort_ext(): for each
 * outstanding request whose callback context matches the one being aborted,
 * allocate a child ABORT command and attach it to the parent request.
 * Returns 0 on match-miss or success, -ENOMEM if allocation fails.
 */
static int
nvme_request_add_abort(struct nvme_request *req, void *arg)
{
	struct nvme_request *parent = arg;
	struct nvme_request *child;
	void *cmd_cb_arg;

	/* The parent stashed the target callback context in user_cb_arg. */
	cmd_cb_arg = parent->user_cb_arg;

	if (!nvme_request_abort_match(req, cmd_cb_arg)) {
		return 0;
	}

	child = nvme_allocate_request_null(parent->qpair->ctrlr->adminq,
					   nvme_complete_abort_request, NULL);
	if (child == NULL) {
		return -ENOMEM;
	}

	/* The completion handler receives the child request itself. */
	child->cb_arg = child;

	child->cmd.opc = SPDK_NVME_OPC_ABORT;
	/* Copy SQID from the parent. */
	child->cmd.cdw10_bits.abort.sqid = parent->cmd.cdw10_bits.abort.sqid;
	child->cmd.cdw10_bits.abort.cid = req->cmd.cid;

	child->parent = parent;

	TAILQ_INSERT_TAIL(&parent->children, child, child_tailq);
	parent->num_children++;

	return 0;
}
741 :
/* Abort every outstanding and queued request on qpair whose callback context
 * is cmd_cb_arg.  A "parent" null request tracks one child ABORT command per
 * matching outstanding request; cb_fn fires once all children complete (or
 * synchronously if only queued requests were aborted).
 *
 * Returns 0 on success (callback pending or already invoked), -EINVAL if
 * cmd_cb_arg is NULL, -ENOMEM on allocation failure, and -ENOENT if no
 * matching request was found at all.
 */
int
spdk_nvme_ctrlr_cmd_abort_ext(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair,
			      void *cmd_cb_arg,
			      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	int rc = 0;
	struct nvme_request *parent, *child, *tmp;
	bool child_failed = false;
	int aborted = 0;

	if (cmd_cb_arg == NULL) {
		return -EINVAL;
	}

	nvme_ctrlr_lock(ctrlr);

	if (qpair == NULL) {
		qpair = ctrlr->adminq;
	}

	/* The parent is never submitted; it only aggregates child completions. */
	parent = nvme_allocate_request_null(ctrlr->adminq, cb_fn, cb_arg);
	if (parent == NULL) {
		nvme_ctrlr_unlock(ctrlr);

		return -ENOMEM;
	}

	TAILQ_INIT(&parent->children);
	parent->num_children = 0;

	parent->cmd.opc = SPDK_NVME_OPC_ABORT;
	memset(&parent->parent_status, 0, sizeof(struct spdk_nvme_cpl));

	/* Hold SQID that the requests to abort are associated with.
	 * This will be copied to the children.
	 *
	 * CID is not set here because the parent is not submitted directly
	 * and CID is not determined until request to abort is found.
	 */
	parent->cmd.cdw10_bits.abort.sqid = qpair->id;

	/* This is used to find request to abort. */
	parent->user_cb_arg = cmd_cb_arg;

	/* Add an abort request for each outstanding request which has cmd_cb_arg
	 * as its callback context.
	 */
	rc = nvme_transport_qpair_iterate_requests(qpair, nvme_request_add_abort, parent);
	if (rc != 0) {
		/* Free abort requests already added. */
		child_failed = true;
	}

	TAILQ_FOREACH_SAFE(child, &parent->children, child_tailq, tmp) {
		if (spdk_likely(!child_failed)) {
			rc = _nvme_ctrlr_submit_abort_request(ctrlr, child);
			if (spdk_unlikely(rc != 0)) {
				child_failed = true;
			}
		} else {
			/* Free remaining abort requests. */
			nvme_request_remove_child(parent, child);
			nvme_free_request(child);
		}
	}

	if (spdk_likely(!child_failed)) {
		/* There is no error so far. Abort requests were submitted successfully
		 * or there was no outstanding request to abort.
		 *
		 * Hence abort queued requests which has cmd_cb_arg as its callback
		 * context next.
		 */
		aborted = nvme_qpair_abort_queued_reqs_with_cbarg(qpair, cmd_cb_arg);
		if (parent->num_children == 0) {
			/* There was no outstanding request to abort. */
			if (aborted > 0) {
				/* The queued requests were successfully aborted. Hence
				 * complete the parent request with success synchronously.
				 */
				nvme_complete_request(parent->cb_fn, parent->cb_arg, parent->qpair,
						      parent, &parent->parent_status);
			} else {
				/* There was no queued request to abort. */
				rc = -ENOENT;
			}
		}
	} else {
		/* Failed to add or submit abort request. */
		if (parent->num_children != 0) {
			/* Return success since we must wait for those children
			 * to complete but set the parent request to failure.
			 */
			parent->parent_status.cdw0 |= 1U;
			rc = 0;
		}
	}

	/* On any remaining error, no child holds a reference to the parent. */
	if (rc != 0) {
		nvme_free_request(parent);
	}

	nvme_ctrlr_unlock(ctrlr);
	return rc;
}
847 :
848 : int
849 1 : nvme_ctrlr_cmd_fw_commit(struct spdk_nvme_ctrlr *ctrlr,
850 : const struct spdk_nvme_fw_commit *fw_commit,
851 : spdk_nvme_cmd_cb cb_fn, void *cb_arg)
852 : {
853 : struct nvme_request *req;
854 : struct spdk_nvme_cmd *cmd;
855 : int rc;
856 :
857 1 : nvme_ctrlr_lock(ctrlr);
858 1 : req = nvme_allocate_request_null(ctrlr->adminq, cb_fn, cb_arg);
859 1 : if (req == NULL) {
860 0 : nvme_ctrlr_unlock(ctrlr);
861 0 : return -ENOMEM;
862 : }
863 :
864 1 : cmd = &req->cmd;
865 1 : cmd->opc = SPDK_NVME_OPC_FIRMWARE_COMMIT;
866 1 : memcpy(&cmd->cdw10, fw_commit, sizeof(uint32_t));
867 :
868 1 : rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
869 1 : nvme_ctrlr_unlock(ctrlr);
870 :
871 1 : return rc;
872 :
873 1 : }
874 :
875 : int
876 1 : nvme_ctrlr_cmd_fw_image_download(struct spdk_nvme_ctrlr *ctrlr,
877 : uint32_t size, uint32_t offset, void *payload,
878 : spdk_nvme_cmd_cb cb_fn, void *cb_arg)
879 : {
880 : struct nvme_request *req;
881 : struct spdk_nvme_cmd *cmd;
882 : int rc;
883 :
884 1 : nvme_ctrlr_lock(ctrlr);
885 1 : req = nvme_allocate_request_user_copy(ctrlr->adminq, payload, size, cb_fn, cb_arg, true);
886 1 : if (req == NULL) {
887 0 : nvme_ctrlr_unlock(ctrlr);
888 0 : return -ENOMEM;
889 : }
890 :
891 1 : cmd = &req->cmd;
892 1 : cmd->opc = SPDK_NVME_OPC_FIRMWARE_IMAGE_DOWNLOAD;
893 1 : cmd->cdw10 = spdk_nvme_bytes_to_numd(size);
894 1 : cmd->cdw11 = offset >> 2;
895 :
896 1 : rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
897 1 : nvme_ctrlr_unlock(ctrlr);
898 :
899 1 : return rc;
900 1 : }
901 :
902 : int
903 2 : spdk_nvme_ctrlr_cmd_security_receive(struct spdk_nvme_ctrlr *ctrlr, uint8_t secp,
904 : uint16_t spsp, uint8_t nssf, void *payload,
905 : uint32_t payload_size, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
906 : {
907 : struct nvme_request *req;
908 : struct spdk_nvme_cmd *cmd;
909 : int rc;
910 :
911 2 : nvme_ctrlr_lock(ctrlr);
912 4 : req = nvme_allocate_request_user_copy(ctrlr->adminq, payload, payload_size,
913 2 : cb_fn, cb_arg, false);
914 2 : if (req == NULL) {
915 1 : nvme_ctrlr_unlock(ctrlr);
916 1 : return -ENOMEM;
917 : }
918 :
919 1 : cmd = &req->cmd;
920 1 : cmd->opc = SPDK_NVME_OPC_SECURITY_RECEIVE;
921 1 : cmd->cdw10_bits.sec_send_recv.nssf = nssf;
922 1 : cmd->cdw10_bits.sec_send_recv.spsp0 = (uint8_t)spsp;
923 1 : cmd->cdw10_bits.sec_send_recv.spsp1 = (uint8_t)(spsp >> 8);
924 1 : cmd->cdw10_bits.sec_send_recv.secp = secp;
925 1 : cmd->cdw11 = payload_size;
926 :
927 1 : rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
928 1 : nvme_ctrlr_unlock(ctrlr);
929 :
930 1 : return rc;
931 2 : }
932 :
933 : int
934 2 : spdk_nvme_ctrlr_cmd_security_send(struct spdk_nvme_ctrlr *ctrlr, uint8_t secp,
935 : uint16_t spsp, uint8_t nssf, void *payload,
936 : uint32_t payload_size, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
937 : {
938 : struct nvme_request *req;
939 : struct spdk_nvme_cmd *cmd;
940 : int rc;
941 :
942 2 : nvme_ctrlr_lock(ctrlr);
943 4 : req = nvme_allocate_request_user_copy(ctrlr->adminq, payload, payload_size,
944 2 : cb_fn, cb_arg, true);
945 2 : if (req == NULL) {
946 1 : nvme_ctrlr_unlock(ctrlr);
947 1 : return -ENOMEM;
948 : }
949 :
950 1 : cmd = &req->cmd;
951 1 : cmd->opc = SPDK_NVME_OPC_SECURITY_SEND;
952 1 : cmd->cdw10_bits.sec_send_recv.nssf = nssf;
953 1 : cmd->cdw10_bits.sec_send_recv.spsp0 = (uint8_t)spsp;
954 1 : cmd->cdw10_bits.sec_send_recv.spsp1 = (uint8_t)(spsp >> 8);
955 1 : cmd->cdw10_bits.sec_send_recv.secp = secp;
956 1 : cmd->cdw11 = payload_size;
957 :
958 1 : rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
959 1 : nvme_ctrlr_unlock(ctrlr);
960 :
961 1 : return rc;
962 2 : }
963 :
964 : int
965 1 : nvme_ctrlr_cmd_sanitize(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
966 : struct spdk_nvme_sanitize *sanitize, uint32_t cdw11,
967 : spdk_nvme_cmd_cb cb_fn, void *cb_arg)
968 : {
969 : struct nvme_request *req;
970 : struct spdk_nvme_cmd *cmd;
971 : int rc;
972 :
973 1 : nvme_ctrlr_lock(ctrlr);
974 1 : req = nvme_allocate_request_null(ctrlr->adminq, cb_fn, cb_arg);
975 1 : if (req == NULL) {
976 0 : nvme_ctrlr_unlock(ctrlr);
977 0 : return -ENOMEM;
978 : }
979 :
980 1 : cmd = &req->cmd;
981 1 : cmd->opc = SPDK_NVME_OPC_SANITIZE;
982 1 : cmd->nsid = nsid;
983 1 : cmd->cdw11 = cdw11;
984 1 : memcpy(&cmd->cdw10, sanitize, sizeof(cmd->cdw10));
985 :
986 1 : rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
987 1 : nvme_ctrlr_unlock(ctrlr);
988 :
989 1 : return rc;
990 1 : }
991 :
992 : static int
993 2 : nvme_ctrlr_cmd_directive(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
994 : uint32_t doper, uint32_t dtype, uint32_t dspec,
995 : void *payload, uint32_t payload_size, uint32_t cdw12,
996 : uint32_t cdw13, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
997 : uint16_t opc_type, bool host_to_ctrlr)
998 : {
999 2 : struct nvme_request *req = NULL;
1000 2 : struct spdk_nvme_cmd *cmd = NULL;
1001 : int rc;
1002 :
1003 2 : nvme_ctrlr_lock(ctrlr);
1004 4 : req = nvme_allocate_request_user_copy(ctrlr->adminq, payload, payload_size,
1005 2 : cb_fn, cb_arg, host_to_ctrlr);
1006 2 : if (req == NULL) {
1007 0 : nvme_ctrlr_unlock(ctrlr);
1008 0 : return -ENOMEM;
1009 : }
1010 2 : cmd = &req->cmd;
1011 2 : cmd->opc = opc_type;
1012 2 : cmd->nsid = nsid;
1013 :
1014 2 : if ((payload_size >> 2) > 0) {
1015 0 : cmd->cdw10 = (payload_size >> 2) - 1;
1016 0 : }
1017 2 : cmd->cdw11_bits.directive.doper = doper;
1018 2 : cmd->cdw11_bits.directive.dtype = dtype;
1019 2 : cmd->cdw11_bits.directive.dspec = dspec;
1020 2 : cmd->cdw12 = cdw12;
1021 2 : cmd->cdw13 = cdw13;
1022 2 : rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
1023 2 : nvme_ctrlr_unlock(ctrlr);
1024 :
1025 2 : return rc;
1026 2 : }
1027 :
1028 : int
1029 1 : spdk_nvme_ctrlr_cmd_directive_send(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
1030 : uint32_t doper, uint32_t dtype, uint32_t dspec,
1031 : void *payload, uint32_t payload_size, uint32_t cdw12,
1032 : uint32_t cdw13, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
1033 : {
1034 2 : return nvme_ctrlr_cmd_directive(ctrlr, nsid, doper, dtype, dspec,
1035 1 : payload, payload_size, cdw12, cdw13, cb_fn, cb_arg,
1036 : SPDK_NVME_OPC_DIRECTIVE_SEND, true);
1037 : }
1038 :
1039 : int
1040 1 : spdk_nvme_ctrlr_cmd_directive_receive(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
1041 : uint32_t doper, uint32_t dtype, uint32_t dspec,
1042 : void *payload, uint32_t payload_size, uint32_t cdw12,
1043 : uint32_t cdw13, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
1044 : {
1045 2 : return nvme_ctrlr_cmd_directive(ctrlr, nsid, doper, dtype, dspec,
1046 1 : payload, payload_size, cdw12, cdw13, cb_fn, cb_arg,
1047 : SPDK_NVME_OPC_DIRECTIVE_RECEIVE, false);
1048 : }
|