share/examples/scsi_target/scsi_target.c
/*
 * SCSI Disk Emulator
 *
 * Copyright (c) 2002 Nate Lawson.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */
#include <sys/types.h>
#include <ctype.h>
#include <errno.h>
#include <err.h>
#include <fcntl.h>
#include <signal.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sysexits.h>
#include <unistd.h>
#include <aio.h>
#include <assert.h>
#include <sys/stat.h>
#include <sys/queue.h>
#include <sys/event.h>
#include <sys/param.h>
#include <sys/disk.h>
#include <cam/cam_queue.h>
#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_targetio.h>
#include <cam/scsi/scsi_message.h>
#include "scsi_target.h"
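/*
 * SECTOR_SIZE, MAX_EVENTS, MAX_INITIATORS, OFF_FMT, the atio_descr and
 * ctio_descr structures, and the tcmd_* routines used below are declared
 * in the companion header, scsi_target.h.
 */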
/* Maximum amount to transfer per CTIO */
#define MAX_XFER	MAXPHYS
/* Maximum number of allocated CTIOs */
#define MAX_CTIOS	64
/* Maximum sector size for emulated volume */
#define MAX_SECTOR	32768

/* Global variables */
int		debug;
int		notaio = 0;
off_t		volume_size;
u_int		sector_size;
size_t		buf_size;
/* Local variables */
static int	targ_fd;
static int	kq_fd;
static int	file_fd;
static int	num_ctios;
static struct	ccb_queue pending_queue;
static struct	ccb_queue work_queue;
static struct	ioc_enable_lun ioc_enlun = {
	CAM_BUS_WILDCARD,
	CAM_TARGET_WILDCARD,
	CAM_LUN_WILDCARD
};
/* Local functions */
static void		cleanup(void);
static int		init_ccbs(void);
static void		request_loop(void);
static void		handle_read(void);
/* static int		work_atio(struct ccb_accept_tio *); */
static void		queue_io(struct ccb_scsiio *);
static int		run_queue(struct ccb_accept_tio *);
static int		work_inot(struct ccb_immed_notify *);
static struct ccb_scsiio *
			get_ctio(void);
/* static void		free_ccb(union ccb *); */
static cam_status	get_sim_flags(u_int16_t *);
static void		rel_simq(void);
static void		abort_all_pending(void);
static void		usage(void);
int
main(int argc, char *argv[])
{
	int ch, unit;
	char *file_name, targname[16];
	u_int16_t req_flags, sim_flags;
	off_t user_size;

	/* Initialize */
	debug = 0;
	req_flags = sim_flags = 0;
	user_size = 0;
	targ_fd = file_fd = kq_fd = -1;
	num_ctios = 0;
	sector_size = SECTOR_SIZE;
	buf_size = DFLTPHYS;

	/* Prepare resource pools */
	TAILQ_INIT(&pending_queue);
	TAILQ_INIT(&work_queue);
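	/* Parse command-line options; see usage() below for a summary. */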
	while ((ch = getopt(argc, argv, "AdSTYb:c:s:W:")) != -1) {
		switch(ch) {
		case 'A':
			req_flags |= SID_Addr16;
			break;
		case 'd':
			debug = 1;
			break;
		case 'S':
			req_flags |= SID_Sync;
			break;
		case 'T':
			req_flags |= SID_CmdQue;
			break;
		case 'b':
			buf_size = atoi(optarg);
			if (buf_size < 256 || buf_size > MAX_XFER)
				errx(1, "Unreasonable buf size: %s", optarg);
			break;
		case 'c':
			sector_size = atoi(optarg);
			if (sector_size < 512 || sector_size > MAX_SECTOR)
				errx(1, "Unreasonable sector size: %s", optarg);
			break;
		case 's':
		{
			int last, shift = 0;

			last = strlen(optarg) - 1;
			if (last > 0) {
				switch (tolower(optarg[last])) {
				case 'e':
					shift += 10;
					/* FALLTHROUGH */
				case 'p':
					shift += 10;
					/* FALLTHROUGH */
				case 't':
					shift += 10;
					/* FALLTHROUGH */
				case 'g':
					shift += 10;
					/* FALLTHROUGH */
				case 'm':
					shift += 10;
					/* FALLTHROUGH */
				case 'k':
					shift += 10;
					optarg[last] = 0;
					break;
				}
			}
			user_size = strtoll(optarg, (char **)NULL, /*base*/10);
			user_size <<= shift;
			if (user_size < 0)
				errx(1, "Unreasonable volume size: %s", optarg);
			break;
		}
		case 'W':
			req_flags &= ~(SID_WBus16 | SID_WBus32);
			switch (atoi(optarg)) {
			case 8:
				/* Leave req_flags zeroed */
				break;
			case 16:
				req_flags |= SID_WBus16;
				break;
			case 32:
				req_flags |= SID_WBus32;
				break;
			default:
				warnx("Width %s not supported", optarg);
				usage();
				/* NOTREACHED */
			}
			break;
		case 'Y':
			notaio = 1;
			break;
		default:
			usage();
			/* NOTREACHED */
		}
	}
	argc -= optind;
	argv += optind;

	if (argc != 2)
		usage();
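	/* Positional arguments: "bus:target:lun" and the backing store file. */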
	sscanf(argv[0], "%u:%u:%u", &ioc_enlun.path_id, &ioc_enlun.target_id,
	       &ioc_enlun.lun_id);
	file_name = argv[1];

	if (ioc_enlun.path_id == CAM_BUS_WILDCARD ||
	    ioc_enlun.target_id == CAM_TARGET_WILDCARD ||
	    ioc_enlun.lun_id == CAM_LUN_WILDCARD) {
		warnx("Incomplete target path specified");
		usage();
		/* NOTREACHED */
	}
	/* We don't support any vendor-specific commands */
	ioc_enlun.grp6_len = 0;
	ioc_enlun.grp7_len = 0;
	/* Open backing store for IO */
	file_fd = open(file_name, O_RDWR);
	if (file_fd < 0)
		err(1, "open backing store file");

	/* Check backing store size or use the size user gave us */
	if (user_size == 0) {
		struct stat st;

		if (fstat(file_fd, &st) < 0)
			err(1, "fstat file");
#if __FreeBSD_version >= 500000
		if ((st.st_mode & S_IFCHR) != 0) {
			/* raw device */
			off_t mediasize;
			if (ioctl(file_fd, DIOCGMEDIASIZE, &mediasize) < 0)
				err(1, "DIOCGMEDIASIZE");

			/* XXX get sector size by ioctl()?? */
			volume_size = mediasize / sector_size;
		} else
#endif
			volume_size = st.st_size / sector_size;
	} else {
		volume_size = user_size / sector_size;
	}
	if (debug)
		warnx("volume_size: %d bytes x " OFF_FMT " sectors",
		    sector_size, volume_size);

	if (volume_size <= 0)
		errx(1, "volume must be larger than %d", sector_size);
	if (notaio == 0) {
		struct aiocb aio, *aiop;

		/* See if we have working AIO support */
		memset(&aio, 0, sizeof(aio));
		aio.aio_buf = malloc(sector_size);
		if (aio.aio_buf == NULL)
			err(1, "malloc");
		aio.aio_fildes = file_fd;
		aio.aio_offset = 0;
		aio.aio_nbytes = sector_size;
		signal(SIGSYS, SIG_IGN);
		if (aio_read(&aio) != 0) {
			printf("AIO support is not available; switching to"
			       " single-threaded mode.\n");
			notaio = 1;
		} else {
			if (aio_waitcomplete(&aiop, NULL) != sector_size)
				err(1, "aio_waitcomplete");
			assert(aiop == &aio);
			signal(SIGSYS, SIG_DFL);
		}
		free((void *)aio.aio_buf);
		if (debug && notaio == 0)
			warnx("aio support tested ok");
	}
	/* Go through all the control devices and find one that isn't busy. */
	unit = 0;
	do {
		snprintf(targname, sizeof(targname), "/dev/targ%d", unit++);
		targ_fd = open(targname, O_RDWR);
	} while (targ_fd < 0 && errno == EBUSY);

	if (targ_fd < 0)
		err(1, "Tried to open %d devices, none available", unit);

	/* The first three are handled by kevent() later */
	signal(SIGHUP, SIG_IGN);
	signal(SIGINT, SIG_IGN);
	signal(SIGTERM, SIG_IGN);
	signal(SIGPROF, SIG_IGN);
	signal(SIGALRM, SIG_IGN);
	signal(SIGSTOP, SIG_IGN);
	signal(SIGTSTP, SIG_IGN);

	/* Register a cleanup handler to run when exiting */
	atexit(cleanup);

	/* Enable listening on the specified LUN */
	if (ioctl(targ_fd, TARGIOCENABLE, &ioc_enlun) != 0)
		err(1, "TARGIOCENABLE");

	/* Enable debugging if requested */
	if (debug) {
		if (ioctl(targ_fd, TARGIOCDEBUG, &debug) != 0)
			warnx("TARGIOCDEBUG");
	}

	/* Set up inquiry data according to what the SIM supports */
	if (get_sim_flags(&sim_flags) != CAM_REQ_CMP)
		errx(1, "get_sim_flags");
	if (tcmd_init(req_flags, sim_flags) != 0)
		errx(1, "Initializing tcmd subsystem failed");

	/* Queue ATIOs and INOTs on descriptor */
	if (init_ccbs() != 0)
		errx(1, "init_ccbs failed");

	if (debug)
		warnx("main loop beginning");
	request_loop();

	exit(0);
}
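/* atexit() handler: disable debugging and the LUN, then free any queued CCBs. */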
static void
cleanup()
{
	struct ccb_hdr *ccb_h;

	if (debug) {
		warnx("cleanup called");
		debug = 0;
		ioctl(targ_fd, TARGIOCDEBUG, &debug);
	}
	ioctl(targ_fd, TARGIOCDISABLE, NULL);
	close(targ_fd);

	while ((ccb_h = TAILQ_FIRST(&pending_queue)) != NULL) {
		TAILQ_REMOVE(&pending_queue, ccb_h, periph_links.tqe);
		free_ccb((union ccb *)ccb_h);
	}
	while ((ccb_h = TAILQ_FIRST(&work_queue)) != NULL) {
		TAILQ_REMOVE(&work_queue, ccb_h, periph_links.tqe);
		free_ccb((union ccb *)ccb_h);
	}

	if (kq_fd != -1)
		close(kq_fd);
}
/* Allocate ATIOs/INOTs and queue on HBA */
static int
init_ccbs()
{
	int i;

	for (i = 0; i < MAX_INITIATORS; i++) {
		struct ccb_accept_tio *atio;
		struct atio_descr *a_descr;
		struct ccb_immed_notify *inot;

		atio = (struct ccb_accept_tio *)malloc(sizeof(*atio));
		if (atio == NULL) {
			warn("malloc ATIO");
			return (-1);
		}
		a_descr = (struct atio_descr *)malloc(sizeof(*a_descr));
		if (a_descr == NULL) {
			free(atio);
			warn("malloc atio_descr");
			return (-1);
		}
		atio->ccb_h.func_code = XPT_ACCEPT_TARGET_IO;
		atio->ccb_h.targ_descr = a_descr;
		send_ccb((union ccb *)atio, /*priority*/1);

		inot = (struct ccb_immed_notify *)malloc(sizeof(*inot));
		if (inot == NULL) {
			warn("malloc INOT");
			return (-1);
		}
		inot->ccb_h.func_code = XPT_IMMED_NOTIFY;
		send_ccb((union ccb *)inot, /*priority*/1);
	}

	return (0);
}
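/*
 * Main event loop: wait on the kqueue for signals, readable CCBs from the
 * target device, and AIO completions, then service the work queue one CCB
 * at a time.
 */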
static void
request_loop()
{
	struct kevent events[MAX_EVENTS];
	struct timespec ts, *tptr;
	int quit;

	/* Register kqueue for event notification */
	if ((kq_fd = kqueue()) < 0)
		err(1, "init kqueue");

	/* Set up some default events */
	EV_SET(&events[0], SIGHUP, EVFILT_SIGNAL, EV_ADD|EV_ENABLE, 0, 0, 0);
	EV_SET(&events[1], SIGINT, EVFILT_SIGNAL, EV_ADD|EV_ENABLE, 0, 0, 0);
	EV_SET(&events[2], SIGTERM, EVFILT_SIGNAL, EV_ADD|EV_ENABLE, 0, 0, 0);
	EV_SET(&events[3], targ_fd, EVFILT_READ, EV_ADD|EV_ENABLE, 0, 0, 0);
	if (kevent(kq_fd, events, 4, NULL, 0, NULL) < 0)
		err(1, "kevent signal registration");

	ts.tv_sec = 0;
	ts.tv_nsec = 0;
	tptr = NULL;
	quit = 0;

	/* Loop until user signal */
	while (quit == 0) {
		int retval, i, oo;
		struct ccb_hdr *ccb_h;

		/* Check for the next signal, read ready, or AIO completion */
		retval = kevent(kq_fd, NULL, 0, events, MAX_EVENTS, tptr);
		if (retval < 0) {
			if (errno == EINTR) {
				if (debug)
					warnx("EINTR, looping");
				continue;
			}
			else {
				err(1, "kevent failed");
			}
		} else if (retval > MAX_EVENTS) {
			errx(1, "kevent returned more events than allocated?");
		}

		/* Process all received events. */
		for (oo = i = 0; i < retval; i++) {
			if ((events[i].flags & EV_ERROR) != 0)
				errx(1, "kevent registration failed");

			switch (events[i].filter) {
			case EVFILT_READ:
				if (debug)
					warnx("read ready");
				handle_read();
				break;
			case EVFILT_AIO:
			{
				struct ccb_scsiio *ctio;
				struct ctio_descr *c_descr;
				if (debug)
					warnx("aio ready");

				ctio = (struct ccb_scsiio *)events[i].udata;
				c_descr = (struct ctio_descr *)
					  ctio->ccb_h.targ_descr;
				c_descr->event = AIO_DONE;
				/* Queue on the appropriate ATIO */
				queue_io(ctio);
				/* Process any queued completions. */
				oo += run_queue(c_descr->atio);
				break;
			}
			case EVFILT_SIGNAL:
				if (debug)
					warnx("signal ready, setting quit");
				quit = 1;
				break;
			default:
				warnx("unknown event %d", events[i].filter);
				break;
			}

			if (debug)
				warnx("event %d done", events[i].filter);
		}

		if (oo) {
			tptr = &ts;
			continue;
		}

		/* Grab the first CCB and perform one work unit. */
		if ((ccb_h = TAILQ_FIRST(&work_queue)) != NULL) {
			union ccb *ccb;

			ccb = (union ccb *)ccb_h;
			switch (ccb_h->func_code) {
			case XPT_ACCEPT_TARGET_IO:
				/* Start one more transfer. */
				retval = work_atio(&ccb->atio);
				break;
			case XPT_IMMED_NOTIFY:
				retval = work_inot(&ccb->cin);
				break;
			default:
				warnx("Unhandled ccb type %#x on workq",
				      ccb_h->func_code);
				abort();
				/* NOTREACHED */
			}

			/* Assume work function handled the exception */
			if ((ccb_h->status & CAM_DEV_QFRZN) != 0) {
				if (debug) {
					warnx("Queue frozen receiving CCB, "
					      "releasing");
				}
				rel_simq();
			}

			/* No more work needed for this command. */
			if (retval == 0) {
				TAILQ_REMOVE(&work_queue, ccb_h,
					     periph_links.tqe);
			}
		}

		/*
		 * Poll for new events (i.e. completions) while we
		 * are processing CCBs on the work_queue. Once it's
		 * empty, use an infinite wait.
		 */
		if (!TAILQ_EMPTY(&work_queue))
			tptr = &ts;
		else
			tptr = NULL;
	}
}
/* CCBs are ready from the kernel */
static void
handle_read()
{
	union ccb *ccb_array[MAX_INITIATORS], *ccb;
	int ccb_count, i, oo;

	ccb_count = read(targ_fd, ccb_array, sizeof(ccb_array));
	if (ccb_count <= 0) {
		warn("read ccb ptrs");
		return;
	}
	ccb_count /= sizeof(union ccb *);
	if (ccb_count < 1) {
		warnx("truncated read ccb ptr?");
		return;
	}

	for (i = 0; i < ccb_count; i++) {
		ccb = ccb_array[i];
		TAILQ_REMOVE(&pending_queue, &ccb->ccb_h, periph_links.tqe);

		switch (ccb->ccb_h.func_code) {
		case XPT_ACCEPT_TARGET_IO:
		{
			struct ccb_accept_tio *atio;
			struct atio_descr *a_descr;

			/* Initialize ATIO descr for this transaction */
			atio = &ccb->atio;
			a_descr = (struct atio_descr *)atio->ccb_h.targ_descr;
			bzero(a_descr, sizeof(*a_descr));
			TAILQ_INIT(&a_descr->cmplt_io);
			a_descr->flags = atio->ccb_h.flags &
				(CAM_DIS_DISCONNECT | CAM_TAG_ACTION_VALID);
			/* XXX add a_descr->priority */
			if ((atio->ccb_h.flags & CAM_CDB_POINTER) == 0)
				a_descr->cdb = atio->cdb_io.cdb_bytes;
			else
				a_descr->cdb = atio->cdb_io.cdb_ptr;

			/* ATIOs are processed in FIFO order */
			TAILQ_INSERT_TAIL(&work_queue, &ccb->ccb_h,
					  periph_links.tqe);
			break;
		}
		case XPT_CONT_TARGET_IO:
		{
			struct ccb_scsiio *ctio;
			struct ctio_descr *c_descr;

			ctio = &ccb->ctio;
			c_descr = (struct ctio_descr *)ctio->ccb_h.targ_descr;
			c_descr->event = CTIO_DONE;
			/* Queue on the appropriate ATIO */
			queue_io(ctio);
			/* Process any queued completions. */
			oo += run_queue(c_descr->atio);
			break;
		}
		case XPT_IMMED_NOTIFY:
			/* INOTs are handled with priority */
			TAILQ_INSERT_HEAD(&work_queue, &ccb->ccb_h,
					  periph_links.tqe);
			break;
		default:
			warnx("Unhandled ccb type %#x in handle_read",
			      ccb->ccb_h.func_code);
			break;
		}
	}
}
/* Process an ATIO CCB from the kernel */
int
work_atio(struct ccb_accept_tio *atio)
{
	struct ccb_scsiio *ctio;
	struct atio_descr *a_descr;
	struct ctio_descr *c_descr;
	cam_status status;
	int ret;

	if (debug)
		warnx("Working on ATIO %p", atio);

	a_descr = (struct atio_descr *)atio->ccb_h.targ_descr;

	/* Get a CTIO and initialize it according to our known parameters */
	ctio = get_ctio();
	if (ctio == NULL) {
		return (1);
	}
	ret = 0;
	ctio->ccb_h.flags = a_descr->flags;
	ctio->tag_id = atio->tag_id;
	ctio->init_id = atio->init_id;
	/* XXX priority needs to be added to a_descr */
	c_descr = (struct ctio_descr *)ctio->ccb_h.targ_descr;
	c_descr->atio = atio;
	if ((a_descr->flags & CAM_DIR_IN) != 0)
		c_descr->offset = a_descr->base_off + a_descr->targ_req;
	else if ((a_descr->flags & CAM_DIR_MASK) == CAM_DIR_OUT)
		c_descr->offset = a_descr->base_off + a_descr->init_req;
	else
		c_descr->offset = a_descr->base_off;

	/*
	 * Return a check condition if there was an error while
	 * receiving this ATIO.
	 */
	if (atio->sense_len != 0) {
		struct scsi_sense_data *sense;

		if (debug) {
			warnx("ATIO with %u bytes sense received",
			      atio->sense_len);
		}
		sense = &atio->sense_data;
		tcmd_sense(ctio->init_id, ctio, sense->flags,
			   sense->add_sense_code, sense->add_sense_code_qual);
		send_ccb((union ccb *)ctio, /*priority*/1);
		return (0);
	}

	status = atio->ccb_h.status & CAM_STATUS_MASK;
	switch (status) {
	case CAM_CDB_RECVD:
		ret = tcmd_handle(atio, ctio, ATIO_WORK);
		break;
	case CAM_REQ_ABORTED:
		warn("ATIO %p aborted", a_descr);
		/* Requeue on HBA */
		TAILQ_REMOVE(&work_queue, &atio->ccb_h, periph_links.tqe);
		send_ccb((union ccb *)atio, /*priority*/1);
		ret = 1;
		break;
	default:
		warnx("ATIO completed with unhandled status %#x", status);
		abort();
		/* NOTREACHED */
		break;
	}

	return (ret);
}
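/*
 * Insert a completed CTIO on its ATIO's completion list, kept sorted by
 * starting offset so run_queue() can process transfers in order.
 */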
static void
queue_io(struct ccb_scsiio *ctio)
{
	struct ccb_hdr *ccb_h;
	struct io_queue *ioq;
	struct ctio_descr *c_descr;

	c_descr = (struct ctio_descr *)ctio->ccb_h.targ_descr;
	if (c_descr->atio == NULL) {
		errx(1, "CTIO %p has NULL ATIO", ctio);
	}
	ioq = &((struct atio_descr *)c_descr->atio->ccb_h.targ_descr)->cmplt_io;

	if (TAILQ_EMPTY(ioq)) {
		TAILQ_INSERT_HEAD(ioq, &ctio->ccb_h, periph_links.tqe);
		return;
	}

	TAILQ_FOREACH_REVERSE(ccb_h, ioq, io_queue, periph_links.tqe) {
		struct ctio_descr *curr_descr =
		    (struct ctio_descr *)ccb_h->targ_descr;
		if (curr_descr->offset <= c_descr->offset) {
			break;
		}
	}

	if (ccb_h) {
		TAILQ_INSERT_AFTER(ioq, ccb_h, &ctio->ccb_h, periph_links.tqe);
	} else {
		TAILQ_INSERT_HEAD(ioq, &ctio->ccb_h, periph_links.tqe);
	}
}
/*
 * Go through all completed AIO/CTIOs for a given ATIO and advance data
 * counts, start continuation IO, etc.
 */
static int
run_queue(struct ccb_accept_tio *atio)
{
	struct atio_descr *a_descr;
	struct ccb_hdr *ccb_h;
	int sent_status, event;

	if (atio == NULL)
		return (0);

	a_descr = (struct atio_descr *)atio->ccb_h.targ_descr;

	while ((ccb_h = TAILQ_FIRST(&a_descr->cmplt_io)) != NULL) {
		struct ccb_scsiio *ctio;
		struct ctio_descr *c_descr;

		ctio = (struct ccb_scsiio *)ccb_h;
		c_descr = (struct ctio_descr *)ctio->ccb_h.targ_descr;

		if (ctio->ccb_h.status == CAM_REQ_ABORTED) {
			TAILQ_REMOVE(&a_descr->cmplt_io, ccb_h,
				     periph_links.tqe);
			free_ccb((union ccb *)ctio);
			send_ccb((union ccb *)atio, /*priority*/1);
			continue;
		}

		/* If completed item is in range, call handler */
		if ((c_descr->event == AIO_DONE &&
		    c_descr->offset == a_descr->base_off + a_descr->targ_ack)
		 || (c_descr->event == CTIO_DONE &&
		    c_descr->offset == a_descr->base_off + a_descr->init_ack)) {
			sent_status = (ccb_h->flags & CAM_SEND_STATUS) != 0;
			event = c_descr->event;

			TAILQ_REMOVE(&a_descr->cmplt_io, ccb_h,
				     periph_links.tqe);
			tcmd_handle(atio, ctio, c_descr->event);

			/* If entire transfer complete, send back ATIO */
			if (sent_status != 0 && event == CTIO_DONE)
				send_ccb((union ccb *)atio, /*priority*/1);
		} else {
			/* Gap in offsets so wait until later callback */
			if (/* debug */ 1)
				warnx("IO %p:%p out of order %s", ccb_h,
				      a_descr, c_descr->event == AIO_DONE?
				      "aio" : "ctio");
			return (1);
		}
	}
	return (0);
}
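/* Handle an immediate notify CCB: bus resets, BDRs, and SCSI messages. */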
static int
work_inot(struct ccb_immed_notify *inot)
{
	cam_status status;
	int sense;

	if (debug)
		warnx("Working on INOT %p", inot);

	status = inot->ccb_h.status;
	sense = (status & CAM_AUTOSNS_VALID) != 0;
	status &= CAM_STATUS_MASK;

	switch (status) {
	case CAM_SCSI_BUS_RESET:
		tcmd_ua(CAM_TARGET_WILDCARD, UA_BUS_RESET);
		abort_all_pending();
		break;
	case CAM_BDR_SENT:
		tcmd_ua(CAM_TARGET_WILDCARD, UA_BDR);
		abort_all_pending();
		break;
	case CAM_MESSAGE_RECV:
		switch (inot->message_args[0]) {
		case MSG_TASK_COMPLETE:
		case MSG_INITIATOR_DET_ERR:
		case MSG_ABORT_TASK_SET:
		case MSG_MESSAGE_REJECT:
		case MSG_NOOP:
		case MSG_PARITY_ERROR:
		case MSG_TARGET_RESET:
		case MSG_ABORT_TASK:
		case MSG_CLEAR_TASK_SET:
		default:
			warnx("INOT message %#x", inot->message_args[0]);
			break;
		}
		break;
	case CAM_REQ_ABORTED:
		warnx("INOT %p aborted", inot);
		break;
	default:
		warnx("Unhandled INOT status %#x", status);
		break;
	}

	/* If there is sense data, use it */
	if (sense != 0) {
		struct scsi_sense_data *sense;

		sense = &inot->sense_data;
		tcmd_sense(inot->initiator_id, NULL, sense->flags,
			   sense->add_sense_code, sense->add_sense_code_qual);
		if (debug)
			warnx("INOT has sense: %#x", sense->flags);
	}

	/* Requeue on SIM */
	TAILQ_REMOVE(&work_queue, &inot->ccb_h, periph_links.tqe);
	send_ccb((union ccb *)inot, /*priority*/1);

	return (1);
}
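/*
 * Queue a CCB on the pending list (if its function code is a queued one)
 * and pass its pointer to the target device via write(2).
 */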
void
send_ccb(union ccb *ccb, int priority)
{
	if (debug)
		warnx("sending ccb (%#x)", ccb->ccb_h.func_code);
	ccb->ccb_h.pinfo.priority = priority;
	if (XPT_FC_IS_QUEUED(ccb)) {
		TAILQ_INSERT_TAIL(&pending_queue, &ccb->ccb_h,
				  periph_links.tqe);
	}
	if (write(targ_fd, &ccb, sizeof(ccb)) != sizeof(ccb)) {
		warn("write ccb");
		ccb->ccb_h.status = CAM_PROVIDE_FAIL;
	}
}
/* Return a CTIO/descr/buf combo from the freelist or malloc one */
static struct ccb_scsiio *
get_ctio()
{
	struct ccb_scsiio *ctio;
	struct ctio_descr *c_descr;
	struct sigevent *se;

	if (num_ctios == MAX_CTIOS) {
		warnx("at CTIO max");
		return (NULL);
	}

	ctio = (struct ccb_scsiio *)malloc(sizeof(*ctio));
	if (ctio == NULL) {
		warn("malloc CTIO");
		return (NULL);
	}
	c_descr = (struct ctio_descr *)malloc(sizeof(*c_descr));
	if (c_descr == NULL) {
		free(ctio);
		warn("malloc ctio_descr");
		return (NULL);
	}
	c_descr->buf = malloc(buf_size);
	if (c_descr->buf == NULL) {
		free(c_descr);
		free(ctio);
		warn("malloc backing store");
		return (NULL);
	}
	num_ctios++;

	/* Initialize CTIO, CTIO descr, and AIO */
	ctio->ccb_h.func_code = XPT_CONT_TARGET_IO;
	ctio->ccb_h.retry_count = 2;
	ctio->ccb_h.timeout = CAM_TIME_INFINITY;
	ctio->data_ptr = c_descr->buf;
	ctio->ccb_h.targ_descr = c_descr;
	c_descr->aiocb.aio_buf = c_descr->buf;
	c_descr->aiocb.aio_fildes = file_fd;
	se = &c_descr->aiocb.aio_sigevent;
	se->sigev_notify = SIGEV_KEVENT;
	se->sigev_notify_kqueue = kq_fd;
	se->sigev_value.sival_ptr = ctio;

	return (ctio);
}
void
free_ccb(union ccb *ccb)
{
	switch (ccb->ccb_h.func_code) {
	case XPT_CONT_TARGET_IO:
	{
		struct ctio_descr *c_descr;

		c_descr = (struct ctio_descr *)ccb->ccb_h.targ_descr;
		free(c_descr->buf);
		num_ctios--;
		/* FALLTHROUGH */
	}
	case XPT_ACCEPT_TARGET_IO:
		free(ccb->ccb_h.targ_descr);
		/* FALLTHROUGH */
	case XPT_IMMED_NOTIFY:
	default:
		free(ccb);
		break;
	}
}
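/* Query SIM capabilities via XPT_PATH_INQ; fail if target mode is unsupported. */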
static cam_status
get_sim_flags(u_int16_t *flags)
{
	struct ccb_pathinq cpi;
	cam_status status;

	/* Find SIM capabilities */
	bzero(&cpi, sizeof(cpi));
	cpi.ccb_h.func_code = XPT_PATH_INQ;
	send_ccb((union ccb *)&cpi, /*priority*/1);
	status = cpi.ccb_h.status & CAM_STATUS_MASK;
	if (status != CAM_REQ_CMP) {
		fprintf(stderr, "CPI failed, status %#x\n", status);
		return (status);
	}

	/* Can only enable on controllers that support target mode */
	if ((cpi.target_sprt & PIT_PROCESSOR) == 0) {
		fprintf(stderr, "HBA does not support target mode\n");
		status = CAM_PATH_INVALID;
		return (status);
	}

	*flags = cpi.hba_inquiry;
	return (status);
}
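/* Release a frozen SIM queue (see the CAM_DEV_QFRZN handling in request_loop()). */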
static void
rel_simq()
{
	struct ccb_relsim crs;

	bzero(&crs, sizeof(crs));
	crs.ccb_h.func_code = XPT_REL_SIMQ;
	crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
	crs.openings = 0;
	crs.release_timeout = 0;
	crs.qfrozen_cnt = 0;
	send_ccb((union ccb *)&crs, /*priority*/0);
}
/* Cancel all pending CCBs. */
static void
abort_all_pending()
{
	struct ccb_abort cab;
	struct ccb_hdr *ccb_h;

	if (debug)
		warnx("abort_all_pending");

	bzero(&cab, sizeof(cab));
	cab.ccb_h.func_code = XPT_ABORT;
	TAILQ_FOREACH(ccb_h, &pending_queue, periph_links.tqe) {
		if (debug)
			warnx("Aborting pending CCB %p\n", ccb_h);
		cab.abort_ccb = (union ccb *)ccb_h;
		send_ccb((union ccb *)&cab, /*priority*/1);
		if (cab.ccb_h.status != CAM_REQ_CMP) {
			warnx("Unable to abort CCB, status %#x\n",
			      cab.ccb_h.status);
		}
	}
}
static void
usage()
{
	fprintf(stderr,
		"Usage: scsi_target [-AdSTY] [-b bufsize] [-c sectorsize]\n"
		"\t\t[-r numbufs] [-s volsize] [-W 8,16,32]\n"
		"\t\tbus:target:lun filename\n");
	exit(1);
}