/*
 * IBM eServer i/pSeries Virtual SCSI Target Driver
 * Copyright (C) 2003-2005 Dave Boutcher (boutcher@us.ibm.com) IBM Corp.
 *			   Santiago Leon (santil@us.ibm.com) IBM Corp.
 *			   Linda Xie (lxie@us.ibm.com) IBM Corp.
 *
 * Copyright (C) 2005-2006 FUJITA Tomonori <tomof@acm.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/interrupt.h>
#include <linux/module.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_srp.h>
#include <scsi/scsi_tgt.h>
#include <scsi/libsrp.h>
#include <asm/hvcall.h>
#include <asm/iommu.h>
#include <asm/prom.h>
#include <asm/vio.h>

#include "ibmvscsi.h"
#define	INITIAL_SRP_LIMIT	16
#define	DEFAULT_MAX_SECTORS	256

#define	TGT_NAME	"ibmvstgt"
/*
 * Hypervisor calls.
 */
#define h_copy_rdma(l, sa, sb, da, db) \
			plpar_hcall_norets(H_COPY_RDMA, l, sa, sb, da, db)
#define h_send_crq(ua, l, h) \
			plpar_hcall_norets(H_SEND_CRQ, ua, l, h)
#define h_reg_crq(ua, tok, sz) \
			plpar_hcall_norets(H_REG_CRQ, ua, tok, sz)
#define h_free_crq(ua) \
			plpar_hcall_norets(H_FREE_CRQ, ua)
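
/*
 * These wrap the pSeries hypervisor interface: H_COPY_RDMA moves data
 * between the local and remote TCE-mapped DMA windows, H_SEND_CRQ posts
 * a 16-byte event to the partner's Command/Response Queue, and
 * H_REG_CRQ/H_FREE_CRQ register and release our own CRQ page.
 */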
/* tmp - will replace with SCSI logging stuff */
#define eprintk(fmt, args...)					\
do {								\
	printk("%s(%d) " fmt, __FUNCTION__, __LINE__, ##args);	\
} while (0)
/* #define dprintk eprintk */
#define dprintk(fmt, args...)
struct vio_port {
	struct vio_dev *dma_dev;

	struct crq_queue crq_queue;
	struct work_struct crq_work;

	/* local and remote I/O bus numbers from "ibm,my-dma-window" */
	unsigned long liobn;
	unsigned long riobn;
	struct srp_target *target;

	struct srp_rport *rport;
};

static struct workqueue_struct *vtgtd;
static struct scsi_transport_template *ibmvstgt_transport_template;
/*
 * These are fixed for the system and come from the Open Firmware device tree.
 * We just store them here to save getting them every time.
 */
static char system_id[64] = "";
static char partition_name[97] = "UNKNOWN";
static unsigned int partition_number = -1;
static struct vio_port *target_to_port(struct srp_target *target)
{
	return (struct vio_port *) target->ldata;
}
static inline union viosrp_iu *vio_iu(struct iu_entry *iue)
{
	return (union viosrp_iu *) (iue->sbuf->buf);
}
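
/*
 * send_iu - push a response IU back to the initiator.
 *
 * The IU payload is first copied into the initiator's buffer with
 * H_COPY_RDMA, then a CRQ entry describing it is posted with H_SEND_CRQ.
 */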
static int send_iu(struct iu_entry *iue, uint64_t length, uint8_t format)
{
	struct srp_target *target = iue->target;
	struct vio_port *vport = target_to_port(target);
	long rc, rc1;
	union {
		struct viosrp_crq cooked;
		uint64_t raw[2];
	} crq;

	/* First copy the SRP */
	rc = h_copy_rdma(length, vport->liobn, iue->sbuf->dma,
			 vport->riobn, iue->remote_token);
	if (rc)
		eprintk("Error %ld transferring data\n", rc);

	crq.cooked.valid = 0x80;
	crq.cooked.format = format;
	crq.cooked.reserved = 0x00;
	crq.cooked.timeout = 0x00;
	crq.cooked.IU_length = length;
	crq.cooked.IU_data_ptr = vio_iu(iue)->srp.rsp.tag;

	if (rc)
		crq.cooked.status = 0x99;	/* Just needs to be non-zero */
	else
		crq.cooked.status = 0x00;

	rc1 = h_send_crq(vport->dma_dev->unit_address, crq.raw[0], crq.raw[1]);
	if (rc1) {
		eprintk("%ld sending response\n", rc1);
		return rc1;
	}

	return rc;
}
#define SRP_RSP_SENSE_DATA_LEN	18
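
/*
 * send_rsp - build an SRP_RSP IU in place and hand it to send_iu().
 *
 * With a SCSI command present the sense buffer is copied through
 * verbatim; without one an 18-byte fixed-format sense block is
 * synthesized from the given sense key and additional sense code.
 */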
static int send_rsp(struct iu_entry *iue, struct scsi_cmnd *sc,
		    unsigned char status, unsigned char asc)
{
	union viosrp_iu *iu = vio_iu(iue);
	uint64_t tag = iu->srp.rsp.tag;

	/* If the linked bit is on and status is good */
	if (test_bit(V_LINKED, &iue->flags) && (status == NO_SENSE))
		status = 0x10;

	memset(iu, 0, sizeof(struct srp_rsp));
	iu->srp.rsp.opcode = SRP_RSP;
	iu->srp.rsp.req_lim_delta = 1;
	iu->srp.rsp.tag = tag;

	if (test_bit(V_DIOVER, &iue->flags))
		iu->srp.rsp.flags |= SRP_RSP_FLAG_DIOVER;

	iu->srp.rsp.data_in_res_cnt = 0;
	iu->srp.rsp.data_out_res_cnt = 0;

	iu->srp.rsp.flags &= ~SRP_RSP_FLAG_RSPVALID;

	iu->srp.rsp.resp_data_len = 0;
	iu->srp.rsp.status = status;
	if (status) {
		uint8_t *sense = iu->srp.rsp.data;

		if (sc) {
			iu->srp.rsp.flags |= SRP_RSP_FLAG_SNSVALID;
			iu->srp.rsp.sense_data_len = SCSI_SENSE_BUFFERSIZE;
			memcpy(sense, sc->sense_buffer, SCSI_SENSE_BUFFERSIZE);
		} else {
			iu->srp.rsp.status = SAM_STAT_CHECK_CONDITION;
			iu->srp.rsp.flags |= SRP_RSP_FLAG_SNSVALID;
			iu->srp.rsp.sense_data_len = SRP_RSP_SENSE_DATA_LEN;

			/* Valid bit and 'current errors' */
			sense[0] = (0x1 << 7 | 0x70);
			/* Sense key */
			sense[2] = status;
			/* Additional sense length */
			sense[7] = 0xa;	/* 10 bytes */
			/* Additional sense code */
			sense[12] = asc;
		}
	}

	send_iu(iue, sizeof(iu->srp.rsp) + SRP_RSP_SENSE_DATA_LEN,
		VIOSRP_SRP_FORMAT);

	return 0;
}
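
/*
 * handle_cmd_queue - drain queued SRP_CMD IUs into the tgt core.
 *
 * The target lock is dropped around srp_cmd_queue(), so the scan
 * restarts from the head after every command it hands off.
 */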
static void handle_cmd_queue(struct srp_target *target)
{
	struct Scsi_Host *shost = target->shost;
	struct srp_rport *rport = target_to_port(target)->rport;
	struct iu_entry *iue;
	struct srp_cmd *cmd;
	unsigned long flags;
	int err;

retry:
	spin_lock_irqsave(&target->lock, flags);

	list_for_each_entry(iue, &target->cmd_queue, ilist) {
		if (!test_and_set_bit(V_FLYING, &iue->flags)) {
			spin_unlock_irqrestore(&target->lock, flags);
			cmd = iue->sbuf->buf;
			err = srp_cmd_queue(shost, cmd, iue,
					    (unsigned long)rport, 0);
			if (err) {
				eprintk("cannot queue cmd %p %d\n", cmd, err);
				srp_iu_put(iue);
			}
			goto retry;
		}
	}

	spin_unlock_irqrestore(&target->lock, flags);
}
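
/*
 * ibmvstgt_rdma - move data between the local scatterlist and the
 * initiator's SRP direct buffers, one H_COPY_RDMA call per contiguous
 * chunk, walking the md[] descriptors and the sg list in step.
 */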
static int ibmvstgt_rdma(struct scsi_cmnd *sc, struct scatterlist *sg, int nsg,
			 struct srp_direct_buf *md, int nmd,
			 enum dma_data_direction dir, unsigned int rest)
{
	struct iu_entry *iue = (struct iu_entry *) sc->SCp.ptr;
	struct srp_target *target = iue->target;
	struct vio_port *vport = target_to_port(target);
	dma_addr_t token;
	long err;
	unsigned int done = 0;
	int i, sidx, soff;

	sidx = soff = 0;
	token = sg_dma_address(sg + sidx);

	for (i = 0; i < nmd && rest; i++) {
		unsigned int mdone, mlen;

		mlen = min(rest, md[i].len);
		for (mdone = 0; mlen;) {
			int slen = min(sg_dma_len(sg + sidx) - soff, mlen);

			if (dir == DMA_TO_DEVICE)
				/* data-out: copy from the initiator to us */
				err = h_copy_rdma(slen,
						  vport->riobn,
						  md[i].va + mdone,
						  vport->liobn,
						  token + soff);
			else
				/* data-in: copy from us to the initiator */
				err = h_copy_rdma(slen,
						  vport->liobn,
						  token + soff,
						  vport->riobn,
						  md[i].va + mdone);

			if (err != H_SUCCESS) {
				eprintk("rdma error %d %d %ld\n", dir, slen, err);
				return -EIO;
			}

			mlen -= slen;
			rest -= slen;
			mdone += slen;
			soff += slen;
			done += slen;

			if (soff == sg_dma_len(sg + sidx)) {
				sidx++;
				soff = 0;
				token = sg_dma_address(sg + sidx);

				if (sidx > nsg) {
					eprintk("out of sg %p %d %d\n",
						iue, sidx, nsg);
					return -EIO;
				}
			}
		}
	}

	return 0;
}
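
/*
 * ibmvstgt_cmd_done - called by the tgt core when a command completes;
 * transfer any data via ibmvstgt_rdma(), then send the SRP response.
 */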
static int ibmvstgt_cmd_done(struct scsi_cmnd *sc,
			     void (*done)(struct scsi_cmnd *))
{
	unsigned long flags;
	struct iu_entry *iue = (struct iu_entry *) sc->SCp.ptr;
	struct srp_target *target = iue->target;
	int err = 0;

	dprintk("%p %p %x %u\n", iue, target, vio_iu(iue)->srp.cmd.cdb[0],
		scsi_sg_count(sc)); /* trailing arg lost in source; sg count assumed */

	if (scsi_sg_count(sc))
		err = srp_transfer_data(sc, &vio_iu(iue)->srp.cmd,
					ibmvstgt_rdma, 1, 1);

	spin_lock_irqsave(&target->lock, flags);
	list_del(&iue->ilist);
	spin_unlock_irqrestore(&target->lock, flags);

	if (err || sc->result != SAM_STAT_GOOD) {
		eprintk("operation failed %p %d %x\n",
			iue, sc->result, vio_iu(iue)->srp.cmd.cdb[0]);
		send_rsp(iue, sc, HARDWARE_ERROR, 0x00);
	} else
		send_rsp(iue, sc, NO_SENSE, 0x00);

	done(sc);
	srp_iu_put(iue);
	return 0;
}
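
/*
 * send_adapter_info - MAD adapter-info exchange: read the client's
 * info from remote_buffer, then overwrite it with our own (partition
 * name/number, SRP version, maximum transfer size).
 */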
int send_adapter_info(struct iu_entry *iue,
		      dma_addr_t remote_buffer, uint16_t length)
{
	struct srp_target *target = iue->target;
	struct vio_port *vport = target_to_port(target);
	struct Scsi_Host *shost = target->shost;
	dma_addr_t data_token;
	struct mad_adapter_info_data *info;
	int err;

	info = dma_alloc_coherent(target->dev, sizeof(*info), &data_token,
				  GFP_KERNEL);
	if (!info) {
		eprintk("bad dma_alloc_coherent %p\n", target);
		return 1;
	}

	/* Get remote info */
	err = h_copy_rdma(sizeof(*info), vport->riobn, remote_buffer,
			  vport->liobn, data_token);
	if (err == H_SUCCESS) {
		dprintk("Client connect: %s (%d)\n",
			info->partition_name, info->partition_number);
	}

	memset(info, 0, sizeof(*info));

	strcpy(info->srp_version, "16.a");
	strncpy(info->partition_name, partition_name,
		sizeof(info->partition_name));
	info->partition_number = partition_number;
	info->mad_version = 1;
	info->os_type = 2;
	info->port_max_txu[0] = shost->hostt->max_sectors << 9;

	/* Send our info to remote */
	err = h_copy_rdma(sizeof(*info), vport->liobn, data_token,
			  vport->riobn, remote_buffer);

	dma_free_coherent(target->dev, sizeof(*info), info, data_token);

	if (err != H_SUCCESS) {
		eprintk("Error sending adapter info %d\n", err);
		return 1;
	}

	return 0;
}
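
/*
 * process_login - register an rport for the initiator and answer the
 * SRP_LOGIN_REQ with our request-limit and buffer-format capabilities.
 */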
static void process_login(struct iu_entry *iue)
{
	union viosrp_iu *iu = vio_iu(iue);
	struct srp_login_rsp *rsp = &iu->srp.login_rsp;
	uint64_t tag = iu->srp.rsp.tag;
	struct Scsi_Host *shost = iue->target->shost;
	struct srp_target *target = host_to_srp_target(shost);
	struct vio_port *vport = target_to_port(target);
	struct srp_rport_identifiers ids;

	memset(&ids, 0, sizeof(ids));
	sprintf(ids.port_id, "%x", vport->dma_dev->unit_address);
	ids.roles = SRP_RPORT_ROLE_INITIATOR;

	vport->rport = srp_rport_add(shost, &ids);

	/* TODO handle case that requested size is wrong and
	 * buffer format is wrong
	 */
	memset(iu, 0, sizeof(struct srp_login_rsp));
	rsp->opcode = SRP_LOGIN_RSP;
	rsp->req_lim_delta = INITIAL_SRP_LIMIT;
	rsp->tag = tag;
	rsp->max_it_iu_len = sizeof(union srp_iu);
	rsp->max_ti_iu_len = sizeof(union srp_iu);
	/* direct and indirect */
	rsp->buf_fmt = SRP_BUF_FORMAT_DIRECT | SRP_BUF_FORMAT_INDIRECT;

	send_iu(iue, sizeof(*rsp), VIOSRP_SRP_FORMAT);
}
static inline void queue_cmd(struct iu_entry *iue)
{
	struct srp_target *target = iue->target;
	unsigned long flags;

	spin_lock_irqsave(&target->lock, flags);
	list_add_tail(&iue->ilist, &target->cmd_queue);
	spin_unlock_irqrestore(&target->lock, flags);
}
static int process_tsk_mgmt(struct iu_entry *iue)
{
	union viosrp_iu *iu = vio_iu(iue);
	int fn;

	dprintk("%p %u\n", iue, iu->srp.tsk_mgmt.tsk_mgmt_func);

	switch (iu->srp.tsk_mgmt.tsk_mgmt_func) {
	case SRP_TSK_ABORT_TASK:
		fn = ABORT_TASK;
		break;
	case SRP_TSK_ABORT_TASK_SET:
		fn = ABORT_TASK_SET;
		break;
	case SRP_TSK_CLEAR_TASK_SET:
		fn = CLEAR_TASK_SET;
		break;
	case SRP_TSK_LUN_RESET:
		fn = LOGICAL_UNIT_RESET;
		break;
	case SRP_TSK_CLEAR_ACA:
		fn = CLEAR_ACA;
		break;
	default:
		fn = 0;
	}
	if (fn)
		scsi_tgt_tsk_mgmt_request(iue->target->shost,
					  (unsigned long)iue->target->shost,
					  fn,
					  iu->srp.tsk_mgmt.task_tag,
					  (struct scsi_lun *) &iu->srp.tsk_mgmt.lun,
					  iue);
	else
		send_rsp(iue, NULL, ILLEGAL_REQUEST, 0x20);

	return !fn;
}
static int process_mad_iu(struct iu_entry *iue)
{
	union viosrp_iu *iu = vio_iu(iue);
	struct viosrp_adapter_info *info;
	struct viosrp_host_config *conf;

	switch (iu->mad.empty_iu.common.type) {
	case VIOSRP_EMPTY_IU_TYPE:
		eprintk("%s\n", "Unsupported EMPTY MAD IU");
		break;
	case VIOSRP_ERROR_LOG_TYPE:
		eprintk("%s\n", "Unsupported ERROR LOG MAD IU");
		iu->mad.error_log.common.status = 1;
		send_iu(iue, sizeof(iu->mad.error_log), VIOSRP_MAD_FORMAT);
		break;
	case VIOSRP_ADAPTER_INFO_TYPE:
		info = &iu->mad.adapter_info;
		info->common.status = send_adapter_info(iue, info->buffer,
							info->common.length);
		send_iu(iue, sizeof(*info), VIOSRP_MAD_FORMAT);
		break;
	case VIOSRP_HOST_CONFIG_TYPE:
		conf = &iu->mad.host_config;
		conf->common.status = 1;
		send_iu(iue, sizeof(*conf), VIOSRP_MAD_FORMAT);
		break;
	default:
		eprintk("Unknown type %u\n", iu->srp.rsp.opcode);
	}

	return 1;
}
static int process_srp_iu(struct iu_entry *iue)
{
	union viosrp_iu *iu = vio_iu(iue);
	int done = 1;
	u8 opcode = iu->srp.rsp.opcode;

	switch (opcode) {
	case SRP_LOGIN_REQ:
		process_login(iue);
		break;
	case SRP_TSK_MGMT:
		done = process_tsk_mgmt(iue);
		break;
	case SRP_CMD:
		queue_cmd(iue);
		done = 0;
		break;
	/* remaining SRP opcodes are acknowledged but not supported */
	case SRP_LOGIN_RSP:
	case SRP_I_LOGOUT:
	case SRP_T_LOGOUT:
	case SRP_RSP:
	case SRP_CRED_REQ:
	case SRP_CRED_RSP:
	case SRP_AER_REQ:
	case SRP_AER_RSP:
		eprintk("Unsupported type %u\n", opcode);
		break;
	default:
		eprintk("Unknown type %u\n", opcode);
	}

	return done;
}
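
/*
 * process_iu - pull one IU out of the initiator's memory into a pool
 * entry and dispatch it as either a MAD or an SRP IU.  The entry is
 * returned to the pool unless the handler queued it for later.
 */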
static void process_iu(struct viosrp_crq *crq, struct srp_target *target)
{
	struct vio_port *vport = target_to_port(target);
	struct iu_entry *iue;
	long err;
	int done = 1;

	iue = srp_iu_get(target);
	if (!iue) {
		eprintk("Error getting IU from pool, %p\n", target);
		return;
	}

	iue->remote_token = crq->IU_data_ptr;

	err = h_copy_rdma(crq->IU_length, vport->riobn,
			  iue->remote_token, vport->liobn, iue->sbuf->dma);
	if (err != H_SUCCESS) {
		eprintk("%ld transferring data error %p\n", err, iue);
		goto out;
	}

	if (crq->format == VIOSRP_MAD_FORMAT)
		done = process_mad_iu(iue);
	else
		done = process_srp_iu(iue);
out:
	if (done)
		srp_iu_put(iue);
}
static irqreturn_t ibmvstgt_interrupt(int dummy, void *data)
{
	struct srp_target *target = data;
	struct vio_port *vport = target_to_port(target);

	vio_disable_interrupts(vport->dma_dev);
	queue_work(vtgtd, &vport->crq_work);

	return IRQ_HANDLED;
}
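
/*
 * crq_queue_create - allocate and DMA-map one page of CRQ entries,
 * register it with the hypervisor, hook up the device interrupt, and
 * announce ourselves to the partner partition.
 */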
static int crq_queue_create(struct crq_queue *queue, struct srp_target *target)
{
	int err;
	struct vio_port *vport = target_to_port(target);

	queue->msgs = (struct viosrp_crq *) get_zeroed_page(GFP_KERNEL);
	if (!queue->msgs)
		goto malloc_failed;
	queue->size = PAGE_SIZE / sizeof(*queue->msgs);

	queue->msg_token = dma_map_single(target->dev, queue->msgs,
					  queue->size * sizeof(*queue->msgs),
					  DMA_BIDIRECTIONAL);

	if (dma_mapping_error(queue->msg_token))
		goto map_failed;

	err = h_reg_crq(vport->dma_dev->unit_address, queue->msg_token,
			PAGE_SIZE);

	/* If the adapter was left active for some reason (like kexec)
	 * try freeing and re-registering
	 */
	if (err == H_RESOURCE) {
		do {
			err = h_free_crq(vport->dma_dev->unit_address);
		} while (err == H_BUSY || H_IS_LONG_BUSY(err));

		err = h_reg_crq(vport->dma_dev->unit_address, queue->msg_token,
				PAGE_SIZE);
	}

	if (err != H_SUCCESS && err != 2) {
		eprintk("Error 0x%x opening virtual adapter\n", err);
		goto reg_crq_failed;
	}

	err = request_irq(vport->dma_dev->irq, &ibmvstgt_interrupt,
			  IRQF_DISABLED, "ibmvstgt", target);
	if (err)
		goto req_irq_failed;

	vio_enable_interrupts(vport->dma_dev);

	h_send_crq(vport->dma_dev->unit_address, 0xC001000000000000, 0);

	queue->cur = 0;
	spin_lock_init(&queue->lock);

	return 0;

req_irq_failed:
	do {
		err = h_free_crq(vport->dma_dev->unit_address);
	} while (err == H_BUSY || H_IS_LONG_BUSY(err));

reg_crq_failed:
	dma_unmap_single(target->dev, queue->msg_token,
			 queue->size * sizeof(*queue->msgs), DMA_BIDIRECTIONAL);
map_failed:
	free_page((unsigned long) queue->msgs);

malloc_failed:
	return -ENOMEM;
}
static void crq_queue_destroy(struct srp_target *target)
{
	struct vio_port *vport = target_to_port(target);
	struct crq_queue *queue = &vport->crq_queue;
	int err;

	free_irq(vport->dma_dev->irq, target);
	do {
		err = h_free_crq(vport->dma_dev->unit_address);
	} while (err == H_BUSY || H_IS_LONG_BUSY(err));

	dma_unmap_single(target->dev, queue->msg_token,
			 queue->size * sizeof(*queue->msgs), DMA_BIDIRECTIONAL);

	free_page((unsigned long) queue->msgs);
}
static void process_crq(struct viosrp_crq *crq, struct srp_target *target)
{
	struct vio_port *vport = target_to_port(target);
	dprintk("%x %x\n", crq->valid, crq->format);

	switch (crq->valid) {
	case 0xC0:
		/* initialization */
		switch (crq->format) {
		case 0x01:
			h_send_crq(vport->dma_dev->unit_address,
				   0xC002000000000000, 0);
			break;
		case 0x02:
			break;
		default:
			eprintk("Unknown format %u\n", crq->format);
		}
		break;
	case 0xFF:
		/* transport event */
		break;
	case 0x80:
		/* real payload */
		switch (crq->format) {
		case VIOSRP_SRP_FORMAT:
		case VIOSRP_MAD_FORMAT:
			process_iu(crq, target);
			break;
		case VIOSRP_OS400_FORMAT:
		case VIOSRP_AIX_FORMAT:
		case VIOSRP_LINUX_FORMAT:
		case VIOSRP_INLINE_FORMAT:
			eprintk("Unsupported format %u\n", crq->format);
			break;
		default:
			eprintk("Unknown format %u\n", crq->format);
		}
		break;
	default:
		eprintk("unknown message type 0x%02x!?\n", crq->valid);
	}
}
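
/*
 * next_crq - peek at the current CRQ slot; a set valid bit (0x80)
 * means the partner has posted an entry, so advance the cursor and
 * hand the entry back, otherwise return NULL.
 */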
static inline struct viosrp_crq *next_crq(struct crq_queue *queue)
{
	struct viosrp_crq *crq;
	unsigned long flags;

	spin_lock_irqsave(&queue->lock, flags);
	crq = &queue->msgs[queue->cur];
	if (crq->valid & 0x80) {
		if (++queue->cur == queue->size)
			queue->cur = 0;
	} else
		crq = NULL;
	spin_unlock_irqrestore(&queue->lock, flags);

	return crq;
}
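
/*
 * handle_crq - workqueue handler: drain the CRQ, re-enable the
 * interrupt, then check once more to close the race with an entry
 * arriving between the drain and the enable.
 */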
static void handle_crq(struct work_struct *work)
{
	struct vio_port *vport = container_of(work, struct vio_port, crq_work);
	struct srp_target *target = vport->target;
	struct viosrp_crq *crq;
	int done = 0;

	while (!done) {
		while ((crq = next_crq(&vport->crq_queue)) != NULL) {
			process_crq(crq, target);
			crq->valid = 0x00;
		}

		vio_enable_interrupts(vport->dma_dev);

		crq = next_crq(&vport->crq_queue);
		if (crq) {
			vio_disable_interrupts(vport->dma_dev);
			process_crq(crq, target);
			crq->valid = 0x00;
		} else
			done = 1;
	}

	handle_cmd_queue(target);
}
static int ibmvstgt_eh_abort_handler(struct scsi_cmnd *sc)
{
	unsigned long flags;
	struct iu_entry *iue = (struct iu_entry *) sc->SCp.ptr;
	struct srp_target *target = iue->target;

	dprintk("%p %p %x\n", iue, target, vio_iu(iue)->srp.cmd.cdb[0]);

	spin_lock_irqsave(&target->lock, flags);
	list_del(&iue->ilist);
	spin_unlock_irqrestore(&target->lock, flags);

	srp_iu_put(iue);

	return 0;
}
static int ibmvstgt_tsk_mgmt_response(struct Scsi_Host *shost,
				      u64 itn_id, u64 mid, int result)
{
	struct iu_entry *iue = (struct iu_entry *) ((void *) mid);
	union viosrp_iu *iu = vio_iu(iue);
	unsigned char status, asc;

	eprintk("%p %d\n", iue, result);
	status = NO_SENSE;
	asc = 0;

	switch (iu->srp.tsk_mgmt.tsk_mgmt_func) {
	case SRP_TSK_ABORT_TASK:
		asc = 0x14;
		if (result)
			status = ABORTED_COMMAND;
		break;
	default:
		break;
	}

	send_rsp(iue, NULL, status, asc);
	srp_iu_put(iue);

	return 0;
}
static int ibmvstgt_it_nexus_response(struct Scsi_Host *shost, u64 itn_id,
				      int result)
{
	struct srp_target *target = host_to_srp_target(shost);
	struct vio_port *vport = target_to_port(target);

	if (result) {
		eprintk("%p %d\n", shost, result);
		srp_rport_del(vport->rport);
	}

	return 0;
}
static ssize_t system_id_show(struct class_device *cdev, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%s\n", system_id);
}

static ssize_t partition_number_show(struct class_device *cdev, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%x\n", partition_number);
}

static ssize_t unit_address_show(struct class_device *cdev, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct srp_target *target = host_to_srp_target(shost);
	struct vio_port *vport = target_to_port(target);
	return snprintf(buf, PAGE_SIZE, "%x\n", vport->dma_dev->unit_address);
}
static CLASS_DEVICE_ATTR(system_id, S_IRUGO, system_id_show, NULL);
static CLASS_DEVICE_ATTR(partition_number, S_IRUGO, partition_number_show, NULL);
static CLASS_DEVICE_ATTR(unit_address, S_IRUGO, unit_address_show, NULL);

static struct class_device_attribute *ibmvstgt_attrs[] = {
	&class_device_attr_system_id,
	&class_device_attr_partition_number,
	&class_device_attr_unit_address,
	NULL,
};
static struct scsi_host_template ibmvstgt_sht = {
	.name			= TGT_NAME,
	.module			= THIS_MODULE,
	.can_queue		= INITIAL_SRP_LIMIT,
	.sg_tablesize		= SG_ALL,
	.use_clustering		= DISABLE_CLUSTERING,
	.max_sectors		= DEFAULT_MAX_SECTORS,
	.transfer_response	= ibmvstgt_cmd_done,
	.eh_abort_handler	= ibmvstgt_eh_abort_handler,
	.shost_attrs		= ibmvstgt_attrs,
	.proc_name		= TGT_NAME,
	.supported_mode		= MODE_TARGET,
};
static int ibmvstgt_probe(struct vio_dev *dev, const struct vio_device_id *id)
{
	struct Scsi_Host *shost;
	struct srp_target *target;
	struct vio_port *vport;
	unsigned int *dma, dma_size;
	int err = -ENOMEM;

	vport = kzalloc(sizeof(struct vio_port), GFP_KERNEL);
	if (!vport)
		return err;
	shost = scsi_host_alloc(&ibmvstgt_sht, sizeof(struct srp_target));
	if (!shost)
		goto free_vport;
	shost->transportt = ibmvstgt_transport_template;

	target = host_to_srp_target(shost);
	target->shost = shost;
	vport->dma_dev = dev;
	target->ldata = vport;
	vport->target = target;
	err = srp_target_alloc(target, &dev->dev, INITIAL_SRP_LIMIT,
			       SRP_MAX_IU_LEN);
	if (err)
		goto put_host;

	dma = (unsigned int *) vio_get_attribute(dev, "ibm,my-dma-window",
						 &dma_size);
	if (!dma || dma_size != 40) {
		eprintk("Couldn't get window property %d\n", dma_size);
		err = -EIO;
		goto free_srp_target;
	}
	vport->liobn = dma[0];
	vport->riobn = dma[5];

	INIT_WORK(&vport->crq_work, handle_crq);

	err = crq_queue_create(&vport->crq_queue, target);
	if (err)
		goto free_srp_target;

	err = scsi_add_host(shost, target->dev);
	if (err)
		goto destroy_queue;

	err = scsi_tgt_alloc_queue(shost);
	if (err)
		goto destroy_queue;

	return 0;

destroy_queue:
	crq_queue_destroy(target);
free_srp_target:
	srp_target_free(target);
put_host:
	scsi_host_put(shost);
free_vport:
	kfree(vport);
	return err;
}
static int ibmvstgt_remove(struct vio_dev *dev)
{
	struct srp_target *target = (struct srp_target *) dev->dev.driver_data;
	struct Scsi_Host *shost = target->shost;
	struct vio_port *vport = target->ldata;

	crq_queue_destroy(target);
	srp_remove_host(shost);
	scsi_remove_host(shost);
	scsi_tgt_free_queue(shost);
	srp_target_free(target);
	kfree(vport);
	scsi_host_put(shost);
	return 0;
}
static struct vio_device_id ibmvstgt_device_table[] __devinitdata = {
	{"v-scsi-host", "IBM,v-scsi-host"},
	{"", ""}
};

MODULE_DEVICE_TABLE(vio, ibmvstgt_device_table);
static struct vio_driver ibmvstgt_driver = {
	.id_table = ibmvstgt_device_table,
	.probe = ibmvstgt_probe,
	.remove = ibmvstgt_remove,
	.driver = {
		/* driver name elided in the source; TGT_NAME assumed */
		.name = TGT_NAME,
		.owner = THIS_MODULE,
	},
};
static int get_system_info(void)
{
	struct device_node *rootdn;
	const char *id, *model, *name;
	const unsigned int *num;

	rootdn = of_find_node_by_path("/");
	if (!rootdn)
		return -ENOENT;

	model = of_get_property(rootdn, "model", NULL);
	id = of_get_property(rootdn, "system-id", NULL);
	if (model && id)
		snprintf(system_id, sizeof(system_id), "%s-%s", model, id);

	name = of_get_property(rootdn, "ibm,partition-name", NULL);
	if (name)
		strncpy(partition_name, name, sizeof(partition_name));

	num = of_get_property(rootdn, "ibm,partition-no", NULL);
	if (num)
		partition_number = *num;

	of_node_put(rootdn);
	return 0;
}
= {
949 .tsk_mgmt_response
= ibmvstgt_tsk_mgmt_response
,
950 .it_nexus_response
= ibmvstgt_it_nexus_response
,
static int ibmvstgt_init(void)
{
	int err = -ENOMEM;

	printk("IBM eServer i/pSeries Virtual SCSI Target Driver\n");

	ibmvstgt_transport_template =
		srp_attach_transport(&ibmvstgt_transport_functions);
	if (!ibmvstgt_transport_template)
		return err;

	vtgtd = create_workqueue("ibmvtgtd");
	if (!vtgtd)
		goto release_transport;

	err = get_system_info();
	if (err)
		goto destroy_wq;

	err = vio_register_driver(&ibmvstgt_driver);
	if (err)
		goto destroy_wq;

	return 0;

destroy_wq:
	destroy_workqueue(vtgtd);
release_transport:
	srp_release_transport(ibmvstgt_transport_template);
	return err;
}
static void ibmvstgt_exit(void)
{
	printk("Unregister IBM virtual SCSI driver\n");

	destroy_workqueue(vtgtd);
	vio_unregister_driver(&ibmvstgt_driver);
	srp_release_transport(ibmvstgt_transport_template);
}
MODULE_DESCRIPTION("IBM Virtual SCSI Target");
MODULE_AUTHOR("Santiago Leon");
MODULE_LICENSE("GPL");

module_init(ibmvstgt_init);
module_exit(ibmvstgt_exit);