/*
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.GPL.
 *
 * BSD LICENSE
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef _SCI_HOST_H_
#define _SCI_HOST_H_

#include <scsi/sas_ata.h>
#include "remote_device.h"
#include "phy.h"
#include "isci.h"
#include "remote_node_table.h"
#include "registers.h"
#include "unsolicited_frame_control.h"
#include "probe_roms.h"
struct scu_task_context;
/**
 * struct sci_power_control -
 *
 * This structure defines the fields for managing power control for direct
 * attached disk devices.
 */
struct sci_power_control {
	/**
	 * This field is set when the power control timer is running and cleared when
	 * it is not.
	 */
	bool timer_started;

	/**
	 * Timer to control when the directed attached disks can consume power.
	 */
	struct sci_timer timer;

	/**
	 * This field is used to keep track of how many phys are put into the
	 * requesters field.
	 */
	u8 phys_waiting;

	/**
	 * This field is used to keep track of how many phys have been granted to consume power
	 */
	u8 phys_granted_power;

	/**
	 * This field is an array of phys that we are waiting on. The phys are direct
	 * mapped into requesters via struct sci_phy.phy_index
	 */
	struct isci_phy *requesters[SCI_MAX_PHYS];
};
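/*
 * Illustrative sketch (not a driver API, variable names hypothetical): since
 * requesters is direct mapped by phy index per the comment above, queueing
 * and dequeueing a phy for power control amounts to:
 *
 *	power_control->requesters[iphy->phy_index] = iphy;	(queue)
 *	power_control->requesters[iphy->phy_index] = NULL;	(dequeue)
 */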
struct sci_port_configuration_agent;
typedef void (*port_config_fn)(struct isci_host *,
			       struct sci_port_configuration_agent *,
			       struct isci_port *, struct isci_phy *);

struct sci_port_configuration_agent {
	u16 phy_configured_mask;
	u16 phy_ready_mask;
	struct {
		u8 min_index;
		u8 max_index;
	} phy_valid_port_range[SCI_MAX_PHYS];
	bool timer_pending;
	port_config_fn link_up_handler;
	port_config_fn link_down_handler;
	struct sci_timer timer;
};
/**
 * isci_host - primary host/controller object
 * @timer: timeout start/stop operations
 * @device_table: rni (hw remote node index) to remote device lookup table
 * @available_remote_nodes: rni allocator
 * @power_control: manage device spin up
 * @io_request_sequence: generation number for tci's (task contexts)
 * @task_context_table: hw task context table
 * @remote_node_context_table: hw remote node context table
 * @completion_queue: hw-producer driver-consumer communication ring
 * @completion_queue_get: tracks the driver 'head' of the ring to notify hw
 * @logical_port_entries: min({driver|silicon}-supported-port-count)
 * @remote_node_entries: min({driver|silicon}-supported-node-count)
 * @task_context_entries: min({driver|silicon}-supported-task-count)
 * @phy_timer: phy startup timer
 * @invalid_phy_mask: if an invalid_link_up notification is reported a bit for
 *		      the phy index is set so further notifications are not
 *		      made. Once the phy reports link up and is made part of a
 *		      port then this bit is cleared.
 */
struct isci_host {
	struct sci_base_state_machine sm;
	/* XXX can we time this externally */
	struct sci_timer timer;
	/* XXX drop reference module params directly */
	struct sci_user_parameters user_parameters;
	/* XXX no need to be a union */
	struct sci_oem_params oem_parameters;
	struct sci_port_configuration_agent port_agent;
	struct isci_remote_device *device_table[SCI_MAX_REMOTE_DEVICES];
	struct sci_remote_node_table available_remote_nodes;
	struct sci_power_control power_control;
	u8 io_request_sequence[SCI_MAX_IO_REQUESTS];
	struct scu_task_context *task_context_table;
	dma_addr_t task_context_dma;
	union scu_remote_node_context *remote_node_context_table;
	u32 *completion_queue;
	u32 completion_queue_get;
	u32 logical_port_entries;
	u32 remote_node_entries;
	u32 task_context_entries;
	struct sci_unsolicited_frame_control uf_control;

	struct sci_timer phy_timer;
	bool phy_startup_timer_pending;
	u32 next_phy_to_start;
	/* XXX convert to unsigned long and use bitops */
	u8 invalid_phy_mask;

	/* TODO attempt dynamic interrupt coalescing scheme */
	u16 interrupt_coalesce_number;
	u32 interrupt_coalesce_timeout;
	struct smu_registers __iomem *smu_registers;
	struct scu_registers __iomem *scu_registers;

	u16 tci_pool[SCI_MAX_IO_REQUESTS];

	int id; /* unique within a given pci device */
	struct isci_phy phys[SCI_MAX_PHYS];
	struct isci_port ports[SCI_MAX_PORTS + 1]; /* includes dummy port */
	struct sas_ha_struct sas_ha;

	spinlock_t state_lock;
	struct pci_dev *pdev;
	enum isci_status status;
	#define IHOST_START_PENDING 0
	#define IHOST_STOP_PENDING 1
	unsigned long flags;
	wait_queue_head_t eventq;
	struct Scsi_Host *shost;
	struct tasklet_struct completion_tasklet;
	struct list_head requests_to_complete;
	struct list_head requests_to_errorback;
	spinlock_t scic_lock;
	struct isci_request *reqs[SCI_MAX_IO_REQUESTS];
	struct isci_remote_device devices[SCI_MAX_REMOTE_DEVICES];
};
/**
 * enum sci_controller_states - This enumeration depicts all the states
 *    for the common controller state machine.
 */
enum sci_controller_states {
	/**
	 * Simply the initial state for the base controller state machine.
	 */
	SCIC_INITIAL = 0,
	/**
	 * This state indicates that the controller is reset. The memory for
	 * the controller is in its initial state, but the controller requires
	 * initialization.
	 * This state is entered from the INITIAL state.
	 * This state is entered from the RESETTING state.
	 */
	SCIC_RESET,
	/**
	 * This state is typically an action state that indicates the controller
	 * is in the process of initialization. In this state no new IO operations
	 * are permitted.
	 * This state is entered from the RESET state.
	 */
	SCIC_INITIALIZING,
	/**
	 * This state indicates that the controller has been successfully
	 * initialized. In this state no new IO operations are permitted.
	 * This state is entered from the INITIALIZING state.
	 */
	SCIC_INITIALIZED,
	/**
	 * This state indicates the controller is in the process of becoming
	 * ready (i.e. starting). In this state no new IO operations are permitted.
	 * This state is entered from the INITIALIZED state.
	 */
	SCIC_STARTING,
	/**
	 * This state indicates the controller is now ready. Thus, the user
	 * is able to perform IO operations on the controller.
	 * This state is entered from the STARTING state.
	 */
	SCIC_READY,
	/**
	 * This state is typically an action state that indicates the controller
	 * is in the process of resetting. Thus, the user is unable to perform
	 * IO operations on the controller. A reset is considered destructive in
	 * most cases.
	 * This state is entered from the READY state.
	 * This state is entered from the FAILED state.
	 * This state is entered from the STOPPED state.
	 */
	SCIC_RESETTING,
	/**
	 * This state indicates that the controller is in the process of stopping.
	 * In this state no new IO operations are permitted, but existing IO
	 * operations are allowed to complete.
	 * This state is entered from the READY state.
	 */
	SCIC_STOPPING,
	/**
	 * This state indicates that the controller has successfully been stopped.
	 * In this state no new IO operations are permitted.
	 * This state is entered from the STOPPING state.
	 */
	SCIC_STOPPED,
	/**
	 * This state indicates that the controller could not successfully be
	 * initialized. In this state no new IO operations are permitted.
	 * This state is entered from the INITIALIZING state.
	 * This state is entered from the STARTING state.
	 * This state is entered from the STOPPING state.
	 * This state is entered from the RESETTING state.
	 */
	SCIC_FAILED,
};
/**
 * struct isci_pci_info - This class represents the pci function containing the
 *    controllers. Depending on PCI SKU, there could be up to 2 controllers in
 *    the PCI function.
 */
#define SCI_MAX_MSIX_INT (SCI_NUM_MSI_X_INT*SCI_MAX_CONTROLLERS)

struct isci_pci_info {
	struct msix_entry msix_entries[SCI_MAX_MSIX_INT];
	struct isci_host *hosts[SCI_MAX_CONTROLLERS];
	struct isci_orom *orom;
};
static inline struct isci_pci_info *to_pci_info(struct pci_dev *pdev)
{
	return pci_get_drvdata(pdev);
}
#define for_each_isci_host(id, ihost, pdev) \
	for (id = 0, ihost = to_pci_info(pdev)->hosts[id]; \
	     id < ARRAY_SIZE(to_pci_info(pdev)->hosts) && ihost; \
	     ihost = to_pci_info(pdev)->hosts[++id])
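/*
 * Illustrative usage sketch (hypothetical caller, not part of this header):
 * walk every controller instantiated behind one PCI function.
 *
 *	int id;
 *	struct isci_host *ihost;
 *
 *	for_each_isci_host(id, ihost, pdev)
 *		dev_dbg(&pdev->dev, "controller %d at %p\n", id, ihost);
 */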
static inline enum isci_status isci_host_get_state(struct isci_host *isci_host)
{
	return isci_host->status;
}
static inline void isci_host_change_state(struct isci_host *isci_host,
					  enum isci_status status)
{
	unsigned long flags;

	dev_dbg(&isci_host->pdev->dev,
		"%s: isci_host = %p, state = 0x%x",
		__func__,
		isci_host,
		status);
	spin_lock_irqsave(&isci_host->state_lock, flags);
	isci_host->status = status;
	spin_unlock_irqrestore(&isci_host->state_lock, flags);
}
static inline void wait_for_start(struct isci_host *ihost)
{
	wait_event(ihost->eventq, !test_bit(IHOST_START_PENDING, &ihost->flags));
}

static inline void wait_for_stop(struct isci_host *ihost)
{
	wait_event(ihost->eventq, !test_bit(IHOST_STOP_PENDING, &ihost->flags));
}

static inline void wait_for_device_start(struct isci_host *ihost, struct isci_remote_device *idev)
{
	wait_event(ihost->eventq, !test_bit(IDEV_START_PENDING, &idev->flags));
}

static inline void wait_for_device_stop(struct isci_host *ihost, struct isci_remote_device *idev)
{
	wait_event(ihost->eventq, !test_bit(IDEV_STOP_PENDING, &idev->flags));
}
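/*
 * Illustrative sketch (hypothetical caller, not the driver's actual start
 * path): a *_PENDING bit is set before the asynchronous operation is kicked
 * off; the completion path is expected to clear the bit and wake eventq,
 * which releases the waiter.
 *
 *	set_bit(IHOST_START_PENDING, &ihost->flags);
 *	... kick off the controller start ...
 *	wait_for_start(ihost);
 */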
static inline struct isci_host *dev_to_ihost(struct domain_device *dev)
{
	return dev->port->ha->lldd_ha;
}
/* we always use protocol engine group zero */
#define ISCI_PEG 0
/* see sci_controller_io_tag_allocate|free for how seq and tci are built */
#define ISCI_TAG(seq, tci) (((u16) (seq)) << 12 | tci)

/* these are returned by the hardware, so sanitize them */
#define ISCI_TAG_SEQ(tag) (((tag) >> 12) & (SCI_MAX_SEQ-1))
#define ISCI_TAG_TCI(tag) ((tag) & (SCI_MAX_IO_REQUESTS-1))
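/*
 * Illustrative sketch of how a tag round-trips through the helpers above
 * (the concrete values assume SCI_MAX_SEQ is 16 and SCI_MAX_IO_REQUESTS is a
 * power of two, which is what the masks imply):
 *
 *	u16 tag = ISCI_TAG(3, 10);	(0x300a: seq in bits 15:12, tci below)
 *	u8  seq = ISCI_TAG_SEQ(tag);	(3)
 *	u16 tci = ISCI_TAG_TCI(tag);	(10)
 */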
/* interrupt coalescing baseline: 9 == 3 to 5us interrupt delay per command */
#define ISCI_COALESCE_BASE 9
/* expander attached sata devices require 3 rnc slots */
static inline int sci_remote_device_node_count(struct isci_remote_device *idev)
{
	struct domain_device *dev = idev->domain_dev;

	if ((dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) &&
	    !idev->is_direct_attached)
		return SCU_STP_REMOTE_NODE_COUNT;
	return SCU_SSP_REMOTE_NODE_COUNT;
}
/**
 * sci_controller_clear_invalid_phy() -
 *
 * This macro will clear the bit in the invalid phy mask for this controller
 * object. This is used to control messages reported for invalid link up
 * notifications.
 */
#define sci_controller_clear_invalid_phy(controller, phy) \
	((controller)->invalid_phy_mask &= ~(1 << (phy)->phy_index))
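/*
 * Illustrative sketch (hypothetical call site): once a phy that previously
 * raised an invalid_link_up notification reports link up and joins a port,
 * its bit is dropped so later notifications for that phy are reported again.
 *
 *	sci_controller_clear_invalid_phy(ihost, iphy);
 */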
static inline struct device *sciphy_to_dev(struct isci_phy *iphy)
{
	if (!iphy || !iphy->isci_port || !iphy->isci_port->isci_host)
		return NULL;

	return &iphy->isci_port->isci_host->pdev->dev;
}

static inline struct device *sciport_to_dev(struct isci_port *iport)
{
	if (!iport || !iport->isci_host)
		return NULL;

	return &iport->isci_host->pdev->dev;
}

static inline struct device *scirdev_to_dev(struct isci_remote_device *idev)
{
	if (!idev || !idev->isci_port || !idev->isci_port->isci_host)
		return NULL;

	return &idev->isci_port->isci_host->pdev->dev;
}
static inline bool is_a2(struct pci_dev *pdev)
{
	if (pdev->revision < 4)
		return true;
	return false;
}

static inline bool is_b0(struct pci_dev *pdev)
{
	if (pdev->revision == 4)
		return true;
	return false;
}

static inline bool is_c0(struct pci_dev *pdev)
{
	if (pdev->revision == 5)
		return true;
	return false;
}

static inline bool is_c1(struct pci_dev *pdev)
{
	if (pdev->revision >= 6)
		return true;
	return false;
}
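/*
 * Summary of the checks above: PCI revision < 4 is treated as A2 silicon,
 * == 4 as B0, == 5 as C0, and >= 6 as C1.
 */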
enum cable_selections {
	short_cable     = 0,
	long_cable      = 1,
	medium_cable    = 2,
	undefined_cable = 3
};

#define CABLE_OVERRIDE_DISABLED (0x10000)

static inline int is_cable_select_overridden(void)
{
	return cable_selection_override < CABLE_OVERRIDE_DISABLED;
}
enum cable_selections decode_cable_selection(struct isci_host *ihost, int phy);
void validate_cable_selections(struct isci_host *ihost);
char *lookup_cable_names(enum cable_selections);
/* set hw control for 'activity', even though active enclosures seem to drive
 * the activity led on their own. Skip setting FSENG control on 'status' due
 * to unexpected operation and 'error' due to not being a supported automatic
 * FSENG output
 */
#define SGPIO_HW_CONTROL 0x00000443

static inline int isci_gpio_count(struct isci_host *ihost)
{
	return ARRAY_SIZE(ihost->scu_registers->peg0.sgpio.output_data_select);
}
void sci_controller_post_request(struct isci_host *ihost,
				 u32 request);
void sci_controller_release_frame(struct isci_host *ihost,
				  u32 frame_index);
void sci_controller_copy_sata_response(void *response_buffer,
				       void *frame_header,
				       void *frame_buffer);
enum sci_status sci_controller_allocate_remote_node_context(struct isci_host *ihost,
							     struct isci_remote_device *idev,
							     u16 *node_id);
void sci_controller_free_remote_node_context(
	struct isci_host *ihost,
	struct isci_remote_device *idev,
	u16 node_id);

struct isci_request *sci_request_by_tag(struct isci_host *ihost,
					u16 io_tag);
void sci_controller_power_control_queue_insert(
	struct isci_host *ihost,
	struct isci_phy *iphy);

void sci_controller_power_control_queue_remove(
	struct isci_host *ihost,
	struct isci_phy *iphy);

void sci_controller_link_up(
	struct isci_host *ihost,
	struct isci_port *iport,
	struct isci_phy *iphy);

void sci_controller_link_down(
	struct isci_host *ihost,
	struct isci_port *iport,
	struct isci_phy *iphy);

void sci_controller_remote_device_stopped(
	struct isci_host *ihost,
	struct isci_remote_device *idev);

void sci_controller_copy_task_context(
	struct isci_host *ihost,
	struct isci_request *ireq);

void sci_controller_register_setup(struct isci_host *ihost);

enum sci_status sci_controller_continue_io(struct isci_request *ireq);
int isci_host_scan_finished(struct Scsi_Host *, unsigned long);
void isci_host_scan_start(struct Scsi_Host *);
u16 isci_alloc_tag(struct isci_host *ihost);
enum sci_status isci_free_tag(struct isci_host *ihost, u16 io_tag);
void isci_tci_free(struct isci_host *ihost, u16 tci);

int isci_host_init(struct isci_host *);

void isci_host_init_controller_names(
	struct isci_host *isci_host,
	unsigned int controller_idx);
void isci_host_deinit(
	struct isci_host *);

void isci_host_port_link_up(
	struct isci_host *,
	struct isci_port *,
	struct isci_phy *);
int isci_host_dev_found(struct domain_device *);

void isci_host_remote_device_start_complete(
	struct isci_host *,
	struct isci_remote_device *,
	enum sci_status);
void sci_controller_disable_interrupts(
	struct isci_host *ihost);

enum sci_status sci_controller_start_io(
	struct isci_host *ihost,
	struct isci_remote_device *idev,
	struct isci_request *ireq);

enum sci_task_status sci_controller_start_task(
	struct isci_host *ihost,
	struct isci_remote_device *idev,
	struct isci_request *ireq);

enum sci_status sci_controller_terminate_request(
	struct isci_host *ihost,
	struct isci_remote_device *idev,
	struct isci_request *ireq);

enum sci_status sci_controller_complete_io(
	struct isci_host *ihost,
	struct isci_remote_device *idev,
	struct isci_request *ireq);

void sci_port_configuration_agent_construct(
	struct sci_port_configuration_agent *port_agent);

enum sci_status sci_port_configuration_agent_initialize(
	struct isci_host *ihost,
	struct sci_port_configuration_agent *port_agent);
int isci_gpio_write(struct sas_ha_struct *, u8 reg_type, u8 reg_index,
		    u8 reg_count, u8 *write_data);

#endif /* _SCI_HOST_H_ */