// SPDX-License-Identifier: GPL-2.0-only
/*
 * Marvell 88SE64xx hardware specific
 *
 * Copyright 2007 Red Hat, Inc.
 * Copyright 2008 Marvell. <kewei@marvell.com>
 * Copyright 2009-2011 Marvell. <yuxiangl@marvell.com>
 */

#include "mv_sas.h"
#include "mv_64xx.h"
#include "mv_chips.h"

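/* Read the global port-type register and flag phy i as SAS or SATA. */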
static void mvs_64xx_detect_porttype(struct mvs_info *mvi, int i)
{
	void __iomem *regs = mvi->regs;
	u32 reg;
	struct mvs_phy *phy = &mvi->phy[i];

	reg = mr32(MVS_GBL_PORT_TYPE);
	phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA);
	if (reg & MODE_SAS_SATA & (1 << i))
		phy->phy_type |= PORT_TYPE_SAS;
	else
		phy->phy_type |= PORT_TYPE_SATA;
}

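/*
 * Enable command transmission on a phy. Controllers with more than
 * MVS_SOC_PORTS phys keep the per-port enable bits in a second field
 * of MVS_PCS, hence the two shift constants.
 */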
static void mvs_64xx_enable_xmt(struct mvs_info *mvi, int phy_id)
{
	void __iomem *regs = mvi->regs;
	u32 tmp;

	tmp = mr32(MVS_PCS);
	if (mvi->chip->n_phy <= MVS_SOC_PORTS)
		tmp |= 1 << (phy_id + PCS_EN_PORT_XMT_SHIFT);
	else
		tmp |= 1 << (phy_id + PCS_EN_PORT_XMT_SHIFT2);
	mw32(MVS_PCS, tmp);
}

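/*
 * Chip-specific phy tuning on top of the generic mvs_phy_hacks().
 * The VSR values written below are presumably vendor-tuned magic
 * numbers; they are not documented beyond the register names.
 */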
static void mvs_64xx_phy_hacks(struct mvs_info *mvi)
{
	void __iomem *regs = mvi->regs;
	int i;

	mvs_phy_hacks(mvi);

	if (!(mvi->flags & MVF_FLAG_SOC)) {
		for (i = 0; i < MVS_SOC_PORTS; i++) {
			mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE8);
			mvs_write_port_vsr_data(mvi, i, 0x2F0);
		}
	} else {
		/* disable auto port detection */
		mw32(MVS_GBL_PORT_TYPE, 0);
		for (i = 0; i < mvi->chip->n_phy; i++) {
			mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE7);
			mvs_write_port_vsr_data(mvi, i, 0x90000000);
			mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE9);
			mvs_write_port_vsr_data(mvi, i, 0x50f2);
			mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE11);
			mvs_write_port_vsr_data(mvi, i, 0x0e);
		}
	}
}

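/*
 * Pulse the per-phy link-reset bit: set it, wait 10 ms, then restore
 * the original register value. PCIe parts reach the bit through PCI
 * config space, SoC parts through MMIO.
 */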
static void mvs_64xx_stp_reset(struct mvs_info *mvi, u32 phy_id)
{
	void __iomem *regs = mvi->regs;
	u32 reg, tmp;

	if (!(mvi->flags & MVF_FLAG_SOC)) {
		if (phy_id < MVS_SOC_PORTS)
			pci_read_config_dword(mvi->pdev, PCR_PHY_CTL, &reg);
		else
			pci_read_config_dword(mvi->pdev, PCR_PHY_CTL2, &reg);
	} else {
		reg = mr32(MVS_PHY_CTL);
	}

	tmp = reg;
	if (phy_id < MVS_SOC_PORTS)
		tmp |= (1U << phy_id) << PCTL_LINK_OFFS;
	else
		tmp |= (1U << (phy_id - MVS_SOC_PORTS)) << PCTL_LINK_OFFS;

	if (!(mvi->flags & MVF_FLAG_SOC)) {
		if (phy_id < MVS_SOC_PORTS) {
			pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, tmp);
			mdelay(10);
			pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, reg);
		} else {
			pci_write_config_dword(mvi->pdev, PCR_PHY_CTL2, tmp);
			mdelay(10);
			pci_write_config_dword(mvi->pdev, PCR_PHY_CTL2, reg);
		}
	} else {
		mw32(MVS_PHY_CTL, tmp);
		mdelay(10);
		mw32(MVS_PHY_CTL, reg);
	}
}

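/*
 * Issue a hard or soft phy reset. A hard reset is self-clearing, so
 * poll until the chip drops PHY_RST_HARD.
 */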
static void mvs_64xx_phy_reset(struct mvs_info *mvi, u32 phy_id, int hard)
{
	u32 tmp;

	tmp = mvs_read_port_irq_stat(mvi, phy_id);
	tmp &= ~PHYEV_RDY_CH;
	mvs_write_port_irq_stat(mvi, phy_id, tmp);
	tmp = mvs_read_phy_ctl(mvi, phy_id);
	if (hard == MVS_HARD_RESET)
		tmp |= PHY_RST_HARD;
	else if (hard == MVS_SOFT_RESET)
		tmp |= PHY_RST;
	mvs_write_phy_ctl(mvi, phy_id, tmp);
	if (hard) {
		do {
			tmp = mvs_read_phy_ctl(mvi, phy_id);
		} while (tmp & PHY_RST_HARD);
	}
}

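/*
 * Acknowledge SATA register-set (SRS) interrupts, either every pending
 * bit or just the one belonging to reg_set.
 */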
static void
mvs_64xx_clear_srs_irq(struct mvs_info *mvi, u8 reg_set, u8 clear_all)
{
	void __iomem *regs = mvi->regs;
	u32 tmp;

	if (clear_all) {
		tmp = mr32(MVS_INT_STAT_SRS_0);
		if (tmp) {
			printk(KERN_DEBUG "check SRS 0 %08X.\n", tmp);
			mw32(MVS_INT_STAT_SRS_0, tmp);
		}
	} else {
		tmp = mr32(MVS_INT_STAT_SRS_0);
		if (tmp & (1 << (reg_set % 32))) {
			printk(KERN_DEBUG "register set 0x%x was stopped.\n",
			       reg_set);
			mw32(MVS_INT_STAT_SRS_0, 1 << (reg_set % 32));
		}
	}
}

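/*
 * Full controller reset: mask interrupts, assert HBA_RST (self-clearing)
 * and poll for up to roughly ten seconds for it to complete.
 */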
static int mvs_64xx_chip_reset(struct mvs_info *mvi)
{
	void __iomem *regs = mvi->regs;
	u32 tmp;
	int i;

	/* make sure interrupts are masked immediately (paranoia) */
	mw32(MVS_GBL_CTL, 0);
	tmp = mr32(MVS_GBL_CTL);

	/* Reset Controller */
	if (!(tmp & HBA_RST)) {
		if (mvi->flags & MVF_PHY_PWR_FIX) {
			pci_read_config_dword(mvi->pdev, PCR_PHY_CTL, &tmp);
			tmp &= ~PCTL_PWR_OFF;
			tmp |= PCTL_PHY_DSBL;
			pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, tmp);

			pci_read_config_dword(mvi->pdev, PCR_PHY_CTL2, &tmp);
			tmp &= ~PCTL_PWR_OFF;
			tmp |= PCTL_PHY_DSBL;
			pci_write_config_dword(mvi->pdev, PCR_PHY_CTL2, tmp);
		}
	}

	/* make sure interrupts are masked immediately (paranoia) */
	mw32(MVS_GBL_CTL, 0);
	tmp = mr32(MVS_GBL_CTL);

	/* Reset Controller */
	if (!(tmp & HBA_RST)) {
		/* global reset, incl. COMRESET/H_RESET_N (self-clearing) */
		mw32_f(MVS_GBL_CTL, HBA_RST);
	}

	/* wait for reset to finish; timeout is just a guess */
	i = 1000;
	while (i-- > 0) {
		msleep(10);

		if (!(mr32(MVS_GBL_CTL) & HBA_RST))
			break;
	}
	if (mr32(MVS_GBL_CTL) & HBA_RST) {
		dev_printk(KERN_ERR, mvi->dev, "HBA reset failed\n");
		return -EBUSY;
	}
	return 0;
}

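/*
 * Per-phy disable/enable. On PCIe parts phys 0-3 are controlled via
 * PCR_PHY_CTL and phys 4-7 via PCR_PHY_CTL2; SoC parts use the MMIO
 * MVS_PHY_CTL register instead.
 */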
static void mvs_64xx_phy_disable(struct mvs_info *mvi, u32 phy_id)
{
	void __iomem *regs = mvi->regs;
	u32 tmp;

	if (!(mvi->flags & MVF_FLAG_SOC)) {
		u32 offs;

		if (phy_id < 4) {
			offs = PCR_PHY_CTL;
		} else {
			offs = PCR_PHY_CTL2;
			phy_id -= 4;
		}
		pci_read_config_dword(mvi->pdev, offs, &tmp);
		tmp |= 1U << (PCTL_PHY_DSBL_OFFS + phy_id);
		pci_write_config_dword(mvi->pdev, offs, tmp);
	} else {
		tmp = mr32(MVS_PHY_CTL);
		tmp |= 1U << (PCTL_PHY_DSBL_OFFS + phy_id);
		mw32(MVS_PHY_CTL, tmp);
	}
}

static void mvs_64xx_phy_enable(struct mvs_info *mvi, u32 phy_id)
{
	void __iomem *regs = mvi->regs;
	u32 tmp;

	if (!(mvi->flags & MVF_FLAG_SOC)) {
		u32 offs;

		if (phy_id < 4) {
			offs = PCR_PHY_CTL;
		} else {
			offs = PCR_PHY_CTL2;
			phy_id -= 4;
		}
		pci_read_config_dword(mvi->pdev, offs, &tmp);
		tmp &= ~(1U << (PCTL_PHY_DSBL_OFFS + phy_id));
		pci_write_config_dword(mvi->pdev, offs, tmp);
	} else {
		tmp = mr32(MVS_PHY_CTL);
		tmp &= ~(1U << (PCTL_PHY_DSBL_OFFS + phy_id));
		mw32(MVS_PHY_CTL, tmp);
	}
}

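/*
 * One-time controller bring-up: chip reset, PCI/phy power setup, DMA
 * ring base registers, per-phy initialization, endianness control and
 * interrupt coalescing, then start the TX/RX engines.
 */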
static int mvs_64xx_init(struct mvs_info *mvi)
{
	void __iomem *regs = mvi->regs;
	int i;
	u32 tmp, cctl;

	if (mvi->pdev && mvi->pdev->revision == 0)
		mvi->flags |= MVF_PHY_PWR_FIX;
	if (!(mvi->flags & MVF_FLAG_SOC)) {
		mvs_show_pcie_usage(mvi);
		tmp = mvs_64xx_chip_reset(mvi);
		if (tmp)
			return tmp;
	} else {
		tmp = mr32(MVS_PHY_CTL);
		tmp &= ~PCTL_PWR_OFF;
		tmp |= PCTL_PHY_DSBL;
		mw32(MVS_PHY_CTL, tmp);
	}

	/* Init Chip */
	/* make sure RST is set; HBA_RST /should/ have done that for us */
	cctl = mr32(MVS_CTL) & 0xFFFF;
	if (cctl & CCTL_RST)
		cctl &= ~CCTL_RST;
	else
		mw32_f(MVS_CTL, cctl | CCTL_RST);

	if (!(mvi->flags & MVF_FLAG_SOC)) {
		/* write to device control _AND_ device status register */
		pci_read_config_dword(mvi->pdev, PCR_DEV_CTRL, &tmp);
		tmp &= ~PRD_REQ_MASK;
		tmp |= PRD_REQ_SIZE;
		pci_write_config_dword(mvi->pdev, PCR_DEV_CTRL, tmp);

		pci_read_config_dword(mvi->pdev, PCR_PHY_CTL, &tmp);
		tmp &= ~PCTL_PWR_OFF;
		tmp &= ~PCTL_PHY_DSBL;
		pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, tmp);

		pci_read_config_dword(mvi->pdev, PCR_PHY_CTL2, &tmp);
		tmp &= PCTL_PWR_OFF;
		tmp &= ~PCTL_PHY_DSBL;
		pci_write_config_dword(mvi->pdev, PCR_PHY_CTL2, tmp);
	} else {
		tmp = mr32(MVS_PHY_CTL);
		tmp &= ~PCTL_PWR_OFF;
		tmp |= PCTL_COM_ON;
		tmp &= ~PCTL_PHY_DSBL;
		tmp |= PCTL_LINK_RST;
		mw32(MVS_PHY_CTL, tmp);
		msleep(100);
		tmp &= ~PCTL_LINK_RST;
		mw32(MVS_PHY_CTL, tmp);
		msleep(100);
	}

	/* reset control */
	mw32(MVS_PCS, 0);
	/* init phys */
	mvs_64xx_phy_hacks(mvi);

	tmp = mvs_cr32(mvi, CMD_PHY_MODE_21);
	tmp &= 0x0000ffff;
	tmp |= 0x00fa0000;
	mvs_cw32(mvi, CMD_PHY_MODE_21, tmp);

	/* enable auto port detection */
	mw32(MVS_GBL_PORT_TYPE, MODE_AUTO_DET_EN);

	mw32(MVS_CMD_LIST_LO, mvi->slot_dma);
	mw32(MVS_CMD_LIST_HI, (mvi->slot_dma >> 16) >> 16);

	mw32(MVS_RX_FIS_LO, mvi->rx_fis_dma);
	mw32(MVS_RX_FIS_HI, (mvi->rx_fis_dma >> 16) >> 16);

	mw32(MVS_TX_CFG, MVS_CHIP_SLOT_SZ);
	mw32(MVS_TX_LO, mvi->tx_dma);
	mw32(MVS_TX_HI, (mvi->tx_dma >> 16) >> 16);

	mw32(MVS_RX_CFG, MVS_RX_RING_SZ);
	mw32(MVS_RX_LO, mvi->rx_dma);
	mw32(MVS_RX_HI, (mvi->rx_dma >> 16) >> 16);

	for (i = 0; i < mvi->chip->n_phy; i++) {
		/* set phy local SAS address */
		/* the 64xx chip wants the SAS address in little endian */
		mvs_set_sas_addr(mvi, i, PHYR_ADDR_LO, PHYR_ADDR_HI,
				cpu_to_be64(mvi->phy[i].dev_sas_addr));

		mvs_64xx_enable_xmt(mvi, i);

		mvs_64xx_phy_reset(mvi, i, MVS_HARD_RESET);
		msleep(500);
		mvs_64xx_detect_porttype(mvi, i);
	}
	if (mvi->flags & MVF_FLAG_SOC) {
		/* set select registers */
		writel(0x0E008000, regs + 0x000);
		writel(0x59000008, regs + 0x004);
		writel(0x20, regs + 0x008);
		writel(0x20, regs + 0x00c);
		writel(0x20, regs + 0x010);
		writel(0x20, regs + 0x014);
		writel(0x20, regs + 0x018);
		writel(0x20, regs + 0x01c);
	}
	for (i = 0; i < mvi->chip->n_phy; i++) {
		/* clear phy int status */
		tmp = mvs_read_port_irq_stat(mvi, i);
		tmp &= ~PHYEV_SIG_FIS;
		mvs_write_port_irq_stat(mvi, i, tmp);

		/* set phy int mask */
		tmp = PHYEV_RDY_CH | PHYEV_BROAD_CH | PHYEV_UNASSOC_FIS |
			PHYEV_ID_DONE | PHYEV_DCDR_ERR | PHYEV_CRC_ERR |
			PHYEV_DEC_ERR;
		mvs_write_port_irq_mask(mvi, i, tmp);

		msleep(100);
		mvs_update_phyinfo(mvi, i, 1);
	}

	/* little endian for open address and command table, etc. */
	cctl = mr32(MVS_CTL);
	cctl |= CCTL_ENDIAN_CMD;
	cctl |= CCTL_ENDIAN_DATA;
	cctl &= ~CCTL_ENDIAN_OPEN;
	cctl |= CCTL_ENDIAN_RSP;
	mw32_f(MVS_CTL, cctl);

	/* reset CMD queue */
	tmp = mr32(MVS_PCS);
	tmp |= PCS_CMD_RST;
	tmp &= ~PCS_SELF_CLEAR;
	mw32(MVS_PCS, tmp);
	/*
	 * The max coalescing count is 0x1ff while our max slot count is
	 * 0x200; using the slot count directly would truncate to 0.
	 */
	tmp = 0;
	if (MVS_CHIP_SLOT_SZ > 0x1ff)
		mw32(MVS_INT_COAL, 0x1ff | COAL_EN);
	else
		mw32(MVS_INT_COAL, MVS_CHIP_SLOT_SZ | COAL_EN);

	tmp = 0x10000 | interrupt_coalescing;
	mw32(MVS_INT_COAL_TMOUT, tmp);

	/* ladies and gentlemen, start your engines */
	mw32(MVS_TX_CFG, 0);
	mw32(MVS_TX_CFG, MVS_CHIP_SLOT_SZ | TX_EN);
	mw32(MVS_RX_CFG, MVS_RX_RING_SZ | RX_EN);
	/* enable CMD/CMPL_Q/RESP mode */
	mw32(MVS_PCS, PCS_SATA_RETRY | PCS_FIS_RX_EN |
		PCS_CMD_EN | PCS_CMD_STOP_ERR);

	/* enable completion queue interrupt */
	tmp = (CINT_PORT_MASK | CINT_DONE | CINT_MEM | CINT_SRS |
		CINT_CI_STOP | CINT_DMA_PCIE);

	mw32(MVS_INT_MASK, tmp);

	/* Enable SRS interrupt */
	mw32(MVS_INT_MASK_SRS_0, 0xFFFF);

	return 0;
}

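/*
 * Map the register BARs: the main register window plus the extended
 * window (regs_ex) that the SPI helpers below use.
 */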
static int mvs_64xx_ioremap(struct mvs_info *mvi)
{
	if (!mvs_ioremap(mvi, 4, 2))
		return 0;
	return -1;
}

static void mvs_64xx_iounmap(struct mvs_info *mvi)
{
	mvs_iounmap(mvi->regs);
	mvs_iounmap(mvi->regs_ex);
}

static void mvs_64xx_interrupt_enable(struct mvs_info *mvi)
{
	void __iomem *regs = mvi->regs;
	u32 tmp;

	tmp = mr32(MVS_GBL_CTL);
	mw32(MVS_GBL_CTL, tmp | INT_EN);
}

static void mvs_64xx_interrupt_disable(struct mvs_info *mvi)
{
	void __iomem *regs = mvi->regs;
	u32 tmp;

	tmp = mr32(MVS_GBL_CTL);
	mw32(MVS_GBL_CTL, tmp & ~INT_EN);
}

static u32 mvs_64xx_isr_status(struct mvs_info *mvi, int irq)
{
	void __iomem *regs = mvi->regs;
	u32 stat;

	if (!(mvi->flags & MVF_FLAG_SOC)) {
		stat = mr32(MVS_GBL_INT_STAT);

		if (stat == 0 || stat == 0xffffffff)
			return 0;
	} else {
		stat = 1;
	}
	return stat;
}

static irqreturn_t mvs_64xx_isr(struct mvs_info *mvi, int irq, u32 stat)
{
	void __iomem *regs = mvi->regs;

	/* clear CMD_CMPLT ASAP */
	mw32_f(MVS_INT_STAT, CINT_DONE);

	spin_lock(&mvi->lock);
	mvs_int_full(mvi);
	spin_unlock(&mvi->lock);

	return IRQ_HANDLED;
}

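/*
 * Kick a command slot and busy-wait until the chip acknowledges it.
 * The 0x00/0x40 values appear to be command-register offsets paired
 * with a per-slot bit; they are otherwise undocumented.
 */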
static void mvs_64xx_command_active(struct mvs_info *mvi, u32 slot_idx)
{
	u32 tmp;

	mvs_cw32(mvi, 0x40 + (slot_idx >> 3), 1 << (slot_idx % 32));
	mvs_cw32(mvi, 0x00 + (slot_idx >> 3), 1 << (slot_idx % 32));
	do {
		tmp = mvs_cr32(mvi, 0x00 + (slot_idx >> 3));
	} while (tmp & 1 << (slot_idx % 32));
	do {
		tmp = mvs_cr32(mvi, 0x40 + (slot_idx >> 3));
	} while (tmp & 1 << (slot_idx % 32));
}

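/*
 * Stop command issue. For SATA, ack the register set's SRS interrupt
 * first; setting the 0xFF00 bits in MVS_PCS presumably halts per-port
 * command issue.
 */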
static void mvs_64xx_issue_stop(struct mvs_info *mvi, enum mvs_port_type type,
				u32 tfs)
{
	void __iomem *regs = mvi->regs;
	u32 tmp;

	if (type == PORT_TYPE_SATA) {
		tmp = mr32(MVS_INT_STAT_SRS_0) | (1U << tfs);
		mw32(MVS_INT_STAT_SRS_0, tmp);
	}
	mw32(MVS_INT_STAT, CINT_CI_STOP);
	tmp = mr32(MVS_PCS) | 0xFF00;
	mw32(MVS_PCS, tmp);
}

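/*
 * Release a SATA register set. Allocation bits for sets 0-15 live in
 * MVS_PCS, sets 16 and up in MVS_CTL; any stale SRS interrupt is
 * acked as well.
 */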
static void mvs_64xx_free_reg_set(struct mvs_info *mvi, u8 *tfs)
{
	void __iomem *regs = mvi->regs;
	u32 tmp, offs;

	if (*tfs == MVS_ID_NOT_MAPPED)
		return;

	offs = 1U << ((*tfs & 0x0f) + PCS_EN_SATA_REG_SHIFT);
	if (*tfs < 16) {
		tmp = mr32(MVS_PCS);
		mw32(MVS_PCS, tmp & ~offs);
	} else {
		tmp = mr32(MVS_CTL);
		mw32(MVS_CTL, tmp & ~offs);
	}

	tmp = mr32(MVS_INT_STAT_SRS_0) & (1U << *tfs);
	if (tmp)
		mw32(MVS_INT_STAT_SRS_0, tmp);

	*tfs = MVS_ID_NOT_MAPPED;
}

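/*
 * Allocate the first free SATA register set, mirroring the split
 * MVS_PCS/MVS_CTL bookkeeping in mvs_64xx_free_reg_set() above.
 */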
static u8 mvs_64xx_assign_reg_set(struct mvs_info *mvi, u8 *tfs)
{
	int i;
	u32 tmp, offs;
	void __iomem *regs = mvi->regs;

	if (*tfs != MVS_ID_NOT_MAPPED)
		return 0;

	tmp = mr32(MVS_PCS);

	for (i = 0; i < mvi->chip->srs_sz; i++) {
		if (i == 16)
			tmp = mr32(MVS_CTL);
		offs = 1U << ((i & 0x0f) + PCS_EN_SATA_REG_SHIFT);
		if (!(tmp & offs)) {
			*tfs = i;

			if (i < 16)
				mw32(MVS_PCS, tmp | offs);
			else
				mw32(MVS_CTL, tmp | offs);
			tmp = mr32(MVS_INT_STAT_SRS_0) & (1U << i);
			if (tmp)
				mw32(MVS_INT_STAT_SRS_0, tmp);
			return 0;
		}
	}
	return MVS_ID_NOT_MAPPED;
}

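/* Translate a DMA-mapped scatterlist into the chip's PRD entries. */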
static void mvs_64xx_make_prd(struct scatterlist *scatter, int nr, void *prd)
{
	int i;
	struct scatterlist *sg;
	struct mvs_prd *buf_prd = prd;

	for_each_sg(scatter, sg, nr, i) {
		buf_prd->addr = cpu_to_le64(sg_dma_address(sg));
		buf_prd->len = cpu_to_le32(sg_dma_len(sg));
		buf_prd++;
	}
}

static int mvs_64xx_oob_done(struct mvs_info *mvi, int i)
{
	u32 phy_st;

	mvs_write_port_cfg_addr(mvi, i, PHYR_PHY_STAT);
	phy_st = mvs_read_port_cfg_data(mvi, i);
	if (phy_st & PHY_OOB_DTCTD)
		return 1;
	return 0;
}

static void mvs_64xx_fix_phy_info(struct mvs_info *mvi, int i,
				  struct sas_identify_frame *id)
{
	struct mvs_phy *phy = &mvi->phy[i];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;

	sas_phy->linkrate =
		(phy->phy_status & PHY_NEG_SPP_PHYS_LINK_RATE_MASK) >>
			PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET;

	phy->minimum_linkrate =
		(phy->phy_status &
			PHY_MIN_SPP_PHYS_LINK_RATE_MASK) >> 8;
	phy->maximum_linkrate =
		(phy->phy_status &
			PHY_MAX_SPP_PHYS_LINK_RATE_MASK) >> 12;

	mvs_write_port_cfg_addr(mvi, i, PHYR_IDENTIFY);
	phy->dev_info = mvs_read_port_cfg_data(mvi, i);

	mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_DEV_INFO);
	phy->att_dev_info = mvs_read_port_cfg_data(mvi, i);

	mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_ADDR_HI);
	phy->att_dev_sas_addr =
		(u64) mvs_read_port_cfg_data(mvi, i) << 32;
	mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_ADDR_LO);
	phy->att_dev_sas_addr |= mvs_read_port_cfg_data(mvi, i);
	phy->att_dev_sas_addr = SAS_ADDR(&phy->att_dev_sas_addr);
}

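/*
 * Phy workaround: the late-clock bit in VSR phy mode 6 is cleared at
 * 1.5 Gbps and set for faster negotiated link rates.
 */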
static void mvs_64xx_phy_work_around(struct mvs_info *mvi, int i)
{
	u32 tmp;
	struct mvs_phy *phy = &mvi->phy[i];

	mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE6);
	tmp = mvs_read_port_vsr_data(mvi, i);
	if (((phy->phy_status & PHY_NEG_SPP_PHYS_LINK_RATE_MASK) >>
	     PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET) ==
		SAS_LINK_RATE_1_5_GBPS)
		tmp &= ~PHY_MODE6_LATECLK;
	else
		tmp |= PHY_MODE6_LATECLK;
	mvs_write_port_vsr_data(mvi, i, tmp);
}

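/*
 * Program the negotiable link-rate window: minimum rate in bits 8-11
 * of the phy control register, maximum in bits 12-15, then hard-reset
 * the phy so the new limits take effect.
 */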
static void mvs_64xx_phy_set_link_rate(struct mvs_info *mvi, u32 phy_id,
				       struct sas_phy_linkrates *rates)
{
	u32 lrmin = 0, lrmax = 0;
	u32 tmp;

	tmp = mvs_read_phy_ctl(mvi, phy_id);
	lrmin = (rates->minimum_linkrate << 8);
	lrmax = (rates->maximum_linkrate << 12);

	if (lrmin) {
		tmp &= ~(0xf << 8);
		tmp |= lrmin;
	}
	if (lrmax) {
		tmp &= ~(0xf << 12);
		tmp |= lrmax;
	}
	mvs_write_phy_ctl(mvi, phy_id, tmp);
	mvs_64xx_phy_reset(mvi, phy_id, MVS_HARD_RESET);
}

static void mvs_64xx_clear_active_cmds(struct mvs_info *mvi)
{
	u32 tmp;
	void __iomem *regs = mvi->regs;

	tmp = mr32(MVS_PCS);
	mw32(MVS_PCS, tmp & 0xFFFF);
	mw32(MVS_PCS, tmp);
	tmp = mr32(MVS_CTL);
	mw32(MVS_CTL, tmp & 0xFFFF);
	mw32(MVS_CTL, tmp);
}

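/*
 * Accessors for the SPI flash controller that lives in the extended
 * register window (regs_ex).
 */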
static u32 mvs_64xx_spi_read_data(struct mvs_info *mvi)
{
	void __iomem *regs = mvi->regs_ex;

	return ior32(SPI_DATA_REG_64XX);
}

static void mvs_64xx_spi_write_data(struct mvs_info *mvi, u32 data)
{
	void __iomem *regs = mvi->regs_ex;

	iow32(SPI_DATA_REG_64XX, data);
}

static int mvs_64xx_spi_buildcmd(struct mvs_info *mvi,
				 u32 *dwCmd, u8 cmd, u8 read, u8 length,
				 u32 addr)
{
	u32 dwTmp;

	dwTmp = ((u32)cmd << 24) | ((u32)length << 19);
	if (read)
		dwTmp |= 1U << 23;

	if (addr != MV_MAX_U32) {
		dwTmp |= 1U << 22;
		dwTmp |= (addr & 0x0003FFFF);
	}

	*dwCmd = dwTmp;
	return 0;
}

static int mvs_64xx_spi_issuecmd(struct mvs_info *mvi, u32 cmd)
{
	void __iomem *regs = mvi->regs_ex;
	int retry;

	for (retry = 0; retry < 1; retry++) {
		iow32(SPI_CTRL_REG_64XX, SPI_CTRL_VENDOR_ENABLE);
		iow32(SPI_CMD_REG_64XX, cmd);
		iow32(SPI_CTRL_REG_64XX,
		      SPI_CTRL_VENDOR_ENABLE | SPI_CTRL_SPISTART);
	}

	return 0;
}

static int mvs_64xx_spi_waitdataready(struct mvs_info *mvi, u32 timeout)
{
	void __iomem *regs = mvi->regs_ex;
	u32 i, dwTmp;

	for (i = 0; i < timeout; i++) {
		dwTmp = ior32(SPI_CTRL_REG_64XX);
		if (!(dwTmp & SPI_CTRL_SPISTART))
			return 0;
		msleep(10);
	}

	return -1;
}

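/*
 * Fill the unused tail of a PRD table, pointing every remaining entry
 * at the driver's bulk buffer; the 64xx DMA engine evidently expects
 * MAX_SG_ENTRY valid entries.
 */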
static void mvs_64xx_fix_dma(struct mvs_info *mvi, u32 phy_mask,
			     int buf_len, int from, void *prd)
{
	int i;
	struct mvs_prd *buf_prd = prd;
	dma_addr_t buf_dma = mvi->bulk_buffer_dma;

	buf_prd += from;
	for (i = 0; i < MAX_SG_ENTRY - from; i++) {
		buf_prd->addr = cpu_to_le64(buf_dma);
		buf_prd->len = cpu_to_le32(buf_len);
		++buf_prd;
	}
}

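/*
 * Runtime update of interrupt coalescing: time == 0 disables
 * coalescing, otherwise program both the count and the timeout.
 */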
static void mvs_64xx_tune_interrupt(struct mvs_info *mvi, u32 time)
{
	void __iomem *regs = mvi->regs;
	u32 tmp = 0;

	/*
	 * The max coalescing count is 0x1ff while our max slot count is
	 * 0x200; using the slot count directly would truncate to 0.
	 */
	if (time == 0) {
		mw32(MVS_INT_COAL, 0);
		mw32(MVS_INT_COAL_TMOUT, 0x10000);
	} else {
		if (MVS_CHIP_SLOT_SZ > 0x1ff)
			mw32(MVS_INT_COAL, 0x1ff | COAL_EN);
		else
			mw32(MVS_INT_COAL, MVS_CHIP_SLOT_SZ | COAL_EN);

		tmp = 0x10000 | time;
		mw32(MVS_INT_COAL_TMOUT, tmp);
	}
}

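/*
 * Dispatch table consumed by the generic mvsas core; entries are
 * positional, matching the layout of struct mvs_dispatch.
 */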
const struct mvs_dispatch mvs_64xx_dispatch = {
	"mv64xx",
	mvs_64xx_init,
	NULL,
	mvs_64xx_ioremap,
	mvs_64xx_iounmap,
	mvs_64xx_isr,
	mvs_64xx_isr_status,
	mvs_64xx_interrupt_enable,
	mvs_64xx_interrupt_disable,
	mvs_read_phy_ctl,
	mvs_write_phy_ctl,
	mvs_read_port_cfg_data,
	mvs_write_port_cfg_data,
	mvs_write_port_cfg_addr,
	mvs_read_port_vsr_data,
	mvs_write_port_vsr_data,
	mvs_write_port_vsr_addr,
	mvs_read_port_irq_stat,
	mvs_write_port_irq_stat,
	mvs_read_port_irq_mask,
	mvs_write_port_irq_mask,
	mvs_64xx_command_active,
	mvs_64xx_clear_srs_irq,
	mvs_64xx_issue_stop,
	mvs_start_delivery,
	mvs_rx_update,
	mvs_int_full,
	mvs_64xx_assign_reg_set,
	mvs_64xx_free_reg_set,
	mvs_get_prd_size,
	mvs_get_prd_count,
	mvs_64xx_make_prd,
	mvs_64xx_detect_porttype,
	mvs_64xx_oob_done,
	mvs_64xx_fix_phy_info,
	mvs_64xx_phy_work_around,
	mvs_64xx_phy_set_link_rate,
	mvs_hw_max_link_rate,
	mvs_64xx_phy_disable,
	mvs_64xx_phy_enable,
	mvs_64xx_phy_reset,
	mvs_64xx_stp_reset,
	mvs_64xx_clear_active_cmds,
	mvs_64xx_spi_read_data,
	mvs_64xx_spi_write_data,
	mvs_64xx_spi_buildcmd,
	mvs_64xx_spi_issuecmd,
	mvs_64xx_spi_waitdataready,
	mvs_64xx_fix_dma,
	mvs_64xx_tune_interrupt,
	NULL,
};