Linux 2.6.17.7
drivers/infiniband/ulp/ipoib/ipoib_ib.c

/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2004, 2005 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: ipoib_ib.c 1386 2004-12-27 16:23:17Z roland $
 */

#include <linux/delay.h>
#include <linux/dma-mapping.h>

#include <rdma/ib_cache.h>

#include "ipoib.h"

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG_DATA
static int data_debug_level;

module_param(data_debug_level, int, 0644);
MODULE_PARM_DESC(data_debug_level,
                 "Enable data path debug tracing if > 0");
#endif
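
/*
 * The high bit of the work request ID tags receive completions:
 * receive WRs carry (ring index | IPOIB_OP_RECV) while send WRs carry
 * the bare ring index, so a single completion handler can tell the two
 * directions apart.
 */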

#define IPOIB_OP_RECV (1ul << 31)

static DEFINE_MUTEX(pkey_mutex);
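
/*
 * Address handles are reference counted with a kref; ipoib_free_ah()
 * below is presumably the release function passed at the kref_put()
 * call sites, which live outside this file.
 */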

struct ipoib_ah *ipoib_create_ah(struct net_device *dev,
                                 struct ib_pd *pd, struct ib_ah_attr *attr)
{
        struct ipoib_ah *ah;

        ah = kmalloc(sizeof *ah, GFP_KERNEL);
        if (!ah)
                return NULL;

        ah->dev       = dev;
        ah->last_send = 0;
        kref_init(&ah->ref);

        ah->ah = ib_create_ah(pd, attr);
        if (IS_ERR(ah->ah)) {
                kfree(ah);
                ah = NULL;
        } else
                ipoib_dbg(netdev_priv(dev), "Created ah %p\n", ah->ah);

        return ah;
}
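
/*
 * An AH may still be referenced by posted sends.  ah->last_send records
 * tx_head at the time of the last send that used this AH, so once
 * tx_tail has caught up (the signed difference is >= 0, which also
 * copes with counter wraparound) the hardware is done with it and it
 * can be destroyed immediately; otherwise it is parked on dead_ahs for
 * the reaper to pick up later.
 */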

void ipoib_free_ah(struct kref *kref)
{
        struct ipoib_ah *ah = container_of(kref, struct ipoib_ah, ref);
        struct ipoib_dev_priv *priv = netdev_priv(ah->dev);

        unsigned long flags;

        if ((int) priv->tx_tail - (int) ah->last_send >= 0) {
                ipoib_dbg(priv, "Freeing ah %p\n", ah->ah);
                ib_destroy_ah(ah->ah);
                kfree(ah);
        } else {
                spin_lock_irqsave(&priv->lock, flags);
                list_add_tail(&ah->list, &priv->dead_ahs);
                spin_unlock_irqrestore(&priv->lock, flags);
        }
}

static int ipoib_ib_post_receive(struct net_device *dev, int id)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ib_sge list;
        struct ib_recv_wr param;
        struct ib_recv_wr *bad_wr;
        int ret;

        list.addr     = priv->rx_ring[id].mapping;
        list.length   = IPOIB_BUF_SIZE;
        list.lkey     = priv->mr->lkey;

        param.next    = NULL;
        param.wr_id   = id | IPOIB_OP_RECV;
        param.sg_list = &list;
        param.num_sge = 1;

        ret = ib_post_recv(priv->qp, &param, &bad_wr);
        if (unlikely(ret)) {
                ipoib_warn(priv, "receive failed for buf %d (%d)\n", id, ret);
                dma_unmap_single(priv->ca->dma_device,
                                 priv->rx_ring[id].mapping,
                                 IPOIB_BUF_SIZE, DMA_FROM_DEVICE);
                dev_kfree_skb_any(priv->rx_ring[id].skb);
                priv->rx_ring[id].skb = NULL;
        }

        return ret;
}

static int ipoib_alloc_rx_skb(struct net_device *dev, int id)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct sk_buff *skb;
        dma_addr_t addr;

        skb = dev_alloc_skb(IPOIB_BUF_SIZE + 4);
        if (!skb)
                return -ENOMEM;

        /*
         * IB will leave a 40 byte gap for a GRH and IPoIB adds a 4 byte
         * header.  So we need 4 more bytes to get to 48 and align the
         * IP header to a multiple of 16.
         */
        skb_reserve(skb, 4);

        addr = dma_map_single(priv->ca->dma_device,
                              skb->data, IPOIB_BUF_SIZE,
                              DMA_FROM_DEVICE);
        if (unlikely(dma_mapping_error(addr))) {
                dev_kfree_skb_any(skb);
                return -EIO;
        }

        priv->rx_ring[id].skb     = skb;
        priv->rx_ring[id].mapping = addr;

        return 0;
}

static int ipoib_ib_post_receives(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        int i;

        for (i = 0; i < ipoib_recvq_size; ++i) {
                if (ipoib_alloc_rx_skb(dev, i)) {
                        ipoib_warn(priv, "failed to allocate receive buffer %d\n", i);
                        return -ENOMEM;
                }
                if (ipoib_ib_post_receive(dev, i)) {
                        ipoib_warn(priv, "ipoib_ib_post_receive failed for buf %d\n", i);
                        return -EIO;
                }
        }

        return 0;
}
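
/*
 * Completion handling for both directions.  On the receive side the
 * buffer slot is reposted no matter what: if a replacement skb can't
 * be allocated, the packet is dropped and the old buffer is reused, so
 * the receive ring never shrinks.
 */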

static void ipoib_ib_handle_wc(struct net_device *dev,
                               struct ib_wc *wc)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        unsigned int wr_id = wc->wr_id;

        ipoib_dbg_data(priv, "called: id %d, op %d, status: %d\n",
                       wr_id, wc->opcode, wc->status);

        if (wr_id & IPOIB_OP_RECV) {
                wr_id &= ~IPOIB_OP_RECV;

                if (wr_id < ipoib_recvq_size) {
                        struct sk_buff *skb  = priv->rx_ring[wr_id].skb;
                        dma_addr_t      addr = priv->rx_ring[wr_id].mapping;

                        if (unlikely(wc->status != IB_WC_SUCCESS)) {
                                if (wc->status != IB_WC_WR_FLUSH_ERR)
                                        ipoib_warn(priv, "failed recv event "
                                                   "(status=%d, wrid=%d vend_err %x)\n",
                                                   wc->status, wr_id, wc->vendor_err);
                                dma_unmap_single(priv->ca->dma_device, addr,
                                                 IPOIB_BUF_SIZE, DMA_FROM_DEVICE);
                                dev_kfree_skb_any(skb);
                                priv->rx_ring[wr_id].skb = NULL;
                                return;
                        }

                        /*
                         * If we can't allocate a new RX buffer, dump
                         * this packet and reuse the old buffer.
                         */
                        if (unlikely(ipoib_alloc_rx_skb(dev, wr_id))) {
                                ++priv->stats.rx_dropped;
                                goto repost;
                        }

                        ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n",
                                       wc->byte_len, wc->slid);

                        dma_unmap_single(priv->ca->dma_device, addr,
                                         IPOIB_BUF_SIZE, DMA_FROM_DEVICE);

                        skb_put(skb, wc->byte_len);
                        skb_pull(skb, IB_GRH_BYTES);

                        if (wc->slid != priv->local_lid ||
                            wc->src_qp != priv->qp->qp_num) {
                                skb->protocol = ((struct ipoib_header *) skb->data)->proto;
                                skb->mac.raw = skb->data;
                                skb_pull(skb, IPOIB_ENCAP_LEN);

                                dev->last_rx = jiffies;
                                ++priv->stats.rx_packets;
                                priv->stats.rx_bytes += skb->len;

                                skb->dev = dev;
                                /* XXX get correct PACKET_ type here */
                                skb->pkt_type = PACKET_HOST;
                                netif_rx_ni(skb);
                        } else {
                                ipoib_dbg_data(priv, "dropping loopback packet\n");
                                dev_kfree_skb_any(skb);
                        }

                repost:
                        if (unlikely(ipoib_ib_post_receive(dev, wr_id)))
                                ipoib_warn(priv, "ipoib_ib_post_receive failed "
                                           "for buf %d\n", wr_id);
                } else
                        ipoib_warn(priv, "completion event with wrid %d\n",
                                   wr_id);
        } else {
                struct ipoib_tx_buf *tx_req;
                unsigned long flags;

                if (wr_id >= ipoib_sendq_size) {
                        ipoib_warn(priv, "completion event with wrid %d (> %d)\n",
                                   wr_id, ipoib_sendq_size);
                        return;
                }

                ipoib_dbg_data(priv, "send complete, wrid %d\n", wr_id);

                tx_req = &priv->tx_ring[wr_id];

                dma_unmap_single(priv->ca->dma_device,
                                 pci_unmap_addr(tx_req, mapping),
                                 tx_req->skb->len,
                                 DMA_TO_DEVICE);

                ++priv->stats.tx_packets;
                priv->stats.tx_bytes += tx_req->skb->len;

                dev_kfree_skb_any(tx_req->skb);

                spin_lock_irqsave(&priv->tx_lock, flags);
                ++priv->tx_tail;
                if (netif_queue_stopped(dev) &&
                    test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags) &&
                    priv->tx_head - priv->tx_tail <= ipoib_sendq_size >> 1)
                        netif_wake_queue(dev);
                spin_unlock_irqrestore(&priv->tx_lock, flags);

                if (wc->status != IB_WC_SUCCESS &&
                    wc->status != IB_WC_WR_FLUSH_ERR)
                        ipoib_warn(priv, "failed send event "
                                   "(status=%d, wrid=%d vend_err %x)\n",
                                   wc->status, wr_id, wc->vendor_err);
        }
}
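
/*
 * Standard "arm then drain" CQ pattern: the CQ is re-armed before
 * polling, and polling repeats while full batches of IPOIB_NUM_WC
 * entries come back, so a completion that slips in between the last
 * poll and the notification request still raises a new event.
 */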

void ipoib_ib_completion(struct ib_cq *cq, void *dev_ptr)
{
        struct net_device *dev = (struct net_device *) dev_ptr;
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        int n, i;

        ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
        do {
                n = ib_poll_cq(cq, IPOIB_NUM_WC, priv->ibwc);
                for (i = 0; i < n; ++i)
                        ipoib_ib_handle_wc(dev, priv->ibwc + i);
        } while (n == IPOIB_NUM_WC);
}
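
/*
 * Only the per-packet fields of the cached work request are filled in
 * here; the remainder of priv->tx_wr and priv->tx_sge (opcode, lkey,
 * send flags and so on) are presumably set up once at initialization
 * time elsewhere in the driver.
 */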

static inline int post_send(struct ipoib_dev_priv *priv,
                            unsigned int wr_id,
                            struct ib_ah *address, u32 qpn,
                            dma_addr_t addr, int len)
{
        struct ib_send_wr *bad_wr;

        priv->tx_sge.addr            = addr;
        priv->tx_sge.length          = len;

        priv->tx_wr.wr_id            = wr_id;
        priv->tx_wr.wr.ud.remote_qpn = qpn;
        priv->tx_wr.wr.ud.ah         = address;

        return ib_post_send(priv->qp, &priv->tx_wr, &bad_wr);
}
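
/*
 * Note the ring indexing: tx_head and tx_tail are free-running
 * counters, and the ring slot is tx_head & (ipoib_sendq_size - 1),
 * which assumes ipoib_sendq_size is a power of two.
 */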

void ipoib_send(struct net_device *dev, struct sk_buff *skb,
                struct ipoib_ah *address, u32 qpn)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ipoib_tx_buf *tx_req;
        dma_addr_t addr;

        if (skb->len > dev->mtu + INFINIBAND_ALEN) {
                ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n",
                           skb->len, dev->mtu + INFINIBAND_ALEN);
                ++priv->stats.tx_dropped;
                ++priv->stats.tx_errors;
                dev_kfree_skb_any(skb);
                return;
        }

        ipoib_dbg_data(priv, "sending packet, length=%d address=%p qpn=0x%06x\n",
                       skb->len, address, qpn);

        /*
         * We put the skb into the tx_ring _before_ we call post_send()
         * because it's entirely possible that the completion handler will
         * run before we execute anything after the post_send().  That
         * means we have to make sure everything is properly recorded and
         * our state is consistent before we call post_send().
         */
        tx_req = &priv->tx_ring[priv->tx_head & (ipoib_sendq_size - 1)];
        tx_req->skb = skb;
        addr = dma_map_single(priv->ca->dma_device, skb->data, skb->len,
                              DMA_TO_DEVICE);
        pci_unmap_addr_set(tx_req, mapping, addr);

        if (unlikely(post_send(priv, priv->tx_head & (ipoib_sendq_size - 1),
                               address->ah, qpn, addr, skb->len))) {
                ipoib_warn(priv, "post_send failed\n");
                ++priv->stats.tx_errors;
                dma_unmap_single(priv->ca->dma_device, addr, skb->len,
                                 DMA_TO_DEVICE);
                dev_kfree_skb_any(skb);
        } else {
                dev->trans_start = jiffies;

                address->last_send = priv->tx_head;
                ++priv->tx_head;

                if (priv->tx_head - priv->tx_tail == ipoib_sendq_size) {
                        ipoib_dbg(priv, "TX ring full, stopping kernel net queue\n");
                        netif_stop_queue(dev);
                }
        }
}
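
/*
 * Reap dead AHs: entries whose last_send has been passed by tx_tail
 * are moved to a private list under the lock and destroyed after the
 * lock is dropped, keeping ib_destroy_ah() outside the spinlock.
 */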

static void __ipoib_reap_ah(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ipoib_ah *ah, *tah;
        LIST_HEAD(remove_list);

        spin_lock_irq(&priv->lock);
        list_for_each_entry_safe(ah, tah, &priv->dead_ahs, list)
                if ((int) priv->tx_tail - (int) ah->last_send >= 0) {
                        list_del(&ah->list);
                        list_add_tail(&ah->list, &remove_list);
                }
        spin_unlock_irq(&priv->lock);

        list_for_each_entry_safe(ah, tah, &remove_list, list) {
                ipoib_dbg(priv, "Reaping ah %p\n", ah->ah);
                ib_destroy_ah(ah->ah);
                kfree(ah);
        }
}

void ipoib_reap_ah(void *dev_ptr)
{
        struct net_device *dev = dev_ptr;
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        __ipoib_reap_ah(dev);

        if (!test_bit(IPOIB_STOP_REAPER, &priv->flags))
                queue_delayed_work(ipoib_workqueue, &priv->ah_reap_task, HZ);
}

int ipoib_ib_dev_open(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        int ret;

        ret = ipoib_init_qp(dev);
        if (ret) {
                ipoib_warn(priv, "ipoib_init_qp returned %d\n", ret);
                return -1;
        }

        ret = ipoib_ib_post_receives(dev);
        if (ret) {
                ipoib_warn(priv, "ipoib_ib_post_receives returned %d\n", ret);
                ipoib_ib_dev_stop(dev);
                return -1;
        }

        clear_bit(IPOIB_STOP_REAPER, &priv->flags);
        queue_delayed_work(ipoib_workqueue, &priv->ah_reap_task, HZ);

        set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);

        return 0;
}
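
/*
 * ib_find_cached_pkey() returns 0 when the interface's P_Key is found
 * in the port's P_Key table, so a nonzero return here means the P_Key
 * has not (yet) been assigned by the subnet manager.
 */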

static void ipoib_pkey_dev_check_presence(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        u16 pkey_index = 0;

        if (ib_find_cached_pkey(priv->ca, priv->port, priv->pkey, &pkey_index))
                clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
        else
                set_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
}

int ipoib_ib_dev_up(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        ipoib_pkey_dev_check_presence(dev);

        if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) {
                ipoib_dbg(priv, "PKEY is not assigned.\n");
                return 0;
        }

        set_bit(IPOIB_FLAG_OPER_UP, &priv->flags);

        return ipoib_mcast_start_thread(dev);
}

int ipoib_ib_dev_down(struct net_device *dev, int flush)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        ipoib_dbg(priv, "downing ib_dev\n");

        clear_bit(IPOIB_FLAG_OPER_UP, &priv->flags);
        netif_carrier_off(dev);

        /* Shutdown the P_Key thread if still active */
        if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) {
                mutex_lock(&pkey_mutex);
                set_bit(IPOIB_PKEY_STOP, &priv->flags);
                cancel_delayed_work(&priv->pkey_task);
                mutex_unlock(&pkey_mutex);
                if (flush)
                        flush_workqueue(ipoib_workqueue);
        }

        ipoib_mcast_stop_thread(dev, flush);
        ipoib_mcast_dev_flush(dev);

        ipoib_flush_paths(dev);

        return 0;
}

static int recvs_pending(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        int pending = 0;
        int i;

        for (i = 0; i < ipoib_recvq_size; ++i)
                if (priv->rx_ring[i].skb)
                        ++pending;

        return pending;
}
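
/*
 * Shutdown sequence: moving the QP to the error state makes the HCA
 * flush all outstanding work requests back as completions.  The loop
 * below gives that up to five seconds; after the timeout the hardware
 * is assumed wedged and the rings are reclaimed by hand.
 */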

int ipoib_ib_dev_stop(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ib_qp_attr qp_attr;
        unsigned long begin;
        struct ipoib_tx_buf *tx_req;
        int i;

        clear_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);

        /*
         * Move our QP to the error state and then reinitialize when
         * all work requests have completed or have been flushed.
         */
        qp_attr.qp_state = IB_QPS_ERR;
        if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE))
                ipoib_warn(priv, "Failed to modify QP to ERROR state\n");

        /* Wait for all sends and receives to complete */
        begin = jiffies;

        while (priv->tx_head != priv->tx_tail || recvs_pending(dev)) {
                if (time_after(jiffies, begin + 5 * HZ)) {
                        ipoib_warn(priv, "timing out; %d sends %d receives not completed\n",
                                   priv->tx_head - priv->tx_tail, recvs_pending(dev));

                        /*
                         * Assume the HW is wedged and just free up
                         * all our pending work requests.
                         */
                        while ((int) priv->tx_tail - (int) priv->tx_head < 0) {
                                tx_req = &priv->tx_ring[priv->tx_tail &
                                                        (ipoib_sendq_size - 1)];
                                dma_unmap_single(priv->ca->dma_device,
                                                 pci_unmap_addr(tx_req, mapping),
                                                 tx_req->skb->len,
                                                 DMA_TO_DEVICE);
                                dev_kfree_skb_any(tx_req->skb);
                                ++priv->tx_tail;
                        }

                        for (i = 0; i < ipoib_recvq_size; ++i)
                                if (priv->rx_ring[i].skb) {
                                        dma_unmap_single(priv->ca->dma_device,
                                                         pci_unmap_addr(&priv->rx_ring[i],
                                                                        mapping),
                                                         IPOIB_BUF_SIZE,
                                                         DMA_FROM_DEVICE);
                                        dev_kfree_skb_any(priv->rx_ring[i].skb);
                                        priv->rx_ring[i].skb = NULL;
                                }

                        goto timeout;
                }

                msleep(1);
        }

        ipoib_dbg(priv, "All sends and receives done.\n");

timeout:
        qp_attr.qp_state = IB_QPS_RESET;
        if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE))
                ipoib_warn(priv, "Failed to modify QP to RESET state\n");

        /* Wait for all AHs to be reaped */
        set_bit(IPOIB_STOP_REAPER, &priv->flags);
        cancel_delayed_work(&priv->ah_reap_task);
        flush_workqueue(ipoib_workqueue);

        begin = jiffies;

        while (!list_empty(&priv->dead_ahs)) {
                __ipoib_reap_ah(dev);

                if (time_after(jiffies, begin + HZ)) {
                        ipoib_warn(priv, "timing out; will leak address handles\n");
                        break;
                }

                msleep(1);
        }

        return 0;
}

int ipoib_ib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        priv->ca = ca;
        priv->port = port;
        priv->qp = NULL;

        if (ipoib_transport_dev_init(dev, ca)) {
                printk(KERN_WARNING "%s: ipoib_transport_dev_init failed\n", ca->name);
                return -ENODEV;
        }

        if (dev->flags & IFF_UP) {
                if (ipoib_ib_dev_open(dev)) {
                        ipoib_transport_dev_cleanup(dev);
                        return -ENODEV;
                }
        }

        return 0;
}
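
/*
 * Typically invoked as a work item after a port event (the scheduling
 * lives outside this file): take the IB side down and, if the
 * interface is still administratively up, bring it back up, then
 * recurse into any child (VLAN) interfaces.
 */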

void ipoib_ib_dev_flush(void *_dev)
{
        struct net_device *dev = (struct net_device *)_dev;
        struct ipoib_dev_priv *priv = netdev_priv(dev), *cpriv;

        if (!test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags)) {
                ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_INITIALIZED not set.\n");
                return;
        }

        if (!test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) {
                ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_ADMIN_UP not set.\n");
                return;
        }

        ipoib_dbg(priv, "flushing\n");

        ipoib_ib_dev_down(dev, 0);

        /*
         * The device could have been brought down between the start and when
         * we get here, don't bring it back up if it's not configured up
         */
        if (test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
                ipoib_ib_dev_up(dev);

        mutex_lock(&priv->vlan_mutex);

        /* Flush any child interfaces too */
        list_for_each_entry(cpriv, &priv->child_intfs, list)
                ipoib_ib_dev_flush(cpriv->dev);

        mutex_unlock(&priv->vlan_mutex);
}

void ipoib_ib_dev_cleanup(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        ipoib_dbg(priv, "cleaning up ib_dev\n");

        ipoib_mcast_stop_thread(dev, 1);
        ipoib_mcast_dev_flush(dev);

        ipoib_transport_dev_cleanup(dev);
}

/*
 * Delayed P_Key Assignment Interim Support
 *
 * The following is an initial implementation of the delayed P_Key
 * assignment mechanism.  It uses the same approach implemented for the
 * multicast group join.  The single goal of this implementation is to
 * quickly address Bug #2507.  This implementation will probably be
 * removed when the P_Key change async notification is available.
 */

void ipoib_pkey_poll(void *dev_ptr)
{
        struct net_device *dev = dev_ptr;
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        ipoib_pkey_dev_check_presence(dev);

        if (test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags))
                ipoib_open(dev);
        else {
                mutex_lock(&pkey_mutex);
                if (!test_bit(IPOIB_PKEY_STOP, &priv->flags))
                        queue_delayed_work(ipoib_workqueue,
                                           &priv->pkey_task,
                                           HZ);
                mutex_unlock(&pkey_mutex);
        }
}

int ipoib_pkey_dev_delay_open(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        /* Look for the interface pkey value in the IB Port P_Key table and */
        /* set the interface pkey assignment flag */
        ipoib_pkey_dev_check_presence(dev);

        /* P_Key value not assigned yet - start polling */
        if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) {
                mutex_lock(&pkey_mutex);
                clear_bit(IPOIB_PKEY_STOP, &priv->flags);
                queue_delayed_work(ipoib_workqueue,
                                   &priv->pkey_task,
                                   HZ);
                mutex_unlock(&pkey_mutex);
                return 1;
        }

        return 0;
}