/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Intel Corporation. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005 Voltaire, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/netdevice.h>
#include <net/addrconf.h>

#include <rdma/ib_cache.h>

#include "core_priv.h"
struct ib_pkey_cache {
	int	table_len;
	u16	table[0];
};

struct ib_update_work {
	struct work_struct work;
	struct ib_device  *device;
	u8		   port_num;
	bool		   enforce_security;
};

union ib_gid zgid;
EXPORT_SYMBOL(zgid);

static const struct ib_gid_attr zattr;

enum gid_attr_find_mask {
	GID_ATTR_FIND_MASK_GID		= 1UL << 0,
	GID_ATTR_FIND_MASK_NETDEV	= 1UL << 1,
	GID_ATTR_FIND_MASK_DEFAULT	= 1UL << 2,
	GID_ATTR_FIND_MASK_GID_TYPE	= 1UL << 3,
};

enum gid_table_entry_props {
	GID_TABLE_ENTRY_INVALID	= 1UL << 0,
	GID_TABLE_ENTRY_DEFAULT	= 1UL << 1,
};

enum gid_table_write_action {
	GID_TABLE_WRITE_ACTION_ADD,
	GID_TABLE_WRITE_ACTION_DEL,
	/* MODIFY only updates the GID table. Currently only used by
	 * ib_cache_update.
	 */
	GID_TABLE_WRITE_ACTION_MODIFY
};

struct ib_gid_table_entry {
	unsigned long	    props;
	union ib_gid	    gid;
	struct ib_gid_attr  attr;
	void		   *context;
};
struct ib_gid_table {
	int	sz;
	/* In RoCE, adding a GID to the table requires:
	 * (a) Check whether this GID already exists.
	 * (b) Find a free space.
	 * (c) Write the new GID.
	 *
	 * Delete requires a different set of operations:
	 * (a) Find the GID.
	 * (b) Delete it.
	 *
	 * Add/delete should be carried out atomically.
	 * This is done by taking this mutex in every writer.
	 * We don't need this lock for IB, as the MAD
	 * layer replaces all entries. All data_vec entries
	 * are locked by this lock.
	 */
	struct mutex	lock;
	/* This lock protects the table entries from being
	 * read and written simultaneously.
	 */
	rwlock_t	rwlock;
	struct ib_gid_table_entry *data_vec;
};
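/*
 * Locking sketch (illustrative only; it mirrors the writers below such
 * as ib_cache_gid_add()): a hypothetical RoCE-side writer takes the
 * mutex first and the rwlock second:
 *
 *	mutex_lock(&table->lock);
 *	write_lock_irq(&table->rwlock);
 *	... modify table->data_vec[ix] ...
 *	write_unlock_irq(&table->rwlock);
 *	mutex_unlock(&table->lock);
 *
 * Readers such as __ib_cache_gid_get() only need
 * read_lock_irqsave(&table->rwlock, flags).
 */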
static void dispatch_gid_change_event(struct ib_device *ib_dev, u8 port)
{
	if (rdma_cap_roce_gid_table(ib_dev, port)) {
		struct ib_event event;

		event.device		= ib_dev;
		event.element.port_num	= port;
		event.event		= IB_EVENT_GID_CHANGE;

		ib_dispatch_event(&event);
	}
}

static const char * const gid_type_str[] = {
	[IB_GID_TYPE_IB]		= "IB/RoCE v1",
	[IB_GID_TYPE_ROCE_UDP_ENCAP]	= "RoCE v2",
};

const char *ib_cache_gid_type_str(enum ib_gid_type gid_type)
{
	if (gid_type < ARRAY_SIZE(gid_type_str) && gid_type_str[gid_type])
		return gid_type_str[gid_type];

	return "Invalid GID type";
}
EXPORT_SYMBOL(ib_cache_gid_type_str);

int ib_cache_gid_parse_type_str(const char *buf)
{
	unsigned int i;
	size_t len;
	int err = -EINVAL;

	len = strlen(buf);
	if (len == 0)
		return -EINVAL;

	if (buf[len - 1] == '\n')
		len--;

	for (i = 0; i < ARRAY_SIZE(gid_type_str); ++i)
		if (gid_type_str[i] && !strncmp(buf, gid_type_str[i], len) &&
		    len == strlen(gid_type_str[i])) {
			err = i;
			break;
		}

	return err;
}
EXPORT_SYMBOL(ib_cache_gid_parse_type_str);
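/*
 * Example (illustrative, not called from this file): parsing a GID type
 * string written through sysfs. "RoCE v2" maps to
 * IB_GID_TYPE_ROCE_UDP_ENCAP, and a trailing newline, as produced by
 * "echo", is accepted:
 *
 *	int gid_type = ib_cache_gid_parse_type_str("RoCE v2\n");
 *
 *	if (gid_type < 0)
 *		return gid_type;
 *
 * Unknown strings yield -EINVAL.
 */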
/* This function expects that rwlock will be write locked in all
 * scenarios and that lock will be locked in sleep-able (RoCE)
 * scenarios.
 */
static int write_gid(struct ib_device *ib_dev, u8 port,
		     struct ib_gid_table *table, int ix,
		     const union ib_gid *gid,
		     const struct ib_gid_attr *attr,
		     enum gid_table_write_action action,
		     bool default_gid)
	__releases(&table->rwlock) __acquires(&table->rwlock)
{
	int ret = 0;
	struct net_device *old_net_dev;
	enum ib_gid_type old_gid_type;

	/* in rdma_cap_roce_gid_table, this function should be protected by a
	 * sleep-able lock.
	 */

	if (rdma_cap_roce_gid_table(ib_dev, port)) {
		table->data_vec[ix].props |= GID_TABLE_ENTRY_INVALID;
		write_unlock_irq(&table->rwlock);
		/* GID_TABLE_WRITE_ACTION_MODIFY currently isn't supported by
		 * RoCE providers and thus only updates the cache.
		 */
		if (action == GID_TABLE_WRITE_ACTION_ADD)
			ret = ib_dev->add_gid(ib_dev, port, ix, gid, attr,
					      &table->data_vec[ix].context);
		else if (action == GID_TABLE_WRITE_ACTION_DEL)
			ret = ib_dev->del_gid(ib_dev, port, ix,
					      &table->data_vec[ix].context);
		write_lock_irq(&table->rwlock);
	}

	old_net_dev = table->data_vec[ix].attr.ndev;
	old_gid_type = table->data_vec[ix].attr.gid_type;
	if (old_net_dev && old_net_dev != attr->ndev)
		dev_put(old_net_dev);
	/* if modify_gid failed, just delete the old gid */
	if (ret || action == GID_TABLE_WRITE_ACTION_DEL) {
		gid = &zgid;
		attr = &zattr;
		table->data_vec[ix].context = NULL;
	}

	memcpy(&table->data_vec[ix].gid, gid, sizeof(*gid));
	memcpy(&table->data_vec[ix].attr, attr, sizeof(*attr));
	if (default_gid) {
		table->data_vec[ix].props |= GID_TABLE_ENTRY_DEFAULT;
		if (action == GID_TABLE_WRITE_ACTION_DEL)
			table->data_vec[ix].attr.gid_type = old_gid_type;
	}
	if (table->data_vec[ix].attr.ndev &&
	    table->data_vec[ix].attr.ndev != old_net_dev)
		dev_hold(table->data_vec[ix].attr.ndev);

	table->data_vec[ix].props &= ~GID_TABLE_ENTRY_INVALID;

	return ret;
}
static int add_gid(struct ib_device *ib_dev, u8 port,
		   struct ib_gid_table *table, int ix,
		   const union ib_gid *gid,
		   const struct ib_gid_attr *attr,
		   bool default_gid) {
	return write_gid(ib_dev, port, table, ix, gid, attr,
			 GID_TABLE_WRITE_ACTION_ADD, default_gid);
}

static int modify_gid(struct ib_device *ib_dev, u8 port,
		      struct ib_gid_table *table, int ix,
		      const union ib_gid *gid,
		      const struct ib_gid_attr *attr,
		      bool default_gid) {
	return write_gid(ib_dev, port, table, ix, gid, attr,
			 GID_TABLE_WRITE_ACTION_MODIFY, default_gid);
}

static int del_gid(struct ib_device *ib_dev, u8 port,
		   struct ib_gid_table *table, int ix,
		   bool default_gid) {
	return write_gid(ib_dev, port, table, ix, &zgid, &zattr,
			 GID_TABLE_WRITE_ACTION_DEL, default_gid);
}

/* rwlock should be read locked */
static int find_gid(struct ib_gid_table *table, const union ib_gid *gid,
		    const struct ib_gid_attr *val, bool default_gid,
		    unsigned long mask, int *pempty)
{
	int i = 0;
	int found = -1;
	int empty = pempty ? -1 : 0;

	while (i < table->sz && (found < 0 || empty < 0)) {
		struct ib_gid_table_entry *data = &table->data_vec[i];
		struct ib_gid_attr *attr = &data->attr;
		int curr_index = i;

		i++;

		if (data->props & GID_TABLE_ENTRY_INVALID)
			continue;

		if (empty < 0)
			if (!memcmp(&data->gid, &zgid, sizeof(*gid)) &&
			    !memcmp(attr, &zattr, sizeof(*attr)) &&
			    !data->props)
				empty = curr_index;

		if (found >= 0)
			continue;

		if (mask & GID_ATTR_FIND_MASK_GID_TYPE &&
		    attr->gid_type != val->gid_type)
			continue;

		if (mask & GID_ATTR_FIND_MASK_GID &&
		    memcmp(gid, &data->gid, sizeof(*gid)))
			continue;

		if (mask & GID_ATTR_FIND_MASK_NETDEV &&
		    attr->ndev != val->ndev)
			continue;

		if (mask & GID_ATTR_FIND_MASK_DEFAULT &&
		    !!(data->props & GID_TABLE_ENTRY_DEFAULT) !=
		    default_gid)
			continue;

		found = curr_index;
	}

	if (pempty)
		*pempty = empty;

	return found;
}
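/*
 * Usage sketch for find_gid() (illustrative): @mask selects which fields
 * of @val must match. ib_cache_gid_add() below, for instance, looks up
 * an exact (gid, gid_type, ndev) triplet while also asking for the
 * first free slot:
 *
 *	ix = find_gid(table, gid, attr, false,
 *		      GID_ATTR_FIND_MASK_GID |
 *		      GID_ATTR_FIND_MASK_GID_TYPE |
 *		      GID_ATTR_FIND_MASK_NETDEV, &empty);
 *
 * A negative return means no entry matched; *pempty < 0 means no free
 * slot was found.
 */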
static void make_default_gid(struct net_device *dev, union ib_gid *gid)
{
	gid->global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
	addrconf_ifid_eui48(&gid->raw[8], dev);
}

int ib_cache_gid_add(struct ib_device *ib_dev, u8 port,
		     union ib_gid *gid, struct ib_gid_attr *attr)
{
	struct ib_gid_table *table;
	int ix;
	int ret = 0;
	struct net_device *idev;
	int empty;

	table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid;

	if (!memcmp(gid, &zgid, sizeof(*gid)))
		return -EINVAL;

	if (ib_dev->get_netdev) {
		idev = ib_dev->get_netdev(ib_dev, port);
		if (idev && attr->ndev != idev) {
			union ib_gid default_gid;

			/* Adding default GIDs is not permitted */
			make_default_gid(idev, &default_gid);
			if (!memcmp(gid, &default_gid, sizeof(*gid))) {
				dev_put(idev);
				return -EPERM;
			}
		}
		if (idev)
			dev_put(idev);
	}

	mutex_lock(&table->lock);
	write_lock_irq(&table->rwlock);

	ix = find_gid(table, gid, attr, false, GID_ATTR_FIND_MASK_GID |
		      GID_ATTR_FIND_MASK_GID_TYPE |
		      GID_ATTR_FIND_MASK_NETDEV, &empty);
	if (ix >= 0)
		goto out_unlock;

	if (empty < 0) {
		ret = -ENOSPC;
		goto out_unlock;
	}

	ret = add_gid(ib_dev, port, table, empty, gid, attr, false);
	if (!ret)
		dispatch_gid_change_event(ib_dev, port);

out_unlock:
	write_unlock_irq(&table->rwlock);
	mutex_unlock(&table->lock);
	return ret;
}
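/*
 * Caller sketch (illustrative; the attr values are assumptions of this
 * example): roce_gid_mgmt builds a GID and attribute from a netdev
 * event and calls:
 *
 *	struct ib_gid_attr attr = {
 *		.ndev	  = ndev,
 *		.gid_type = IB_GID_TYPE_ROCE_UDP_ENCAP,
 *	};
 *
 *	ret = ib_cache_gid_add(ib_dev, port, &gid, &attr);
 *
 * Re-adding an existing (gid, type, ndev) triplet is a no-op that
 * returns 0; a full table returns -ENOSPC.
 */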
int ib_cache_gid_del(struct ib_device *ib_dev, u8 port,
		     union ib_gid *gid, struct ib_gid_attr *attr)
{
	struct ib_gid_table *table;
	int ix;

	table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid;

	mutex_lock(&table->lock);
	write_lock_irq(&table->rwlock);

	ix = find_gid(table, gid, attr, false,
		      GID_ATTR_FIND_MASK_GID |
		      GID_ATTR_FIND_MASK_GID_TYPE |
		      GID_ATTR_FIND_MASK_NETDEV |
		      GID_ATTR_FIND_MASK_DEFAULT,
		      NULL);
	if (ix < 0)
		goto out_unlock;

	if (!del_gid(ib_dev, port, table, ix, false))
		dispatch_gid_change_event(ib_dev, port);

out_unlock:
	write_unlock_irq(&table->rwlock);
	mutex_unlock(&table->lock);
	return 0;
}

int ib_cache_gid_del_all_netdev_gids(struct ib_device *ib_dev, u8 port,
				     struct net_device *ndev)
{
	struct ib_gid_table *table;
	int ix;
	bool deleted = false;

	table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid;

	mutex_lock(&table->lock);
	write_lock_irq(&table->rwlock);

	for (ix = 0; ix < table->sz; ix++)
		if (table->data_vec[ix].attr.ndev == ndev)
			if (!del_gid(ib_dev, port, table, ix,
				     !!(table->data_vec[ix].props &
					GID_TABLE_ENTRY_DEFAULT)))
				deleted = true;

	write_unlock_irq(&table->rwlock);
	mutex_unlock(&table->lock);

	if (deleted)
		dispatch_gid_change_event(ib_dev, port);

	return 0;
}

static int __ib_cache_gid_get(struct ib_device *ib_dev, u8 port, int index,
			      union ib_gid *gid, struct ib_gid_attr *attr)
{
	struct ib_gid_table *table;

	table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid;

	if (index < 0 || index >= table->sz)
		return -EINVAL;

	if (table->data_vec[index].props & GID_TABLE_ENTRY_INVALID)
		return -EAGAIN;

	memcpy(gid, &table->data_vec[index].gid, sizeof(*gid));
	if (attr) {
		memcpy(attr, &table->data_vec[index].attr, sizeof(*attr));
		if (attr->ndev)
			dev_hold(attr->ndev);
	}

	return 0;
}

static int _ib_cache_gid_table_find(struct ib_device *ib_dev,
				    const union ib_gid *gid,
				    const struct ib_gid_attr *val,
				    unsigned long mask,
				    u8 *port, u16 *index)
{
	struct ib_gid_table *table;
	u8 p;
	int local_index;
	unsigned long flags;

	for (p = 0; p < ib_dev->phys_port_cnt; p++) {
		table = ib_dev->cache.ports[p].gid;
		read_lock_irqsave(&table->rwlock, flags);
		local_index = find_gid(table, gid, val, false, mask, NULL);
		if (local_index >= 0) {
			if (index)
				*index = local_index;
			if (port)
				*port = p + rdma_start_port(ib_dev);
			read_unlock_irqrestore(&table->rwlock, flags);
			return 0;
		}
		read_unlock_irqrestore(&table->rwlock, flags);
	}

	return -ENOENT;
}

static int ib_cache_gid_find(struct ib_device *ib_dev,
			     const union ib_gid *gid,
			     enum ib_gid_type gid_type,
			     struct net_device *ndev, u8 *port,
			     u16 *index)
{
	unsigned long mask = GID_ATTR_FIND_MASK_GID |
			     GID_ATTR_FIND_MASK_GID_TYPE;
	struct ib_gid_attr gid_attr_val = {.ndev = ndev, .gid_type = gid_type};

	if (ndev)
		mask |= GID_ATTR_FIND_MASK_NETDEV;

	return _ib_cache_gid_table_find(ib_dev, gid, &gid_attr_val,
					mask, port, index);
}

int ib_find_cached_gid_by_port(struct ib_device *ib_dev,
			       const union ib_gid *gid,
			       enum ib_gid_type gid_type,
			       u8 port, struct net_device *ndev,
			       u16 *index)
{
	int local_index;
	struct ib_gid_table *table;
	unsigned long mask = GID_ATTR_FIND_MASK_GID |
			     GID_ATTR_FIND_MASK_GID_TYPE;
	struct ib_gid_attr val = {.ndev = ndev, .gid_type = gid_type};
	unsigned long flags;

	if (!rdma_is_port_valid(ib_dev, port))
		return -ENOENT;

	table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid;

	if (ndev)
		mask |= GID_ATTR_FIND_MASK_NETDEV;

	read_lock_irqsave(&table->rwlock, flags);
	local_index = find_gid(table, gid, &val, false, mask, NULL);
	if (local_index >= 0) {
		if (index)
			*index = local_index;
		read_unlock_irqrestore(&table->rwlock, flags);
		return 0;
	}

	read_unlock_irqrestore(&table->rwlock, flags);
	return -ENOENT;
}
EXPORT_SYMBOL(ib_find_cached_gid_by_port);
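/*
 * Example lookup (illustrative; sgid, port_num and ndev are assumptions
 * of this sketch): resolving the SGID index for an address handle on a
 * known port:
 *
 *	u16 gid_index;
 *	int ret;
 *
 *	ret = ib_find_cached_gid_by_port(device, &sgid,
 *					 IB_GID_TYPE_ROCE_UDP_ENCAP,
 *					 port_num, ndev, &gid_index);
 *
 * On success gid_index can be used as the SGID index in AH attributes;
 * -ENOENT means the GID is not in this port's table.
 */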
/**
 * ib_find_gid_by_filter - Returns the GID table index where a specified
 * GID value occurs
 * @device: The device to query.
 * @gid: The GID value to search for.
 * @port_num: The port number of the device where the GID value could be
 *   searched.
 * @filter: The filter function is executed on any matching GID in the table.
 *   If the filter function returns true, the corresponding index is returned,
 *   otherwise, we continue searching the GID table. It's guaranteed that
 *   while filter is executed, ndev field is valid and the structure won't
 *   change. filter is executed in an atomic context. filter must not be NULL.
 * @index: The index into the cached GID table where the GID was found. This
 *   parameter may be NULL.
 *
 * ib_cache_gid_find_by_filter() searches for the specified GID value
 * of which the filter function returns true in the port's GID table.
 * This function is only supported on RoCE ports.
 */
static int ib_cache_gid_find_by_filter(struct ib_device *ib_dev,
				       const union ib_gid *gid,
				       u8 port,
				       bool (*filter)(const union ib_gid *,
						      const struct ib_gid_attr *,
						      void *),
				       void *context,
				       u16 *index)
{
	struct ib_gid_table *table;
	unsigned int i;
	unsigned long flags;
	bool found = false;

	if (!rdma_is_port_valid(ib_dev, port) ||
	    !rdma_protocol_roce(ib_dev, port))
		return -EPROTONOSUPPORT;

	table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid;

	read_lock_irqsave(&table->rwlock, flags);
	for (i = 0; i < table->sz; i++) {
		struct ib_gid_attr attr;

		if (table->data_vec[i].props & GID_TABLE_ENTRY_INVALID)
			continue;

		if (memcmp(gid, &table->data_vec[i].gid, sizeof(*gid)))
			continue;

		memcpy(&attr, &table->data_vec[i].attr, sizeof(attr));

		if (filter(gid, &attr, context)) {
			found = true;
			if (index)
				*index = i;
			break;
		}
	}
	read_unlock_irqrestore(&table->rwlock, flags);

	if (!found)
		return -ENOENT;
	return 0;
}
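/*
 * Sketch of a filter callback (illustrative; match_vlan is hypothetical
 * and the VLAN helpers come from <linux/if_vlan.h>). The filter runs
 * under the table rwlock, so it must not sleep:
 *
 *	static bool match_vlan(const union ib_gid *gid,
 *			       const struct ib_gid_attr *attr,
 *			       void *context)
 *	{
 *		u16 vlan_id = *(u16 *)context;
 *
 *		return attr->ndev && is_vlan_dev(attr->ndev) &&
 *		       vlan_dev_vlan_id(attr->ndev) == vlan_id;
 *	}
 *
 * It would be passed to ib_find_gid_by_filter() along with &vlan_id as
 * @context.
 */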
static struct ib_gid_table *alloc_gid_table(int sz)
{
	struct ib_gid_table *table =
		kzalloc(sizeof(struct ib_gid_table), GFP_KERNEL);

	if (!table)
		return NULL;

	table->data_vec = kcalloc(sz, sizeof(*table->data_vec), GFP_KERNEL);
	if (!table->data_vec)
		goto err_free_table;

	mutex_init(&table->lock);

	table->sz = sz;
	rwlock_init(&table->rwlock);

	return table;

err_free_table:
	kfree(table);
	return NULL;
}

static void release_gid_table(struct ib_gid_table *table)
{
	if (table) {
		kfree(table->data_vec);
		kfree(table);
	}
}
static void cleanup_gid_table_port(struct ib_device *ib_dev, u8 port,
				   struct ib_gid_table *table)
{
	int i;
	bool deleted = false;

	if (!table)
		return;

	write_lock_irq(&table->rwlock);
	for (i = 0; i < table->sz; ++i) {
		if (memcmp(&table->data_vec[i].gid, &zgid,
			   sizeof(table->data_vec[i].gid)))
			if (!del_gid(ib_dev, port, table, i,
				     !!(table->data_vec[i].props &
					GID_TABLE_ENTRY_DEFAULT)))
				deleted = true;
	}
	write_unlock_irq(&table->rwlock);

	if (deleted)
		dispatch_gid_change_event(ib_dev, port);
}

void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u8 port,
				  struct net_device *ndev,
				  unsigned long gid_type_mask,
				  enum ib_cache_gid_default_mode mode)
{
	union ib_gid gid;
	struct ib_gid_attr gid_attr;
	struct ib_gid_attr zattr_type = zattr;
	struct ib_gid_table *table;
	unsigned int gid_type;

	table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid;

	make_default_gid(ndev, &gid);
	memset(&gid_attr, 0, sizeof(gid_attr));
	gid_attr.ndev = ndev;

	for (gid_type = 0; gid_type < IB_GID_TYPE_SIZE; ++gid_type) {
		int ix;
		union ib_gid current_gid;
		struct ib_gid_attr current_gid_attr = {};

		if (1UL << gid_type & ~gid_type_mask)
			continue;

		gid_attr.gid_type = gid_type;

		mutex_lock(&table->lock);
		write_lock_irq(&table->rwlock);
		ix = find_gid(table, NULL, &gid_attr, true,
			      GID_ATTR_FIND_MASK_GID_TYPE |
			      GID_ATTR_FIND_MASK_DEFAULT,
			      NULL);

		/* Couldn't find default GID location */
		if (WARN_ON(ix < 0))
			goto release;

		zattr_type.gid_type = gid_type;

		if (!__ib_cache_gid_get(ib_dev, port, ix,
					&current_gid, &current_gid_attr) &&
		    mode == IB_CACHE_GID_DEFAULT_MODE_SET &&
		    !memcmp(&gid, &current_gid, sizeof(gid)) &&
		    !memcmp(&gid_attr, &current_gid_attr, sizeof(gid_attr)))
			goto release;

		if (memcmp(&current_gid, &zgid, sizeof(current_gid)) ||
		    memcmp(&current_gid_attr, &zattr_type,
			   sizeof(current_gid_attr))) {
			if (del_gid(ib_dev, port, table, ix, true)) {
				pr_warn("ib_cache_gid: can't delete index %d for default gid %pI6\n",
					ix, gid.raw);
				goto release;
			} else {
				dispatch_gid_change_event(ib_dev, port);
			}
		}

		if (mode == IB_CACHE_GID_DEFAULT_MODE_SET) {
			if (add_gid(ib_dev, port, table, ix, &gid, &gid_attr, true))
				pr_warn("ib_cache_gid: unable to add default gid %pI6\n",
					gid.raw);
			else
				dispatch_gid_change_event(ib_dev, port);
		}

release:
		if (current_gid_attr.ndev)
			dev_put(current_gid_attr.ndev);
		write_unlock_irq(&table->rwlock);
		mutex_unlock(&table->lock);
	}
}
static int gid_table_reserve_default(struct ib_device *ib_dev, u8 port,
				     struct ib_gid_table *table)
{
	unsigned int i;
	unsigned long roce_gid_type_mask;
	unsigned int num_default_gids;
	unsigned int current_gid = 0;

	roce_gid_type_mask = roce_gid_type_mask_support(ib_dev, port);
	num_default_gids = hweight_long(roce_gid_type_mask);
	for (i = 0; i < num_default_gids && i < table->sz; i++) {
		struct ib_gid_table_entry *entry =
			&table->data_vec[i];

		entry->props |= GID_TABLE_ENTRY_DEFAULT;
		current_gid = find_next_bit(&roce_gid_type_mask,
					    BITS_PER_LONG,
					    current_gid);
		entry->attr.gid_type = current_gid++;
	}

	return 0;
}

static int _gid_table_setup_one(struct ib_device *ib_dev)
{
	u8 port;
	struct ib_gid_table *table;
	int err = 0;

	for (port = 0; port < ib_dev->phys_port_cnt; port++) {
		u8 rdma_port = port + rdma_start_port(ib_dev);

		table =
			alloc_gid_table(
				ib_dev->port_immutable[rdma_port].gid_tbl_len);
		if (!table) {
			err = -ENOMEM;
			goto rollback_table_setup;
		}

		err = gid_table_reserve_default(ib_dev,
						port + rdma_start_port(ib_dev),
						table);
		if (err)
			goto rollback_table_setup;
		ib_dev->cache.ports[port].gid = table;
	}

	return 0;

rollback_table_setup:
	for (port = 0; port < ib_dev->phys_port_cnt; port++) {
		table = ib_dev->cache.ports[port].gid;

		cleanup_gid_table_port(ib_dev, port + rdma_start_port(ib_dev),
				       table);
		release_gid_table(table);
	}

	return err;
}

static void gid_table_release_one(struct ib_device *ib_dev)
{
	struct ib_gid_table *table;
	u8 port;

	for (port = 0; port < ib_dev->phys_port_cnt; port++) {
		table = ib_dev->cache.ports[port].gid;
		release_gid_table(table);
		ib_dev->cache.ports[port].gid = NULL;
	}
}

static void gid_table_cleanup_one(struct ib_device *ib_dev)
{
	struct ib_gid_table *table;
	u8 port;

	for (port = 0; port < ib_dev->phys_port_cnt; port++) {
		table = ib_dev->cache.ports[port].gid;
		cleanup_gid_table_port(ib_dev, port + rdma_start_port(ib_dev),
				       table);
	}
}

static int gid_table_setup_one(struct ib_device *ib_dev)
{
	int err;

	err = _gid_table_setup_one(ib_dev);

	if (err)
		return err;

	rdma_roce_rescan_device(ib_dev);

	return err;
}

int ib_get_cached_gid(struct ib_device *device,
		      u8 port_num,
		      int index,
		      union ib_gid *gid,
		      struct ib_gid_attr *gid_attr)
{
	int res;
	unsigned long flags;
	struct ib_gid_table *table;

	if (!rdma_is_port_valid(device, port_num))
		return -EINVAL;

	table = device->cache.ports[port_num - rdma_start_port(device)].gid;
	read_lock_irqsave(&table->rwlock, flags);
	res = __ib_cache_gid_get(device, port_num, index, gid, gid_attr);
	read_unlock_irqrestore(&table->rwlock, flags);

	return res;
}
EXPORT_SYMBOL(ib_get_cached_gid);
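/*
 * Illustrative use (e.g. from a ULP; not called from this file). When a
 * non-NULL gid_attr is passed, __ib_cache_gid_get() takes a reference
 * on attr.ndev which the caller must drop:
 *
 *	union ib_gid gid;
 *	struct ib_gid_attr attr;
 *
 *	ret = ib_get_cached_gid(device, port_num, index, &gid, &attr);
 *	if (!ret && attr.ndev)
 *		dev_put(attr.ndev);
 */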
int ib_find_cached_gid(struct ib_device *device,
		       const union ib_gid *gid,
		       enum ib_gid_type gid_type,
		       struct net_device *ndev,
		       u8 *port_num,
		       u16 *index)
{
	return ib_cache_gid_find(device, gid, gid_type, ndev, port_num, index);
}
EXPORT_SYMBOL(ib_find_cached_gid);

int ib_find_gid_by_filter(struct ib_device *device,
			  const union ib_gid *gid,
			  u8 port_num,
			  bool (*filter)(const union ib_gid *gid,
					 const struct ib_gid_attr *,
					 void *),
			  void *context, u16 *index)
{
	/* Only RoCE GID table supports filter function */
	if (!rdma_cap_roce_gid_table(device, port_num) && filter)
		return -EPROTONOSUPPORT;

	return ib_cache_gid_find_by_filter(device, gid,
					   port_num, filter,
					   context, index);
}

int ib_get_cached_pkey(struct ib_device *device,
		       u8 port_num,
		       int index,
		       u16 *pkey)
{
	struct ib_pkey_cache *cache;
	unsigned long flags;
	int ret = 0;

	if (!rdma_is_port_valid(device, port_num))
		return -EINVAL;

	read_lock_irqsave(&device->cache.lock, flags);

	cache = device->cache.ports[port_num - rdma_start_port(device)].pkey;

	if (index < 0 || index >= cache->table_len)
		ret = -EINVAL;
	else
		*pkey = cache->table[index];

	read_unlock_irqrestore(&device->cache.lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_get_cached_pkey);

int ib_get_cached_subnet_prefix(struct ib_device *device,
				u8 port_num,
				u64 *sn_pfx)
{
	unsigned long flags;
	int p;

	if (port_num < rdma_start_port(device) ||
	    port_num > rdma_end_port(device))
		return -EINVAL;

	p = port_num - rdma_start_port(device);
	read_lock_irqsave(&device->cache.lock, flags);
	*sn_pfx = device->cache.ports[p].subnet_prefix;
	read_unlock_irqrestore(&device->cache.lock, flags);

	return 0;
}
EXPORT_SYMBOL(ib_get_cached_subnet_prefix);

int ib_find_cached_pkey(struct ib_device *device,
			u8 port_num,
			u16 pkey,
			u16 *index)
{
	struct ib_pkey_cache *cache;
	unsigned long flags;
	int i;
	int ret = -ENOENT;
	int partial_ix = -1;

	if (!rdma_is_port_valid(device, port_num))
		return -EINVAL;

	read_lock_irqsave(&device->cache.lock, flags);

	cache = device->cache.ports[port_num - rdma_start_port(device)].pkey;

	*index = -1;

	for (i = 0; i < cache->table_len; ++i)
		if ((cache->table[i] & 0x7fff) == (pkey & 0x7fff)) {
			if (cache->table[i] & 0x8000) {
				*index = i;
				ret = 0;
				break;
			} else
				partial_ix = i;
		}

	if (ret && partial_ix >= 0) {
		*index = partial_ix;
		ret = 0;
	}

	read_unlock_irqrestore(&device->cache.lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_find_cached_pkey);
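/*
 * Example (illustrative): the top bit of a P_Key is the membership bit,
 * which is why the loop above compares only the low 15 bits and prefers
 * full members. Searching for the default P_Key:
 *
 *	u16 index;
 *	int ret;
 *
 *	ret = ib_find_cached_pkey(device, port_num, 0xffff, &index);
 *
 * returns a full-member entry (0xffff) when present and otherwise falls
 * back to a partial-member entry (0x7fff).
 */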
int ib_find_exact_cached_pkey(struct ib_device *device,
			      u8 port_num,
			      u16 pkey,
			      u16 *index)
{
	struct ib_pkey_cache *cache;
	unsigned long flags;
	int i;
	int ret = -ENOENT;

	if (!rdma_is_port_valid(device, port_num))
		return -EINVAL;

	read_lock_irqsave(&device->cache.lock, flags);

	cache = device->cache.ports[port_num - rdma_start_port(device)].pkey;

	*index = -1;

	for (i = 0; i < cache->table_len; ++i)
		if (cache->table[i] == pkey) {
			*index = i;
			ret = 0;
			break;
		}

	read_unlock_irqrestore(&device->cache.lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_find_exact_cached_pkey);

int ib_get_cached_lmc(struct ib_device *device,
		      u8 port_num,
		      u8 *lmc)
{
	unsigned long flags;
	int ret = 0;

	if (!rdma_is_port_valid(device, port_num))
		return -EINVAL;

	read_lock_irqsave(&device->cache.lock, flags);
	*lmc = device->cache.ports[port_num - rdma_start_port(device)].lmc;
	read_unlock_irqrestore(&device->cache.lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_get_cached_lmc);

int ib_get_cached_port_state(struct ib_device *device,
			     u8 port_num,
			     enum ib_port_state *port_state)
{
	unsigned long flags;
	int ret = 0;

	if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
		return -EINVAL;

	read_lock_irqsave(&device->cache.lock, flags);
	*port_state = device->cache.ports[port_num
		- rdma_start_port(device)].port_state;
	read_unlock_irqrestore(&device->cache.lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_get_cached_port_state);
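/*
 * Illustrative check (not part of this file): a caller can consult the
 * cached state instead of issuing a full ib_query_port():
 *
 *	enum ib_port_state state;
 *
 *	if (!ib_get_cached_port_state(device, port_num, &state) &&
 *	    state == IB_PORT_ACTIVE)
 *		... the port is usable ...
 *
 * The value may lag the hardware until the next port event triggers
 * ib_cache_update().
 */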
static void ib_cache_update(struct ib_device *device,
			    u8 port,
			    bool enforce_security)
{
	struct ib_port_attr *tprops = NULL;
	struct ib_pkey_cache *pkey_cache = NULL, *old_pkey_cache;
	struct ib_gid_cache {
		int		table_len;
		union ib_gid	table[0];
	} *gid_cache = NULL;
	int i;
	int ret;
	struct ib_gid_table *table;
	bool use_roce_gid_table =
		rdma_cap_roce_gid_table(device, port);

	if (!rdma_is_port_valid(device, port))
		return;

	table = device->cache.ports[port - rdma_start_port(device)].gid;

	tprops = kmalloc(sizeof *tprops, GFP_KERNEL);
	if (!tprops)
		return;

	ret = ib_query_port(device, port, tprops);
	if (ret) {
		pr_warn("ib_query_port failed (%d) for %s\n",
			ret, device->name);
		goto err;
	}

	pkey_cache = kmalloc(sizeof *pkey_cache + tprops->pkey_tbl_len *
			     sizeof *pkey_cache->table, GFP_KERNEL);
	if (!pkey_cache)
		goto err;

	pkey_cache->table_len = tprops->pkey_tbl_len;

	if (!use_roce_gid_table) {
		gid_cache = kmalloc(sizeof(*gid_cache) + tprops->gid_tbl_len *
				    sizeof(*gid_cache->table), GFP_KERNEL);
		if (!gid_cache)
			goto err;

		gid_cache->table_len = tprops->gid_tbl_len;
	}

	for (i = 0; i < pkey_cache->table_len; ++i) {
		ret = ib_query_pkey(device, port, i, pkey_cache->table + i);
		if (ret) {
			pr_warn("ib_query_pkey failed (%d) for %s (index %d)\n",
				ret, device->name, i);
			goto err;
		}
	}

	if (!use_roce_gid_table) {
		for (i = 0; i < gid_cache->table_len; ++i) {
			ret = ib_query_gid(device, port, i,
					   gid_cache->table + i, NULL);
			if (ret) {
				pr_warn("ib_query_gid failed (%d) for %s (index %d)\n",
					ret, device->name, i);
				goto err;
			}
		}
	}

	write_lock_irq(&device->cache.lock);

	old_pkey_cache = device->cache.ports[port -
					     rdma_start_port(device)].pkey;

	device->cache.ports[port - rdma_start_port(device)].pkey = pkey_cache;
	if (!use_roce_gid_table) {
		write_lock(&table->rwlock);
		for (i = 0; i < gid_cache->table_len; i++) {
			modify_gid(device, port, table, i, gid_cache->table + i,
				   &zattr, false);
		}
		write_unlock(&table->rwlock);
	}

	device->cache.ports[port - rdma_start_port(device)].lmc = tprops->lmc;
	device->cache.ports[port - rdma_start_port(device)].port_state =
		tprops->state;

	device->cache.ports[port - rdma_start_port(device)].subnet_prefix =
		tprops->subnet_prefix;
	write_unlock_irq(&device->cache.lock);

	if (enforce_security)
		ib_security_cache_change(device,
					 port,
					 tprops->subnet_prefix);

	kfree(gid_cache);
	kfree(old_pkey_cache);
	kfree(tprops);
	return;

err:
	kfree(pkey_cache);
	kfree(gid_cache);
	kfree(tprops);
}
static void ib_cache_task(struct work_struct *_work)
{
	struct ib_update_work *work =
		container_of(_work, struct ib_update_work, work);

	ib_cache_update(work->device,
			work->port_num,
			work->enforce_security);
	kfree(work);
}

static void ib_cache_event(struct ib_event_handler *handler,
			   struct ib_event *event)
{
	struct ib_update_work *work;

	if (event->event == IB_EVENT_PORT_ERR    ||
	    event->event == IB_EVENT_PORT_ACTIVE ||
	    event->event == IB_EVENT_LID_CHANGE  ||
	    event->event == IB_EVENT_PKEY_CHANGE ||
	    event->event == IB_EVENT_SM_CHANGE   ||
	    event->event == IB_EVENT_CLIENT_REREGISTER ||
	    event->event == IB_EVENT_GID_CHANGE) {
		work = kmalloc(sizeof *work, GFP_ATOMIC);
		if (work) {
			INIT_WORK(&work->work, ib_cache_task);
			work->device   = event->device;
			work->port_num = event->element.port_num;
			if (event->event == IB_EVENT_PKEY_CHANGE ||
			    event->event == IB_EVENT_GID_CHANGE)
				work->enforce_security = true;
			else
				work->enforce_security = false;

			queue_work(ib_wq, &work->work);
		}
	}
}

int ib_cache_setup_one(struct ib_device *device)
{
	int p;
	int err;

	rwlock_init(&device->cache.lock);

	device->cache.ports =
		kzalloc(sizeof(*device->cache.ports) *
			(rdma_end_port(device) - rdma_start_port(device) + 1), GFP_KERNEL);
	if (!device->cache.ports)
		return -ENOMEM;

	err = gid_table_setup_one(device);
	if (err) {
		kfree(device->cache.ports);
		device->cache.ports = NULL;
		return err;
	}

	for (p = 0; p <= rdma_end_port(device) - rdma_start_port(device); ++p)
		ib_cache_update(device, p + rdma_start_port(device), true);

	INIT_IB_EVENT_HANDLER(&device->cache.event_handler,
			      device, ib_cache_event);
	ib_register_event_handler(&device->cache.event_handler);
	return 0;
}
void ib_cache_release_one(struct ib_device *device)
{
	int p;

	/*
	 * The release function frees all the cache elements.
	 * This function should be called as part of freeing
	 * all the device's resources when the cache can no
	 * longer be accessed.
	 */
	for (p = 0; p <= rdma_end_port(device) - rdma_start_port(device); ++p)
		kfree(device->cache.ports[p].pkey);

	gid_table_release_one(device);
	kfree(device->cache.ports);
}

void ib_cache_cleanup_one(struct ib_device *device)
{
	/* The cleanup function unregisters the event handler,
	 * waits for all in-progress workqueue elements and cleans
	 * up the GID cache. This function should be called after
	 * the device has been removed from the devices list and all
	 * clients have been removed, so the cache still exists but is
	 * non-functional and shouldn't be updated anymore.
	 */
	ib_unregister_event_handler(&device->cache.event_handler);
	flush_workqueue(ib_wq);
	gid_table_cleanup_one(device);
}

void __init ib_cache_setup(void)
{
	roce_gid_mgmt_init();
}

void __exit ib_cache_cleanup(void)
{
	roce_gid_mgmt_cleanup();
}