/*
 * This file is part of the Chelsio T4 Ethernet driver for Linux.
 *
 * Copyright (c) 2016 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
35 #include <linux/module.h>
36 #include <linux/netdevice.h>
41 /* Spinlock must be held by caller */
42 static int t4_sched_class_fw_cmd(struct port_info
*pi
,
43 struct ch_sched_params
*p
,
46 struct adapter
*adap
= pi
->adapter
;
47 struct sched_table
*s
= pi
->sched_tbl
;
48 struct sched_class
*e
;
51 e
= &s
->tab
[p
->u
.params
.class];
54 err
= t4_sched_params(adap
, p
->type
,
55 p
->u
.params
.level
, p
->u
.params
.mode
,
58 p
->u
.params
.channel
, e
->idx
,
59 p
->u
.params
.minrate
, p
->u
.params
.maxrate
,
60 p
->u
.params
.weight
, p
->u
.params
.pktsize
);
70 /* Spinlock must be held by caller */
71 static int t4_sched_bind_unbind_op(struct port_info
*pi
, void *arg
,
72 enum sched_bind_type type
, bool bind
)
74 struct adapter
*adap
= pi
->adapter
;
75 u32 fw_mnem
, fw_class
, fw_param
;
76 unsigned int pf
= adap
->pf
;
82 struct sched_queue_entry
*qe
;
84 qe
= (struct sched_queue_entry
*)arg
;
86 /* Create a template for the FW_PARAMS_CMD mnemonic and
87 * value (TX Scheduling Class in this case).
89 fw_mnem
= (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ
) |
91 FW_PARAMS_PARAM_DMAQ_EQ_SCHEDCLASS_ETH
));
92 fw_class
= bind
? qe
->param
.class : FW_SCHED_CLS_NONE
;
93 fw_param
= (fw_mnem
| FW_PARAMS_PARAM_YZ_V(qe
->cntxt_id
));
104 err
= t4_set_params(adap
, adap
->mbox
, pf
, vf
, 1, &fw_param
, &fw_class
);
110 static struct sched_class
*t4_sched_queue_lookup(struct port_info
*pi
,
111 const unsigned int qid
,
114 struct sched_table
*s
= pi
->sched_tbl
;
115 struct sched_class
*e
, *end
;
116 struct sched_class
*found
= NULL
;
119 /* Look for a class with matching bound queue parameters */
120 end
= &s
->tab
[s
->sched_size
];
121 for (e
= &s
->tab
[0]; e
!= end
; ++e
) {
122 struct sched_queue_entry
*qe
;
125 if (e
->state
== SCHED_STATE_UNUSED
)
128 list_for_each_entry(qe
, &e
->queue_list
, list
) {
129 if (qe
->cntxt_id
== qid
) {
145 static int t4_sched_queue_unbind(struct port_info
*pi
, struct ch_sched_queue
*p
)
147 struct adapter
*adap
= pi
->adapter
;
148 struct sched_class
*e
;
149 struct sched_queue_entry
*qe
= NULL
;
150 struct sge_eth_txq
*txq
;
155 if (p
->queue
< 0 || p
->queue
>= pi
->nqsets
)
158 txq
= &adap
->sge
.ethtxq
[pi
->first_qset
+ p
->queue
];
159 qid
= txq
->q
.cntxt_id
;
161 /* Find the existing class that the queue is bound to */
162 e
= t4_sched_queue_lookup(pi
, qid
, &index
);
163 if (e
&& index
>= 0) {
167 list_for_each_entry(qe
, &e
->queue_list
, list
) {
172 err
= t4_sched_bind_unbind_op(pi
, (void *)qe
, SCHED_QUEUE
,
175 spin_unlock(&e
->lock
);
181 if (atomic_dec_and_test(&e
->refcnt
)) {
182 e
->state
= SCHED_STATE_UNUSED
;
183 memset(&e
->info
, 0, sizeof(e
->info
));
185 spin_unlock(&e
->lock
);
191 static int t4_sched_queue_bind(struct port_info
*pi
, struct ch_sched_queue
*p
)
193 struct adapter
*adap
= pi
->adapter
;
194 struct sched_table
*s
= pi
->sched_tbl
;
195 struct sched_class
*e
;
196 struct sched_queue_entry
*qe
= NULL
;
197 struct sge_eth_txq
*txq
;
201 if (p
->queue
< 0 || p
->queue
>= pi
->nqsets
)
204 qe
= kvzalloc(sizeof(struct sched_queue_entry
), GFP_KERNEL
);
208 txq
= &adap
->sge
.ethtxq
[pi
->first_qset
+ p
->queue
];
209 qid
= txq
->q
.cntxt_id
;
211 /* Unbind queue from any existing class */
212 err
= t4_sched_queue_unbind(pi
, p
);
218 /* Bind queue to specified class */
219 memset(qe
, 0, sizeof(*qe
));
221 memcpy(&qe
->param
, p
, sizeof(qe
->param
));
223 e
= &s
->tab
[qe
->param
.class];
225 err
= t4_sched_bind_unbind_op(pi
, (void *)qe
, SCHED_QUEUE
, true);
228 spin_unlock(&e
->lock
);
232 list_add_tail(&qe
->list
, &e
->queue_list
);
233 atomic_inc(&e
->refcnt
);
234 spin_unlock(&e
->lock
);
239 static void t4_sched_class_unbind_all(struct port_info
*pi
,
240 struct sched_class
*e
,
241 enum sched_bind_type type
)
248 struct sched_queue_entry
*qe
;
250 list_for_each_entry(qe
, &e
->queue_list
, list
)
251 t4_sched_queue_unbind(pi
, &qe
->param
);
259 static int t4_sched_class_bind_unbind_op(struct port_info
*pi
, void *arg
,
260 enum sched_bind_type type
, bool bind
)
269 struct ch_sched_queue
*qe
= (struct ch_sched_queue
*)arg
;
272 err
= t4_sched_queue_bind(pi
, qe
);
274 err
= t4_sched_queue_unbind(pi
, qe
);
286 * cxgb4_sched_class_bind - Bind an entity to a scheduling class
287 * @dev: net_device pointer
288 * @arg: Entity opaque data
289 * @type: Entity type (Queue)
291 * Binds an entity (queue) to a scheduling class. If the entity
292 * is bound to another class, it will be unbound from the other class
293 * and bound to the class specified in @arg.
295 int cxgb4_sched_class_bind(struct net_device
*dev
, void *arg
,
296 enum sched_bind_type type
)
298 struct port_info
*pi
= netdev2pinfo(dev
);
299 struct sched_table
*s
;
311 struct ch_sched_queue
*qe
= (struct ch_sched_queue
*)arg
;
313 class_id
= qe
->class;
320 if (!valid_class_id(dev
, class_id
))
323 if (class_id
== SCHED_CLS_NONE
)
327 write_lock(&s
->rw_lock
);
328 err
= t4_sched_class_bind_unbind_op(pi
, arg
, type
, true);
329 write_unlock(&s
->rw_lock
);
335 * cxgb4_sched_class_unbind - Unbind an entity from a scheduling class
336 * @dev: net_device pointer
337 * @arg: Entity opaque data
338 * @type: Entity type (Queue)
340 * Unbinds an entity (queue) from a scheduling class.
342 int cxgb4_sched_class_unbind(struct net_device
*dev
, void *arg
,
343 enum sched_bind_type type
)
345 struct port_info
*pi
= netdev2pinfo(dev
);
346 struct sched_table
*s
;
358 struct ch_sched_queue
*qe
= (struct ch_sched_queue
*)arg
;
360 class_id
= qe
->class;
367 if (!valid_class_id(dev
, class_id
))
371 write_lock(&s
->rw_lock
);
372 err
= t4_sched_class_bind_unbind_op(pi
, arg
, type
, false);
373 write_unlock(&s
->rw_lock
);
378 /* If @p is NULL, fetch any available unused class */
379 static struct sched_class
*t4_sched_class_lookup(struct port_info
*pi
,
380 const struct ch_sched_params
*p
)
382 struct sched_table
*s
= pi
->sched_tbl
;
383 struct sched_class
*e
, *end
;
384 struct sched_class
*found
= NULL
;
387 /* Get any available unused class */
388 end
= &s
->tab
[s
->sched_size
];
389 for (e
= &s
->tab
[0]; e
!= end
; ++e
) {
390 if (e
->state
== SCHED_STATE_UNUSED
) {
396 /* Look for a class with matching scheduling parameters */
397 struct ch_sched_params info
;
398 struct ch_sched_params tp
;
400 memcpy(&tp
, p
, sizeof(tp
));
401 /* Don't try to match class parameter */
402 tp
.u
.params
.class = SCHED_CLS_NONE
;
404 end
= &s
->tab
[s
->sched_size
];
405 for (e
= &s
->tab
[0]; e
!= end
; ++e
) {
406 if (e
->state
== SCHED_STATE_UNUSED
)
409 memcpy(&info
, &e
->info
, sizeof(info
));
410 /* Don't try to match class parameter */
411 info
.u
.params
.class = SCHED_CLS_NONE
;
413 if ((info
.type
== tp
.type
) &&
414 (!memcmp(&info
.u
.params
, &tp
.u
.params
,
415 sizeof(info
.u
.params
)))) {
425 static struct sched_class
*t4_sched_class_alloc(struct port_info
*pi
,
426 struct ch_sched_params
*p
)
428 struct sched_table
*s
= pi
->sched_tbl
;
429 struct sched_class
*e
;
436 class_id
= p
->u
.params
.class;
438 /* Only accept search for existing class with matching params
439 * or allocation of new class with specified params
441 if (class_id
!= SCHED_CLS_NONE
)
444 write_lock(&s
->rw_lock
);
445 /* See if there's an exisiting class with same
446 * requested sched params
448 e
= t4_sched_class_lookup(pi
, p
);
450 struct ch_sched_params np
;
452 /* Fetch any available unused class */
453 e
= t4_sched_class_lookup(pi
, NULL
);
457 memcpy(&np
, p
, sizeof(np
));
458 np
.u
.params
.class = e
->idx
;
462 err
= t4_sched_class_fw_cmd(pi
, &np
, SCHED_FW_OP_ADD
);
464 spin_unlock(&e
->lock
);
468 memcpy(&e
->info
, &np
, sizeof(e
->info
));
469 atomic_set(&e
->refcnt
, 0);
470 e
->state
= SCHED_STATE_ACTIVE
;
471 spin_unlock(&e
->lock
);
475 write_unlock(&s
->rw_lock
);
480 * cxgb4_sched_class_alloc - allocate a scheduling class
481 * @dev: net_device pointer
482 * @p: new scheduling class to create.
484 * Returns pointer to the scheduling class created. If @p is NULL, then
485 * it allocates and returns any available unused scheduling class. If a
486 * scheduling class with matching @p is found, then the matching class is
489 struct sched_class
*cxgb4_sched_class_alloc(struct net_device
*dev
,
490 struct ch_sched_params
*p
)
492 struct port_info
*pi
= netdev2pinfo(dev
);
498 class_id
= p
->u
.params
.class;
499 if (!valid_class_id(dev
, class_id
))
502 return t4_sched_class_alloc(pi
, p
);
505 static void t4_sched_class_free(struct port_info
*pi
, struct sched_class
*e
)
507 t4_sched_class_unbind_all(pi
, e
, SCHED_QUEUE
);
510 struct sched_table
*t4_init_sched(unsigned int sched_size
)
512 struct sched_table
*s
;
515 s
= kvzalloc(sizeof(*s
) + sched_size
* sizeof(struct sched_class
), GFP_KERNEL
);
519 s
->sched_size
= sched_size
;
520 rwlock_init(&s
->rw_lock
);
522 for (i
= 0; i
< s
->sched_size
; i
++) {
523 memset(&s
->tab
[i
], 0, sizeof(struct sched_class
));
525 s
->tab
[i
].state
= SCHED_STATE_UNUSED
;
526 INIT_LIST_HEAD(&s
->tab
[i
].queue_list
);
527 spin_lock_init(&s
->tab
[i
].lock
);
528 atomic_set(&s
->tab
[i
].refcnt
, 0);
533 void t4_cleanup_sched(struct adapter
*adap
)
535 struct sched_table
*s
;
538 for_each_port(adap
, j
) {
539 struct port_info
*pi
= netdev2pinfo(adap
->port
[j
]);
542 for (i
= 0; i
< s
->sched_size
; i
++) {
543 struct sched_class
*e
;
545 write_lock(&s
->rw_lock
);
547 if (e
->state
== SCHED_STATE_ACTIVE
)
548 t4_sched_class_free(pi
, e
);
549 write_unlock(&s
->rw_lock
);