 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 * Copyright 2017 Joyent, Inc.
 */
/*
 * General Soft rings - Simulating Rx rings in S/W.
 *
 * A soft ring is a data abstraction containing a queue and a worker
 * thread and represents a hardware Rx ring in software. Each soft
 * ring set (SRS) can have a collection of soft rings for separating
 * L3/L4-specific traffic (IPv4 from IPv6 or TCP from UDP) or for
 * allowing a higher degree of parallelism by sending traffic to
 * one of the soft rings for an SRS (using a hash on src IP or port).
 * Each soft ring worker thread can be bound to a different CPU,
 * allowing the processing for each soft ring to happen in parallel
 * and independently of the others.
 *
 * Protocol soft rings:
 *
 * Each SRS has at a minimum 3 soft rings: one each for IPv4 TCP,
 * IPv4 UDP, and the rest (OTH - for IPv6 and everything else). The
 * SRS does dynamic polling and enforces link-level bandwidth, but
 * it does so for all traffic (IPv4 and IPv6 and all protocols) on
 * that link. However, each protocol layer wants a different
 * behaviour. For instance, IPv4 TCP has per-CPU squeues which
 * enforce their own polling and flow control, so IPv4 TCP traffic
 * needs to go to a separate soft ring which can be polled by the
 * TCP squeue. This also allows the TCP squeue to push back flow
 * control all the way to the NIC hardware (if it puts its
 * corresponding soft ring in poll mode and the soft ring queue
 * builds up, the shared srs_poll_pkt_cnt goes up and the SRS
 * automatically stops more packets from entering the system).
 *
 * Similarly, UDP benefits from a DLS bypass and packet chaining,
 * so sending it to a separate soft ring is desired. All the rest
 * of the traffic (including IPv6) is sent to the OTH soft ring.
 * The IPv6 traffic currently goes through the OTH soft ring and
 * via DLS because it needs more processing to be done. Irrespective
 * of the sap (IPv4 or IPv6) or the transport, the dynamic polling,
 * B/W enforcement, cpu assignment, fanout, etc. apply to all
 * traffic, since they are implemented by the SRS, which is agnostic
 * to sap or transport.
 *
 * Fanout soft rings:
 *
 * On a multithreaded system, we can assign more CPUs and multithread
 * the stack by creating a soft ring per CPU and spreading traffic
 * based on a hash computed on src IP etc. Since we still need to
 * keep the protocol separation, we create a set of 3 soft rings per
 * CPU (specified by cpu list or degree of fanout); see the sketch
 * after this comment block.
 *
 * NOTE: See the block level comment on top of mac_sched.c
 */
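/*
 * Illustrative sketch of the fanout selection described above (the
 * real implementation lives in mac_sched.c; the field and function
 * names used here are assumptions, not the exact code):
 *
 *	hash = HASH(pkt_src_ip, pkt_src_port);
 *	softring = srs->srs_tcp_soft_rings[hash % srs->srs_tcp_ring_count];
 *
 * Packets of the same flow hash identically and therefore always land
 * on the same soft ring, and hence the same worker thread/CPU.
 */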
#include <sys/types.h>
#include <sys/callb.h>
#include <sys/sdt.h>
#include <sys/strsubr.h>
#include <sys/strsun.h>
#include <inet/ipsec_impl.h>
#include <inet/ip_impl.h>
#include <inet/sadb.h>
#include <inet/ipsecesp.h>
#include <inet/ipsecah.h>

#include <sys/mac_impl.h>
#include <sys/mac_client_impl.h>
#include <sys/mac_soft_ring.h>
#include <sys/mac_flow_impl.h>
#include <sys/mac_stat.h>
static void mac_rx_soft_ring_drain(mac_soft_ring_t *);
static void mac_soft_ring_fire(void *);
static void mac_soft_ring_worker(mac_soft_ring_t *);
static void mac_tx_soft_ring_drain(mac_soft_ring_t *);

uint32_t mac_tx_soft_ring_max_q_cnt = 100000;
uint32_t mac_tx_soft_ring_hiwat = 1000;

extern kmem_cache_t *mac_soft_ring_cache;
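/*
 * Append a soft ring to the doubly-linked list of soft rings hanging
 * off the SRS and bump srs_soft_ring_count. The caller must hold
 * srs_lock, as mac_soft_ring_create() does below.
 */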
#define	ADD_SOFTRING_TO_SET(mac_srs, softring) {			\
	if (mac_srs->srs_soft_ring_head == NULL) {			\
		mac_srs->srs_soft_ring_head = softring;			\
		mac_srs->srs_soft_ring_tail = softring;			\
	} else {							\
		/* ADD to the list */					\
		softring->s_ring_prev =					\
		    mac_srs->srs_soft_ring_tail;			\
		mac_srs->srs_soft_ring_tail->s_ring_next = softring;	\
		mac_srs->srs_soft_ring_tail = softring;			\
	}								\
	mac_srs->srs_soft_ring_count++;					\
}
/*
 * mac_soft_ring_worker_wakeup
 *
 * Wake up the soft ring worker thread to process the queue as long
 * as no one else is processing it and the upper layer (client) is
 * still ready to receive packets.
 */
void
mac_soft_ring_worker_wakeup(mac_soft_ring_t *ringp)
{
	ASSERT(MUTEX_HELD(&ringp->s_ring_lock));
	if (!(ringp->s_ring_state & S_RING_PROC) &&
	    !(ringp->s_ring_state & S_RING_BLANK) &&
	    (ringp->s_ring_tid == NULL)) {
		if (ringp->s_ring_wait != 0) {
			ringp->s_ring_tid =
			    timeout(mac_soft_ring_fire, ringp,
			    ringp->s_ring_wait);
		} else {
			/* Schedule the worker thread. */
			cv_signal(&ringp->s_ring_async);
		}
	}
}
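/*
 * Note: when s_ring_wait is non-zero, the wakeup above is deferred
 * through timeout(9F) (firing mac_soft_ring_fire()), presumably so
 * that more packets can accumulate before the worker runs; otherwise
 * the worker is signalled immediately.
 */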
/*
 * mac_soft_ring_create
 *
 * Create a soft ring, do the necessary setup and bind the worker
 * thread to the assigned CPU.
 */
mac_soft_ring_t *
mac_soft_ring_create(int id, clock_t wait, uint16_t type,
    pri_t pri, mac_client_impl_t *mcip, mac_soft_ring_set_t *mac_srs,
    processorid_t cpuid, mac_direct_rx_t rx_func, void *x_arg1,
    mac_resource_handle_t x_arg2)
{
	mac_soft_ring_t *ringp;
	char name[S_RING_NAMELEN];

	bzero(name, 64);
	ringp = kmem_cache_alloc(mac_soft_ring_cache, KM_SLEEP);

	if (type & ST_RING_TCP) {
		(void) snprintf(name, sizeof (name),
		    "mac_tcp_soft_ring_%d_%p", id, (void *)mac_srs);
	} else if (type & ST_RING_UDP) {
		(void) snprintf(name, sizeof (name),
		    "mac_udp_soft_ring_%d_%p", id, (void *)mac_srs);
	} else if (type & ST_RING_OTH) {
		(void) snprintf(name, sizeof (name),
		    "mac_oth_soft_ring_%d_%p", id, (void *)mac_srs);
	} else {
		ASSERT(type & ST_RING_TX);
		(void) snprintf(name, sizeof (name),
		    "mac_tx_soft_ring_%d_%p", id, (void *)mac_srs);
	}

	bzero(ringp, sizeof (mac_soft_ring_t));
	(void) strncpy(ringp->s_ring_name, name, S_RING_NAMELEN + 1);
	ringp->s_ring_name[S_RING_NAMELEN] = '\0';
	mutex_init(&ringp->s_ring_lock, NULL, MUTEX_DEFAULT, NULL);
	ringp->s_ring_notify_cb_info.mcbi_lockp = &ringp->s_ring_lock;

	ringp->s_ring_type = type;
	ringp->s_ring_wait = MSEC_TO_TICK(wait);
	ringp->s_ring_mcip = mcip;
	ringp->s_ring_set = mac_srs;

	/*
	 * Protect against access from DR callbacks (mac_walk_srs_bind/unbind)
	 * which can't grab the mac perimeter.
	 */
	mutex_enter(&mac_srs->srs_lock);
	ADD_SOFTRING_TO_SET(mac_srs, ringp);
	mutex_exit(&mac_srs->srs_lock);

	/*
	 * set the bind CPU to -1 to indicate
	 * no thread affinity set
	 */
	ringp->s_ring_cpuid = ringp->s_ring_cpuid_save = -1;
	ringp->s_ring_worker = thread_create(NULL, 0,
	    mac_soft_ring_worker, ringp, 0, &p0, TS_RUN, pri);
	if (type & ST_RING_TX) {
		ringp->s_ring_drain_func = mac_tx_soft_ring_drain;
		ringp->s_ring_tx_arg1 = x_arg1;
		ringp->s_ring_tx_arg2 = x_arg2;
		ringp->s_ring_tx_max_q_cnt = mac_tx_soft_ring_max_q_cnt;
		ringp->s_ring_tx_hiwat =
		    (mac_tx_soft_ring_hiwat > mac_tx_soft_ring_max_q_cnt) ?
		    mac_tx_soft_ring_max_q_cnt : mac_tx_soft_ring_hiwat;
		if (mcip->mci_state_flags & MCIS_IS_AGGR) {
			mac_srs_tx_t *tx = &mac_srs->srs_tx;

			ASSERT(tx->st_soft_rings[
			    ((mac_ring_t *)x_arg2)->mr_index] == NULL);
			tx->st_soft_rings[((mac_ring_t *)x_arg2)->mr_index] =
			    ringp;
		}
	} else {
		ringp->s_ring_drain_func = mac_rx_soft_ring_drain;
		ringp->s_ring_rx_func = rx_func;
		ringp->s_ring_rx_arg1 = x_arg1;
		ringp->s_ring_rx_arg2 = x_arg2;
		if (mac_srs->srs_state & SRS_SOFTRING_QUEUE)
			ringp->s_ring_type |= ST_RING_WORKER_ONLY;
	}
	if (cpuid != -1)
		(void) mac_soft_ring_bind(ringp, cpuid);

	mac_soft_ring_stat_create(ringp);

	return (ringp);
}
/*
 * mac_soft_ring_free
 *
 * Free the soft ring once we are done with it.
 */
void
mac_soft_ring_free(mac_soft_ring_t *softring)
{
	ASSERT((softring->s_ring_state &
	    (S_RING_CONDEMNED | S_RING_CONDEMNED_DONE | S_RING_PROC)) ==
	    (S_RING_CONDEMNED | S_RING_CONDEMNED_DONE));
	mac_pkt_drop(NULL, NULL, softring->s_ring_first, B_FALSE);
	softring->s_ring_tx_arg2 = NULL;
	mac_soft_ring_stat_delete(softring);
	mac_callback_free(softring->s_ring_notify_cb_list);
	kmem_cache_free(mac_soft_ring_cache, softring);
}
int mac_soft_ring_thread_bind = 1;

/*
 * mac_soft_ring_bind
 *
 * Bind a soft ring worker thread to supplied CPU.
 */
cpu_t *
mac_soft_ring_bind(mac_soft_ring_t *ringp, processorid_t cpuid)
{
	cpu_t *cp;
	boolean_t clear = B_FALSE;

	ASSERT(MUTEX_HELD(&cpu_lock));

	if (mac_soft_ring_thread_bind == 0) {
		DTRACE_PROBE1(mac__soft__ring__no__cpu__bound,
		    mac_soft_ring_t *, ringp);
		return (NULL);
	}

	cp = cpu_get(cpuid);
	if (cp == NULL || !cpu_is_online(cp))
		return (NULL);

	mutex_enter(&ringp->s_ring_lock);
	ringp->s_ring_state |= S_RING_BOUND;
	if (ringp->s_ring_cpuid != -1)
		clear = B_TRUE;
	ringp->s_ring_cpuid = cpuid;
	mutex_exit(&ringp->s_ring_lock);

	if (clear)
		thread_affinity_clear(ringp->s_ring_worker);

	DTRACE_PROBE2(mac__soft__ring__cpu__bound, mac_soft_ring_t *,
	    ringp, processorid_t, cpuid);

	thread_affinity_set(ringp->s_ring_worker, cpuid);

	return (cp);
}
/*
 * mac_soft_ring_unbind
 *
 * Unbind a soft ring worker thread.
 */
void
mac_soft_ring_unbind(mac_soft_ring_t *ringp)
{
	ASSERT(MUTEX_HELD(&cpu_lock));

	mutex_enter(&ringp->s_ring_lock);
	if (!(ringp->s_ring_state & S_RING_BOUND)) {
		ASSERT(ringp->s_ring_cpuid == -1);
		mutex_exit(&ringp->s_ring_lock);
		return;
	}

	ringp->s_ring_cpuid = -1;
	ringp->s_ring_state &= ~S_RING_BOUND;
	thread_affinity_clear(ringp->s_ring_worker);
	mutex_exit(&ringp->s_ring_lock);
}
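/*
 * mac_soft_ring_fire
 *
 * Timeout handler scheduled by mac_soft_ring_worker_wakeup(). It
 * clears the pending timeout id and, if the ring is not already
 * being processed, signals the worker thread.
 */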
static void
mac_soft_ring_fire(void *arg)
{
	mac_soft_ring_t	*ringp = arg;

	mutex_enter(&ringp->s_ring_lock);
	if (ringp->s_ring_tid == NULL) {
		mutex_exit(&ringp->s_ring_lock);
		return;
	}

	ringp->s_ring_tid = NULL;

	if (!(ringp->s_ring_state & S_RING_PROC)) {
		cv_signal(&ringp->s_ring_async);
	}
	mutex_exit(&ringp->s_ring_lock);
}
/*
 * mac_rx_soft_ring_drain
 *
 * Called when the worker-thread model (ST_RING_WORKER_ONLY) of
 * processing incoming packets is used. s_ring_first contains the
 * queued packets. s_ring_rx_func contains the upper level (client)
 * routine where the packets are destined and
 * s_ring_rx_arg1/s_ring_rx_arg2 are the cookies meant for the client.
 */
static void
mac_rx_soft_ring_drain(mac_soft_ring_t *ringp)
{
	mblk_t		*mp;
	void		*arg1;
	mac_resource_handle_t arg2;
	timeout_id_t	tid;
	mac_direct_rx_t	proc;
	size_t		sz;
	int		cnt;
	mac_soft_ring_set_t	*mac_srs = ringp->s_ring_set;

	ringp->s_ring_run = curthread;
	ASSERT(mutex_owned(&ringp->s_ring_lock));
	ASSERT(!(ringp->s_ring_state & S_RING_PROC));

	if ((tid = ringp->s_ring_tid) != NULL)
		ringp->s_ring_tid = NULL;

	ringp->s_ring_state |= S_RING_PROC;

	proc = ringp->s_ring_rx_func;
	arg1 = ringp->s_ring_rx_arg1;
	arg2 = ringp->s_ring_rx_arg2;

	while ((ringp->s_ring_first != NULL) &&
	    !(ringp->s_ring_state & S_RING_PAUSE)) {
		mp = ringp->s_ring_first;
		ringp->s_ring_first = NULL;
		ringp->s_ring_last = NULL;
		cnt = ringp->s_ring_count;
		ringp->s_ring_count = 0;
		sz = ringp->s_ring_size;
		ringp->s_ring_size = 0;
		mutex_exit(&ringp->s_ring_lock);

		if (tid != NULL) {
			(void) untimeout(tid);
			tid = NULL;
		}

		(*proc)(arg1, arg2, mp, NULL);

		/*
		 * If we have a soft ring set which is doing
		 * bandwidth control, we need to decrement its
		 * srs_size so it can have an accurate idea of
		 * what is the real data queued between SRS and
		 * its soft rings. We decrement the size for a
		 * packet only when it gets processed by both
		 * SRS and the soft ring.
		 */
		mutex_enter(&mac_srs->srs_lock);
		MAC_UPDATE_SRS_COUNT_LOCKED(mac_srs, cnt);
		MAC_UPDATE_SRS_SIZE_LOCKED(mac_srs, sz);
		mutex_exit(&mac_srs->srs_lock);

		mutex_enter(&ringp->s_ring_lock);
	}
	ringp->s_ring_state &= ~S_RING_PROC;
	if (ringp->s_ring_state & S_RING_CLIENT_WAIT)
		cv_signal(&ringp->s_ring_client_cv);
	ringp->s_ring_run = NULL;
}
/*
 * mac_soft_ring_worker
 *
 * The soft ring worker routine to process any queued packets. In
 * the normal case, the worker thread is bound to a CPU. If the soft
 * ring is dealing with TCP packets, then the worker thread will
 * be bound to the same CPU as the TCP squeue.
 */
static void
mac_soft_ring_worker(mac_soft_ring_t *ringp)
{
	kmutex_t *lock = &ringp->s_ring_lock;
	kcondvar_t *async = &ringp->s_ring_async;
	mac_soft_ring_set_t *srs = ringp->s_ring_set;
	callb_cpr_t cprinfo;

	CALLB_CPR_INIT(&cprinfo, lock, callb_generic_cpr, "mac_soft_ring");
	mutex_enter(lock);
start:
	for (;;) {
		while (((ringp->s_ring_first == NULL ||
		    (ringp->s_ring_state & (S_RING_BLOCK|S_RING_BLANK))) &&
		    !(ringp->s_ring_state & S_RING_PAUSE)) ||
		    (ringp->s_ring_state & S_RING_PROC)) {

			CALLB_CPR_SAFE_BEGIN(&cprinfo);
			cv_wait(async, lock);
			CALLB_CPR_SAFE_END(&cprinfo, lock);
		}

		/*
		 * Either we have work to do, or we have been asked to
		 * shutdown temporarily or permanently
		 */
		if (ringp->s_ring_state & S_RING_PAUSE)
			goto done;

		ringp->s_ring_drain_func(ringp);
	}
done:
	mutex_exit(lock);
	mutex_enter(&srs->srs_lock);
	mutex_enter(lock);

	ringp->s_ring_state |= S_RING_QUIESCE_DONE;
	if (!(ringp->s_ring_state & S_RING_CONDEMNED)) {
		srs->srs_soft_ring_quiesced_count++;
		cv_broadcast(&srs->srs_async);
		mutex_exit(&srs->srs_lock);
		while (!(ringp->s_ring_state &
		    (S_RING_RESTART | S_RING_CONDEMNED)))
			cv_wait(&ringp->s_ring_async, &ringp->s_ring_lock);
		mutex_exit(lock);
		mutex_enter(&srs->srs_lock);
		mutex_enter(lock);
		srs->srs_soft_ring_quiesced_count--;
		if (ringp->s_ring_state & S_RING_RESTART) {
			ASSERT(!(ringp->s_ring_state & S_RING_CONDEMNED));
			ringp->s_ring_state &= ~(S_RING_RESTART |
			    S_RING_QUIESCE | S_RING_QUIESCE_DONE);
			cv_broadcast(&srs->srs_async);
			mutex_exit(&srs->srs_lock);
			goto start;
		}
	}
	ASSERT(ringp->s_ring_state & S_RING_CONDEMNED);
	ringp->s_ring_state |= S_RING_CONDEMNED_DONE;
	CALLB_CPR_EXIT(&cprinfo);
	srs->srs_soft_ring_condemned_count++;
	cv_broadcast(&srs->srs_async);
	mutex_exit(&srs->srs_lock);
	thread_exit();
}
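/*
 * Worker state machine summary (from the logic above): S_RING_PAUSE
 * sends the worker into the quiesce handshake with its SRS; from
 * there, S_RING_RESTART puts it back into the main drain loop, while
 * S_RING_CONDEMNED makes it mark itself S_RING_CONDEMNED_DONE and
 * exit, after which mac_soft_ring_free() can reclaim the ring.
 */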
/*
 * mac_soft_ring_intr_enable and mac_soft_ring_intr_disable
 *
 * These functions are called to toggle the sending of packets to the
 * client. They are called by the client. The client gets the names
 * of these routines and the corresponding cookie (pointing to the
 * softring) during capability negotiation at setup time.
 *
 * Enabling allows the processing thread to send packets to the
 * client while disabling does the opposite.
 */
int
mac_soft_ring_intr_enable(void *arg)
{
	mac_soft_ring_t *ringp = (mac_soft_ring_t *)arg;
	mutex_enter(&ringp->s_ring_lock);
	ringp->s_ring_state &= ~S_RING_BLANK;
	if (ringp->s_ring_first != NULL)
		mac_soft_ring_worker_wakeup(ringp);
	mutex_exit(&ringp->s_ring_lock);
	return (0);
}
boolean_t
mac_soft_ring_intr_disable(void *arg)
{
	mac_soft_ring_t *ringp = (mac_soft_ring_t *)arg;
	boolean_t sring_blanked = B_FALSE;
	/*
	 * Stop worker thread from sending packets above.
	 * Squeue will poll soft ring when it needs packets.
	 */
	mutex_enter(&ringp->s_ring_lock);
	if (!(ringp->s_ring_state & S_RING_PROC)) {
		ringp->s_ring_state |= S_RING_BLANK;
		sring_blanked = B_TRUE;
	}
	mutex_exit(&ringp->s_ring_lock);
	return (sring_blanked);
}
/*
 * mac_soft_ring_poll
 *
 * This routine is called by the client to poll for packets from
 * the soft ring. The function name and cookie corresponding to
 * the soft ring are exchanged during capability negotiation at
 * setup time.
 */
mblk_t *
mac_soft_ring_poll(mac_soft_ring_t *ringp, size_t bytes_to_pickup)
{
	mblk_t	*head, *tail;
	mblk_t	*mp;
	size_t	sz = 0;
	int	cnt = 0;
	mac_soft_ring_set_t	*mac_srs = ringp->s_ring_set;

	ASSERT(mac_srs != NULL);

	mutex_enter(&ringp->s_ring_lock);
	head = tail = mp = ringp->s_ring_first;
	if (head == NULL) {
		mutex_exit(&ringp->s_ring_lock);
		return (NULL);
	}

	if (ringp->s_ring_size <= bytes_to_pickup) {
		head = ringp->s_ring_first;
		ringp->s_ring_first = NULL;
		ringp->s_ring_last = NULL;
		cnt = ringp->s_ring_count;
		ringp->s_ring_count = 0;
		sz = ringp->s_ring_size;
		ringp->s_ring_size = 0;
	} else {
		while (mp && sz <= bytes_to_pickup) {
			sz += msgdsize(mp);
			cnt++;
			tail = mp;
			mp = mp->b_next;
		}
		ringp->s_ring_count -= cnt;
		ringp->s_ring_size -= sz;
		tail->b_next = NULL;
		if (mp == NULL) {
			ringp->s_ring_first = NULL;
			ringp->s_ring_last = NULL;
			ASSERT(ringp->s_ring_count == 0);
		} else {
			ringp->s_ring_first = mp;
		}
	}

	mutex_exit(&ringp->s_ring_lock);
	/*
	 * Update the shared count and size counters so
	 * that SRS has an accurate idea of queued packets.
	 */
	mutex_enter(&mac_srs->srs_lock);
	MAC_UPDATE_SRS_COUNT_LOCKED(mac_srs, cnt);
	MAC_UPDATE_SRS_SIZE_LOCKED(mac_srs, sz);
	mutex_exit(&mac_srs->srs_lock);

	return (head);
}
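/*
 * Illustrative client-side cycle only (the real consumer is the TCP
 * squeue; the function pointers and cookie are obtained through
 * capability negotiation, so the direct calls below are placeholders):
 *
 *	if (mac_soft_ring_intr_disable(cookie)) {
 *		chain = mac_soft_ring_poll(softring, bytes_to_pickup);
 *		<process chain>
 *		(void) mac_soft_ring_intr_enable(cookie);
 *	}
 */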
/*
 * mac_soft_ring_dls_bypass
 *
 * Enable direct client (IP) callback function from the softrings.
 * Callers need to make sure they don't need any DLS layer processing.
 */
void
mac_soft_ring_dls_bypass(void *arg, mac_direct_rx_t rx_func, void *rx_arg1)
{
	mac_soft_ring_t		*softring = arg;
	mac_soft_ring_set_t	*srs;

	ASSERT(rx_func != NULL);

	mutex_enter(&softring->s_ring_lock);
	softring->s_ring_rx_func = rx_func;
	softring->s_ring_rx_arg1 = rx_arg1;
	mutex_exit(&softring->s_ring_lock);

	srs = softring->s_ring_set;
	mutex_enter(&srs->srs_lock);
	srs->srs_type |= SRST_DLS_BYPASS;
	mutex_exit(&srs->srs_lock);
}
/*
 * mac_soft_ring_signal
 *
 * Typically used to set the soft ring state to QUIESCE, CONDEMNED, or
 * RESTART.
 *
 * On the Rx side, the quiescing is done bottom up. After the Rx upcalls
 * from the driver are done, then the Rx SRS is quiesced and only then can
 * we signal the soft rings. Thus this function can't be called arbitrarily
 * without satisfying the prerequisites. On the Tx side, the threads from
 * the top need to be quiesced, then the Tx SRS, and only then can we
 * signal the Tx soft rings.
 */
void
mac_soft_ring_signal(mac_soft_ring_t *softring, uint_t sr_flag)
{
	mutex_enter(&softring->s_ring_lock);
	softring->s_ring_state |= sr_flag;
	cv_signal(&softring->s_ring_async);
	mutex_exit(&softring->s_ring_lock);
}
/*
 * mac_tx_soft_ring_drain
 *
 * The transmit side drain routine in case the soft ring was being
 * used to transmit packets.
 */
static void
mac_tx_soft_ring_drain(mac_soft_ring_t *ringp)
{
	mblk_t			*mp;
	void			*arg1;
	void			*arg2;
	mblk_t			*tail;
	uint_t			saved_pkt_count, saved_size;
	mac_tx_stats_t		stats;
	mac_soft_ring_set_t	*mac_srs = ringp->s_ring_set;

	saved_pkt_count = saved_size = 0;
	ringp->s_ring_run = curthread;
	ASSERT(mutex_owned(&ringp->s_ring_lock));
	ASSERT(!(ringp->s_ring_state & S_RING_PROC));

	ringp->s_ring_state |= S_RING_PROC;
	arg1 = ringp->s_ring_tx_arg1;
	arg2 = ringp->s_ring_tx_arg2;

	while (ringp->s_ring_first != NULL) {
		mp = ringp->s_ring_first;
		tail = ringp->s_ring_last;
		saved_pkt_count = ringp->s_ring_count;
		saved_size = ringp->s_ring_size;
		ringp->s_ring_first = NULL;
		ringp->s_ring_last = NULL;
		ringp->s_ring_count = 0;
		ringp->s_ring_size = 0;
		mutex_exit(&ringp->s_ring_lock);

		mp = mac_tx_send(arg1, arg2, mp, &stats);

		mutex_enter(&ringp->s_ring_lock);
		if (mp != NULL) {
			/* Device out of tx desc, set block */
			tail->b_next = ringp->s_ring_first;
			ringp->s_ring_first = mp;
			ringp->s_ring_count +=
			    (saved_pkt_count - stats.mts_opackets);
			ringp->s_ring_size += (saved_size - stats.mts_obytes);
			if (ringp->s_ring_last == NULL)
				ringp->s_ring_last = tail;

			if (ringp->s_ring_tx_woken_up) {
				ringp->s_ring_tx_woken_up = B_FALSE;
			} else {
				ringp->s_ring_state |= S_RING_BLOCK;
				ringp->s_st_stat.mts_blockcnt++;
			}

			ringp->s_ring_state &= ~S_RING_PROC;
			ringp->s_ring_run = NULL;
			return;
		} else {
			ringp->s_ring_tx_woken_up = B_FALSE;
			SRS_TX_STATS_UPDATE(mac_srs, &stats);
			SOFTRING_TX_STATS_UPDATE(ringp, &stats);
		}
	}

	if (ringp->s_ring_count == 0 && ringp->s_ring_state &
	    (S_RING_TX_HIWAT | S_RING_WAKEUP_CLIENT | S_RING_ENQUEUED)) {
		mac_client_impl_t *mcip = ringp->s_ring_mcip;
		boolean_t wakeup_required = B_FALSE;

		if (ringp->s_ring_state &
		    (S_RING_TX_HIWAT|S_RING_WAKEUP_CLIENT)) {
			wakeup_required = B_TRUE;
		}
		ringp->s_ring_state &=
		    ~(S_RING_TX_HIWAT | S_RING_WAKEUP_CLIENT |
		    S_RING_ENQUEUED);
		mutex_exit(&ringp->s_ring_lock);
		if (wakeup_required) {
			mac_tx_invoke_callbacks(mcip, (mac_tx_cookie_t)ringp);
			/*
			 * If the client is not the primary MAC client, then we
			 * need to send the notification to the client's upper
			 * MAC, i.e. mci_upper_mip.
			 */
			mac_tx_notify(mcip->mci_upper_mip != NULL ?
			    mcip->mci_upper_mip : mcip->mci_mip);
		}
		mutex_enter(&ringp->s_ring_lock);
	}
	ringp->s_ring_state &= ~S_RING_PROC;
	ringp->s_ring_run = NULL;
}