/*
 * SGI UltraViolet TLB flush routines.
 *
 * (c) 2008-2011 Cliff Wickman <cpw@sgi.com>, SGI.
 *
 * This code is released under the GNU General Public License version 2 or
 * later.
 */
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/debugfs.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>

#include <asm/mmu_context.h>
#include <asm/uv/uv.h>
#include <asm/uv/uv_mmrs.h>
#include <asm/uv/uv_hub.h>
#include <asm/uv/uv_bau.h>
#include <asm/irq_vectors.h>
#include <asm/timer.h>
/* timeouts in nanoseconds (indexed by UVH_AGING_PRESCALE_SEL urgency7 30:28) */
static int timeout_base_ns[] = {
		20,
		160,
		1280,
		10240,
		81920,
		655360,
		5242880,
		167772160
};

static int timeout_us;
static int nobau;
static int baudisabled;
static spinlock_t disable_lock;
static cycles_t congested_cycles;

static int max_concurr		= MAX_BAU_CONCURRENT;
static int max_concurr_const	= MAX_BAU_CONCURRENT;
static int plugged_delay	= PLUGGED_DELAY;
static int plugsb4reset		= PLUGSB4RESET;
static int timeoutsb4reset	= TIMEOUTSB4RESET;
static int ipi_reset_limit	= IPI_RESET_LIMIT;
static int complete_threshold	= COMPLETE_THRESHOLD;
static int congested_respns_us	= CONGESTED_RESPONSE_US;
static int congested_reps	= CONGESTED_REPS;
static int congested_period	= CONGESTED_PERIOD;
static struct tunables tunables[] = {
	{&max_concurr, MAX_BAU_CONCURRENT},	/* must be [0] */
	{&plugged_delay, PLUGGED_DELAY},
	{&plugsb4reset, PLUGSB4RESET},
	{&timeoutsb4reset, TIMEOUTSB4RESET},
	{&ipi_reset_limit, IPI_RESET_LIMIT},
	{&complete_threshold, COMPLETE_THRESHOLD},
	{&congested_respns_us, CONGESTED_RESPONSE_US},
	{&congested_reps, CONGESTED_REPS},
	{&congested_period, CONGESTED_PERIOD}
};

static struct dentry *tunables_dir;
static struct dentry *tunables_file;
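
/*
 * The nine tunables above are exposed through a single debugfs file
 * (/sys/kernel/debug/sgi_uv/bau_tunables; see tunables_read() and
 * tunables_write() below), written as nine whitespace-separated integers
 * in this same order.  max_concurr must stay in slot [0] because
 * parse_tunables_write() special-cases that slot when validating values.
 */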
/* these correspond to the statistics printed by ptc_seq_show() */
static char *stat_description[] = {
	"sent: number of shootdown messages sent",
	"stime: time spent sending messages",
	"numuvhubs: number of hubs targeted with shootdown",
	"numuvhubs16: number times 16 or more hubs targeted",
	"numuvhubs8: number times 8 or more hubs targeted",
	"numuvhubs4: number times 4 or more hubs targeted",
	"numuvhubs2: number times 2 or more hubs targeted",
	"numuvhubs1: number times 1 hub targeted",
	"numcpus: number of cpus targeted with shootdown",
	"dto: number of destination timeouts",
	"retries: destination timeout retries sent",
	"rok: destination timeouts successfully retried",
	"resetp: ipi-style resource resets for plugs",
	"resett: ipi-style resource resets for timeouts",
	"giveup: fall-backs to ipi-style shootdowns",
	"sto: number of source timeouts",
	"bz: number of stay-busy's",
	"throt: number times spun in throttle",
	"swack: image of UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE",
	"recv: shootdown messages received",
	"rtime: time spent processing messages",
	"all: shootdown all-tlb messages",
	"one: shootdown one-tlb messages",
	"mult: interrupts that found multiple messages",
	"none: interrupts that found no messages",
	"retry: number of retry messages processed",
	"canc: number messages canceled by retries",
	"nocan: number retries that found nothing to cancel",
	"reset: number of ipi-style reset requests processed",
	"rcan: number messages canceled by reset requests",
	"disable: number times use of the BAU was disabled",
	"enable: number times use of the BAU was re-enabled"
};
static int __init setup_nobau(char *arg)
{
	nobau = 1;
	return 0;
}
early_param("nobau", setup_nobau);
/* base pnode in this partition */
static int uv_base_pnode __read_mostly;
/* position of pnode (which is nasid>>1): */
static int uv_nshift __read_mostly;
static unsigned long uv_mmask __read_mostly;

static DEFINE_PER_CPU(struct ptc_stats, ptcstats);
static DEFINE_PER_CPU(struct bau_control, bau_control);
static DEFINE_PER_CPU(cpumask_var_t, uv_flush_tlb_mask);
/*
 * Determine the first node on a uvhub. 'Nodes' are used for kernel
 * memory allocation.
 */
static int __init uvhub_to_first_node(int uvhub)
{
	int node, b;

	for_each_online_node(node) {
		b = uv_node_to_blade_id(node);
		if (uvhub == b)
			return node;
	}
	return -1;
}
/*
 * Determine the apicid of the first cpu on a uvhub.
 */
static int __init uvhub_to_first_apicid(int uvhub)
{
	int cpu;

	for_each_present_cpu(cpu)
		if (uvhub == uv_cpu_to_blade_id(cpu))
			return per_cpu(x86_cpu_to_apicid, cpu);
	return -1;
}
/*
 * Free a software acknowledge hardware resource by clearing its Pending
 * bit. This will return a reply to the sender.
 * If the message has timed out, a reply has already been sent by the
 * hardware but the resource has not been released. In that case our
 * clear of the Timeout bit (as well) will free the resource. No reply will
 * be sent (the hardware will only do one reply per message).
 */
static void reply_to_message(struct msg_desc *mdp, struct bau_control *bcp)
{
	unsigned long dw;
	struct bau_pq_entry *msg;

	msg = mdp->msg;
	if (!msg->canceled) {
		dw = (msg->swack_vec << UV_SW_ACK_NPENDING) | msg->swack_vec;
		write_mmr_sw_ack(dw);
	}
	msg->replied_to = 1;
	msg->swack_vec = 0;
}
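
/*
 * For illustration (assuming UV_SW_ACK_NPENDING is 8): a message holding
 * software-ack resource bit 0x04 is released by writing 0x0404, which
 * clears both the Timeout copy (upper half) and the Pending copy (lower
 * half) of that resource in the software-acknowledge MMR.
 */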
/*
 * Process the receipt of a RETRY message
 */
static void bau_process_retry_msg(struct msg_desc *mdp,
					struct bau_control *bcp)
{
	int i;
	int cancel_count = 0;
	unsigned long msg_res;
	unsigned long mmr = 0;
	struct bau_pq_entry *msg = mdp->msg;
	struct bau_pq_entry *msg2;
	struct ptc_stats *stat = bcp->statp;

	stat->d_retries++;
	/*
	 * cancel any message from msg+1 to the retry itself
	 */
	for (msg2 = msg+1, i = 0; i < DEST_Q_SIZE; msg2++, i++) {
		if (msg2 > mdp->queue_last)
			msg2 = mdp->queue_first;
		if (msg2 == msg)
			break;

		/* same conditions for cancellation as do_reset */
		if ((msg2->replied_to == 0) && (msg2->canceled == 0) &&
		    (msg2->swack_vec) && ((msg2->swack_vec &
			msg->swack_vec) == 0) &&
		    (msg2->sending_cpu == msg->sending_cpu) &&
		    (msg2->msg_type != MSG_NOOP)) {
			mmr = read_mmr_sw_ack();
			msg_res = msg2->swack_vec;
			/*
			 * This is a message retry; clear the resources held
			 * by the previous message only if they timed out.
			 * If it has not timed out we have an unexpected
			 * situation to report.
			 */
			if (mmr & (msg_res << UV_SW_ACK_NPENDING)) {
				unsigned long mr;
				/*
				 * is the resource timed out?
				 * make everyone ignore the cancelled message.
				 */
				msg2->canceled = 1;
				stat->d_canceled++;
				cancel_count++;
				mr = (msg_res << UV_SW_ACK_NPENDING) | msg_res;
				write_mmr_sw_ack(mr);
			}
		}
	}
	if (!cancel_count)
		stat->d_nocanceled++;
}
/*
 * Do all the things a cpu should do for a TLB shootdown message.
 * Other cpu's may come here at the same time for this message.
 */
static void bau_process_message(struct msg_desc *mdp,
					struct bau_control *bcp)
{
	short socket_ack_count = 0;
	short *sp;
	struct atomic_short *asp;
	struct ptc_stats *stat = bcp->statp;
	struct bau_pq_entry *msg = mdp->msg;
	struct bau_control *smaster = bcp->socket_master;

	/*
	 * This must be a normal message, or retry of a normal message
	 */
	if (msg->address == TLB_FLUSH_ALL) {
		local_flush_tlb();
		stat->d_alltlb++;
	} else {
		__flush_tlb_one(msg->address);
		stat->d_onetlb++;
	}
	stat->d_requestee++;

	/*
	 * One cpu on each uvhub has the additional job on a RETRY
	 * of releasing the resource held by the message that is
	 * being retried.  That message is identified by sending
	 * cpu number.
	 */
	if (msg->msg_type == MSG_RETRY && bcp == bcp->uvhub_master)
		bau_process_retry_msg(mdp, bcp);

	/*
	 * This is a swack message, so we have to reply to it.
	 * Count each responding cpu on the socket. This avoids
	 * pinging the count's cache line back and forth between
	 * the sockets.
	 */
	sp = &smaster->socket_acknowledge_count[mdp->msg_slot];
	asp = (struct atomic_short *)sp;
	socket_ack_count = atom_asr(1, asp);
	if (socket_ack_count == bcp->cpus_in_socket) {
		int msg_ack_count;
		/*
		 * Both sockets dump their completed count total into
		 * the message's count.
		 */
		smaster->socket_acknowledge_count[mdp->msg_slot] = 0;
		asp = (struct atomic_short *)&msg->acknowledge_count;
		msg_ack_count = atom_asr(socket_ack_count, asp);

		if (msg_ack_count == bcp->cpus_in_uvhub) {
			/*
			 * All cpus in uvhub saw it; reply
			 */
			reply_to_message(mdp, bcp);
		}
	}
}
/*
 * Determine the first cpu on a uvhub.
 */
static int uvhub_to_first_cpu(int uvhub)
{
	int cpu;

	for_each_present_cpu(cpu)
		if (uvhub == uv_cpu_to_blade_id(cpu))
			return cpu;
	return -1;
}
/*
 * Last resort when we get a large number of destination timeouts is
 * to clear resources held by a given cpu.
 * Do this with IPI so that all messages in the BAU message queue
 * can be identified by their nonzero swack_vec field.
 *
 * This is entered for a single cpu on the uvhub.
 * The sender wants this uvhub to free a specific message's
 * swack resources.
 */
static void do_reset(void *ptr)
{
	int i;
	struct bau_control *bcp = &per_cpu(bau_control, smp_processor_id());
	struct reset_args *rap = (struct reset_args *)ptr;
	struct bau_pq_entry *msg;
	struct ptc_stats *stat = bcp->statp;

	stat->d_resets++;
	/*
	 * We're looking for the given sender, and
	 * will free its swack resource.
	 * If all cpu's finally responded after the timeout, its
	 * message 'replied_to' was set.
	 */
	for (msg = bcp->queue_first, i = 0; i < DEST_Q_SIZE; msg++, i++) {
		unsigned long msg_res;
		/* do_reset: same conditions for cancellation as
		   bau_process_retry_msg() */
		if ((msg->replied_to == 0) &&
		    (msg->canceled == 0) &&
		    (msg->sending_cpu == rap->sender) &&
		    (msg->swack_vec) &&
		    (msg->msg_type != MSG_NOOP)) {
			unsigned long mmr;
			unsigned long mr;
			/*
			 * make everyone else ignore this message
			 */
			msg->canceled = 1;
			/*
			 * only reset the resource if it is still pending
			 */
			mmr = read_mmr_sw_ack();
			msg_res = msg->swack_vec;
			mr = (msg_res << UV_SW_ACK_NPENDING) | msg_res;
			if (mmr & msg_res) {
				stat->d_rcanceled++;
				write_mmr_sw_ack(mr);
			}
		}
	}
}
/*
 * Use IPI to get all target uvhubs to release resources held by
 * a given sending cpu number.
 */
static void reset_with_ipi(struct bau_targ_hubmask *distribution, int sender)
{
	int uvhub;
	int maskbits;
	int cpu;
	cpumask_t mask;
	struct reset_args reset_args;

	reset_args.sender = sender;
	cpus_clear(mask);
	/* find a single cpu for each uvhub in this distribution mask */
	maskbits = sizeof(struct bau_targ_hubmask) * BITSPERBYTE;
	for (uvhub = 0; uvhub < maskbits; uvhub++) {
		if (!bau_uvhub_isset(uvhub, distribution))
			continue;
		/* find a cpu for this uvhub */
		cpu = uvhub_to_first_cpu(uvhub);
		cpu_set(cpu, mask);
	}

	/* IPI all cpus; preemption is already disabled */
	smp_call_function_many(&mask, do_reset, (void *)&reset_args, 1);
}
static inline unsigned long cycles_2_us(unsigned long long cyc)
{
	unsigned long long ns;
	unsigned long us;
	int cpu = smp_processor_id();

	ns = (cyc * per_cpu(cyc2ns, cpu)) >> CYC2NS_SCALE_FACTOR;
	us = ns / 1000;
	return us;
}
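
/*
 * Note: cycles_2_us() and the sec_2_cycles()/usec_2_cycles() helpers below
 * are inverses built on the same per-cpu cyc2ns scale factor:
 *   us  = ((cyc * cyc2ns) >> CYC2NS_SCALE_FACTOR) / 1000
 *   cyc = (ns << CYC2NS_SCALE_FACTOR) / cyc2ns
 */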
/*
 * wait for all cpus on this hub to finish their sends and go quiet
 * leaves uvhub_quiesce set so that no new broadcasts are started by
 * bau_flush_send_and_wait()
 */
static inline void quiesce_local_uvhub(struct bau_control *hmaster)
{
	atom_asr(1, (struct atomic_short *)&hmaster->uvhub_quiesce);
}

/*
 * mark this quiet-requestor as done
 */
static inline void end_uvhub_quiesce(struct bau_control *hmaster)
{
	atom_asr(-1, (struct atomic_short *)&hmaster->uvhub_quiesce);
}
static unsigned long uv1_read_status(unsigned long mmr_offset, int right_shift)
{
	unsigned long descriptor_status;

	descriptor_status = uv_read_local_mmr(mmr_offset);
	descriptor_status >>= right_shift;
	descriptor_status &= UV_ACT_STATUS_MASK;
	return descriptor_status;
}
/*
 * Wait for completion of a broadcast software ack message
 * return COMPLETE, RETRY(PLUGGED or TIMEOUT) or GIVEUP
 */
static int uv1_wait_completion(struct bau_desc *bau_desc,
				unsigned long mmr_offset, int right_shift,
				struct bau_control *bcp, long try)
{
	unsigned long descriptor_status;
	cycles_t ttm;
	struct ptc_stats *stat = bcp->statp;

	descriptor_status = uv1_read_status(mmr_offset, right_shift);
	/* spin on the status MMR, waiting for it to go idle */
	while ((descriptor_status != DS_IDLE)) {
		/*
		 * Our software ack messages may be blocked because
		 * there are no swack resources available.  As long
		 * as none of them has timed out hardware will NACK
		 * our message and its state will stay IDLE.
		 */
		if (descriptor_status == DS_SOURCE_TIMEOUT) {
			stat->s_stimeout++;
			return FLUSH_GIVEUP;
		} else if (descriptor_status == DS_DESTINATION_TIMEOUT) {
			stat->s_dtimeout++;
			ttm = get_cycles();
			/*
			 * Our retries may be blocked by all destination
			 * swack resources being consumed, and a timeout
			 * pending.  In that case hardware returns the
			 * ERROR that looks like a destination timeout.
			 */
			if (cycles_2_us(ttm - bcp->send_message) < timeout_us) {
				bcp->conseccompletes = 0;
				return FLUSH_RETRY_PLUGGED;
			}

			bcp->conseccompletes = 0;
			return FLUSH_RETRY_TIMEOUT;
		} else {
			/*
			 * descriptor_status is still BUSY
			 */
			cpu_relax();
		}
		descriptor_status = uv1_read_status(mmr_offset, right_shift);
	}
	bcp->conseccompletes++;
	return FLUSH_COMPLETE;
}
/*
 * UV2 has an extra bit of status in the ACTIVATION_STATUS_2 register.
 */
static unsigned long uv2_read_status(unsigned long offset, int rshft, int cpu)
{
	unsigned long descriptor_status;
	unsigned long descriptor_status2;

	descriptor_status = ((read_lmmr(offset) >> rshft) & UV_ACT_STATUS_MASK);
	descriptor_status2 = (read_mmr_uv2_status() >> cpu) & 0x1UL;
	descriptor_status = (descriptor_status << 1) | descriptor_status2;
	return descriptor_status;
}
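
/*
 * Example of the combined UV2 status: if the 2-bit field read from
 * ACTIVATION_STATUS_0/1 is binary 10 and this cpu's ACTIVATION_STATUS_2
 * bit is 1, the value returned above is binary 101 (0x5).
 */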
static int uv2_wait_completion(struct bau_desc *bau_desc,
				unsigned long mmr_offset, int right_shift,
				struct bau_control *bcp, long try)
{
	unsigned long descriptor_stat;
	cycles_t ttm;
	int cpu = bcp->uvhub_cpu;
	struct ptc_stats *stat = bcp->statp;

	descriptor_stat = uv2_read_status(mmr_offset, right_shift, cpu);

	/* spin on the status MMR, waiting for it to go idle */
	while (descriptor_stat != UV2H_DESC_IDLE) {
		/*
		 * Our software ack messages may be blocked because
		 * there are no swack resources available.  As long
		 * as none of them has timed out hardware will NACK
		 * our message and its state will stay IDLE.
		 */
		if ((descriptor_stat == UV2H_DESC_SOURCE_TIMEOUT) ||
		    (descriptor_stat == UV2H_DESC_DEST_STRONG_NACK) ||
		    (descriptor_stat == UV2H_DESC_DEST_PUT_ERR)) {
			stat->s_stimeout++;
			return FLUSH_GIVEUP;
		} else if (descriptor_stat == UV2H_DESC_DEST_TIMEOUT) {
			stat->s_dtimeout++;
			ttm = get_cycles();
			/*
			 * Our retries may be blocked by all destination
			 * swack resources being consumed, and a timeout
			 * pending.  In that case hardware returns the
			 * ERROR that looks like a destination timeout.
			 */
			if (cycles_2_us(ttm - bcp->send_message) < timeout_us) {
				bcp->conseccompletes = 0;
				return FLUSH_RETRY_PLUGGED;
			}
			bcp->conseccompletes = 0;
			return FLUSH_RETRY_TIMEOUT;
		} else {
			/*
			 * descriptor_stat is still BUSY
			 */
			cpu_relax();
		}
		descriptor_stat = uv2_read_status(mmr_offset, right_shift, cpu);
	}
	bcp->conseccompletes++;
	return FLUSH_COMPLETE;
}
/*
 * There are 2 status registers; each an array[32] of 2 bits. Set up for
 * which register to read and position in that register based on cpu in
 * current hub.
 */
static int wait_completion(struct bau_desc *bau_desc,
				struct bau_control *bcp, long try)
{
	int right_shift;
	unsigned long mmr_offset;
	int cpu = bcp->uvhub_cpu;

	if (cpu < UV_CPUS_PER_AS) {
		mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_0;
		right_shift = cpu * UV_ACT_STATUS_SIZE;
	} else {
		mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_1;
		right_shift = ((cpu - UV_CPUS_PER_AS) * UV_ACT_STATUS_SIZE);
	}

	if (is_uv1_hub())
		return uv1_wait_completion(bau_desc, mmr_offset, right_shift,
								bcp, try);
	else
		return uv2_wait_completion(bau_desc, mmr_offset, right_shift,
								bcp, try);
}
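
/*
 * For example (assuming UV_CPUS_PER_AS is 32 and UV_ACT_STATUS_SIZE is 2):
 * hub cpu 5 polls ACTIVATION_STATUS_0 with right_shift 10, while hub cpu 40
 * polls ACTIVATION_STATUS_1 with right_shift 16.
 */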
static inline cycles_t sec_2_cycles(unsigned long sec)
{
	unsigned long ns;
	cycles_t cyc;

	ns = sec * 1000000000;
	cyc = (ns << CYC2NS_SCALE_FACTOR)/(per_cpu(cyc2ns, smp_processor_id()));
	return cyc;
}
/*
 * Our retries are blocked by all destination sw ack resources being
 * in use, and a timeout is pending. In that case hardware immediately
 * returns the ERROR that looks like a destination timeout.
 */
static void destination_plugged(struct bau_desc *bau_desc,
				struct bau_control *bcp,
				struct bau_control *hmaster,
				struct ptc_stats *stat)
{
	udelay(bcp->plugged_delay);
	bcp->plugged_tries++;

	if (bcp->plugged_tries >= bcp->plugsb4reset) {
		bcp->plugged_tries = 0;

		quiesce_local_uvhub(hmaster);

		spin_lock(&hmaster->queue_lock);
		reset_with_ipi(&bau_desc->distribution, bcp->cpu);
		spin_unlock(&hmaster->queue_lock);

		end_uvhub_quiesce(hmaster);

		bcp->ipi_attempts++;
		stat->s_resets_plug++;
	}
}
static void destination_timeout(struct bau_desc *bau_desc,
				struct bau_control *bcp,
				struct bau_control *hmaster,
				struct ptc_stats *stat)
{
	hmaster->max_concurr = 1;
	bcp->timeout_tries++;
	if (bcp->timeout_tries >= bcp->timeoutsb4reset) {
		bcp->timeout_tries = 0;

		quiesce_local_uvhub(hmaster);

		spin_lock(&hmaster->queue_lock);
		reset_with_ipi(&bau_desc->distribution, bcp->cpu);
		spin_unlock(&hmaster->queue_lock);

		end_uvhub_quiesce(hmaster);

		bcp->ipi_attempts++;
		stat->s_resets_timeout++;
	}
}
/*
 * Completions are taking a very long time due to a congested numalink
 * network.
 */
static void disable_for_congestion(struct bau_control *bcp,
					struct ptc_stats *stat)
{
	/* let only one cpu do this disabling */
	spin_lock(&disable_lock);

	if (!baudisabled && bcp->period_requests &&
	    ((bcp->period_time / bcp->period_requests) > congested_cycles)) {
		int tcpu;
		struct bau_control *tbcp;
		/* it becomes this cpu's job to turn on the use of the
		   BAU again */
		baudisabled = 1;
		bcp->set_bau_off = 1;
		bcp->set_bau_on_time = get_cycles();
		bcp->set_bau_on_time += sec_2_cycles(bcp->cong_period);
		stat->s_bau_disabled++;
		for_each_present_cpu(tcpu) {
			tbcp = &per_cpu(bau_control, tcpu);
			tbcp->baudisabled = 1;
		}
	}

	spin_unlock(&disable_lock);
}
static void count_max_concurr(int stat, struct bau_control *bcp,
				struct bau_control *hmaster)
{
	bcp->plugged_tries = 0;
	bcp->timeout_tries = 0;
	if (stat != FLUSH_COMPLETE)
		return;
	if (bcp->conseccompletes <= bcp->complete_threshold)
		return;
	if (hmaster->max_concurr >= hmaster->max_concurr_const)
		return;
	hmaster->max_concurr++;
}
static void record_send_stats(cycles_t time1, cycles_t time2,
		struct bau_control *bcp, struct ptc_stats *stat,
		int completion_status, int try)
{
	cycles_t elapsed;

	if (time2 > time1) {
		elapsed = time2 - time1;
		stat->s_time += elapsed;

		if ((completion_status == FLUSH_COMPLETE) && (try == 1)) {
			bcp->period_requests++;
			bcp->period_time += elapsed;
			if ((elapsed > congested_cycles) &&
			    (bcp->period_requests > bcp->cong_reps))
				disable_for_congestion(bcp, stat);
		}
	}

	if (completion_status == FLUSH_COMPLETE && try > 1)
		stat->s_retriesok++;
	else if (completion_status == FLUSH_GIVEUP)
		stat->s_giveup++;
}
/*
 * Because of a uv1 hardware bug only a limited number of concurrent
 * requests can be made.
 */
static void uv1_throttle(struct bau_control *hmaster, struct ptc_stats *stat)
{
	spinlock_t *lock = &hmaster->uvhub_lock;
	atomic_t *v;

	v = &hmaster->active_descriptor_count;
	if (!atomic_inc_unless_ge(lock, v, hmaster->max_concurr)) {
		stat->s_throttles++;
		do {
			cpu_relax();
		} while (!atomic_inc_unless_ge(lock, v, hmaster->max_concurr));
	}
}
/*
 * Handle the completion status of a message send.
 */
static void handle_cmplt(int completion_status, struct bau_desc *bau_desc,
			struct bau_control *bcp, struct bau_control *hmaster,
			struct ptc_stats *stat)
{
	if (completion_status == FLUSH_RETRY_PLUGGED)
		destination_plugged(bau_desc, bcp, hmaster, stat);
	else if (completion_status == FLUSH_RETRY_TIMEOUT)
		destination_timeout(bau_desc, bcp, hmaster, stat);
}
/*
 * Send a broadcast and wait for it to complete.
 *
 * The flush_mask contains the cpus the broadcast is to be sent to including
 * cpus that are on the local uvhub.
 *
 * Returns 0 if all flushing represented in the mask was done.
 * Returns 1 if it gives up entirely and the original cpu mask is to be
 * returned to the kernel.
 */
int uv_flush_send_and_wait(struct bau_desc *bau_desc,
			struct cpumask *flush_mask, struct bau_control *bcp)
{
	int seq_number = 0;
	int completion_stat = 0;
	long try = 0;
	unsigned long index;
	cycles_t time1;
	cycles_t time2;
	struct ptc_stats *stat = bcp->statp;
	struct bau_control *hmaster = bcp->uvhub_master;

	if (is_uv1_hub())
		uv1_throttle(hmaster, stat);

	while (hmaster->uvhub_quiesce)
		cpu_relax();

	time1 = get_cycles();
	do {
		if (try == 0) {
			bau_desc->header.msg_type = MSG_REGULAR;
			seq_number = bcp->message_number++;
		} else {
			bau_desc->header.msg_type = MSG_RETRY;
			stat->s_retry_messages++;
		}

		bau_desc->header.sequence = seq_number;
		index = (1UL << AS_PUSH_SHIFT) | bcp->uvhub_cpu;
		bcp->send_message = get_cycles();

		write_mmr_activation(index);

		try++;
		completion_stat = wait_completion(bau_desc, bcp, try);

		handle_cmplt(completion_stat, bau_desc, bcp, hmaster, stat);

		if (bcp->ipi_attempts >= bcp->ipi_reset_limit) {
			bcp->ipi_attempts = 0;
			completion_stat = FLUSH_GIVEUP;
			break;
		}
		cpu_relax();
	} while ((completion_stat == FLUSH_RETRY_PLUGGED) ||
		 (completion_stat == FLUSH_RETRY_TIMEOUT));

	time2 = get_cycles();

	count_max_concurr(completion_stat, bcp, hmaster);

	while (hmaster->uvhub_quiesce)
		cpu_relax();

	atomic_dec(&hmaster->active_descriptor_count);

	record_send_stats(time1, time2, bcp, stat, completion_stat, try);

	if (completion_stat == FLUSH_GIVEUP)
		return 1;
	return 0;
}
/*
 * The BAU is disabled. When the disabled time period has expired, the cpu
 * that disabled it must re-enable it.
 * Return 0 if it is re-enabled for all cpus.
 */
static int check_enable(struct bau_control *bcp, struct ptc_stats *stat)
{
	int tcpu;
	struct bau_control *tbcp;

	if (bcp->set_bau_off) {
		if (get_cycles() >= bcp->set_bau_on_time) {
			stat->s_bau_reenabled++;
			baudisabled = 0;
			for_each_present_cpu(tcpu) {
				tbcp = &per_cpu(bau_control, tcpu);
				tbcp->baudisabled = 0;
				tbcp->period_requests = 0;
				tbcp->period_time = 0;
			}
			return 0;
		}
	}
	return -1;
}
static void record_send_statistics(struct ptc_stats *stat, int locals, int hubs,
				int remotes, struct bau_desc *bau_desc)
{
	stat->s_requestor++;
	stat->s_ntargcpu += remotes + locals;
	stat->s_ntargremotes += remotes;
	stat->s_ntarglocals += locals;

	/* uvhub statistics */
	hubs = bau_uvhub_weight(&bau_desc->distribution);
	if (locals) {
		stat->s_ntarglocaluvhub++;
		stat->s_ntargremoteuvhub += (hubs - 1);
	} else
		stat->s_ntargremoteuvhub += hubs;

	stat->s_ntarguvhub += hubs;

	if (hubs >= 16)
		stat->s_ntarguvhub16++;
	else if (hubs >= 8)
		stat->s_ntarguvhub8++;
	else if (hubs >= 4)
		stat->s_ntarguvhub4++;
	else if (hubs >= 2)
		stat->s_ntarguvhub2++;
	else
		stat->s_ntarguvhub1++;
}
/*
 * Translate a cpu mask to the uvhub distribution mask in the BAU
 * activation descriptor.
 */
static int set_distrib_bits(struct cpumask *flush_mask, struct bau_control *bcp,
			struct bau_desc *bau_desc, int *localsp, int *remotesp)
{
	int cpu;
	int pnode;
	int cnt = 0;
	struct hub_and_pnode *hpp;

	for_each_cpu(cpu, flush_mask) {
		/*
		 * The distribution vector is a bit map of pnodes, relative
		 * to the partition base pnode (and the partition base nasid
		 * in the header).
		 * Translate cpu to pnode and hub using a local memory array.
		 */
		hpp = &bcp->socket_master->thp[cpu];
		pnode = hpp->pnode - bcp->partition_base_pnode;
		bau_uvhub_set(pnode, &bau_desc->distribution);
		cnt++;
		if (hpp->uvhub == bcp->uvhub)
			(*localsp)++;
		else
			(*remotesp)++;
	}
	if (!cnt)
		return 1;
	return 0;
}
/*
 * globally purge translation cache of a virtual address or all TLB's
 * @cpumask: mask of all cpu's in which the address is to be removed
 * @mm: mm_struct containing virtual address range
 * @va: virtual address to be removed (or TLB_FLUSH_ALL for all TLB's on cpu)
 * @cpu: the current cpu
 *
 * This is the entry point for initiating any UV global TLB shootdown.
 *
 * Purges the translation caches of all specified processors of the given
 * virtual address, or purges all TLB's on specified processors.
 *
 * The caller has derived the cpumask from the mm_struct.  This function
 * is called only if there are bits set in the mask. (e.g. flush_tlb_page())
 *
 * The cpumask is converted into a uvhubmask of the uvhubs containing
 * those cpus.
 *
 * Note that this function should be called with preemption disabled.
 *
 * Returns NULL if all remote flushing was done.
 * Returns pointer to cpumask if some remote flushing remains to be
 * done.  The returned pointer is valid till preemption is re-enabled.
 */
const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
				struct mm_struct *mm, unsigned long va,
				unsigned int cpu)
{
	int locals = 0;
	int remotes = 0;
	int hubs = 0;
	struct bau_desc *bau_desc;
	struct cpumask *flush_mask;
	struct ptc_stats *stat;
	struct bau_control *bcp;

	/* kernel was booted 'nobau' */
	if (nobau)
		return cpumask;

	bcp = &per_cpu(bau_control, cpu);
	stat = bcp->statp;

	/* bau was disabled due to slow response */
	if (bcp->baudisabled) {
		if (check_enable(bcp, stat))
			return cpumask;
	}

	/*
	 * Each sending cpu has a per-cpu mask which it fills from the caller's
	 * cpu mask.  All cpus are converted to uvhubs and copied to the
	 * activation descriptor.
	 */
	flush_mask = (struct cpumask *)per_cpu(uv_flush_tlb_mask, cpu);
	/* don't actually do a shootdown of the local cpu */
	cpumask_andnot(flush_mask, cpumask, cpumask_of(cpu));

	if (cpu_isset(cpu, *cpumask))
		stat->s_ntargself++;

	bau_desc = bcp->descriptor_base;
	bau_desc += ITEMS_PER_DESC * bcp->uvhub_cpu;
	bau_uvhubs_clear(&bau_desc->distribution, UV_DISTRIBUTION_SIZE);
	if (set_distrib_bits(flush_mask, bcp, bau_desc, &locals, &remotes))
		return NULL;

	record_send_statistics(stat, locals, hubs, remotes, bau_desc);

	bau_desc->payload.address = va;
	bau_desc->payload.sending_cpu = cpu;
	/*
	 * uv_flush_send_and_wait returns 0 if all cpu's were messaged,
	 * or 1 if it gave up and the original cpumask should be returned.
	 */
	if (!uv_flush_send_and_wait(bau_desc, flush_mask, bcp))
		return NULL;
	else
		return cpumask;
}
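
/*
 * A sketch of the expected caller pattern (not part of this file): the
 * arch flush path calls uv_flush_tlb_others() with preemption disabled and
 * falls back to IPIs only for whatever the BAU could not handle:
 *
 *	cpumask = uv_flush_tlb_others(cpumask, mm, va, cpu);
 *	if (cpumask)
 *		flush_tlb_others_ipi(cpumask, mm, va);
 */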
/*
 * The BAU message interrupt comes here. (registered by set_intr_gate)
 *
 * We received a broadcast assist message.
 *
 * Interrupts are disabled; this interrupt could represent
 * the receipt of several messages.
 *
 * All cores/threads on this hub get this interrupt.
 * The last one to see it does the software ack.
 * (the resource will not be freed until noninterruptable cpus see this
 *  interrupt; hardware may timeout the s/w ack and reply ERROR)
 */
void uv_bau_message_interrupt(struct pt_regs *regs)
{
	int count = 0;
	cycles_t time_start;
	struct bau_pq_entry *msg;
	struct bau_control *bcp;
	struct ptc_stats *stat;
	struct msg_desc msgdesc;

	time_start = get_cycles();

	bcp = &per_cpu(bau_control, smp_processor_id());
	stat = bcp->statp;

	msgdesc.queue_first = bcp->queue_first;
	msgdesc.queue_last = bcp->queue_last;

	msg = bcp->bau_msg_head;
	while (msg->swack_vec) {
		count++;

		msgdesc.msg_slot = msg - msgdesc.queue_first;
		msgdesc.swack_slot = ffs(msg->swack_vec) - 1;
		msgdesc.msg = msg;
		bau_process_message(&msgdesc, bcp);

		msg++;
		if (msg > msgdesc.queue_last)
			msg = msgdesc.queue_first;
		bcp->bau_msg_head = msg;
	}
	stat->d_time += (get_cycles() - time_start);
	if (!count)
		stat->d_nomsg++;
	else if (count > 1)
		stat->d_multmsg++;

	ack_APIC_irq();
}

/*
 * Each target uvhub (i.e. a uvhub that has cpu's) needs to have
 * shootdown message timeouts enabled.  The timeout does not cause
 * an interrupt, but causes an error message to be returned to
 * the sender.
 */
static void __init enable_timeouts(void)
{
	int uvhub;
	int nuvhubs;
	int pnode;
	unsigned long mmr_image;

	nuvhubs = uv_num_possible_blades();

	for (uvhub = 0; uvhub < nuvhubs; uvhub++) {
		if (!uv_blade_nr_possible_cpus(uvhub))
			continue;

		pnode = uv_blade_to_pnode(uvhub);
		mmr_image = read_mmr_misc_control(pnode);
		/*
		 * Set the timeout period and then lock it in, in three
		 * steps; captures and locks in the period.
		 *
		 * To program the period, the SOFT_ACK_MODE must be off.
		 */
		mmr_image &= ~(1L << SOFTACK_MSHIFT);
		write_mmr_misc_control(pnode, mmr_image);
		/*
		 * Set the 4-bit period.
		 */
		mmr_image &= ~((unsigned long)0xf << SOFTACK_PSHIFT);
		mmr_image |= (SOFTACK_TIMEOUT_PERIOD << SOFTACK_PSHIFT);
		write_mmr_misc_control(pnode, mmr_image);
		/*
		 * Subsequent reversals of the timebase bit (3) cause an
		 * immediate timeout of one or all INTD resources as
		 * indicated in bits 2:0 (7 causes all of them to timeout).
		 */
		mmr_image |= (1L << SOFTACK_MSHIFT);
		if (is_uv2_hub()) {
			mmr_image |= (1L << UV2_LEG_SHFT);
			mmr_image |= (1L << UV2_EXT_SHFT);
		}
		write_mmr_misc_control(pnode, mmr_image);
	}
}
static void *ptc_seq_start(struct seq_file *file, loff_t *offset)
{
	if (*offset < num_possible_cpus())
		return offset;
	return NULL;
}

static void *ptc_seq_next(struct seq_file *file, void *data, loff_t *offset)
{
	(*offset)++;
	if (*offset < num_possible_cpus())
		return offset;
	return NULL;
}

static void ptc_seq_stop(struct seq_file *file, void *data)
{
}
static inline unsigned long long usec_2_cycles(unsigned long microsec)
{
	unsigned long ns;
	unsigned long long cyc;

	ns = microsec * 1000;
	cyc = (ns << CYC2NS_SCALE_FACTOR)/(per_cpu(cyc2ns, smp_processor_id()));
	return cyc;
}
/*
 * Display the statistics thru /proc/sgi_uv/ptc_statistics
 * 'data' points to the cpu number
 * Note: see the descriptions in stat_description[].
 */
static int ptc_seq_show(struct seq_file *file, void *data)
{
	struct ptc_stats *stat;
	int cpu;

	cpu = *(loff_t *)data;
	if (!cpu) {
		seq_printf(file,
			"# cpu sent stime self locals remotes ncpus localhub ");
		seq_printf(file,
			"remotehub numuvhubs numuvhubs16 numuvhubs8 ");
		seq_printf(file,
			"numuvhubs4 numuvhubs2 numuvhubs1 dto retries rok ");
		seq_printf(file,
			"resetp resett giveup sto bz throt swack recv rtime ");
		seq_printf(file,
			"all one mult none retry canc nocan reset rcan ");
		seq_printf(file,
			"disable enable\n");
	}
	if (cpu < num_possible_cpus() && cpu_online(cpu)) {
		stat = &per_cpu(ptcstats, cpu);
		/* source side statistics */
		seq_printf(file,
			"cpu %d %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld ",
			   cpu, stat->s_requestor, cycles_2_us(stat->s_time),
			   stat->s_ntargself, stat->s_ntarglocals,
			   stat->s_ntargremotes, stat->s_ntargcpu,
			   stat->s_ntarglocaluvhub, stat->s_ntargremoteuvhub,
			   stat->s_ntarguvhub, stat->s_ntarguvhub16);
		seq_printf(file, "%ld %ld %ld %ld %ld ",
			   stat->s_ntarguvhub8, stat->s_ntarguvhub4,
			   stat->s_ntarguvhub2, stat->s_ntarguvhub1,
			   stat->s_dtimeout);
		seq_printf(file, "%ld %ld %ld %ld %ld %ld %ld %ld ",
			   stat->s_retry_messages, stat->s_retriesok,
			   stat->s_resets_plug, stat->s_resets_timeout,
			   stat->s_giveup, stat->s_stimeout,
			   stat->s_busy, stat->s_throttles);

		/* destination side statistics */
		seq_printf(file,
			   "%lx %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld ",
			   read_gmmr_sw_ack(uv_cpu_to_pnode(cpu)),
			   stat->d_requestee, cycles_2_us(stat->d_time),
			   stat->d_alltlb, stat->d_onetlb, stat->d_multmsg,
			   stat->d_nomsg, stat->d_retries, stat->d_canceled,
			   stat->d_nocanceled, stat->d_resets,
			   stat->d_rcanceled);
		seq_printf(file, "%ld %ld\n",
			stat->s_bau_disabled, stat->s_bau_reenabled);
	}
	return 0;
}
/*
 * Display the tunables thru debugfs
 */
static ssize_t tunables_read(struct file *file, char __user *userbuf,
				size_t count, loff_t *ppos)
{
	char *buf;
	int ret;

	buf = kasprintf(GFP_KERNEL, "%s %s %s\n%d %d %d %d %d %d %d %d %d\n",
		"max_concur plugged_delay plugsb4reset",
		"timeoutsb4reset ipi_reset_limit complete_threshold",
		"congested_response_us congested_reps congested_period",
		max_concurr, plugged_delay, plugsb4reset,
		timeoutsb4reset, ipi_reset_limit, complete_threshold,
		congested_respns_us, congested_reps, congested_period);

	if (!buf)
		return -ENOMEM;

	ret = simple_read_from_buffer(userbuf, count, ppos, buf, strlen(buf));
	kfree(buf);
	return ret;
}
/*
 * handle a write to /proc/sgi_uv/ptc_statistics
 * -1: reset the statistics
 *  0: display meaning of the statistics
 */
static ssize_t ptc_proc_write(struct file *file, const char __user *user,
				size_t count, loff_t *data)
{
	int cpu;
	int i;
	int elements;
	long input_arg;
	char optstr[64];
	struct ptc_stats *stat;

	if (count == 0 || count > sizeof(optstr))
		return -EINVAL;
	if (copy_from_user(optstr, user, count))
		return -EFAULT;
	optstr[count - 1] = '\0';

	if (strict_strtol(optstr, 10, &input_arg) < 0) {
		printk(KERN_DEBUG "%s is invalid\n", optstr);
		return -EINVAL;
	}

	if (input_arg == 0) {
		elements = sizeof(stat_description)/sizeof(*stat_description);
		printk(KERN_DEBUG "# cpu: cpu number\n");
		printk(KERN_DEBUG "Sender statistics:\n");
		for (i = 0; i < elements; i++)
			printk(KERN_DEBUG "%s\n", stat_description[i]);
	} else if (input_arg == -1) {
		for_each_present_cpu(cpu) {
			stat = &per_cpu(ptcstats, cpu);
			memset(stat, 0, sizeof(struct ptc_stats));
		}
	}

	return count;
}
static int local_atoi(const char *name)
{
	int val = 0;

	for (;; name++) {
		switch (*name) {
		case '0' ... '9':
			val = 10*val+(*name-'0');
			break;
		default:
			return val;
		}
	}
}
/*
 * Parse the values written to /sys/kernel/debug/sgi_uv/bau_tunables.
 * Zero values reset them to defaults.
 */
static int parse_tunables_write(struct bau_control *bcp, char *instr,
				int count)
{
	char *p;
	char *q;
	int cnt = 0;
	int val;
	int e = sizeof(tunables) / sizeof(*tunables);

	p = instr + strspn(instr, WHITESPACE);
	q = p;
	for (; *p; p = q + strspn(q, WHITESPACE)) {
		q = p + strcspn(p, WHITESPACE);
		cnt++;
		if (q == p)
			break;
	}
	if (cnt != e) {
		printk(KERN_INFO "bau tunable error: should be %d values\n", e);
		return -EINVAL;
	}

	p = instr + strspn(instr, WHITESPACE);
	q = p;
	for (cnt = 0; *p; p = q + strspn(q, WHITESPACE), cnt++) {
		q = p + strcspn(p, WHITESPACE);
		val = local_atoi(p);
		switch (cnt) {
		case 0:
			if (val == 0) {
				max_concurr = MAX_BAU_CONCURRENT;
				max_concurr_const = MAX_BAU_CONCURRENT;
				continue;
			}
			if (val < 1 || val > bcp->cpus_in_uvhub) {
				printk(KERN_DEBUG
				"Error: BAU max concurrent %d is invalid\n",
				val);
				return -EINVAL;
			}
			max_concurr = val;
			max_concurr_const = val;
			continue;
		default:
			if (val == 0)
				*tunables[cnt].tunp = tunables[cnt].deflt;
			else
				*tunables[cnt].tunp = val;
			continue;
		}
	}
	return 0;
}
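
/*
 * Usage example (derived from the parsing above): the bau_tunables file
 * takes nine whitespace-separated integers in the order printed by
 * tunables_read(); a zero in any position restores that tunable's
 * compiled-in default, e.g.
 *	echo "0 0 0 0 0 0 0 0 0" > /sys/kernel/debug/sgi_uv/bau_tunables
 * resets every tunable.
 */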
/*
 * Handle a write to debugfs. (/sys/kernel/debug/sgi_uv/bau_tunables)
 */
static ssize_t tunables_write(struct file *file, const char __user *user,
				size_t count, loff_t *data)
{
	int cpu;
	int ret;
	char instr[100];
	struct bau_control *bcp;

	if (count == 0 || count > sizeof(instr)-1)
		return -EINVAL;
	if (copy_from_user(instr, user, count))
		return -EFAULT;

	instr[count] = '\0';

	bcp = &per_cpu(bau_control, smp_processor_id());

	ret = parse_tunables_write(bcp, instr, count);
	if (ret)
		return ret;

	for_each_present_cpu(cpu) {
		bcp = &per_cpu(bau_control, cpu);
		bcp->max_concurr = max_concurr;
		bcp->max_concurr_const = max_concurr;
		bcp->plugged_delay = plugged_delay;
		bcp->plugsb4reset = plugsb4reset;
		bcp->timeoutsb4reset = timeoutsb4reset;
		bcp->ipi_reset_limit = ipi_reset_limit;
		bcp->complete_threshold = complete_threshold;
		bcp->cong_response_us = congested_respns_us;
		bcp->cong_reps = congested_reps;
		bcp->cong_period = congested_period;
	}
	return count;
}
static const struct seq_operations uv_ptc_seq_ops = {
	.start		= ptc_seq_start,
	.next		= ptc_seq_next,
	.stop		= ptc_seq_stop,
	.show		= ptc_seq_show
};

static int ptc_proc_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &uv_ptc_seq_ops);
}

static int tunables_open(struct inode *inode, struct file *file)
{
	return 0;
}

static const struct file_operations proc_uv_ptc_operations = {
	.open		= ptc_proc_open,
	.read		= seq_read,
	.write		= ptc_proc_write,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static const struct file_operations tunables_fops = {
	.open		= tunables_open,
	.read		= tunables_read,
	.write		= tunables_write,
	.llseek		= default_llseek,
};
static int __init uv_ptc_init(void)
{
	struct proc_dir_entry *proc_uv_ptc;

	if (!is_uv_system())
		return 0;

	proc_uv_ptc = proc_create(UV_PTC_BASENAME, 0444, NULL,
				  &proc_uv_ptc_operations);
	if (!proc_uv_ptc) {
		printk(KERN_ERR "unable to create %s proc entry\n",
		       UV_PTC_BASENAME);
		return -EINVAL;
	}

	tunables_dir = debugfs_create_dir(UV_BAU_TUNABLES_DIR, NULL);
	if (!tunables_dir) {
		printk(KERN_ERR "unable to create debugfs directory %s\n",
		       UV_BAU_TUNABLES_DIR);
		return -EINVAL;
	}
	tunables_file = debugfs_create_file(UV_BAU_TUNABLES_FILE, 0600,
					tunables_dir, NULL, &tunables_fops);
	if (!tunables_file) {
		printk(KERN_ERR "unable to create debugfs file %s\n",
		       UV_BAU_TUNABLES_FILE);
		return -EINVAL;
	}
	return 0;
}
/*
 * Initialize the sending side's sending buffers.
 */
static void activation_descriptor_init(int node, int pnode, int base_pnode)
{
	int i;
	int cpu;
	unsigned long pa;
	unsigned long m;
	unsigned long n;
	size_t dsize;
	struct bau_desc *bau_desc;
	struct bau_desc *bd2;
	struct bau_control *bcp;

	/*
	 * each bau_desc is 64 bytes; there are 8 (ITEMS_PER_DESC)
	 * per cpu; and one per cpu on the uvhub (ADP_SZ)
	 */
	dsize = sizeof(struct bau_desc) * ADP_SZ * ITEMS_PER_DESC;
	bau_desc = kmalloc_node(dsize, GFP_KERNEL, node);
	BUG_ON(!bau_desc);

	pa = uv_gpa(bau_desc); /* need the real nasid*/
	n = pa >> uv_nshift;
	m = pa & uv_mmask;

	/* the 14-bit pnode */
	write_mmr_descriptor_base(pnode, (n << UV_DESC_PSHIFT | m));
	/*
	 * Initializing all 8 (ITEMS_PER_DESC) descriptors for each
	 * cpu even though we only use the first one; one descriptor can
	 * describe a broadcast to 256 uv hubs.
	 */
	for (i = 0, bd2 = bau_desc; i < (ADP_SZ * ITEMS_PER_DESC); i++, bd2++) {
		memset(bd2, 0, sizeof(struct bau_desc));
		bd2->header.swack_flag = 1;
		/*
		 * The base_dest_nasid set in the message header is the nasid
		 * of the first uvhub in the partition. The bit map will
		 * indicate destination pnode numbers relative to that base.
		 * They may not be consecutive if nasid striding is being used.
		 */
		bd2->header.base_dest_nasid = UV_PNODE_TO_NASID(base_pnode);
		bd2->header.dest_subnodeid = UV_LB_SUBNODEID;
		bd2->header.command = UV_NET_ENDPOINT_INTD;
		bd2->header.int_both = 1;
		/*
		 * all others need to be set to zero:
		 *   fairness chaining multilevel count replied_to
		 */
	}
	for_each_present_cpu(cpu) {
		if (pnode != uv_blade_to_pnode(uv_cpu_to_blade_id(cpu)))
			continue;
		bcp = &per_cpu(bau_control, cpu);
		bcp->descriptor_base = bau_desc;
	}
}
/*
 * initialize the destination side's receiving buffers
 * entered for each uvhub in the partition
 * - node is first node (kernel memory notion) on the uvhub
 * - pnode is the uvhub's physical identifier
 */
static void pq_init(int node, int pnode)
{
	int cpu;
	size_t plsize;
	char *cp;
	void *vp;
	unsigned long pn;
	unsigned long first;
	unsigned long pn_first;
	unsigned long last;
	struct bau_pq_entry *pqp;
	struct bau_control *bcp;

	plsize = (DEST_Q_SIZE + 1) * sizeof(struct bau_pq_entry);
	vp = kmalloc_node(plsize, GFP_KERNEL, node);
	pqp = (struct bau_pq_entry *)vp;
	BUG_ON(!pqp);

	cp = (char *)pqp + 31;
	pqp = (struct bau_pq_entry *)(((unsigned long)cp >> 5) << 5);

	for_each_present_cpu(cpu) {
		if (pnode != uv_cpu_to_pnode(cpu))
			continue;
		/* for every cpu on this pnode: */
		bcp = &per_cpu(bau_control, cpu);
		bcp->queue_first	= pqp;
		bcp->bau_msg_head	= pqp;
		bcp->queue_last		= pqp + (DEST_Q_SIZE - 1);
	}
	/*
	 * need the pnode of where the memory was really allocated
	 */
	pn = uv_gpa(pqp) >> uv_nshift;
	first = uv_physnodeaddr(pqp);
	pn_first = ((unsigned long)pn << UV_PAYLOADQ_PNODE_SHIFT) | first;
	last = uv_physnodeaddr(pqp + (DEST_Q_SIZE - 1));
	write_mmr_payload_first(pnode, pn_first);
	write_mmr_payload_tail(pnode, first);
	write_mmr_payload_last(pnode, last);

	/* in effect, all msg_type's are set to MSG_NOOP */
	memset(pqp, 0, sizeof(struct bau_pq_entry) * DEST_Q_SIZE);
}
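
/*
 * Note on the alignment above: kmalloc_node() gives no 32-byte alignment
 * guarantee, so pq_init() over-allocates by one entry, advances the pointer
 * by 31 bytes and masks off the low 5 bits to land the payload queue on a
 * 32-byte boundary (each bau_pq_entry is 32 bytes, hence the +1 entry of
 * slack in plsize).
 */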
/*
 * Initialization of each UV hub's structures
 */
static void __init init_uvhub(int uvhub, int vector, int base_pnode)
{
	int node;
	int pnode;
	unsigned long apicid;

	node = uvhub_to_first_node(uvhub);
	pnode = uv_blade_to_pnode(uvhub);

	activation_descriptor_init(node, pnode, base_pnode);

	pq_init(node, pnode);
	/*
	 * The below initialization can't be in firmware because the
	 * messaging IRQ will be determined by the OS.
	 */
	apicid = uvhub_to_first_apicid(uvhub) | uv_apicid_hibits;
	write_mmr_data_config(pnode, ((apicid << 32) | vector));
}
/*
 * We will set BAU_MISC_CONTROL with a timeout period.
 * But the BIOS has set UVH_AGING_PRESCALE_SEL and UVH_TRANSACTION_TIMEOUT.
 * So the destination timeout period has to be calculated from them.
 */
static int calculate_destination_timeout(void)
{
	unsigned long mmr_image;
	int mult1;
	int mult2;
	int index;
	int base;
	int ret;
	unsigned long ts_ns;

	if (is_uv1_hub()) {
		mult1 = SOFTACK_TIMEOUT_PERIOD & BAU_MISC_CONTROL_MULT_MASK;
		mmr_image = uv_read_local_mmr(UVH_AGING_PRESCALE_SEL);
		index = (mmr_image >> BAU_URGENCY_7_SHIFT) & BAU_URGENCY_7_MASK;
		mmr_image = uv_read_local_mmr(UVH_TRANSACTION_TIMEOUT);
		mult2 = (mmr_image >> BAU_TRANS_SHIFT) & BAU_TRANS_MASK;
		base = timeout_base_ns[index];
		ts_ns = base * mult1 * mult2;
		ret = ts_ns / 1000;
	} else {
		/* 4 bits  0/1 for 10/80us, 3 bits of multiplier */
		mmr_image = uv_read_local_mmr(UVH_AGING_PRESCALE_SEL);
		mmr_image = (mmr_image & UV_SA_MASK) >> UV_SA_SHFT;
		if (mmr_image & (1L << UV2_ACK_UNITS_SHFT))
			mult1 = 80;
		else
			mult1 = 10;
		base = mmr_image & UV2_ACK_MASK;
		ret = mult1 * base;
	}
	return ret;
}
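
/*
 * So the resulting destination timeout, in microseconds, is:
 *   UV1: timeout_base_ns[index] * mult1 * mult2 / 1000
 *   UV2: (10 or 80, per the ACK_UNITS bit) * base
 */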
static void __init init_per_cpu_tunables(void)
{
	int cpu;
	struct bau_control *bcp;

	for_each_present_cpu(cpu) {
		bcp = &per_cpu(bau_control, cpu);
		bcp->baudisabled		= 0;
		bcp->statp			= &per_cpu(ptcstats, cpu);
		/* time interval to catch a hardware stay-busy bug */
		bcp->timeout_interval		= usec_2_cycles(2*timeout_us);
		bcp->max_concurr		= max_concurr;
		bcp->max_concurr_const		= max_concurr;
		bcp->plugged_delay		= plugged_delay;
		bcp->plugsb4reset		= plugsb4reset;
		bcp->timeoutsb4reset		= timeoutsb4reset;
		bcp->ipi_reset_limit		= ipi_reset_limit;
		bcp->complete_threshold		= complete_threshold;
		bcp->cong_response_us		= congested_respns_us;
		bcp->cong_reps			= congested_reps;
		bcp->cong_period		= congested_period;
	}
}
/*
 * Scan all cpus to collect blade and socket summaries.
 */
static int __init get_cpu_topology(int base_pnode,
					struct uvhub_desc *uvhub_descs,
					unsigned char *uvhub_mask)
{
	int cpu;
	int pnode;
	int uvhub;
	int socket;
	struct bau_control *bcp;
	struct uvhub_desc *bdp;
	struct socket_desc *sdp;

	for_each_present_cpu(cpu) {
		bcp = &per_cpu(bau_control, cpu);

		memset(bcp, 0, sizeof(struct bau_control));

		pnode = uv_cpu_hub_info(cpu)->pnode;
		if ((pnode - base_pnode) >= UV_DISTRIBUTION_SIZE) {
			printk(KERN_EMERG
				"cpu %d pnode %d-%d beyond %d; BAU disabled\n",
				cpu, pnode, base_pnode, UV_DISTRIBUTION_SIZE);
			return 1;
		}

		bcp->osnode = cpu_to_node(cpu);
		bcp->partition_base_pnode = base_pnode;

		uvhub = uv_cpu_hub_info(cpu)->numa_blade_id;
		*(uvhub_mask + (uvhub/8)) |= (1 << (uvhub%8));
		bdp = &uvhub_descs[uvhub];

		bdp->num_cpus++;
		bdp->uvhub = uvhub;
		bdp->pnode = pnode;

		/* kludge: 'assuming' one node per socket, and assuming that
		   disabling a socket just leaves a gap in node numbers */
		socket = bcp->osnode & 1;
		bdp->socket_mask |= (1 << socket);
		sdp = &bdp->socket[socket];
		sdp->cpu_number[sdp->num_cpus] = cpu;
		sdp->num_cpus++;
		if (sdp->num_cpus > MAX_CPUS_PER_SOCKET) {
			printk(KERN_EMERG "%d cpus per socket invalid\n",
				sdp->num_cpus);
			return 1;
		}
	}
	return 0;
}

/*
 * Each socket is to get a local array of pnodes/hubs.
 */
static void make_per_cpu_thp(struct bau_control *smaster)
{
	int cpu;
	size_t hpsz = sizeof(struct hub_and_pnode) * num_possible_cpus();

	smaster->thp = kmalloc_node(hpsz, GFP_KERNEL, smaster->osnode);
	memset(smaster->thp, 0, hpsz);
	for_each_present_cpu(cpu) {
		smaster->thp[cpu].pnode = uv_cpu_hub_info(cpu)->pnode;
		smaster->thp[cpu].uvhub = uv_cpu_hub_info(cpu)->numa_blade_id;
	}
}
/*
 * Initialize all the per_cpu information for the cpu's on a given socket,
 * given what has been gathered into the socket_desc struct.
 * And reports the chosen hub and socket masters back to the caller.
 */
static int scan_sock(struct socket_desc *sdp, struct uvhub_desc *bdp,
			struct bau_control **smasterp,
			struct bau_control **hmasterp)
{
	int i;
	int cpu;
	struct bau_control *bcp;

	for (i = 0; i < sdp->num_cpus; i++) {
		cpu = sdp->cpu_number[i];
		bcp = &per_cpu(bau_control, cpu);
		bcp->cpu = cpu;
		if (i == 0) {
			*smasterp = bcp;
			if (!(*hmasterp))
				*hmasterp = bcp;
		}
		bcp->cpus_in_uvhub = bdp->num_cpus;
		bcp->cpus_in_socket = sdp->num_cpus;
		bcp->socket_master = *smasterp;
		bcp->uvhub = bdp->uvhub;
		bcp->uvhub_master = *hmasterp;
		bcp->uvhub_cpu = uv_cpu_hub_info(cpu)->blade_processor_id;
		if (bcp->uvhub_cpu >= MAX_CPUS_PER_UVHUB) {
			printk(KERN_EMERG "%d cpus per uvhub invalid\n",
				bcp->uvhub_cpu);
			return 1;
		}
	}
	return 0;
}
/*
 * Summarize the blade and socket topology into the per_cpu structures.
 */
static int __init summarize_uvhub_sockets(int nuvhubs,
			struct uvhub_desc *uvhub_descs,
			unsigned char *uvhub_mask)
{
	int socket;
	int uvhub;
	unsigned short socket_mask;

	for (uvhub = 0; uvhub < nuvhubs; uvhub++) {
		struct uvhub_desc *bdp;
		struct bau_control *smaster = NULL;
		struct bau_control *hmaster = NULL;

		if (!(*(uvhub_mask + (uvhub/8)) & (1 << (uvhub%8))))
			continue;

		bdp = &uvhub_descs[uvhub];
		socket_mask = bdp->socket_mask;
		socket = 0;
		while (socket_mask) {
			struct socket_desc *sdp;
			if ((socket_mask & 1)) {
				sdp = &bdp->socket[socket];
				if (scan_sock(sdp, bdp, &smaster, &hmaster))
					return 1;
			}
			socket++;
			socket_mask = (socket_mask >> 1);
			make_per_cpu_thp(smaster);
		}
	}
	return 0;
}
/*
 * initialize the bau_control structure for each cpu
 */
static int __init init_per_cpu(int nuvhubs, int base_part_pnode)
{
	unsigned char *uvhub_mask;
	void *vp;
	struct uvhub_desc *uvhub_descs;

	timeout_us = calculate_destination_timeout();

	vp = kmalloc(nuvhubs * sizeof(struct uvhub_desc), GFP_KERNEL);
	uvhub_descs = (struct uvhub_desc *)vp;
	memset(uvhub_descs, 0, nuvhubs * sizeof(struct uvhub_desc));
	uvhub_mask = kzalloc((nuvhubs+7)/8, GFP_KERNEL);

	if (get_cpu_topology(base_part_pnode, uvhub_descs, uvhub_mask))
		return 1;

	if (summarize_uvhub_sockets(nuvhubs, uvhub_descs, uvhub_mask))
		return 1;

	kfree(uvhub_descs);
	kfree(uvhub_mask);
	init_per_cpu_tunables();
	return 0;
}
/*
 * Initialization of BAU-related structures
 */
static int __init uv_bau_init(void)
{
	int uvhub;
	int pnode;
	int nuvhubs;
	int cur_cpu;
	int cpus;
	int vector;
	cpumask_var_t *mask;

	if (!is_uv_system())
		return 0;

	if (nobau)
		return 0;

	for_each_possible_cpu(cur_cpu) {
		mask = &per_cpu(uv_flush_tlb_mask, cur_cpu);
		zalloc_cpumask_var_node(mask, GFP_KERNEL, cpu_to_node(cur_cpu));
	}

	uv_nshift = uv_hub_info->m_val;
	uv_mmask = (1UL << uv_hub_info->m_val) - 1;
	nuvhubs = uv_num_possible_blades();
	spin_lock_init(&disable_lock);
	congested_cycles = usec_2_cycles(congested_respns_us);

	uv_base_pnode = 0x7fffffff;
	for (uvhub = 0; uvhub < nuvhubs; uvhub++) {
		cpus = uv_blade_nr_possible_cpus(uvhub);
		if (cpus && (uv_blade_to_pnode(uvhub) < uv_base_pnode))
			uv_base_pnode = uv_blade_to_pnode(uvhub);
	}

	if (init_per_cpu(nuvhubs, uv_base_pnode)) {
		nobau = 1;
		return 0;
	}

	vector = UV_BAU_MESSAGE;
	for_each_possible_blade(uvhub)
		if (uv_blade_nr_possible_cpus(uvhub))
			init_uvhub(uvhub, vector, uv_base_pnode);

	enable_timeouts();
	alloc_intr_gate(vector, uv_bau_message_intr1);

	for_each_possible_blade(uvhub) {
		if (uv_blade_nr_possible_cpus(uvhub)) {
			unsigned long val;
			unsigned long mmr;
			pnode = uv_blade_to_pnode(uvhub);
			/* INIT the bau */
			val = 1L << 63;
			write_gmmr_activation(pnode, val);
			mmr = 1; /* should be 1 to broadcast to both sockets */
			write_mmr_data_broadcast(pnode, mmr);
		}
	}

	return 0;
}
core_initcall(uv_bau_init);
fs_initcall(uv_ptc_init);