arch/x86/platform/uv/tlb_uv.c
/*
 * SGI UltraViolet TLB flush routines.
 *
 * (c) 2008-2011 Cliff Wickman <cpw@sgi.com>, SGI.
 *
 * This code is released under the GNU General Public License version 2 or
 * later.
 */
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/debugfs.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>

#include <asm/mmu_context.h>
#include <asm/uv/uv.h>
#include <asm/uv/uv_mmrs.h>
#include <asm/uv/uv_hub.h>
#include <asm/uv/uv_bau.h>
#include <asm/apic.h>
#include <asm/idle.h>
#include <asm/tsc.h>
#include <asm/irq_vectors.h>
#include <asm/timer.h>

/* timeouts in nanoseconds (indexed by UVH_AGING_PRESCALE_SEL urgency7 30:28) */
static int timeout_base_ns[] = {
		20,
		160,
		1280,
		10240,
		81920,
		655360,
		5242880,
		167772160
};

static int timeout_us;
static int nobau;
static int baudisabled;
static spinlock_t disable_lock;
static cycles_t congested_cycles;

/* tunables: */
static int max_concurr = MAX_BAU_CONCURRENT;
static int max_concurr_const = MAX_BAU_CONCURRENT;
static int plugged_delay = PLUGGED_DELAY;
static int plugsb4reset = PLUGSB4RESET;
static int timeoutsb4reset = TIMEOUTSB4RESET;
static int ipi_reset_limit = IPI_RESET_LIMIT;
static int complete_threshold = COMPLETE_THRESHOLD;
static int congested_respns_us = CONGESTED_RESPONSE_US;
static int congested_reps = CONGESTED_REPS;
static int congested_period = CONGESTED_PERIOD;

static struct tunables tunables[] = {
	{&max_concurr, MAX_BAU_CONCURRENT}, /* must be [0] */
	{&plugged_delay, PLUGGED_DELAY},
	{&plugsb4reset, PLUGSB4RESET},
	{&timeoutsb4reset, TIMEOUTSB4RESET},
	{&ipi_reset_limit, IPI_RESET_LIMIT},
	{&complete_threshold, COMPLETE_THRESHOLD},
	{&congested_respns_us, CONGESTED_RESPONSE_US},
	{&congested_reps, CONGESTED_REPS},
	{&congested_period, CONGESTED_PERIOD}
};

static struct dentry *tunables_dir;
static struct dentry *tunables_file;

/* these correspond to the statistics printed by ptc_seq_show() */
static char *stat_description[] = {
	"sent: number of shootdown messages sent",
	"stime: time spent sending messages",
	"numuvhubs: number of hubs targeted with shootdown",
	"numuvhubs16: number times 16 or more hubs targeted",
	"numuvhubs8: number times 8 or more hubs targeted",
	"numuvhubs4: number times 4 or more hubs targeted",
	"numuvhubs2: number times 2 or more hubs targeted",
	"numuvhubs1: number times 1 hub targeted",
	"numcpus: number of cpus targeted with shootdown",
	"dto: number of destination timeouts",
	"retries: destination timeout retries sent",
	"rok: destination timeouts successfully retried",
	"resetp: ipi-style resource resets for plugs",
	"resett: ipi-style resource resets for timeouts",
	"giveup: fall-backs to ipi-style shootdowns",
	"sto: number of source timeouts",
	"bz: number of stay-busy's",
	"throt: number times spun in throttle",
	"swack: image of UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE",
	"recv: shootdown messages received",
	"rtime: time spent processing messages",
	"all: shootdown all-tlb messages",
	"one: shootdown one-tlb messages",
	"mult: interrupts that found multiple messages",
	"none: interrupts that found no messages",
	"retry: number of retry messages processed",
	"canc: number messages canceled by retries",
	"nocan: number retries that found nothing to cancel",
	"reset: number of ipi-style reset requests processed",
	"rcan: number messages canceled by reset requests",
	"disable: number times use of the BAU was disabled",
	"enable: number times use of the BAU was re-enabled"
};

static int __init
setup_nobau(char *arg)
{
	nobau = 1;
	return 0;
}
early_param("nobau", setup_nobau);

/* base pnode in this partition */
static int uv_base_pnode __read_mostly;

static DEFINE_PER_CPU(struct ptc_stats, ptcstats);
static DEFINE_PER_CPU(struct bau_control, bau_control);
static DEFINE_PER_CPU(cpumask_var_t, uv_flush_tlb_mask);

/*
 * Determine the first node on a uvhub. 'Nodes' are used for kernel
 * memory allocation.
 */
static int __init uvhub_to_first_node(int uvhub)
{
	int node, b;

	for_each_online_node(node) {
		b = uv_node_to_blade_id(node);
		if (uvhub == b)
			return node;
	}
	return -1;
}

/*
 * Determine the apicid of the first cpu on a uvhub.
 */
static int __init uvhub_to_first_apicid(int uvhub)
{
	int cpu;

	for_each_present_cpu(cpu)
		if (uvhub == uv_cpu_to_blade_id(cpu))
			return per_cpu(x86_cpu_to_apicid, cpu);
	return -1;
}

/*
 * Free a software acknowledge hardware resource by clearing its Pending
 * bit. This will return a reply to the sender.
 * If the message has timed out, a reply has already been sent by the
 * hardware but the resource has not been released. In that case our
 * clear of the Timeout bit (as well) will free the resource. No reply will
 * be sent (the hardware will only do one reply per message).
 */
static void reply_to_message(struct msg_desc *mdp, struct bau_control *bcp)
{
	unsigned long dw;
	struct bau_pq_entry *msg;

	msg = mdp->msg;
	if (!msg->canceled) {
		dw = (msg->swack_vec << UV_SW_ACK_NPENDING) | msg->swack_vec;
		write_mmr_sw_ack(dw);
	}
	msg->replied_to = 1;
	msg->swack_vec = 0;
}

/*
 * Process the receipt of a RETRY message
 */
static void bau_process_retry_msg(struct msg_desc *mdp,
				  struct bau_control *bcp)
{
	int i;
	int cancel_count = 0;
	unsigned long msg_res;
	unsigned long mmr = 0;
	struct bau_pq_entry *msg = mdp->msg;
	struct bau_pq_entry *msg2;
	struct ptc_stats *stat = bcp->statp;

	stat->d_retries++;
	/*
	 * cancel any message from msg+1 to the retry itself
	 */
	for (msg2 = msg+1, i = 0; i < DEST_Q_SIZE; msg2++, i++) {
		if (msg2 > mdp->queue_last)
			msg2 = mdp->queue_first;
		if (msg2 == msg)
			break;

		/* same conditions for cancellation as do_reset */
		if ((msg2->replied_to == 0) && (msg2->canceled == 0) &&
		    (msg2->swack_vec) && ((msg2->swack_vec &
			msg->swack_vec) == 0) &&
		    (msg2->sending_cpu == msg->sending_cpu) &&
		    (msg2->msg_type != MSG_NOOP)) {
			mmr = read_mmr_sw_ack();
			msg_res = msg2->swack_vec;
			/*
			 * This is a message retry; clear the resources held
			 * by the previous message only if they timed out.
			 * If it has not timed out we have an unexpected
			 * situation to report.
			 */
			if (mmr & (msg_res << UV_SW_ACK_NPENDING)) {
				unsigned long mr;
				/*
				 * is the resource timed out?
				 * make everyone ignore the cancelled message.
				 */
				msg2->canceled = 1;
				stat->d_canceled++;
				cancel_count++;
				mr = (msg_res << UV_SW_ACK_NPENDING) | msg_res;
				write_mmr_sw_ack(mr);
			}
		}
	}
	if (!cancel_count)
		stat->d_nocanceled++;
}

/*
 * Do all the things a cpu should do for a TLB shootdown message.
 * Other cpu's may come here at the same time for this message.
 */
static void bau_process_message(struct msg_desc *mdp,
				struct bau_control *bcp)
{
	short socket_ack_count = 0;
	short *sp;
	struct atomic_short *asp;
	struct ptc_stats *stat = bcp->statp;
	struct bau_pq_entry *msg = mdp->msg;
	struct bau_control *smaster = bcp->socket_master;

	/*
	 * This must be a normal message, or retry of a normal message
	 */
	if (msg->address == TLB_FLUSH_ALL) {
		local_flush_tlb();
		stat->d_alltlb++;
	} else {
		__flush_tlb_one(msg->address);
		stat->d_onetlb++;
	}
	stat->d_requestee++;

	/*
	 * One cpu on each uvhub has the additional job on a RETRY
	 * of releasing the resource held by the message that is
	 * being retried. That message is identified by sending
	 * cpu number.
	 */
	if (msg->msg_type == MSG_RETRY && bcp == bcp->uvhub_master)
		bau_process_retry_msg(mdp, bcp);

	/*
	 * This is a swack message, so we have to reply to it.
	 * Count each responding cpu on the socket. This avoids
	 * pinging the count's cache line back and forth between
	 * the sockets.
	 */
	sp = &smaster->socket_acknowledge_count[mdp->msg_slot];
	asp = (struct atomic_short *)sp;
	socket_ack_count = atom_asr(1, asp);
	if (socket_ack_count == bcp->cpus_in_socket) {
		int msg_ack_count;
		/*
		 * Both sockets dump their completed count total into
		 * the message's count.
		 */
		smaster->socket_acknowledge_count[mdp->msg_slot] = 0;
		asp = (struct atomic_short *)&msg->acknowledge_count;
		msg_ack_count = atom_asr(socket_ack_count, asp);

		if (msg_ack_count == bcp->cpus_in_uvhub) {
			/*
			 * All cpus in uvhub saw it; reply
			 */
			reply_to_message(mdp, bcp);
		}
	}

	return;
}

/*
 * Determine the first cpu on a uvhub.
 */
static int uvhub_to_first_cpu(int uvhub)
{
	int cpu;
	for_each_present_cpu(cpu)
		if (uvhub == uv_cpu_to_blade_id(cpu))
			return cpu;
	return -1;
}
308 * Last resort when we get a large number of destination timeouts is
309 * to clear resources held by a given cpu.
310 * Do this with IPI so that all messages in the BAU message queue
311 * can be identified by their nonzero swack_vec field.
313 * This is entered for a single cpu on the uvhub.
314 The sender wants this uvhub to free a specific message's
315 * swack resources.
317 static void do_reset(void *ptr)
319 int i;
320 struct bau_control *bcp = &per_cpu(bau_control, smp_processor_id());
321 struct reset_args *rap = (struct reset_args *)ptr;
322 struct bau_pq_entry *msg;
323 struct ptc_stats *stat = bcp->statp;
325 stat->d_resets++;
327 * We're looking for the given sender, and
328 * will free its swack resource.
329 * If all cpu's finally responded after the timeout, its
330 * message 'replied_to' was set.
332 for (msg = bcp->queue_first, i = 0; i < DEST_Q_SIZE; msg++, i++) {
333 unsigned long msg_res;
334 /* do_reset: same conditions for cancellation as
335 bau_process_retry_msg() */
336 if ((msg->replied_to == 0) &&
337 (msg->canceled == 0) &&
338 (msg->sending_cpu == rap->sender) &&
339 (msg->swack_vec) &&
340 (msg->msg_type != MSG_NOOP)) {
341 unsigned long mmr;
342 unsigned long mr;
344 * make everyone else ignore this message
346 msg->canceled = 1;
348 * only reset the resource if it is still pending
350 mmr = read_mmr_sw_ack();
351 msg_res = msg->swack_vec;
352 mr = (msg_res << UV_SW_ACK_NPENDING) | msg_res;
353 if (mmr & msg_res) {
354 stat->d_rcanceled++;
355 write_mmr_sw_ack(mr);
359 return;
363 * Use IPI to get all target uvhubs to release resources held by
364 * a given sending cpu number.
366 static void reset_with_ipi(struct bau_targ_hubmask *distribution, int sender)
368 int uvhub;
369 int maskbits;
370 cpumask_t mask;
371 struct reset_args reset_args;
373 reset_args.sender = sender;
374 cpus_clear(mask);
375 /* find a single cpu for each uvhub in this distribution mask */
376 maskbits = sizeof(struct bau_targ_hubmask) * BITSPERBYTE;
377 for (uvhub = 0; uvhub < maskbits; uvhub++) {
378 int cpu;
379 if (!bau_uvhub_isset(uvhub, distribution))
380 continue;
381 /* find a cpu for this uvhub */
382 cpu = uvhub_to_first_cpu(uvhub);
383 cpu_set(cpu, mask);
386 /* IPI all cpus; preemption is already disabled */
387 smp_call_function_many(&mask, do_reset, (void *)&reset_args, 1);
388 return;
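
/*
 * Convert a TSC cycle count to microseconds using this cpu's cyc2ns
 * fixed-point factor: ns = (cycles * cyc2ns) >> CYC2NS_SCALE_FACTOR.
 */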
static inline unsigned long cycles_2_us(unsigned long long cyc)
{
	unsigned long long ns;
	unsigned long us;
	int cpu = smp_processor_id();

	ns = (cyc * per_cpu(cyc2ns, cpu)) >> CYC2NS_SCALE_FACTOR;
	us = ns / 1000;
	return us;
}
403 * wait for all cpus on this hub to finish their sends and go quiet
404 * leaves uvhub_quiesce set so that no new broadcasts are started by
405 * bau_flush_send_and_wait()
407 static inline void quiesce_local_uvhub(struct bau_control *hmaster)
409 atom_asr(1, (struct atomic_short *)&hmaster->uvhub_quiesce);
413 * mark this quiet-requestor as done
415 static inline void end_uvhub_quiesce(struct bau_control *hmaster)
417 atom_asr(-1, (struct atomic_short *)&hmaster->uvhub_quiesce);
420 static unsigned long uv1_read_status(unsigned long mmr_offset, int right_shift)
422 unsigned long descriptor_status;
424 descriptor_status = uv_read_local_mmr(mmr_offset);
425 descriptor_status >>= right_shift;
426 descriptor_status &= UV_ACT_STATUS_MASK;
427 return descriptor_status;
431 * Wait for completion of a broadcast software ack message
432 * return COMPLETE, RETRY(PLUGGED or TIMEOUT) or GIVEUP
434 static int uv1_wait_completion(struct bau_desc *bau_desc,
435 unsigned long mmr_offset, int right_shift,
436 struct bau_control *bcp, long try)
438 unsigned long descriptor_status;
439 cycles_t ttm;
440 struct ptc_stats *stat = bcp->statp;
442 descriptor_status = uv1_read_status(mmr_offset, right_shift);
443 /* spin on the status MMR, waiting for it to go idle */
444 while ((descriptor_status != DS_IDLE)) {
446 * Our software ack messages may be blocked because
447 * there are no swack resources available. As long
448 * as none of them has timed out hardware will NACK
449 * our message and its state will stay IDLE.
451 if (descriptor_status == DS_SOURCE_TIMEOUT) {
452 stat->s_stimeout++;
453 return FLUSH_GIVEUP;
454 } else if (descriptor_status == DS_DESTINATION_TIMEOUT) {
455 stat->s_dtimeout++;
456 ttm = get_cycles();
459 * Our retries may be blocked by all destination
460 * swack resources being consumed, and a timeout
461 * pending. In that case hardware returns the
462 * ERROR that looks like a destination timeout.
464 if (cycles_2_us(ttm - bcp->send_message) < timeout_us) {
465 bcp->conseccompletes = 0;
466 return FLUSH_RETRY_PLUGGED;
469 bcp->conseccompletes = 0;
470 return FLUSH_RETRY_TIMEOUT;
471 } else {
473 * descriptor_status is still BUSY
475 cpu_relax();
477 descriptor_status = uv1_read_status(mmr_offset, right_shift);
479 bcp->conseccompletes++;
480 return FLUSH_COMPLETE;
484 * UV2 has an extra bit of status in the ACTIVATION_STATUS_2 register.
486 static unsigned long uv2_read_status(unsigned long offset, int rshft, int cpu)
488 unsigned long descriptor_status;
489 unsigned long descriptor_status2;
491 descriptor_status = ((read_lmmr(offset) >> rshft) & UV_ACT_STATUS_MASK);
492 descriptor_status2 = (read_mmr_uv2_status() >> cpu) & 0x1UL;
493 descriptor_status = (descriptor_status << 1) | descriptor_status2;
494 return descriptor_status;
497 static int uv2_wait_completion(struct bau_desc *bau_desc,
498 unsigned long mmr_offset, int right_shift,
499 struct bau_control *bcp, long try)
501 unsigned long descriptor_stat;
502 cycles_t ttm;
503 int cpu = bcp->uvhub_cpu;
504 struct ptc_stats *stat = bcp->statp;
506 descriptor_stat = uv2_read_status(mmr_offset, right_shift, cpu);
508 /* spin on the status MMR, waiting for it to go idle */
509 while (descriptor_stat != UV2H_DESC_IDLE) {
511 * Our software ack messages may be blocked because
512 * there are no swack resources available. As long
513 * as none of them has timed out hardware will NACK
514 * our message and its state will stay IDLE.
516 if ((descriptor_stat == UV2H_DESC_SOURCE_TIMEOUT) ||
517 (descriptor_stat == UV2H_DESC_DEST_STRONG_NACK) ||
518 (descriptor_stat == UV2H_DESC_DEST_PUT_ERR)) {
519 stat->s_stimeout++;
520 return FLUSH_GIVEUP;
521 } else if (descriptor_stat == UV2H_DESC_DEST_TIMEOUT) {
522 stat->s_dtimeout++;
523 ttm = get_cycles();
525 * Our retries may be blocked by all destination
526 * swack resources being consumed, and a timeout
527 * pending. In that case hardware returns the
528 * ERROR that looks like a destination timeout.
530 if (cycles_2_us(ttm - bcp->send_message) < timeout_us) {
531 bcp->conseccompletes = 0;
532 return FLUSH_RETRY_PLUGGED;
534 bcp->conseccompletes = 0;
535 return FLUSH_RETRY_TIMEOUT;
536 } else {
538 * descriptor_stat is still BUSY
540 cpu_relax();
542 descriptor_stat = uv2_read_status(mmr_offset, right_shift, cpu);
544 bcp->conseccompletes++;
545 return FLUSH_COMPLETE;
549 * There are 2 status registers; each an array[32] of 2 bits. Set up for
550 * which register to read and position in that register based on cpu in
551 * current hub.
553 static int wait_completion(struct bau_desc *bau_desc,
554 struct bau_control *bcp, long try)
556 int right_shift;
557 unsigned long mmr_offset;
558 int cpu = bcp->uvhub_cpu;
560 if (cpu < UV_CPUS_PER_AS) {
561 mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_0;
562 right_shift = cpu * UV_ACT_STATUS_SIZE;
563 } else {
564 mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_1;
565 right_shift = ((cpu - UV_CPUS_PER_AS) * UV_ACT_STATUS_SIZE);
568 if (is_uv1_hub())
569 return uv1_wait_completion(bau_desc, mmr_offset, right_shift,
570 bcp, try);
571 else
572 return uv2_wait_completion(bau_desc, mmr_offset, right_shift,
573 bcp, try);
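
/*
 * Convert seconds to TSC cycles, the inverse scaling of cycles_2_us(),
 * using the same per-cpu cyc2ns fixed-point factor.
 */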
576 static inline cycles_t sec_2_cycles(unsigned long sec)
578 unsigned long ns;
579 cycles_t cyc;
581 ns = sec * 1000000000;
582 cyc = (ns << CYC2NS_SCALE_FACTOR)/(per_cpu(cyc2ns, smp_processor_id()));
583 return cyc;
587 * Our retries are blocked by all destination sw ack resources being
588 * in use, and a timeout is pending. In that case hardware immediately
589 * returns the ERROR that looks like a destination timeout.
591 static void destination_plugged(struct bau_desc *bau_desc,
592 struct bau_control *bcp,
593 struct bau_control *hmaster, struct ptc_stats *stat)
595 udelay(bcp->plugged_delay);
596 bcp->plugged_tries++;
598 if (bcp->plugged_tries >= bcp->plugsb4reset) {
599 bcp->plugged_tries = 0;
601 quiesce_local_uvhub(hmaster);
603 spin_lock(&hmaster->queue_lock);
604 reset_with_ipi(&bau_desc->distribution, bcp->cpu);
605 spin_unlock(&hmaster->queue_lock);
607 end_uvhub_quiesce(hmaster);
609 bcp->ipi_attempts++;
610 stat->s_resets_plug++;
614 static void destination_timeout(struct bau_desc *bau_desc,
615 struct bau_control *bcp, struct bau_control *hmaster,
616 struct ptc_stats *stat)
618 hmaster->max_concurr = 1;
619 bcp->timeout_tries++;
620 if (bcp->timeout_tries >= bcp->timeoutsb4reset) {
621 bcp->timeout_tries = 0;
623 quiesce_local_uvhub(hmaster);
625 spin_lock(&hmaster->queue_lock);
626 reset_with_ipi(&bau_desc->distribution, bcp->cpu);
627 spin_unlock(&hmaster->queue_lock);
629 end_uvhub_quiesce(hmaster);
631 bcp->ipi_attempts++;
632 stat->s_resets_timeout++;
637 * Completions are taking a very long time due to a congested numalink
638 * network.
640 static void disable_for_congestion(struct bau_control *bcp,
641 struct ptc_stats *stat)
643 /* let only one cpu do this disabling */
644 spin_lock(&disable_lock);
646 if (!baudisabled && bcp->period_requests &&
647 ((bcp->period_time / bcp->period_requests) > congested_cycles)) {
648 int tcpu;
649 struct bau_control *tbcp;
650 /* it becomes this cpu's job to turn on the use of the
651 BAU again */
652 baudisabled = 1;
653 bcp->set_bau_off = 1;
654 bcp->set_bau_on_time = get_cycles();
655 bcp->set_bau_on_time += sec_2_cycles(bcp->cong_period);
656 stat->s_bau_disabled++;
657 for_each_present_cpu(tcpu) {
658 tbcp = &per_cpu(bau_control, tcpu);
659 tbcp->baudisabled = 1;
663 spin_unlock(&disable_lock);
666 static void count_max_concurr(int stat, struct bau_control *bcp,
667 struct bau_control *hmaster)
669 bcp->plugged_tries = 0;
670 bcp->timeout_tries = 0;
671 if (stat != FLUSH_COMPLETE)
672 return;
673 if (bcp->conseccompletes <= bcp->complete_threshold)
674 return;
675 if (hmaster->max_concurr >= hmaster->max_concurr_const)
676 return;
677 hmaster->max_concurr++;
680 static void record_send_stats(cycles_t time1, cycles_t time2,
681 struct bau_control *bcp, struct ptc_stats *stat,
682 int completion_status, int try)
684 cycles_t elapsed;
686 if (time2 > time1) {
687 elapsed = time2 - time1;
688 stat->s_time += elapsed;
690 if ((completion_status == FLUSH_COMPLETE) && (try == 1)) {
691 bcp->period_requests++;
692 bcp->period_time += elapsed;
693 if ((elapsed > congested_cycles) &&
694 (bcp->period_requests > bcp->cong_reps))
695 disable_for_congestion(bcp, stat);
697 } else
698 stat->s_requestor--;
700 if (completion_status == FLUSH_COMPLETE && try > 1)
701 stat->s_retriesok++;
702 else if (completion_status == FLUSH_GIVEUP)
703 stat->s_giveup++;
707 * Because of a uv1 hardware bug only a limited number of concurrent
708 * requests can be made.
710 static void uv1_throttle(struct bau_control *hmaster, struct ptc_stats *stat)
712 spinlock_t *lock = &hmaster->uvhub_lock;
713 atomic_t *v;
715 v = &hmaster->active_descriptor_count;
716 if (!atomic_inc_unless_ge(lock, v, hmaster->max_concurr)) {
717 stat->s_throttles++;
718 do {
719 cpu_relax();
720 } while (!atomic_inc_unless_ge(lock, v, hmaster->max_concurr));
725 * Handle the completion status of a message send.
727 static void handle_cmplt(int completion_status, struct bau_desc *bau_desc,
728 struct bau_control *bcp, struct bau_control *hmaster,
729 struct ptc_stats *stat)
731 if (completion_status == FLUSH_RETRY_PLUGGED)
732 destination_plugged(bau_desc, bcp, hmaster, stat);
733 else if (completion_status == FLUSH_RETRY_TIMEOUT)
734 destination_timeout(bau_desc, bcp, hmaster, stat);
738 * Send a broadcast and wait for it to complete.
740 * The flush_mask contains the cpus the broadcast is to be sent to including
741 * cpus that are on the local uvhub.
743 * Returns 0 if all flushing represented in the mask was done.
744 * Returns 1 if it gives up entirely and the original cpu mask is to be
745 * returned to the kernel.
747 int uv_flush_send_and_wait(struct bau_desc *bau_desc,
748 struct cpumask *flush_mask, struct bau_control *bcp)
750 int seq_number = 0;
751 int completion_stat = 0;
752 long try = 0;
753 unsigned long index;
754 cycles_t time1;
755 cycles_t time2;
756 struct ptc_stats *stat = bcp->statp;
757 struct bau_control *hmaster = bcp->uvhub_master;
759 if (is_uv1_hub())
760 uv1_throttle(hmaster, stat);
762 while (hmaster->uvhub_quiesce)
763 cpu_relax();
765 time1 = get_cycles();
766 do {
767 if (try == 0) {
768 bau_desc->header.msg_type = MSG_REGULAR;
769 seq_number = bcp->message_number++;
770 } else {
771 bau_desc->header.msg_type = MSG_RETRY;
772 stat->s_retry_messages++;
775 bau_desc->header.sequence = seq_number;
776 index = (1UL << AS_PUSH_SHIFT) | bcp->uvhub_cpu;
777 bcp->send_message = get_cycles();
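		/*
		 * Writing the activation MMR (push bit | this cpu's descriptor
		 * index) is what starts the hardware broadcast.
		 */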
779 write_mmr_activation(index);
781 try++;
782 completion_stat = wait_completion(bau_desc, bcp, try);
784 handle_cmplt(completion_stat, bau_desc, bcp, hmaster, stat);
786 if (bcp->ipi_attempts >= bcp->ipi_reset_limit) {
787 bcp->ipi_attempts = 0;
788 completion_stat = FLUSH_GIVEUP;
789 break;
791 cpu_relax();
792 } while ((completion_stat == FLUSH_RETRY_PLUGGED) ||
793 (completion_stat == FLUSH_RETRY_TIMEOUT));
795 time2 = get_cycles();
797 count_max_concurr(completion_stat, bcp, hmaster);
799 while (hmaster->uvhub_quiesce)
800 cpu_relax();
802 atomic_dec(&hmaster->active_descriptor_count);
804 record_send_stats(time1, time2, bcp, stat, completion_stat, try);
806 if (completion_stat == FLUSH_GIVEUP)
807 return 1;
808 return 0;
812 * The BAU is disabled. When the disabled time period has expired, the cpu
813 * that disabled it must re-enable it.
814 * Return 0 if it is re-enabled for all cpus.
816 static int check_enable(struct bau_control *bcp, struct ptc_stats *stat)
818 int tcpu;
819 struct bau_control *tbcp;
821 if (bcp->set_bau_off) {
822 if (get_cycles() >= bcp->set_bau_on_time) {
823 stat->s_bau_reenabled++;
824 baudisabled = 0;
825 for_each_present_cpu(tcpu) {
826 tbcp = &per_cpu(bau_control, tcpu);
827 tbcp->baudisabled = 0;
828 tbcp->period_requests = 0;
829 tbcp->period_time = 0;
831 return 0;
834 return -1;
837 static void record_send_statistics(struct ptc_stats *stat, int locals, int hubs,
838 int remotes, struct bau_desc *bau_desc)
840 stat->s_requestor++;
841 stat->s_ntargcpu += remotes + locals;
842 stat->s_ntargremotes += remotes;
843 stat->s_ntarglocals += locals;
845 /* uvhub statistics */
846 hubs = bau_uvhub_weight(&bau_desc->distribution);
847 if (locals) {
848 stat->s_ntarglocaluvhub++;
849 stat->s_ntargremoteuvhub += (hubs - 1);
850 } else
851 stat->s_ntargremoteuvhub += hubs;
853 stat->s_ntarguvhub += hubs;
855 if (hubs >= 16)
856 stat->s_ntarguvhub16++;
857 else if (hubs >= 8)
858 stat->s_ntarguvhub8++;
859 else if (hubs >= 4)
860 stat->s_ntarguvhub4++;
861 else if (hubs >= 2)
862 stat->s_ntarguvhub2++;
863 else
864 stat->s_ntarguvhub1++;
868 * Translate a cpu mask to the uvhub distribution mask in the BAU
869 * activation descriptor.
871 static int set_distrib_bits(struct cpumask *flush_mask, struct bau_control *bcp,
872 struct bau_desc *bau_desc, int *localsp, int *remotesp)
874 int cpu;
875 int pnode;
876 int cnt = 0;
877 struct hub_and_pnode *hpp;
879 for_each_cpu(cpu, flush_mask) {
881 * The distribution vector is a bit map of pnodes, relative
882 * to the partition base pnode (and the partition base nasid
883 * in the header).
884 * Translate cpu to pnode and hub using a local memory array.
886 hpp = &bcp->socket_master->thp[cpu];
887 pnode = hpp->pnode - bcp->partition_base_pnode;
888 bau_uvhub_set(pnode, &bau_desc->distribution);
889 cnt++;
890 if (hpp->uvhub == bcp->uvhub)
891 (*localsp)++;
892 else
893 (*remotesp)++;
895 if (!cnt)
896 return 1;
897 return 0;
901 * globally purge translation cache of a virtual address or all TLB's
902 * @cpumask: mask of all cpu's in which the address is to be removed
903 * @mm: mm_struct containing virtual address range
904 * @va: virtual address to be removed (or TLB_FLUSH_ALL for all TLB's on cpu)
905 * @cpu: the current cpu
907 * This is the entry point for initiating any UV global TLB shootdown.
909 * Purges the translation caches of all specified processors of the given
910 * virtual address, or purges all TLB's on specified processors.
912 * The caller has derived the cpumask from the mm_struct. This function
913 * is called only if there are bits set in the mask. (e.g. flush_tlb_page())
915 * The cpumask is converted into a uvhubmask of the uvhubs containing
916 * those cpus.
918 * Note that this function should be called with preemption disabled.
920 * Returns NULL if all remote flushing was done.
921 * Returns pointer to cpumask if some remote flushing remains to be
922 * done. The returned pointer is valid till preemption is re-enabled.
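/*
 * A sketch of the typical caller (the generic x86 flush path of this era,
 * names assumed rather than quoted from this file); any cpus the BAU could
 * not reach are then flushed with ordinary IPIs:
 *
 *	cpumask = uv_flush_tlb_others(cpumask, mm, va, cpu);
 *	if (cpumask)
 *		flush_tlb_others_ipi(cpumask, mm, va);
 */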
924 const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
925 struct mm_struct *mm, unsigned long va,
926 unsigned int cpu)
928 int locals = 0;
929 int remotes = 0;
930 int hubs = 0;
931 struct bau_desc *bau_desc;
932 struct cpumask *flush_mask;
933 struct ptc_stats *stat;
934 struct bau_control *bcp;
936 /* kernel was booted 'nobau' */
937 if (nobau)
938 return cpumask;
940 bcp = &per_cpu(bau_control, cpu);
941 stat = bcp->statp;
943 /* bau was disabled due to slow response */
944 if (bcp->baudisabled) {
945 if (check_enable(bcp, stat))
946 return cpumask;
950 * Each sending cpu has a per-cpu mask which it fills from the caller's
951 * cpu mask. All cpus are converted to uvhubs and copied to the
952 * activation descriptor.
954 flush_mask = (struct cpumask *)per_cpu(uv_flush_tlb_mask, cpu);
955 /* don't actually do a shootdown of the local cpu */
956 cpumask_andnot(flush_mask, cpumask, cpumask_of(cpu));
958 if (cpu_isset(cpu, *cpumask))
959 stat->s_ntargself++;
961 bau_desc = bcp->descriptor_base;
962 bau_desc += ITEMS_PER_DESC * bcp->uvhub_cpu;
963 bau_uvhubs_clear(&bau_desc->distribution, UV_DISTRIBUTION_SIZE);
964 if (set_distrib_bits(flush_mask, bcp, bau_desc, &locals, &remotes))
965 return NULL;
967 record_send_statistics(stat, locals, hubs, remotes, bau_desc);
969 bau_desc->payload.address = va;
970 bau_desc->payload.sending_cpu = cpu;
972 * uv_flush_send_and_wait returns 0 if all cpu's were messaged,
973 * or 1 if it gave up and the original cpumask should be returned.
975 if (!uv_flush_send_and_wait(bau_desc, flush_mask, bcp))
976 return NULL;
977 else
978 return cpumask;
982 * The BAU message interrupt comes here. (registered by set_intr_gate)
983 * See entry_64.S
985 * We received a broadcast assist message.
987 * Interrupts are disabled; this interrupt could represent
988 * the receipt of several messages.
990 * All cores/threads on this hub get this interrupt.
991 * The last one to see it does the software ack.
992 * (the resource will not be freed until noninterruptable cpus see this
993 * interrupt; hardware may timeout the s/w ack and reply ERROR)
995 void uv_bau_message_interrupt(struct pt_regs *regs)
997 int count = 0;
998 cycles_t time_start;
999 struct bau_pq_entry *msg;
1000 struct bau_control *bcp;
1001 struct ptc_stats *stat;
1002 struct msg_desc msgdesc;
1004 time_start = get_cycles();
1006 bcp = &per_cpu(bau_control, smp_processor_id());
1007 stat = bcp->statp;
1009 msgdesc.queue_first = bcp->queue_first;
1010 msgdesc.queue_last = bcp->queue_last;
1012 msg = bcp->bau_msg_head;
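	/*
	 * Walk the payload queue; a nonzero swack_vec marks a slot that
	 * still holds a message to be processed.
	 */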
1013 while (msg->swack_vec) {
1014 count++;
1016 msgdesc.msg_slot = msg - msgdesc.queue_first;
1017 msgdesc.swack_slot = ffs(msg->swack_vec) - 1;
1018 msgdesc.msg = msg;
1019 bau_process_message(&msgdesc, bcp);
1021 msg++;
1022 if (msg > msgdesc.queue_last)
1023 msg = msgdesc.queue_first;
1024 bcp->bau_msg_head = msg;
1026 stat->d_time += (get_cycles() - time_start);
1027 if (!count)
1028 stat->d_nomsg++;
1029 else if (count > 1)
1030 stat->d_multmsg++;
1032 ack_APIC_irq();
1036 * Each target uvhub (i.e. a uvhub that has cpu's) needs to have
1037 * shootdown message timeouts enabled. The timeout does not cause
1038 * an interrupt, but causes an error message to be returned to
1039 * the sender.
1041 static void __init enable_timeouts(void)
1043 int uvhub;
1044 int nuvhubs;
1045 int pnode;
1046 unsigned long mmr_image;
1048 nuvhubs = uv_num_possible_blades();
1050 for (uvhub = 0; uvhub < nuvhubs; uvhub++) {
1051 if (!uv_blade_nr_possible_cpus(uvhub))
1052 continue;
1054 pnode = uv_blade_to_pnode(uvhub);
1055 mmr_image = read_mmr_misc_control(pnode);
1057 * Set the timeout period and then lock it in, in three
1058 * steps; captures and locks in the period.
1060 * To program the period, the SOFT_ACK_MODE must be off.
1062 mmr_image &= ~(1L << SOFTACK_MSHIFT);
1063 write_mmr_misc_control(pnode, mmr_image);
1065 * Set the 4-bit period.
1067 mmr_image &= ~((unsigned long)0xf << SOFTACK_PSHIFT);
1068 mmr_image |= (SOFTACK_TIMEOUT_PERIOD << SOFTACK_PSHIFT);
1069 write_mmr_misc_control(pnode, mmr_image);
1071 * UV1:
1072 * Subsequent reversals of the timebase bit (3) cause an
1073 * immediate timeout of one or all INTD resources as
1074 * indicated in bits 2:0 (7 causes all of them to timeout).
1076 mmr_image |= (1L << SOFTACK_MSHIFT);
1077 if (is_uv2_hub()) {
1078 mmr_image |= (1L << UV2_LEG_SHFT);
1079 mmr_image |= (1L << UV2_EXT_SHFT);
1081 write_mmr_misc_control(pnode, mmr_image);
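
/*
 * seq_file iterators for /proc/sgi_uv/ptc_statistics: the iterator
 * position is simply a cpu number, advanced up to num_possible_cpus().
 */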
1085 static void *ptc_seq_start(struct seq_file *file, loff_t *offset)
1087 if (*offset < num_possible_cpus())
1088 return offset;
1089 return NULL;
1092 static void *ptc_seq_next(struct seq_file *file, void *data, loff_t *offset)
1094 (*offset)++;
1095 if (*offset < num_possible_cpus())
1096 return offset;
1097 return NULL;
1100 static void ptc_seq_stop(struct seq_file *file, void *data)
1104 static inline unsigned long long usec_2_cycles(unsigned long microsec)
1106 unsigned long ns;
1107 unsigned long long cyc;
1109 ns = microsec * 1000;
1110 cyc = (ns << CYC2NS_SCALE_FACTOR)/(per_cpu(cyc2ns, smp_processor_id()));
1111 return cyc;
1115 * Display the statistics thru /proc/sgi_uv/ptc_statistics
1116 * 'data' points to the cpu number
1117 * Note: see the descriptions in stat_description[].
1119 static int ptc_seq_show(struct seq_file *file, void *data)
1121 struct ptc_stats *stat;
1122 int cpu;
1124 cpu = *(loff_t *)data;
1125 if (!cpu) {
1126 seq_printf(file,
1127 "# cpu sent stime self locals remotes ncpus localhub ");
1128 seq_printf(file,
1129 "remotehub numuvhubs numuvhubs16 numuvhubs8 ");
1130 seq_printf(file,
1131 "numuvhubs4 numuvhubs2 numuvhubs1 dto retries rok ");
1132 seq_printf(file,
1133 "resetp resett giveup sto bz throt swack recv rtime ");
1134 seq_printf(file,
1135 "all one mult none retry canc nocan reset rcan ");
1136 seq_printf(file,
1137 "disable enable\n");
1139 if (cpu < num_possible_cpus() && cpu_online(cpu)) {
1140 stat = &per_cpu(ptcstats, cpu);
1141 /* source side statistics */
1142 seq_printf(file,
1143 "cpu %d %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld ",
1144 cpu, stat->s_requestor, cycles_2_us(stat->s_time),
1145 stat->s_ntargself, stat->s_ntarglocals,
1146 stat->s_ntargremotes, stat->s_ntargcpu,
1147 stat->s_ntarglocaluvhub, stat->s_ntargremoteuvhub,
1148 stat->s_ntarguvhub, stat->s_ntarguvhub16);
1149 seq_printf(file, "%ld %ld %ld %ld %ld ",
1150 stat->s_ntarguvhub8, stat->s_ntarguvhub4,
1151 stat->s_ntarguvhub2, stat->s_ntarguvhub1,
1152 stat->s_dtimeout);
1153 seq_printf(file, "%ld %ld %ld %ld %ld %ld %ld %ld ",
1154 stat->s_retry_messages, stat->s_retriesok,
1155 stat->s_resets_plug, stat->s_resets_timeout,
1156 stat->s_giveup, stat->s_stimeout,
1157 stat->s_busy, stat->s_throttles);
1159 /* destination side statistics */
1160 seq_printf(file,
1161 "%lx %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld ",
1162 read_gmmr_sw_ack(uv_cpu_to_pnode(cpu)),
1163 stat->d_requestee, cycles_2_us(stat->d_time),
1164 stat->d_alltlb, stat->d_onetlb, stat->d_multmsg,
1165 stat->d_nomsg, stat->d_retries, stat->d_canceled,
1166 stat->d_nocanceled, stat->d_resets,
1167 stat->d_rcanceled);
1168 seq_printf(file, "%ld %ld\n",
1169 stat->s_bau_disabled, stat->s_bau_reenabled);
1171 return 0;
1175 * Display the tunables thru debugfs
1177 static ssize_t tunables_read(struct file *file, char __user *userbuf,
1178 size_t count, loff_t *ppos)
1180 char *buf;
1181 int ret;
1183 buf = kasprintf(GFP_KERNEL, "%s %s %s\n%d %d %d %d %d %d %d %d %d\n",
1184 "max_concur plugged_delay plugsb4reset",
1185 "timeoutsb4reset ipi_reset_limit complete_threshold",
1186 "congested_response_us congested_reps congested_period",
1187 max_concurr, plugged_delay, plugsb4reset,
1188 timeoutsb4reset, ipi_reset_limit, complete_threshold,
1189 congested_respns_us, congested_reps, congested_period);
1191 if (!buf)
1192 return -ENOMEM;
1194 ret = simple_read_from_buffer(userbuf, count, ppos, buf, strlen(buf));
1195 kfree(buf);
1196 return ret;
1200 * handle a write to /proc/sgi_uv/ptc_statistics
1201 * -1: reset the statistics
1202 * 0: display meaning of the statistics
1204 static ssize_t ptc_proc_write(struct file *file, const char __user *user,
1205 size_t count, loff_t *data)
1207 int cpu;
1208 int i;
1209 int elements;
1210 long input_arg;
1211 char optstr[64];
1212 struct ptc_stats *stat;
1214 if (count == 0 || count > sizeof(optstr))
1215 return -EINVAL;
1216 if (copy_from_user(optstr, user, count))
1217 return -EFAULT;
1218 optstr[count - 1] = '\0';
1220 if (strict_strtol(optstr, 10, &input_arg) < 0) {
1221 printk(KERN_DEBUG "%s is invalid\n", optstr);
1222 return -EINVAL;
1225 if (input_arg == 0) {
1226 elements = sizeof(stat_description)/sizeof(*stat_description);
1227 printk(KERN_DEBUG "# cpu: cpu number\n");
1228 printk(KERN_DEBUG "Sender statistics:\n");
1229 for (i = 0; i < elements; i++)
1230 printk(KERN_DEBUG "%s\n", stat_description[i]);
1231 } else if (input_arg == -1) {
1232 for_each_present_cpu(cpu) {
1233 stat = &per_cpu(ptcstats, cpu);
1234 memset(stat, 0, sizeof(struct ptc_stats));
1238 return count;
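
/* minimal decimal parser: accumulate digits and stop at the first non-digit */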
1241 static int local_atoi(const char *name)
1243 int val = 0;
1245 for (;; name++) {
1246 switch (*name) {
1247 case '0' ... '9':
1248 val = 10*val+(*name-'0');
1249 break;
1250 default:
1251 return val;
1257 * Parse the values written to /sys/kernel/debug/sgi_uv/bau_tunables.
1258 * Zero values reset them to defaults.
1260 static int parse_tunables_write(struct bau_control *bcp, char *instr,
1261 int count)
1263 char *p;
1264 char *q;
1265 int cnt = 0;
1266 int val;
1267 int e = sizeof(tunables) / sizeof(*tunables);
1269 p = instr + strspn(instr, WHITESPACE);
1270 q = p;
1271 for (; *p; p = q + strspn(q, WHITESPACE)) {
1272 q = p + strcspn(p, WHITESPACE);
1273 cnt++;
1274 if (q == p)
1275 break;
1277 if (cnt != e) {
1278 printk(KERN_INFO "bau tunable error: should be %d values\n", e);
1279 return -EINVAL;
1282 p = instr + strspn(instr, WHITESPACE);
1283 q = p;
1284 for (cnt = 0; *p; p = q + strspn(q, WHITESPACE), cnt++) {
1285 q = p + strcspn(p, WHITESPACE);
1286 val = local_atoi(p);
1287 switch (cnt) {
1288 case 0:
1289 if (val == 0) {
1290 max_concurr = MAX_BAU_CONCURRENT;
1291 max_concurr_const = MAX_BAU_CONCURRENT;
1292 continue;
1294 if (val < 1 || val > bcp->cpus_in_uvhub) {
1295 printk(KERN_DEBUG
1296 "Error: BAU max concurrent %d is invalid\n",
1297 val);
1298 return -EINVAL;
1300 max_concurr = val;
1301 max_concurr_const = val;
1302 continue;
1303 default:
1304 if (val == 0)
1305 *tunables[cnt].tunp = tunables[cnt].deflt;
1306 else
1307 *tunables[cnt].tunp = val;
1308 continue;
1310 if (q == p)
1311 break;
1313 return 0;
1317 * Handle a write to debugfs. (/sys/kernel/debug/sgi_uv/bau_tunables)
1319 static ssize_t tunables_write(struct file *file, const char __user *user,
1320 size_t count, loff_t *data)
1322 int cpu;
1323 int ret;
1324 char instr[100];
1325 struct bau_control *bcp;
1327 if (count == 0 || count > sizeof(instr)-1)
1328 return -EINVAL;
1329 if (copy_from_user(instr, user, count))
1330 return -EFAULT;
1332 instr[count] = '\0';
1334 bcp = &per_cpu(bau_control, smp_processor_id());
1336 ret = parse_tunables_write(bcp, instr, count);
1337 if (ret)
1338 return ret;
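
	/* on success, propagate the new tunable values to every cpu's bau_control */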
1340 for_each_present_cpu(cpu) {
1341 bcp = &per_cpu(bau_control, cpu);
1342 bcp->max_concurr = max_concurr;
1343 bcp->max_concurr_const = max_concurr;
1344 bcp->plugged_delay = plugged_delay;
1345 bcp->plugsb4reset = plugsb4reset;
1346 bcp->timeoutsb4reset = timeoutsb4reset;
1347 bcp->ipi_reset_limit = ipi_reset_limit;
1348 bcp->complete_threshold = complete_threshold;
1349 bcp->cong_response_us = congested_respns_us;
1350 bcp->cong_reps = congested_reps;
1351 bcp->cong_period = congested_period;
1353 return count;
1356 static const struct seq_operations uv_ptc_seq_ops = {
1357 .start = ptc_seq_start,
1358 .next = ptc_seq_next,
1359 .stop = ptc_seq_stop,
1360 .show = ptc_seq_show
1363 static int ptc_proc_open(struct inode *inode, struct file *file)
1365 return seq_open(file, &uv_ptc_seq_ops);
1368 static int tunables_open(struct inode *inode, struct file *file)
1370 return 0;
1373 static const struct file_operations proc_uv_ptc_operations = {
1374 .open = ptc_proc_open,
1375 .read = seq_read,
1376 .write = ptc_proc_write,
1377 .llseek = seq_lseek,
1378 .release = seq_release,
1381 static const struct file_operations tunables_fops = {
1382 .open = tunables_open,
1383 .read = tunables_read,
1384 .write = tunables_write,
1385 .llseek = default_llseek,
1388 static int __init uv_ptc_init(void)
1390 struct proc_dir_entry *proc_uv_ptc;
1392 if (!is_uv_system())
1393 return 0;
1395 proc_uv_ptc = proc_create(UV_PTC_BASENAME, 0444, NULL,
1396 &proc_uv_ptc_operations);
1397 if (!proc_uv_ptc) {
1398 printk(KERN_ERR "unable to create %s proc entry\n",
1399 UV_PTC_BASENAME);
1400 return -EINVAL;
1403 tunables_dir = debugfs_create_dir(UV_BAU_TUNABLES_DIR, NULL);
1404 if (!tunables_dir) {
1405 printk(KERN_ERR "unable to create debugfs directory %s\n",
1406 UV_BAU_TUNABLES_DIR);
1407 return -EINVAL;
1409 tunables_file = debugfs_create_file(UV_BAU_TUNABLES_FILE, 0600,
1410 tunables_dir, NULL, &tunables_fops);
1411 if (!tunables_file) {
1412 printk(KERN_ERR "unable to create debugfs file %s\n",
1413 UV_BAU_TUNABLES_FILE);
1414 return -EINVAL;
1416 return 0;
1420 * Initialize the sending side's sending buffers.
1422 static void activation_descriptor_init(int node, int pnode, int base_pnode)
1424 int i;
1425 int cpu;
1426 unsigned long gpa;
1427 unsigned long m;
1428 unsigned long n;
1429 size_t dsize;
1430 struct bau_desc *bau_desc;
1431 struct bau_desc *bd2;
1432 struct bau_control *bcp;
1435 * each bau_desc is 64 bytes; there are 8 (ITEMS_PER_DESC)
1436 * per cpu; and one per cpu on the uvhub (ADP_SZ)
1438 dsize = sizeof(struct bau_desc) * ADP_SZ * ITEMS_PER_DESC;
1439 bau_desc = kmalloc_node(dsize, GFP_KERNEL, node);
1440 BUG_ON(!bau_desc);
1442 gpa = uv_gpa(bau_desc);
1443 n = uv_gpa_to_gnode(gpa);
1444 m = uv_gpa_to_offset(gpa);
1446 /* the 14-bit pnode */
1447 write_mmr_descriptor_base(pnode, (n << UV_DESC_PSHIFT | m));
1449 * Initializing all 8 (ITEMS_PER_DESC) descriptors for each
1450 * cpu even though we only use the first one; one descriptor can
1451 * describe a broadcast to 256 uv hubs.
1453 for (i = 0, bd2 = bau_desc; i < (ADP_SZ * ITEMS_PER_DESC); i++, bd2++) {
1454 memset(bd2, 0, sizeof(struct bau_desc));
1455 bd2->header.swack_flag = 1;
1457 * The base_dest_nasid set in the message header is the nasid
1458 * of the first uvhub in the partition. The bit map will
1459 * indicate destination pnode numbers relative to that base.
1460 * They may not be consecutive if nasid striding is being used.
1462 bd2->header.base_dest_nasid = UV_PNODE_TO_NASID(base_pnode);
1463 bd2->header.dest_subnodeid = UV_LB_SUBNODEID;
1464 bd2->header.command = UV_NET_ENDPOINT_INTD;
1465 bd2->header.int_both = 1;
1467 * all others need to be set to zero:
1468 * fairness chaining multilevel count replied_to
1471 for_each_present_cpu(cpu) {
1472 if (pnode != uv_blade_to_pnode(uv_cpu_to_blade_id(cpu)))
1473 continue;
1474 bcp = &per_cpu(bau_control, cpu);
1475 bcp->descriptor_base = bau_desc;
1480 * initialize the destination side's receiving buffers
1481 * entered for each uvhub in the partition
1482 * - node is first node (kernel memory notion) on the uvhub
1483 * - pnode is the uvhub's physical identifier
1485 static void pq_init(int node, int pnode)
1487 int cpu;
1488 size_t plsize;
1489 char *cp;
1490 void *vp;
1491 unsigned long pn;
1492 unsigned long first;
1493 unsigned long pn_first;
1494 unsigned long last;
1495 struct bau_pq_entry *pqp;
1496 struct bau_control *bcp;
1498 plsize = (DEST_Q_SIZE + 1) * sizeof(struct bau_pq_entry);
1499 vp = kmalloc_node(plsize, GFP_KERNEL, node);
1500 pqp = (struct bau_pq_entry *)vp;
1501 BUG_ON(!pqp);
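
	/* round the payload queue pointer up to the next 32-byte boundary */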
1503 cp = (char *)pqp + 31;
1504 pqp = (struct bau_pq_entry *)(((unsigned long)cp >> 5) << 5);
1506 for_each_present_cpu(cpu) {
1507 if (pnode != uv_cpu_to_pnode(cpu))
1508 continue;
1509 /* for every cpu on this pnode: */
1510 bcp = &per_cpu(bau_control, cpu);
1511 bcp->queue_first = pqp;
1512 bcp->bau_msg_head = pqp;
1513 bcp->queue_last = pqp + (DEST_Q_SIZE - 1);
1516 * need the gnode of where the memory was really allocated
1518 pn = uv_gpa_to_gnode(uv_gpa(pqp));
1519 first = uv_physnodeaddr(pqp);
1520 pn_first = ((unsigned long)pn << UV_PAYLOADQ_PNODE_SHIFT) | first;
1521 last = uv_physnodeaddr(pqp + (DEST_Q_SIZE - 1));
1522 write_mmr_payload_first(pnode, pn_first);
1523 write_mmr_payload_tail(pnode, first);
1524 write_mmr_payload_last(pnode, last);
1526 /* in effect, all msg_type's are set to MSG_NOOP */
1527 memset(pqp, 0, sizeof(struct bau_pq_entry) * DEST_Q_SIZE);
1531 * Initialization of each UV hub's structures
1533 static void __init init_uvhub(int uvhub, int vector, int base_pnode)
1535 int node;
1536 int pnode;
1537 unsigned long apicid;
1539 node = uvhub_to_first_node(uvhub);
1540 pnode = uv_blade_to_pnode(uvhub);
1542 activation_descriptor_init(node, pnode, base_pnode);
1544 pq_init(node, pnode);
1546 * The below initialization can't be in firmware because the
1547 * messaging IRQ will be determined by the OS.
1549 apicid = uvhub_to_first_apicid(uvhub) | uv_apicid_hibits;
1550 write_mmr_data_config(pnode, ((apicid << 32) | vector));
1554 * We will set BAU_MISC_CONTROL with a timeout period.
1555 * But the BIOS has set UVH_AGING_PRESCALE_SEL and UVH_TRANSACTION_TIMEOUT.
1556 * So the destination timeout period has to be calculated from them.
1558 static int calculate_destination_timeout(void)
1560 unsigned long mmr_image;
1561 int mult1;
1562 int mult2;
1563 int index;
1564 int base;
1565 int ret;
1566 unsigned long ts_ns;
1568 if (is_uv1_hub()) {
1569 mult1 = SOFTACK_TIMEOUT_PERIOD & BAU_MISC_CONTROL_MULT_MASK;
1570 mmr_image = uv_read_local_mmr(UVH_AGING_PRESCALE_SEL);
1571 index = (mmr_image >> BAU_URGENCY_7_SHIFT) & BAU_URGENCY_7_MASK;
1572 mmr_image = uv_read_local_mmr(UVH_TRANSACTION_TIMEOUT);
1573 mult2 = (mmr_image >> BAU_TRANS_SHIFT) & BAU_TRANS_MASK;
1574 base = timeout_base_ns[index];
1575 ts_ns = base * mult1 * mult2;
1576 ret = ts_ns / 1000;
1577 } else {
1578 /* 4 bits 0/1 for 10/80us base, 3 bits of multiplier */
1579 mmr_image = uv_read_local_mmr(UVH_LB_BAU_MISC_CONTROL);
1580 mmr_image = (mmr_image & UV_SA_MASK) >> UV_SA_SHFT;
1581 if (mmr_image & (1L << UV2_ACK_UNITS_SHFT))
1582 base = 80;
1583 else
1584 base = 10;
1585 mult1 = mmr_image & UV2_ACK_MASK;
1586 ret = mult1 * base;
1588 return ret;
1591 static void __init init_per_cpu_tunables(void)
1593 int cpu;
1594 struct bau_control *bcp;
1596 for_each_present_cpu(cpu) {
1597 bcp = &per_cpu(bau_control, cpu);
1598 bcp->baudisabled = 0;
1599 bcp->statp = &per_cpu(ptcstats, cpu);
1600 /* time interval to catch a hardware stay-busy bug */
1601 bcp->timeout_interval = usec_2_cycles(2*timeout_us);
1602 bcp->max_concurr = max_concurr;
1603 bcp->max_concurr_const = max_concurr;
1604 bcp->plugged_delay = plugged_delay;
1605 bcp->plugsb4reset = plugsb4reset;
1606 bcp->timeoutsb4reset = timeoutsb4reset;
1607 bcp->ipi_reset_limit = ipi_reset_limit;
1608 bcp->complete_threshold = complete_threshold;
1609 bcp->cong_response_us = congested_respns_us;
1610 bcp->cong_reps = congested_reps;
1611 bcp->cong_period = congested_period;
1616 * Scan all cpus to collect blade and socket summaries.
1618 static int __init get_cpu_topology(int base_pnode,
1619 struct uvhub_desc *uvhub_descs,
1620 unsigned char *uvhub_mask)
1622 int cpu;
1623 int pnode;
1624 int uvhub;
1625 int socket;
1626 struct bau_control *bcp;
1627 struct uvhub_desc *bdp;
1628 struct socket_desc *sdp;
1630 for_each_present_cpu(cpu) {
1631 bcp = &per_cpu(bau_control, cpu);
1633 memset(bcp, 0, sizeof(struct bau_control));
1635 pnode = uv_cpu_hub_info(cpu)->pnode;
1636 if ((pnode - base_pnode) >= UV_DISTRIBUTION_SIZE) {
1637 printk(KERN_EMERG
1638 "cpu %d pnode %d-%d beyond %d; BAU disabled\n",
1639 cpu, pnode, base_pnode, UV_DISTRIBUTION_SIZE);
1640 return 1;
1643 bcp->osnode = cpu_to_node(cpu);
1644 bcp->partition_base_pnode = base_pnode;
1646 uvhub = uv_cpu_hub_info(cpu)->numa_blade_id;
1647 *(uvhub_mask + (uvhub/8)) |= (1 << (uvhub%8));
1648 bdp = &uvhub_descs[uvhub];
1650 bdp->num_cpus++;
1651 bdp->uvhub = uvhub;
1652 bdp->pnode = pnode;
1654 /* kludge: 'assuming' one node per socket, and assuming that
1655 disabling a socket just leaves a gap in node numbers */
1656 socket = bcp->osnode & 1;
1657 bdp->socket_mask |= (1 << socket);
1658 sdp = &bdp->socket[socket];
1659 sdp->cpu_number[sdp->num_cpus] = cpu;
1660 sdp->num_cpus++;
1661 if (sdp->num_cpus > MAX_CPUS_PER_SOCKET) {
1662 printk(KERN_EMERG "%d cpus per socket invalid\n",
1663 sdp->num_cpus);
1664 return 1;
1667 return 0;
1671 * Each socket is to get a local array of pnodes/hubs.
1673 static void make_per_cpu_thp(struct bau_control *smaster)
1675 int cpu;
1676 size_t hpsz = sizeof(struct hub_and_pnode) * num_possible_cpus();
1678 smaster->thp = kmalloc_node(hpsz, GFP_KERNEL, smaster->osnode);
1679 memset(smaster->thp, 0, hpsz);
1680 for_each_present_cpu(cpu) {
1681 smaster->thp[cpu].pnode = uv_cpu_hub_info(cpu)->pnode;
1682 smaster->thp[cpu].uvhub = uv_cpu_hub_info(cpu)->numa_blade_id;
1687 * Initialize all the per_cpu information for the cpu's on a given socket,
1688 * given what has been gathered into the socket_desc struct.
1689 * And reports the chosen hub and socket masters back to the caller.
1691 static int scan_sock(struct socket_desc *sdp, struct uvhub_desc *bdp,
1692 struct bau_control **smasterp,
1693 struct bau_control **hmasterp)
1695 int i;
1696 int cpu;
1697 struct bau_control *bcp;
1699 for (i = 0; i < sdp->num_cpus; i++) {
1700 cpu = sdp->cpu_number[i];
1701 bcp = &per_cpu(bau_control, cpu);
1702 bcp->cpu = cpu;
1703 if (i == 0) {
1704 *smasterp = bcp;
1705 if (!(*hmasterp))
1706 *hmasterp = bcp;
1708 bcp->cpus_in_uvhub = bdp->num_cpus;
1709 bcp->cpus_in_socket = sdp->num_cpus;
1710 bcp->socket_master = *smasterp;
1711 bcp->uvhub = bdp->uvhub;
1712 bcp->uvhub_master = *hmasterp;
1713 bcp->uvhub_cpu = uv_cpu_hub_info(cpu)->blade_processor_id;
1714 if (bcp->uvhub_cpu >= MAX_CPUS_PER_UVHUB) {
1715 printk(KERN_EMERG "%d cpus per uvhub invalid\n",
1716 bcp->uvhub_cpu);
1717 return 1;
1720 return 0;
1724 * Summarize the blade and socket topology into the per_cpu structures.
1726 static int __init summarize_uvhub_sockets(int nuvhubs,
1727 struct uvhub_desc *uvhub_descs,
1728 unsigned char *uvhub_mask)
1730 int socket;
1731 int uvhub;
1732 unsigned short socket_mask;
1734 for (uvhub = 0; uvhub < nuvhubs; uvhub++) {
1735 struct uvhub_desc *bdp;
1736 struct bau_control *smaster = NULL;
1737 struct bau_control *hmaster = NULL;
1739 if (!(*(uvhub_mask + (uvhub/8)) & (1 << (uvhub%8))))
1740 continue;
1742 bdp = &uvhub_descs[uvhub];
1743 socket_mask = bdp->socket_mask;
1744 socket = 0;
1745 while (socket_mask) {
1746 struct socket_desc *sdp;
1747 if ((socket_mask & 1)) {
1748 sdp = &bdp->socket[socket];
1749 if (scan_sock(sdp, bdp, &smaster, &hmaster))
1750 return 1;
1752 socket++;
1753 socket_mask = (socket_mask >> 1);
1754 make_per_cpu_thp(smaster);
1757 return 0;
1761 * initialize the bau_control structure for each cpu
1763 static int __init init_per_cpu(int nuvhubs, int base_part_pnode)
1765 unsigned char *uvhub_mask;
1766 void *vp;
1767 struct uvhub_desc *uvhub_descs;
1769 timeout_us = calculate_destination_timeout();
1771 vp = kmalloc(nuvhubs * sizeof(struct uvhub_desc), GFP_KERNEL);
1772 uvhub_descs = (struct uvhub_desc *)vp;
1773 memset(uvhub_descs, 0, nuvhubs * sizeof(struct uvhub_desc));
1774 uvhub_mask = kzalloc((nuvhubs+7)/8, GFP_KERNEL);
1776 if (get_cpu_topology(base_part_pnode, uvhub_descs, uvhub_mask))
1777 return 1;
1779 if (summarize_uvhub_sockets(nuvhubs, uvhub_descs, uvhub_mask))
1780 return 1;
1782 kfree(uvhub_descs);
1783 kfree(uvhub_mask);
1784 init_per_cpu_tunables();
1785 return 0;
1789 * Initialization of BAU-related structures
1791 static int __init uv_bau_init(void)
1793 int uvhub;
1794 int pnode;
1795 int nuvhubs;
1796 int cur_cpu;
1797 int cpus;
1798 int vector;
1799 cpumask_var_t *mask;
1801 if (!is_uv_system())
1802 return 0;
1804 if (nobau)
1805 return 0;
1807 for_each_possible_cpu(cur_cpu) {
1808 mask = &per_cpu(uv_flush_tlb_mask, cur_cpu);
1809 zalloc_cpumask_var_node(mask, GFP_KERNEL, cpu_to_node(cur_cpu));
1812 nuvhubs = uv_num_possible_blades();
1813 spin_lock_init(&disable_lock);
1814 congested_cycles = usec_2_cycles(congested_respns_us);
1816 uv_base_pnode = 0x7fffffff;
1817 for (uvhub = 0; uvhub < nuvhubs; uvhub++) {
1818 cpus = uv_blade_nr_possible_cpus(uvhub);
1819 if (cpus && (uv_blade_to_pnode(uvhub) < uv_base_pnode))
1820 uv_base_pnode = uv_blade_to_pnode(uvhub);
1823 enable_timeouts();
1825 if (init_per_cpu(nuvhubs, uv_base_pnode)) {
1826 nobau = 1;
1827 return 0;
1830 vector = UV_BAU_MESSAGE;
1831 for_each_possible_blade(uvhub)
1832 if (uv_blade_nr_possible_cpus(uvhub))
1833 init_uvhub(uvhub, vector, uv_base_pnode);
1835 alloc_intr_gate(vector, uv_bau_message_intr1);
1837 for_each_possible_blade(uvhub) {
1838 if (uv_blade_nr_possible_cpus(uvhub)) {
1839 unsigned long val;
1840 unsigned long mmr;
1841 pnode = uv_blade_to_pnode(uvhub);
1842 /* INIT the bau */
1843 val = 1L << 63;
1844 write_gmmr_activation(pnode, val);
1845 mmr = 1; /* should be 1 to broadcast to both sockets */
1846 write_mmr_data_broadcast(pnode, mmr);
1850 return 0;
1852 core_initcall(uv_bau_init);
1853 fs_initcall(uv_ptc_init);
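
/* uv_bau_init() runs at core_initcall time, before uv_ptc_init() at fs_initcall time */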