/*
 * Prints processor specific information reported by PAL.
 * This code is based on the specification of PAL as of the
 * Intel IA-64 Architecture Software Developer's Manual v1.0.
 *
 * Copyright (C) 2000-2001, 2003 Hewlett-Packard Co
 *	Stephane Eranian <eranian@hpl.hp.com>
 * Copyright (C) 2004 Intel Corporation
 *	Ashok Raj <ashok.raj@intel.com>
 *
 * 05/26/2000	S.Eranian	initial release
 * 08/21/2000	S.Eranian	updated to July 2000 PAL specs
 * 02/05/2001	S.Eranian	fixed module support
 * 10/23/2001	S.Eranian	updated pal_perf_mon_info bug fixes
 * 03/24/2004	Ashok Raj	updated to work with CPU Hotplug
 * 10/26/2006	Russ Anderson	updated processor features to rev 2.2 spec
 */
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/module.h>
#include <linux/efi.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>

#include <asm/pal.h>
#include <asm/sal.h>
#include <asm/processor.h>
#include <linux/smp.h>
MODULE_AUTHOR("Stephane Eranian <eranian@hpl.hp.com>");
MODULE_DESCRIPTION("/proc interface to IA-64 PAL");
MODULE_LICENSE("GPL");

#define PALINFO_VERSION		"0.5"
typedef int (*palinfo_func_t)(struct seq_file *);

typedef struct {
        const char              *name;       /* name of the proc entry */
        palinfo_func_t          proc_read;   /* function to call for reading */
        struct proc_dir_entry   *entry;      /* registered entry (removal) */
} palinfo_entry_t;
/*
 * A bunch of string arrays used for pretty-printing.
 */
static const char *cache_types[] = {
        "",                     /* not used */
        "Instruction",
        "Data",
        "Data/Instruction"      /* unified */
};
static const char *cache_mattrib[] = {
        "WriteThrough",
        "WriteBack",
        "",                     /* reserved */
        ""                      /* reserved */
};
static const char *cache_st_hints[] = {
        "Temporal, level 1",
        "Reserved",
        "Reserved",
        "Non-temporal, all levels",
        "Reserved",
        "Reserved",
        "Reserved",
        "Reserved"
};
static const char *cache_ld_hints[] = {
        "Temporal, level 1",
        "Non-temporal, level 1",
        "Reserved",
        "Non-temporal, all levels",
        "Reserved",
        "Reserved",
        "Reserved",
        "Reserved"
};
static const char *rse_hints[] = {
        "enforced lazy",
        "eager stores",
        "eager loads",
        "eager loads and stores"
};

#define RSE_HINTS_COUNT ARRAY_SIZE(rse_hints)
static const char *mem_attrib[] = {
        "WB",           /* 000 */
        "SW",           /* 001 */
        "010",          /* 010 */
        "011",          /* 011 */
        "UC",           /* 100 */
        "UCE",          /* 101 */
        "WC",           /* 110 */
        "NaTPage"       /* 111 */
};
/*
 * Take a 64-bit vector and produce a string such that, for every set
 * bit n, 2^n is printed in clear text, scaled to the appropriate
 * unit (K, M, G, T).
 *
 * Input:
 *	- the seq_file to print into
 *	- the 64-bit vector
 */
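/*
 * Example (illustrative): a vector with bits 12 and 16 set is printed
 * as "4K 64K ".
 */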
static void bitvector_process(struct seq_file *m, u64 vector)
{
        int i, j;
        static const char *units[] = { "", "K", "M", "G", "T" };

        for (i = 0, j = 0; i < 64; i++, j = i/10) {
                if (vector & 0x1)
                        seq_printf(m, "%d%s ", 1 << (i-j*10), units[j]);
                vector >>= 1;
        }
}
/*
 * Take a 64-bit vector and produce a string such that, if bit n is
 * set, register n is reported as present. Consecutive registers are
 * coalesced into ranges.
 *
 * Input:
 *	- the seq_file to print into
 *	- a pointer to the register bit vector(s)
 *	- the number of registers covered by the vector
 */
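/*
 * Example (illustrative): if registers 0-3 and 16 are present, the
 * output is "0-3 16 ".
 */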
static void bitregister_process(struct seq_file *m, u64 *reg_info, int max)
{
        int i, begin, skip = 0;
        u64 value = reg_info[0];

        value >>= i = begin = ffs(value) - 1;

        for (; i < max; i++) {

                if (i != 0 && (i%64) == 0) value = *++reg_info;

                if ((value & 0x1) == 0 && skip == 0) {
                        if (begin <= i - 2)
                                seq_printf(m, "%d-%d ", begin, i-1);
                        else
                                seq_printf(m, "%d ", i-1);
                        skip  = 1;
                        begin = -1;
                } else if ((value & 0x1) && skip == 1) {
                        skip  = 0;
                        begin = i;
                }
                value >>= 1;
        }
        if (begin > -1)
                seq_printf(m, "%d-127", begin);
}
static int power_info(struct seq_file *m)
{
        s64 status;
        u64 halt_info_buffer[8];
        pal_power_mgmt_info_u_t *halt_info = (pal_power_mgmt_info_u_t *)halt_info_buffer;
        int i;

        status = ia64_pal_halt_info(halt_info);
        if (status != 0) return 0;

        for (i = 0; i < 8; i++) {
                if (halt_info[i].pal_power_mgmt_info_s.im == 1) {
                        seq_printf(m,
                                "Power level %d:\n"
                                "\tentry_latency : %d cycles\n"
                                "\texit_latency : %d cycles\n"
                                "\tpower consumption : %d mW\n"
                                "\tCache+TLB coherency : %s\n", i,
                                halt_info[i].pal_power_mgmt_info_s.entry_latency,
                                halt_info[i].pal_power_mgmt_info_s.exit_latency,
                                halt_info[i].pal_power_mgmt_info_s.power_consumption,
                                halt_info[i].pal_power_mgmt_info_s.co ? "Yes" : "No");
                } else {
                        seq_printf(m, "Power level %d: not implemented\n", i);
                }
        }
        return 0;
}
static int cache_info(struct seq_file *m)
{
        unsigned long i, levels, unique_caches;
        pal_cache_config_info_t cci;
        int j, k;
        long status;

        if ((status = ia64_pal_cache_summary(&levels, &unique_caches)) != 0) {
                printk(KERN_ERR "ia64_pal_cache_summary=%ld\n", status);
                return 0;
        }

        seq_printf(m, "Cache levels : %ld\nUnique caches : %ld\n\n",
                levels, unique_caches);

        for (i = 0; i < levels; i++) {
                for (j = 2; j > 0; j--) {
                        /* even without unification some level may not be present */
                        if ((status = ia64_pal_cache_config_info(i, j, &cci)) != 0)
                                continue;

                        seq_printf(m,
                                "%s Cache level %lu:\n"
                                "\tSize : %u bytes\n"
                                "\tAttributes : ",
                                cache_types[j+cci.pcci_unified], i+1,
                                cci.pcci_cache_size);

                        if (cci.pcci_unified)
                                seq_puts(m, "Unified ");

                        seq_printf(m, "%s\n", cache_mattrib[cci.pcci_cache_attr]);

                        seq_printf(m,
                                "\tAssociativity : %d\n"
                                "\tLine size : %d bytes\n"
                                "\tStride : %d bytes\n",
                                cci.pcci_assoc,
                                1<<cci.pcci_line_size,
                                1<<cci.pcci_stride);

                        if (j == 1)
                                seq_puts(m, "\tStore latency : N/A\n");
                        else
                                seq_printf(m, "\tStore latency : %d cycle(s)\n",
                                        cci.pcci_st_latency);

                        seq_printf(m,
                                "\tLoad latency : %d cycle(s)\n"
                                "\tStore hints : ", cci.pcci_ld_latency);

                        for (k = 0; k < 8; k++) {
                                if (cci.pcci_st_hints & 0x1)
                                        seq_printf(m, "[%s]", cache_st_hints[k]);
                                cci.pcci_st_hints >>= 1;
                        }
                        seq_puts(m, "\n\tLoad hints : ");

                        for (k = 0; k < 8; k++) {
                                if (cci.pcci_ld_hints & 0x1)
                                        seq_printf(m, "[%s]", cache_ld_hints[k]);
                                cci.pcci_ld_hints >>= 1;
                        }
                        seq_printf(m,
                                "\n\tAlias boundary : %d byte(s)\n"
                                "\tTag LSB : %d\n"
                                "\tTag MSB : %d\n",
                                1<<cci.pcci_alias_boundary, cci.pcci_tag_lsb,
                                cci.pcci_tag_msb);

                        /* when unified, data (j=2) is enough */
                        if (cci.pcci_unified)
                                break;
                }
        }
        return 0;
}
static int vm_info(struct seq_file *m)
{
        u64 tr_pages = 0, vw_pages = 0, tc_pages;
        u64 attrib;
        pal_vm_info_1_u_t vm_info_1;
        pal_vm_info_2_u_t vm_info_2;
        pal_tc_info_u_t tc_info;
        ia64_ptce_info_t ptce;
        const char *sep;
        int i, j;
        long status;

        if ((status = ia64_pal_vm_summary(&vm_info_1, &vm_info_2)) != 0) {
                printk(KERN_ERR "ia64_pal_vm_summary=%ld\n", status);
        } else {

                seq_printf(m,
                        "Physical Address Space : %d bits\n"
                        "Virtual Address Space : %d bits\n"
                        "Protection Key Registers(PKR) : %d\n"
                        "Implemented bits in PKR.key : %d\n"
                        "Hash Tag ID : 0x%x\n"
                        "Size of RR.rid : %d\n"
                        "Max Purges : ",
                        vm_info_1.pal_vm_info_1_s.phys_add_size,
                        vm_info_2.pal_vm_info_2_s.impl_va_msb+1,
                        vm_info_1.pal_vm_info_1_s.max_pkr+1,
                        vm_info_1.pal_vm_info_1_s.key_size,
                        vm_info_1.pal_vm_info_1_s.hash_tag_id,
                        vm_info_2.pal_vm_info_2_s.rid_size);
                if (vm_info_2.pal_vm_info_2_s.max_purges == PAL_MAX_PURGES)
                        seq_puts(m, "unlimited\n");
                else
                        seq_printf(m, "%d\n",
                                vm_info_2.pal_vm_info_2_s.max_purges ?
                                vm_info_2.pal_vm_info_2_s.max_purges : 1);
        }

        if (ia64_pal_mem_attrib(&attrib) == 0) {
                seq_puts(m, "Supported memory attributes : ");
                sep = "";
                for (i = 0; i < 8; i++) {
                        if (attrib & (1 << i)) {
                                seq_printf(m, "%s%s", sep, mem_attrib[i]);
                                sep = ", ";
                        }
                }
                seq_putc(m, '\n');
        }

        if ((status = ia64_pal_vm_page_size(&tr_pages, &vw_pages)) != 0) {
                printk(KERN_ERR "ia64_pal_vm_page_size=%ld\n", status);
        } else {

                seq_printf(m,
                        "\nTLB walker : %simplemented\n"
                        "Number of DTR : %d\n"
                        "Number of ITR : %d\n"
                        "TLB insertable page sizes : ",
                        vm_info_1.pal_vm_info_1_s.vw ? "" : "not ",
                        vm_info_1.pal_vm_info_1_s.max_dtr_entry+1,
                        vm_info_1.pal_vm_info_1_s.max_itr_entry+1);

                bitvector_process(m, tr_pages);

                seq_puts(m, "\nTLB purgeable page sizes : ");

                bitvector_process(m, vw_pages);
        }

        if ((status = ia64_get_ptce(&ptce)) != 0) {
                printk(KERN_ERR "ia64_get_ptce=%ld\n", status);
        } else {
                seq_printf(m,
                        "\nPurge base address : 0x%016lx\n"
                        "Purge outer loop count : %d\n"
                        "Purge inner loop count : %d\n"
                        "Purge outer loop stride : %d\n"
                        "Purge inner loop stride : %d\n",
                        ptce.base, ptce.count[0], ptce.count[1],
                        ptce.stride[0], ptce.stride[1]);

                seq_printf(m,
                        "TC Levels : %d\n"
                        "Unique TC(s) : %d\n",
                        vm_info_1.pal_vm_info_1_s.num_tc_levels,
                        vm_info_1.pal_vm_info_1_s.max_unique_tcs);

                for (i = 0; i < vm_info_1.pal_vm_info_1_s.num_tc_levels; i++) {
                        for (j = 2; j > 0; j--) {
                                tc_pages = 0; /* just in case */

                                /* even without unification, some levels may not be present */
                                if ((status = ia64_pal_vm_info(i, j, &tc_info, &tc_pages)) != 0)
                                        continue;

                                seq_printf(m,
                                        "\n%s Translation Cache Level %d:\n"
                                        "\tAssociativity : %d\n"
                                        "\tNumber of entries : %d\n"
                                        "\tFlags : ",
                                        cache_types[j+tc_info.tc_unified], i+1,
                                        tc_info.tc_associativity,
                                        tc_info.tc_num_entries);

                                if (tc_info.tc_pf)
                                        seq_puts(m, "PreferredPageSizeOptimized ");
                                if (tc_info.tc_unified)
                                        seq_puts(m, "Unified ");
                                if (tc_info.tc_reduce_tr)
                                        seq_puts(m, "TCReduction");

                                seq_puts(m, "\n\tSupported page sizes: ");

                                bitvector_process(m, tc_pages);

                                /* when unified, data (j=2) is enough */
                                if (tc_info.tc_unified)
                                        break;
                        }
                }
        }
        return 0;
}
static int register_info(struct seq_file *m)
{
        u64 reg_info[2];
        u64 info;
        unsigned long phys_stacked;
        pal_hints_u_t hints;
        unsigned long iregs, dregs;
        static const char * const info_type[] = {
                "Implemented AR(s)",
                "AR(s) with read side-effects",
                "Implemented CR(s)",
                "CR(s) with read side-effects",
        };

        for (info = 0; info < 4; info++) {
                if (ia64_pal_register_info(info, &reg_info[0], &reg_info[1]) != 0)
                        return 0;
                seq_printf(m, "%-32s : ", info_type[info]);
                bitregister_process(m, reg_info, 128);
                seq_putc(m, '\n');
        }

        if (ia64_pal_rse_info(&phys_stacked, &hints) == 0)
                seq_printf(m,
                        "RSE stacked physical registers : %ld\n"
                        "RSE load/store hints : %ld (%s)\n",
                        phys_stacked, hints.ph_data,
                        hints.ph_data < RSE_HINTS_COUNT ? rse_hints[hints.ph_data] : "(??)");

        if (ia64_pal_debug_info(&iregs, &dregs))
                return 0;

        seq_printf(m,
                "Instruction debug register pairs : %ld\n"
                "Data debug register pairs : %ld\n", iregs, dregs);

        return 0;
}
static const char *const proc_features_0[] = {	/* Feature set 0 */
        NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
        NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
        NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
        NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
        "Unimplemented instruction address fault",
        "INIT, PMI, and LINT pins",
        "Simple unimplemented instr addresses",
        "Variable P-state performance",
        "Virtual machine features implemented",
        "XIP,XPSR,XFS implemented",
        "XR1-XR3 implemented",
        "Disable dynamic predicate prediction",
        "Disable processor physical number",
        "Disable dynamic data cache prefetch",
        "Disable dynamic inst cache prefetch",
        "Disable dynamic branch prediction",
        NULL, NULL, NULL, NULL,
        "Enable MCA on Data Poisoning",
        "Enable vmsw instruction",
        "Enable extern environmental notification",
        "Disable BINIT on processor time-out",
        "Disable dynamic power management (DPM)",
        "Enable CMCI promotion",
        "Enable MCA to BINIT promotion",
        "Enable MCA promotion",
        "Enable BERR promotion"
};
static const char *const proc_features_16[] = {	/* Feature set 16 */
        "Enable MCA on half-way timer",
        "Enable Fast Deferral",
        "Disable MCA on memory aliasing",
        NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
        "DP system processor",
        NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
        NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
        NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
        NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
        NULL, NULL, NULL, NULL, NULL
};
static const char *const *const proc_features[] = {
        proc_features_0,
        NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
        NULL, NULL, NULL, NULL,
        proc_features_16,
        NULL, NULL, NULL, NULL,
};
static void feature_set_info(struct seq_file *m, u64 avail, u64 status, u64 control,
                             unsigned long set)
{
        const char *const *vf, *const *v;
        int i;

        vf = v = proc_features[set];
        for (i = 0; i < 64; i++, avail >>= 1, status >>= 1, control >>= 1) {

                if (!(control))         /* No remaining bits set */
                        break;
                if (!(avail & 0x1))     /* Print only bits that are available */
                        continue;
                if (vf)
                        v = vf + i;
                if (v && *v) {
                        seq_printf(m, "%-40s : %s %s\n", *v,
                                avail & 0x1 ? (status & 0x1 ?
                                        "On " : "Off") : "",
                                avail & 0x1 ? (control & 0x1 ?
                                        "Ctrl" : "NoCtrl") : "");
                } else {
                        seq_printf(m, "Feature set %2ld bit %2d\t\t\t"
                                " : %s %s\n",
                                set, i,
                                avail & 0x1 ? (status & 0x1 ?
                                        "On " : "Off") : "",
                                avail & 0x1 ? (control & 0x1 ?
                                        "Ctrl" : "NoCtrl") : "");
                }
        }
}
static int processor_info(struct seq_file *m)
{
        u64 avail = 1, status = 1, control = 1, feature_set = 0;
        s64 ret;

        ret = ia64_pal_proc_get_features(&avail, &status, &control,
                                         feature_set);
        if (ret < 0)
                return 0;

        feature_set_info(m, avail, status, control, feature_set);

        return 0;
}
static const char *const bus_features[] = {
        NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
        NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
        NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
        "Request Bus Parking",
        "Enable Half Transfer",
        NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
        NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
        NULL, NULL, NULL, NULL,
        "Enable Cache Line Repl. Shared",
        "Enable Cache Line Repl. Exclusive",
        "Disable Transaction Queuing",
        "Disable Response Error Checking",
        "Disable Bus Error Checking",
        "Disable Bus Requester Internal Error Signalling",
        "Disable Bus Requester Error Signalling",
        "Disable Bus Initialization Event Checking",
        "Disable Bus Initialization Event Signalling",
        "Disable Bus Address Error Checking",
        "Disable Bus Address Error Signalling",
        "Disable Bus Data Error Checking"
};
static int bus_info(struct seq_file *m)
{
        const char *const *v = bus_features;
        pal_bus_features_u_t av, st, ct;
        u64 avail, status, control;
        int i;
        s64 ret;

        if ((ret = ia64_pal_bus_get_features(&av, &st, &ct)) != 0)
                return 0;

        avail   = av.pal_bus_features_val;
        status  = st.pal_bus_features_val;
        control = ct.pal_bus_features_val;

        for (i = 0; i < 64; i++, v++, avail >>= 1, status >>= 1, control >>= 1) {
                if (!*v)
                        continue;
                seq_printf(m, "%-48s : %s%s %s\n", *v,
                        avail & 0x1 ? "" : "NotImpl",
                        avail & 0x1 ? (status & 0x1 ? "On" : "Off") : "",
                        avail & 0x1 ? (control & 0x1 ? "Ctrl" : "NoCtrl") : "");
        }
        return 0;
}
static int version_info(struct seq_file *m)
{
        pal_version_u_t min_ver, cur_ver;

        if (ia64_pal_version(&min_ver, &cur_ver) != 0)
                return 0;

        seq_printf(m,
                "PAL_vendor : 0x%02x (min=0x%02x)\n"
                "PAL_A : %02x.%02x (min=%02x.%02x)\n"
                "PAL_B : %02x.%02x (min=%02x.%02x)\n",
                cur_ver.pal_version_s.pv_pal_vendor,
                min_ver.pal_version_s.pv_pal_vendor,
                cur_ver.pal_version_s.pv_pal_a_model,
                cur_ver.pal_version_s.pv_pal_a_rev,
                min_ver.pal_version_s.pv_pal_a_model,
                min_ver.pal_version_s.pv_pal_a_rev,
                cur_ver.pal_version_s.pv_pal_b_model,
                cur_ver.pal_version_s.pv_pal_b_rev,
                min_ver.pal_version_s.pv_pal_b_model,
                min_ver.pal_version_s.pv_pal_b_rev);
        return 0;
}
static int perfmon_info(struct seq_file *m)
{
        u64 pm_buffer[16];
        pal_perf_mon_info_u_t pm_info;

        if (ia64_pal_perf_mon_info(pm_buffer, &pm_info) != 0)
                return 0;

        seq_printf(m,
                "PMC/PMD pairs : %d\n"
                "Counter width : %d bits\n"
                "Cycle event number : %d\n"
                "Retired event number : %d\n"
                "Implemented PMC : ",
                pm_info.pal_perf_mon_info_s.generic,
                pm_info.pal_perf_mon_info_s.width,
                pm_info.pal_perf_mon_info_s.cycles,
                pm_info.pal_perf_mon_info_s.retired);

        bitregister_process(m, pm_buffer, 256);
        seq_puts(m, "\nImplemented PMD : ");
        bitregister_process(m, pm_buffer+4, 256);
        seq_puts(m, "\nCycles count capable : ");
        bitregister_process(m, pm_buffer+8, 256);
        seq_puts(m, "\nRetired bundles count capable : ");

#ifdef CONFIG_ITANIUM
        /*
         * PAL_PERF_MON_INFO reports that only PMC4 can be used to count
         * CPU_CYCLES, which is wrong; both PMC4 and PMD5 support it.
         */
        if (pm_buffer[12] == 0x10)
                pm_buffer[12] = 0x30;
#endif

        bitregister_process(m, pm_buffer+12, 256);
        seq_putc(m, '\n');
        return 0;
}
static int frequency_info(struct seq_file *m)
{
        struct pal_freq_ratio proc, itc, bus;
        unsigned long base;

        if (ia64_pal_freq_base(&base) == -1)
                seq_puts(m, "Output clock : not implemented\n");
        else
                seq_printf(m, "Output clock : %ld ticks/s\n", base);

        if (ia64_pal_freq_ratios(&proc, &bus, &itc) != 0) return 0;

        seq_printf(m,
                "Processor/Clock ratio : %d/%d\n"
                "Bus/Clock ratio : %d/%d\n"
                "ITC/Clock ratio : %d/%d\n",
                proc.num, proc.den, bus.num, bus.den, itc.num, itc.den);
        return 0;
}
static int tr_info(struct seq_file *m)
{
        long status;
        pal_tr_valid_u_t tr_valid;
        u64 tr_buffer[4];
        pal_vm_info_1_u_t vm_info_1;
        pal_vm_info_2_u_t vm_info_2;
        unsigned long i, j;
        unsigned long max[3], pgm;
        struct ifa_reg {
                unsigned long valid:1;
                unsigned long ig:11;
                unsigned long vpn:52;
        } *ifa_reg;

        struct itir_reg {
                unsigned long rv1:2;
                unsigned long ps:6;
                unsigned long key:24;
                unsigned long rv2:32;
        } *itir_reg;

        struct gr_reg {
                unsigned long p:1;
                unsigned long rv1:1;
                unsigned long ma:3;
                unsigned long a:1;
                unsigned long d:1;
                unsigned long pl:2;
                unsigned long ar:3;
                unsigned long ppn:38;
                unsigned long rv2:2;
                unsigned long ed:1;
                unsigned long ig:11;
        } *gr_reg;

        struct rid_reg {
                unsigned long ig1:1;
                unsigned long rv1:1;
                unsigned long ig2:6;
                unsigned long rid:24;
                unsigned long rv2:32;
        } *rid_reg;

        if ((status = ia64_pal_vm_summary(&vm_info_1, &vm_info_2)) != 0) {
                printk(KERN_ERR "ia64_pal_vm_summary=%ld\n", status);
                return 0;
        }
        max[0] = vm_info_1.pal_vm_info_1_s.max_itr_entry+1;
        max[1] = vm_info_1.pal_vm_info_1_s.max_dtr_entry+1;

        for (i = 0; i < 2; i++) {
                for (j = 0; j < max[i]; j++) {

                        status = ia64_pal_tr_read(j, i, tr_buffer, &tr_valid);
                        if (status != 0) {
                                printk(KERN_ERR "palinfo: pal call failed on tr[%lu:%lu]=%ld\n",
                                        i, j, status);
                                continue;
                        }

                        ifa_reg = (struct ifa_reg *)&tr_buffer[2];

                        if (ifa_reg->valid == 0)
                                continue;

                        gr_reg   = (struct gr_reg *)tr_buffer;
                        itir_reg = (struct itir_reg *)&tr_buffer[1];
                        rid_reg  = (struct rid_reg *)&tr_buffer[3];

                        pgm = -1 << (itir_reg->ps - 12);
                        seq_printf(m,
                                "%cTR%lu: av=%d pv=%d dv=%d mv=%d\n"
                                "\tppn : 0x%lx\n"
                                "\tvpn : 0x%lx\n"
                                "\tps : ",
                                "ID"[i], j,
                                tr_valid.pal_tr_valid_s.access_rights_valid,
                                tr_valid.pal_tr_valid_s.priv_level_valid,
                                tr_valid.pal_tr_valid_s.dirty_bit_valid,
                                tr_valid.pal_tr_valid_s.mem_attr_valid,
                                (gr_reg->ppn & pgm) << 12, (ifa_reg->vpn & pgm) << 12);

                        bitvector_process(m, 1 << itir_reg->ps);

                        seq_printf(m,
                                "\n\tpl : %d\n"
                                "\tar : %d\n"
                                "\trid : %x\n"
                                "\tp : %d\n"
                                "\tma : %d\n"
                                "\td : %d\n",
                                gr_reg->pl, gr_reg->ar, rid_reg->rid, gr_reg->p, gr_reg->ma,
                                gr_reg->d);
                }
        }
        return 0;
}
/*
 * List {name,function} pairs for every entry in /proc/pal/cpu*
 */
static const palinfo_entry_t palinfo_entries[] = {
        { "version_info",       version_info, },
        { "vm_info",            vm_info, },
        { "cache_info",         cache_info, },
        { "power_info",         power_info, },
        { "register_info",      register_info, },
        { "processor_info",     processor_info, },
        { "perfmon_info",       perfmon_info, },
        { "frequency_info",     frequency_info, },
        { "bus_info",           bus_info },
        { "tr_info",            tr_info, }
};

#define NR_PALINFO_ENTRIES	(int) ARRAY_SIZE(palinfo_entries)
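/*
 * For each online CPU these entries show up as, e.g. (illustrative paths),
 * /proc/pal/cpu0/version_info, /proc/pal/cpu0/cache_info, ...; see
 * create_palinfo_proc_entries() below.
 */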
static struct proc_dir_entry *palinfo_dir;
/*
 * This data structure is used to pass which CPU and function are being
 * requested. It must fit in a 64-bit quantity to be passed to the proc
 * callback routine.
 *
 * In SMP mode, when we get a request for another CPU, we must call that
 * other CPU using an IPI and wait for the result before returning.
 */
typedef union {
        u64 value;
        struct {
                unsigned        req_cpu: 32;    /* for which CPU this info is */
                unsigned        func_id: 32;    /* which function is requested */
        } pal_func_cpu;
} pal_func_cpu_u_t;

#define req_cpu	pal_func_cpu.req_cpu
#define func_id	pal_func_cpu.func_id
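/*
 * Example (illustrative): the /proc entry for palinfo_entries[2] on CPU 3
 * is created with f.req_cpu = 3 and f.func_id = 2; the packed f.value is
 * stored by proc_create_data() and decoded again in proc_palinfo_show().
 */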
#ifdef CONFIG_SMP

/*
 * Used to hold information about the final function to call.
 */
typedef struct {
        palinfo_func_t  func;   /* pointer to function to call */
        struct seq_file *m;     /* buffer to store results */
        int             ret;    /* return value from call */
} palinfo_smp_data_t;
/*
 * This function does the actual final call and is called from the SMP
 * code, i.e., this is the palinfo callback routine.
 */
static void
palinfo_smp_call(void *info)
{
        palinfo_smp_data_t *data = (palinfo_smp_data_t *)info;

        data->ret = (*data->func)(data->m);
}
/*
 * Function called to trigger the IPI when we need to access a remote CPU.
 *
 * Return:
 *	0 on error or nothing to output,
 *	otherwise the return value of the remote palinfo function.
 */
static
int palinfo_handle_smp(struct seq_file *m, pal_func_cpu_u_t *f)
{
        palinfo_smp_data_t ptr;
        int ret;

        ptr.func = palinfo_entries[f->func_id].proc_read;
        ptr.m    = m;
        ptr.ret  = 0; /* just in case */

        /* will send IPI to other CPU and wait for completion of remote call */
        if ((ret = smp_call_function_single(f->req_cpu, palinfo_smp_call, &ptr, 1))) {
                printk(KERN_ERR "palinfo: remote CPU call from %d to %d on function %d: "
                        "error %d\n", smp_processor_id(), f->req_cpu, f->func_id, ret);
                return 0;
        }
        return ptr.ret;
}
#else /* ! CONFIG_SMP */

static
int palinfo_handle_smp(struct seq_file *m, pal_func_cpu_u_t *f)
{
        printk(KERN_ERR "palinfo: should not be called with non SMP kernel\n");
        return 0;
}
#endif /* CONFIG_SMP */
/*
 * Entry point routine: all calls go through this function.
 */
static int proc_palinfo_show(struct seq_file *m, void *v)
{
        pal_func_cpu_u_t *f = (pal_func_cpu_u_t *)&m->private;

        /*
         * In SMP mode, we may need to call another CPU to get correct
         * information. PAL, by definition, is processor specific.
         */
        if (f->req_cpu == get_cpu())
                (*palinfo_entries[f->func_id].proc_read)(m);
        else
                palinfo_handle_smp(m, f);

        put_cpu();
        return 0;
}
static int proc_palinfo_open(struct inode *inode, struct file *file)
{
        return single_open(file, proc_palinfo_show, PDE_DATA(inode));
}

static const struct file_operations proc_palinfo_fops = {
        .open           = proc_palinfo_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};
static void
create_palinfo_proc_entries(unsigned int cpu)
{
        pal_func_cpu_u_t f;
        struct proc_dir_entry *cpu_dir;
        int j;
        char cpustr[3+4+1];     /* cpu numbers are up to 4095 on itanic */

        sprintf(cpustr, "cpu%d", cpu);

        cpu_dir = proc_mkdir(cpustr, palinfo_dir);
        if (!cpu_dir)
                return;

        f.req_cpu = cpu;

        for (j = 0; j < NR_PALINFO_ENTRIES; j++) {
                f.func_id = j;
                proc_create_data(palinfo_entries[j].name, 0, cpu_dir,
                                 &proc_palinfo_fops, (void *)f.value);
        }
}
static void
remove_palinfo_proc_entries(unsigned int hcpu)
{
        char cpustr[3+4+1];     /* cpu numbers are up to 4095 on itanic */

        sprintf(cpustr, "cpu%d", hcpu);
        remove_proc_subtree(cpustr, palinfo_dir);
}
static int palinfo_cpu_callback(struct notifier_block *nfb,
                                unsigned long action, void *hcpu)
{
        unsigned int hotcpu = (unsigned long)hcpu;

        switch (action) {
        case CPU_ONLINE:
        case CPU_ONLINE_FROZEN:
                create_palinfo_proc_entries(hotcpu);
                break;
        case CPU_DEAD:
        case CPU_DEAD_FROZEN:
                remove_palinfo_proc_entries(hotcpu);
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block __refdata palinfo_cpu_notifier =
{
        .notifier_call = palinfo_cpu_callback,
};
static int __init palinfo_init(void)
{
        int i = 0;

        printk(KERN_INFO "PAL Information Facility v%s\n", PALINFO_VERSION);
        palinfo_dir = proc_mkdir("pal", NULL);
        if (!palinfo_dir)
                return -ENOMEM;

        cpu_notifier_register_begin();

        /* Create palinfo dirs in /proc for all online cpus */
        for_each_online_cpu(i) {
                create_palinfo_proc_entries(i);
        }

        /* Register for future delivery via notify registration */
        __register_hotcpu_notifier(&palinfo_cpu_notifier);

        cpu_notifier_register_done();

        return 0;
}
static void __exit palinfo_exit(void)
{
        unregister_hotcpu_notifier(&palinfo_cpu_notifier);
        remove_proc_subtree("pal", NULL);
}

module_init(palinfo_init);
module_exit(palinfo_exit);