/*
 * Linux on zSeries Channel Measurement Facility support
 *
 * Copyright IBM Corp. 2000, 2006
 *
 * Authors: Arnd Bergmann <arndb@de.ibm.com>
 *          Cornelia Huck <cornelia.huck@de.ibm.com>
 *
 * original idea from Natarajan Krishnaswami <nkrishna@us.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#define KMSG_COMPONENT "cio"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/bootmem.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/slab.h>
#include <linux/timex.h>	/* get_tod_clock() */

#include <asm/ccwdev.h>
#include <asm/div64.h>
/*
 * parameter to enable cmf during boot, possible uses are:
 *  "s390cmf" -- enable cmf and allocate 2 MB of ram so measuring can be
 *               used on any subchannel
 *  "s390cmf=<num>" -- enable cmf and allocate enough memory to measure
 *                     <num> subchannels, where <num> is an integer
 *                     between 1 and 65535, default is 1024
 */
#define ARGSTRING "s390cmf"
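/*
 * Illustrative only (the early-parameter handler itself is not part of this
 * excerpt): booting with either of the forms documented above, e.g.
 *
 *	s390cmf
 *	s390cmf=4096
 *
 * enables measurement for the default 1024 subchannels or, in the second
 * (example) case, for up to 4096 subchannels.
 */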
/* indices for READCMB */
enum cmb_index {
	/* basic and extended format: */
	cmb_ssch_rsch_count,
	cmb_sample_count,
	cmb_device_connect_time,
	cmb_function_pending_time,
	cmb_device_disconnect_time,
	cmb_control_unit_queuing_time,
	cmb_device_active_only_time,
	/* extended format only: */
	cmb_device_busy_time,
	cmb_initial_command_response_time,
};
/**
 * enum cmb_format - types of supported measurement block formats
 *
 * @CMF_BASIC:      traditional channel measurement blocks supported
 *                  by all machines that we run on
 * @CMF_EXTENDED:   improved format that was introduced with the z990
 *                  machine
 * @CMF_AUTODETECT: default: use extended format when running on a machine
 *                  supporting extended format, otherwise fall back to
 *                  basic format
 */
enum cmb_format {
	CMF_BASIC,
	CMF_EXTENDED,
	CMF_AUTODETECT = -1,
};

/**
 * format - actual format for all measurement blocks
 *
 * The format module parameter can be set to a value of 0 (zero)
 * or 1, indicating basic or extended format as described for
 * enum cmb_format.
 */
static int format = CMF_AUTODETECT;
module_param(format, bint, 0444);
/**
 * struct cmb_operations - functions to use depending on cmb_format
 *
 * Most of these functions operate on a struct ccw_device. There is only
 * one instance of struct cmb_operations because the format of the measurement
 * data is guaranteed to be the same for every ccw_device.
 *
 * @alloc:	allocate memory for a channel measurement block,
 *		either with the help of a special pool or with kmalloc
 * @free:	free memory allocated with @alloc
 * @set:	enable or disable measurement
 * @read:	read a measurement entry at an index
 * @readall:	read a measurement block in a common format
 * @reset:	clear the data in the associated measurement block and
 *		reset its time stamp
 * @align:	align an allocated block so that the hardware can use it
 */
struct cmb_operations {
	int  (*alloc)  (struct ccw_device *);
	void (*free)   (struct ccw_device *);
	int  (*set)    (struct ccw_device *, u32);
	u64  (*read)   (struct ccw_device *, int);
	int  (*readall)(struct ccw_device *, struct cmbdata *);
	void (*reset)  (struct ccw_device *);
	void *(*align) (void *);
/* private: */
	struct attribute_group *attr_group;
};
static struct cmb_operations *cmbops;

struct cmb_data {
	void *hw_block;		/* Pointer to block updated by hardware */
	void *last_block;	/* Last changed block copied from hardware block */
	int size;		/* Size of hw_block and last_block */
	unsigned long long last_update;  /* when last_block was updated */
};
/*
 * Our user interface is designed in terms of nanoseconds,
 * while the hardware measures total times in its own
 * unit.
 */
static inline u64 time_to_nsec(u32 value)
{
	return ((u64)value) * 128000ull;
}
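/*
 * Worked example for the conversion above (illustration only): the hardware
 * accumulates times in units of 128 microseconds, so a raw counter value of
 * 10 corresponds to
 *
 *	10 * 128 usec = 1280 usec = 1280000 nsec = 10 * 128000ull
 *
 * which is exactly what time_to_nsec() returns.
 */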
/*
 * Users are usually interested in average times,
 * not accumulated time.
 * This also helps us with atomicity problems
 * when reading single values.
 */
static inline u64 time_to_avg_nsec(u32 value, u32 count)
{
	u64 ret;

	/* no samples yet, avoid division by 0 */
	if (count == 0)
		return 0;

	/* value comes in units of 128 µsec */
	ret = time_to_nsec(value);
	do_div(ret, count);

	return ret;
}
/*
 * Activate or deactivate the channel monitor. When area is NULL,
 * the monitor is deactivated. The channel monitor needs to
 * be active in order to measure subchannels, which also need
 * to be enabled.
 */
static inline void cmf_activate(void *area, unsigned int onoff)
{
	register void * __gpr2 asm("2");
	register long __gpr1 asm("1");

	/* activate global measurement if area != NULL */
	__gpr2 = area;
	__gpr1 = onoff ? 2 : 0;
	/* activate channel measurement */
	asm("schm" : : "d" (__gpr2), "d" (__gpr1) );
}
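/*
 * Usage sketch (illustration only; it mirrors what the allocation paths
 * further down in this file do): the monitor is switched on when the first
 * channel is added to cmb_area.list and switched off when the last one is
 * removed, e.g.
 *
 *	spin_lock(&cmb_area.lock);
 *	if (list_empty(&cmb_area.list))
 *		cmf_activate(NULL, 1);	// first user: activate monitor
 *	list_add_tail(&cdev->private->cmb_list, &cmb_area.list);
 *	spin_unlock(&cmb_area.lock);
 */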
static int set_schib(struct ccw_device *cdev, u32 mme, int mbfc,
		     unsigned long address)
{
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);

	sch->config.mme = mme;
	sch->config.mbfc = mbfc;
	/* address can be either a block address or a block index */
	if (mbfc)
		sch->config.mba = address;
	else
		sch->config.mbi = address;

	return cio_commit_config(sch);
}
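/*
 * For illustration, the two callers further down pass the measurement block
 * location in the two possible ways (values are examples only):
 *
 *	set_schib_wait(cdev, mme, 0, offset);	// basic format: mbi is an
 *						// index into cmb_area.mem
 *	set_schib_wait(cdev, mme, 1, mba);	// extended format: mba is the
 *						// address of the block itself
 */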
struct set_schib_struct {
	u32 mme;
	int mbfc;
	unsigned long address;
	wait_queue_head_t wait;
	int ret;
	struct kref kref;
};
static void cmf_set_schib_release(struct kref *kref)
{
	struct set_schib_struct *set_data;

	set_data = container_of(kref, struct set_schib_struct, kref);
	kfree(set_data);
}
#define CMF_PENDING 1
static int set_schib_wait(struct ccw_device *cdev, u32 mme,
			  int mbfc, unsigned long address)
{
	struct set_schib_struct *set_data;
	int ret;

	spin_lock_irq(cdev->ccwlock);
	if (!cdev->private->cmb) {
		ret = -ENODEV;
		goto out;
	}
	set_data = kzalloc(sizeof(struct set_schib_struct), GFP_ATOMIC);
	if (!set_data) {
		ret = -ENOMEM;
		goto out;
	}
	init_waitqueue_head(&set_data->wait);
	kref_init(&set_data->kref);
	set_data->mme = mme;
	set_data->mbfc = mbfc;
	set_data->address = address;

	ret = set_schib(cdev, mme, mbfc, address);
	if (ret != -EBUSY)
		goto out_put;

	if (cdev->private->state != DEV_STATE_ONLINE) {
		/* if the device is not online, don't even try again */
		ret = -EBUSY;
		goto out_put;
	}

	cdev->private->state = DEV_STATE_CMFCHANGE;
	set_data->ret = CMF_PENDING;
	cdev->private->cmb_wait = set_data;

	spin_unlock_irq(cdev->ccwlock);
	if (wait_event_interruptible(set_data->wait,
				     set_data->ret != CMF_PENDING)) {
		spin_lock_irq(cdev->ccwlock);
		if (set_data->ret == CMF_PENDING) {
			set_data->ret = -ERESTARTSYS;
			if (cdev->private->state == DEV_STATE_CMFCHANGE)
				cdev->private->state = DEV_STATE_ONLINE;
		}
		spin_unlock_irq(cdev->ccwlock);
	}
	spin_lock_irq(cdev->ccwlock);
	cdev->private->cmb_wait = NULL;
	ret = set_data->ret;
out_put:
	kref_put(&set_data->kref, cmf_set_schib_release);
out:
	spin_unlock_irq(cdev->ccwlock);
	return ret;
}
void retry_set_schib(struct ccw_device *cdev)
{
	struct set_schib_struct *set_data;

	set_data = cdev->private->cmb_wait;
	if (!set_data) {
		WARN_ON(1);
		return;
	}
	kref_get(&set_data->kref);
	set_data->ret = set_schib(cdev, set_data->mme, set_data->mbfc,
				  set_data->address);
	wake_up(&set_data->wait);
	kref_put(&set_data->kref, cmf_set_schib_release);
}
static int cmf_copy_block(struct ccw_device *cdev)
{
	struct subchannel *sch;
	void *reference_buf;
	void *hw_block;
	struct cmb_data *cmb_data;

	sch = to_subchannel(cdev->dev.parent);

	if (cio_update_schib(sch))
		return -ENODEV;

	if (scsw_fctl(&sch->schib.scsw) & SCSW_FCTL_START_FUNC) {
		/* Don't copy if a start function is in progress. */
		if ((!(scsw_actl(&sch->schib.scsw) & SCSW_ACTL_SUSPENDED)) &&
		    (scsw_actl(&sch->schib.scsw) &
		     (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT)) &&
		    (!(scsw_stctl(&sch->schib.scsw) & SCSW_STCTL_SEC_STATUS)))
			return -EBUSY;
	}
	cmb_data = cdev->private->cmb;
	hw_block = cmbops->align(cmb_data->hw_block);
	if (!memcmp(cmb_data->last_block, hw_block, cmb_data->size))
		/* No need to copy. */
		return 0;
	reference_buf = kzalloc(cmb_data->size, GFP_ATOMIC);
	if (!reference_buf)
		return -ENOMEM;
	/* Ensure consistency of block copied from hardware. */
	do {
		memcpy(cmb_data->last_block, hw_block, cmb_data->size);
		memcpy(reference_buf, hw_block, cmb_data->size);
	} while (memcmp(cmb_data->last_block, reference_buf, cmb_data->size));
	cmb_data->last_update = get_tod_clock();
	kfree(reference_buf);
	return 0;
}
struct copy_block_struct {
	wait_queue_head_t wait;
	int ret;
	struct kref kref;
};
static void cmf_copy_block_release(struct kref *kref)
{
	struct copy_block_struct *copy_block;

	copy_block = container_of(kref, struct copy_block_struct, kref);
	kfree(copy_block);
}
static int cmf_cmb_copy_wait(struct ccw_device *cdev)
{
	struct copy_block_struct *copy_block;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(cdev->ccwlock, flags);
	if (!cdev->private->cmb) {
		ret = -ENODEV;
		goto out;
	}
	copy_block = kzalloc(sizeof(struct copy_block_struct), GFP_ATOMIC);
	if (!copy_block) {
		ret = -ENOMEM;
		goto out;
	}
	init_waitqueue_head(&copy_block->wait);
	kref_init(&copy_block->kref);

	ret = cmf_copy_block(cdev);
	if (ret != -EBUSY)
		goto out_put;

	if (cdev->private->state != DEV_STATE_ONLINE) {
		ret = -EBUSY;
		goto out_put;
	}

	cdev->private->state = DEV_STATE_CMFUPDATE;
	copy_block->ret = CMF_PENDING;
	cdev->private->cmb_wait = copy_block;

	spin_unlock_irqrestore(cdev->ccwlock, flags);
	if (wait_event_interruptible(copy_block->wait,
				     copy_block->ret != CMF_PENDING)) {
		spin_lock_irqsave(cdev->ccwlock, flags);
		if (copy_block->ret == CMF_PENDING) {
			copy_block->ret = -ERESTARTSYS;
			if (cdev->private->state == DEV_STATE_CMFUPDATE)
				cdev->private->state = DEV_STATE_ONLINE;
		}
		spin_unlock_irqrestore(cdev->ccwlock, flags);
	}
	spin_lock_irqsave(cdev->ccwlock, flags);
	cdev->private->cmb_wait = NULL;
	ret = copy_block->ret;
out_put:
	kref_put(&copy_block->kref, cmf_copy_block_release);
out:
	spin_unlock_irqrestore(cdev->ccwlock, flags);
	return ret;
}
void cmf_retry_copy_block(struct ccw_device *cdev)
{
	struct copy_block_struct *copy_block;

	copy_block = cdev->private->cmb_wait;
	if (!copy_block) {
		WARN_ON(1);
		return;
	}
	kref_get(&copy_block->kref);
	copy_block->ret = cmf_copy_block(cdev);
	wake_up(&copy_block->wait);
	kref_put(&copy_block->kref, cmf_copy_block_release);
}
static void cmf_generic_reset(struct ccw_device *cdev)
{
	struct cmb_data *cmb_data;

	spin_lock_irq(cdev->ccwlock);
	cmb_data = cdev->private->cmb;
	if (cmb_data) {
		memset(cmb_data->last_block, 0, cmb_data->size);
		/*
		 * Need to reset hw block as well to make the hardware start
		 * from 0 again.
		 */
		memset(cmbops->align(cmb_data->hw_block), 0, cmb_data->size);
		cmb_data->last_update = 0;
	}
	cdev->private->cmb_start_time = get_tod_clock();
	spin_unlock_irq(cdev->ccwlock);
}
/**
 * struct cmb_area - container for global cmb data
 *
 * @mem:	pointer to CMBs (only in basic measurement mode)
 * @list:	contains a linked list of all subchannels
 * @num_channels: number of channels to be measured
 * @lock:	protect concurrent access to @mem and @list
 */
struct cmb_area {
	struct cmb *mem;
	struct list_head list;
	int num_channels;
	spinlock_t lock;
};

static struct cmb_area cmb_area = {
	.lock = __SPIN_LOCK_UNLOCKED(cmb_area.lock),
	.list = LIST_HEAD_INIT(cmb_area.list),
	.num_channels = 1024,
};
/* ****** old style CMB handling ********/

/*
 * Basic channel measurement blocks are allocated in one contiguous
 * block of memory, which can not be moved as long as any channel
 * is active. Therefore, a maximum number of subchannels needs to
 * be defined somewhere. This is a module parameter, defaulting to
 * a reasonable value of 1024, or 32 kb of memory.
 * Current kernels don't allow kmalloc with more than 128kb, so the
 * maximum is 4096.
 */
module_param_named(maxchannels, cmb_area.num_channels, uint, 0444);
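/*
 * Worked example (illustration, using the default): each basic-format
 * struct cmb below is 32 bytes, so the default of 1024 channels needs
 *
 *	1024 * 32 bytes = 32768 bytes = 32 kb
 *
 * of contiguous memory, which is the "32 kb" figure quoted above.
 */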
/**
 * struct cmb - basic channel measurement block
 * @ssch_rsch_count: number of ssch and rsch
 * @sample_count: number of samples
 * @device_connect_time: time of device connect
 * @function_pending_time: time of function pending
 * @device_disconnect_time: time of device disconnect
 * @control_unit_queuing_time: time of control unit queuing
 * @device_active_only_time: time of device active only
 * @reserved: unused in basic measurement mode
 *
 * The measurement block as used by the hardware. The fields are described
 * further in z/Architecture Principles of Operation, chapter 17.
 *
 * The cmb area made up of these blocks must be a contiguous array and may
 * not be reallocated or freed.
 * Only one cmb area can be present in the system.
 */
struct cmb {
	u16 ssch_rsch_count;
	u16 sample_count;
	u32 device_connect_time;
	u32 function_pending_time;
	u32 device_disconnect_time;
	u32 control_unit_queuing_time;
	u32 device_active_only_time;
	u32 reserved[2];
};
/*
 * Insert a single device into the cmb_area list.
 * Called with cmb_area.lock held from alloc_cmb.
 */
static int alloc_cmb_single(struct ccw_device *cdev,
			    struct cmb_data *cmb_data)
{
	struct cmb *cmb;
	struct ccw_device_private *node;
	int ret;

	spin_lock_irq(cdev->ccwlock);
	if (!list_empty(&cdev->private->cmb_list)) {
		ret = -EBUSY;
		goto out;
	}

	/*
	 * Find first unused cmb in cmb_area.mem.
	 * This is a little tricky: cmb_area.list
	 * remains sorted by ->cmb->hw_data pointers.
	 */
	cmb = cmb_area.mem;
	list_for_each_entry(node, &cmb_area.list, cmb_list) {
		struct cmb_data *data;
		data = node->cmb;
		if ((struct cmb*)data->hw_block > cmb)
			break;
		cmb++;
	}
	if (cmb - cmb_area.mem >= cmb_area.num_channels) {
		ret = -ENOMEM;
		goto out;
	}

	/* insert new cmb */
	list_add_tail(&cdev->private->cmb_list, &node->cmb_list);
	cmb_data->hw_block = cmb;
	cdev->private->cmb = cmb_data;
	ret = 0;
out:
	spin_unlock_irq(cdev->ccwlock);
	return ret;
}
static int alloc_cmb(struct ccw_device *cdev)
{
	int ret;
	struct cmb *mem;
	ssize_t size;
	struct cmb_data *cmb_data;

	/* Allocate private cmb_data. */
	cmb_data = kzalloc(sizeof(struct cmb_data), GFP_KERNEL);
	if (!cmb_data)
		return -ENOMEM;

	cmb_data->last_block = kzalloc(sizeof(struct cmb), GFP_KERNEL);
	if (!cmb_data->last_block) {
		kfree(cmb_data);
		return -ENOMEM;
	}
	cmb_data->size = sizeof(struct cmb);
	spin_lock(&cmb_area.lock);

	if (!cmb_area.mem) {
		/* there is no user yet, so we need a new area */
		size = sizeof(struct cmb) * cmb_area.num_channels;
		WARN_ON(!list_empty(&cmb_area.list));

		spin_unlock(&cmb_area.lock);
		mem = (void*)__get_free_pages(GFP_KERNEL | GFP_DMA,
					      get_order(size));
		spin_lock(&cmb_area.lock);

		if (cmb_area.mem) {
			/* ok, another thread was faster */
			free_pages((unsigned long)mem, get_order(size));
		} else if (!mem) {
			ret = -ENOMEM;
			goto out;
		} else {
			/* everything ok */
			memset(mem, 0, size);
			cmb_area.mem = mem;
			cmf_activate(cmb_area.mem, 1);
		}
	}

	/* do the actual allocation */
	ret = alloc_cmb_single(cdev, cmb_data);
out:
	spin_unlock(&cmb_area.lock);
	if (ret) {
		kfree(cmb_data->last_block);
		kfree(cmb_data);
	}
	return ret;
}
static void free_cmb(struct ccw_device *cdev)
{
	struct ccw_device_private *priv;
	struct cmb_data *cmb_data;

	spin_lock(&cmb_area.lock);
	spin_lock_irq(cdev->ccwlock);

	priv = cdev->private;

	if (list_empty(&priv->cmb_list)) {
		/* already freed */
		goto out;
	}

	cmb_data = priv->cmb;
	priv->cmb = NULL;
	if (cmb_data)
		kfree(cmb_data->last_block);
	kfree(cmb_data);
	list_del_init(&priv->cmb_list);

	if (list_empty(&cmb_area.list)) {
		ssize_t size;
		size = sizeof(struct cmb) * cmb_area.num_channels;
		cmf_activate(NULL, 0);
		free_pages((unsigned long)cmb_area.mem, get_order(size));
		cmb_area.mem = NULL;
	}
out:
	spin_unlock_irq(cdev->ccwlock);
	spin_unlock(&cmb_area.lock);
}
static int set_cmb(struct ccw_device *cdev, u32 mme)
{
	u16 offset;
	struct cmb_data *cmb_data;
	unsigned long flags;

	spin_lock_irqsave(cdev->ccwlock, flags);
	if (!cdev->private->cmb) {
		spin_unlock_irqrestore(cdev->ccwlock, flags);
		return -EINVAL;
	}
	cmb_data = cdev->private->cmb;
	offset = mme ? (struct cmb *)cmb_data->hw_block - cmb_area.mem : 0;
	spin_unlock_irqrestore(cdev->ccwlock, flags);

	return set_schib_wait(cdev, mme, 0, offset);
}
static u64 read_cmb(struct ccw_device *cdev, int index)
{
	struct cmb *cmb;
	u32 val;
	int ret;
	unsigned long flags;

	ret = cmf_cmb_copy_wait(cdev);
	if (ret < 0)
		return 0;

	spin_lock_irqsave(cdev->ccwlock, flags);
	if (!cdev->private->cmb) {
		ret = 0;
		goto out;
	}
	cmb = ((struct cmb_data *)cdev->private->cmb)->last_block;

	switch (index) {
	case cmb_ssch_rsch_count:
		ret = cmb->ssch_rsch_count;
		goto out;
	case cmb_sample_count:
		ret = cmb->sample_count;
		goto out;
	case cmb_device_connect_time:
		val = cmb->device_connect_time;
		break;
	case cmb_function_pending_time:
		val = cmb->function_pending_time;
		break;
	case cmb_device_disconnect_time:
		val = cmb->device_disconnect_time;
		break;
	case cmb_control_unit_queuing_time:
		val = cmb->control_unit_queuing_time;
		break;
	case cmb_device_active_only_time:
		val = cmb->device_active_only_time;
		break;
	default:
		ret = 0;
		goto out;
	}
	ret = time_to_avg_nsec(val, cmb->sample_count);
out:
	spin_unlock_irqrestore(cdev->ccwlock, flags);
	return ret;
}
static int readall_cmb(struct ccw_device *cdev, struct cmbdata *data)
{
	struct cmb *cmb;
	struct cmb_data *cmb_data;
	u64 time;
	unsigned long flags;
	int ret;

	ret = cmf_cmb_copy_wait(cdev);
	if (ret < 0)
		return ret;
	spin_lock_irqsave(cdev->ccwlock, flags);
	cmb_data = cdev->private->cmb;
	if (!cmb_data) {
		ret = -ENODEV;
		goto out;
	}
	if (cmb_data->last_update == 0) {
		ret = -EAGAIN;
		goto out;
	}
	cmb = cmb_data->last_block;
	time = cmb_data->last_update - cdev->private->cmb_start_time;

	memset(data, 0, sizeof(struct cmbdata));

	/* we only know values before device_busy_time */
	data->size = offsetof(struct cmbdata, device_busy_time);

	/* convert to nanoseconds (one TOD clock unit is 1/4096 microsecond) */
	data->elapsed_time = (time * 1000) >> 12;

	/* copy data to new structure */
	data->ssch_rsch_count = cmb->ssch_rsch_count;
	data->sample_count = cmb->sample_count;

	/* time fields are converted to nanoseconds while copying */
	data->device_connect_time = time_to_nsec(cmb->device_connect_time);
	data->function_pending_time = time_to_nsec(cmb->function_pending_time);
	data->device_disconnect_time =
		time_to_nsec(cmb->device_disconnect_time);
	data->control_unit_queuing_time
		= time_to_nsec(cmb->control_unit_queuing_time);
	data->device_active_only_time
		= time_to_nsec(cmb->device_active_only_time);
	ret = 0;
out:
	spin_unlock_irqrestore(cdev->ccwlock, flags);
	return ret;
}
static void reset_cmb(struct ccw_device *cdev)
{
	cmf_generic_reset(cdev);
}
static void *align_cmb(void *area)
{
	return area;
}

static struct attribute_group cmf_attr_group;

static struct cmb_operations cmbops_basic = {
	.alloc	    = alloc_cmb,
	.free	    = free_cmb,
	.set	    = set_cmb,
	.read	    = read_cmb,
	.readall    = readall_cmb,
	.reset	    = reset_cmb,
	.align	    = align_cmb,
	.attr_group = &cmf_attr_group,
};
/* ******** extended cmb handling ********/

/**
 * struct cmbe - extended channel measurement block
 * @ssch_rsch_count: number of ssch and rsch
 * @sample_count: number of samples
 * @device_connect_time: time of device connect
 * @function_pending_time: time of function pending
 * @device_disconnect_time: time of device disconnect
 * @control_unit_queuing_time: time of control unit queuing
 * @device_active_only_time: time of device active only
 * @device_busy_time: time of device busy
 * @initial_command_response_time: initial command response time
 *
 * The measurement block as used by the hardware. May be in any 64 bit physical
 * location.
 * The fields are described further in z/Architecture Principles of Operation,
 * third edition, chapter 17.
 */
struct cmbe {
	u32 ssch_rsch_count;
	u32 sample_count;
	u32 device_connect_time;
	u32 function_pending_time;
	u32 device_disconnect_time;
	u32 control_unit_queuing_time;
	u32 device_active_only_time;
	u32 device_busy_time;
	u32 initial_command_response_time;
	u32 reserved[7];
};
/*
 * kmalloc only guarantees 8 byte alignment, but we need cmbe
 * pointers to be naturally aligned. Make sure to allocate
 * enough space for two cmbes.
 */
static inline struct cmbe *cmbe_align(struct cmbe *c)
{
	unsigned long addr;

	addr = ((unsigned long)c + sizeof (struct cmbe) - sizeof(long)) &
		~(sizeof (struct cmbe) - sizeof(long));
	return (struct cmbe*)addr;
}
static int alloc_cmbe(struct ccw_device *cdev)
{
	struct cmbe *cmbe;
	struct cmb_data *cmb_data;
	int ret;

	cmbe = kzalloc(sizeof(*cmbe) * 2, GFP_KERNEL);
	if (!cmbe)
		return -ENOMEM;
	cmb_data = kzalloc(sizeof(struct cmb_data), GFP_KERNEL);
	if (!cmb_data) {
		ret = -ENOMEM;
		goto out_free;
	}
	cmb_data->last_block = kzalloc(sizeof(struct cmbe), GFP_KERNEL);
	if (!cmb_data->last_block) {
		ret = -ENOMEM;
		goto out_free;
	}
	cmb_data->size = sizeof(struct cmbe);
	spin_lock_irq(cdev->ccwlock);
	if (cdev->private->cmb) {
		spin_unlock_irq(cdev->ccwlock);
		ret = -EBUSY;
		goto out_free;
	}
	cmb_data->hw_block = cmbe;
	cdev->private->cmb = cmb_data;
	spin_unlock_irq(cdev->ccwlock);

	/* activate global measurement if this is the first channel */
	spin_lock(&cmb_area.lock);
	if (list_empty(&cmb_area.list))
		cmf_activate(NULL, 1);
	list_add_tail(&cdev->private->cmb_list, &cmb_area.list);
	spin_unlock(&cmb_area.lock);

	return 0;
out_free:
	if (cmb_data)
		kfree(cmb_data->last_block);
	kfree(cmb_data);
	kfree(cmbe);
	return ret;
}
static void free_cmbe(struct ccw_device *cdev)
{
	struct cmb_data *cmb_data;

	spin_lock_irq(cdev->ccwlock);
	cmb_data = cdev->private->cmb;
	cdev->private->cmb = NULL;
	if (cmb_data)
		kfree(cmb_data->last_block);
	kfree(cmb_data);
	spin_unlock_irq(cdev->ccwlock);

	/* deactivate global measurement if this is the last channel */
	spin_lock(&cmb_area.lock);
	list_del_init(&cdev->private->cmb_list);
	if (list_empty(&cmb_area.list))
		cmf_activate(NULL, 0);
	spin_unlock(&cmb_area.lock);
}
static int set_cmbe(struct ccw_device *cdev, u32 mme)
{
	unsigned long mba;
	struct cmb_data *cmb_data;
	unsigned long flags;

	spin_lock_irqsave(cdev->ccwlock, flags);
	if (!cdev->private->cmb) {
		spin_unlock_irqrestore(cdev->ccwlock, flags);
		return -EINVAL;
	}
	cmb_data = cdev->private->cmb;
	mba = mme ? (unsigned long) cmbe_align(cmb_data->hw_block) : 0;
	spin_unlock_irqrestore(cdev->ccwlock, flags);

	return set_schib_wait(cdev, mme, 1, mba);
}
static u64 read_cmbe(struct ccw_device *cdev, int index)
{
	struct cmbe *cmb;
	struct cmb_data *cmb_data;
	u32 val;
	int ret;
	unsigned long flags;

	ret = cmf_cmb_copy_wait(cdev);
	if (ret < 0)
		return 0;

	spin_lock_irqsave(cdev->ccwlock, flags);
	cmb_data = cdev->private->cmb;
	if (!cmb_data) {
		ret = 0;
		goto out;
	}
	cmb = cmb_data->last_block;

	switch (index) {
	case cmb_ssch_rsch_count:
		ret = cmb->ssch_rsch_count;
		goto out;
	case cmb_sample_count:
		ret = cmb->sample_count;
		goto out;
	case cmb_device_connect_time:
		val = cmb->device_connect_time;
		break;
	case cmb_function_pending_time:
		val = cmb->function_pending_time;
		break;
	case cmb_device_disconnect_time:
		val = cmb->device_disconnect_time;
		break;
	case cmb_control_unit_queuing_time:
		val = cmb->control_unit_queuing_time;
		break;
	case cmb_device_active_only_time:
		val = cmb->device_active_only_time;
		break;
	case cmb_device_busy_time:
		val = cmb->device_busy_time;
		break;
	case cmb_initial_command_response_time:
		val = cmb->initial_command_response_time;
		break;
	default:
		ret = 0;
		goto out;
	}
	ret = time_to_avg_nsec(val, cmb->sample_count);
out:
	spin_unlock_irqrestore(cdev->ccwlock, flags);
	return ret;
}
static int readall_cmbe(struct ccw_device *cdev, struct cmbdata *data)
{
	struct cmbe *cmb;
	struct cmb_data *cmb_data;
	u64 time;
	unsigned long flags;
	int ret;

	ret = cmf_cmb_copy_wait(cdev);
	if (ret < 0)
		return ret;
	spin_lock_irqsave(cdev->ccwlock, flags);
	cmb_data = cdev->private->cmb;
	if (!cmb_data) {
		ret = -ENODEV;
		goto out;
	}
	if (cmb_data->last_update == 0) {
		ret = -EAGAIN;
		goto out;
	}
	time = cmb_data->last_update - cdev->private->cmb_start_time;

	memset(data, 0, sizeof(struct cmbdata));

	/* we only know values before device_busy_time */
	data->size = offsetof(struct cmbdata, device_busy_time);

	/* convert to nanoseconds */
	data->elapsed_time = (time * 1000) >> 12;

	cmb = cmb_data->last_block;
	/* copy data to new structure */
	data->ssch_rsch_count = cmb->ssch_rsch_count;
	data->sample_count = cmb->sample_count;

	/* time fields are converted to nanoseconds while copying */
	data->device_connect_time = time_to_nsec(cmb->device_connect_time);
	data->function_pending_time = time_to_nsec(cmb->function_pending_time);
	data->device_disconnect_time =
		time_to_nsec(cmb->device_disconnect_time);
	data->control_unit_queuing_time
		= time_to_nsec(cmb->control_unit_queuing_time);
	data->device_active_only_time
		= time_to_nsec(cmb->device_active_only_time);
	data->device_busy_time = time_to_nsec(cmb->device_busy_time);
	data->initial_command_response_time
		= time_to_nsec(cmb->initial_command_response_time);

	ret = 0;
out:
	spin_unlock_irqrestore(cdev->ccwlock, flags);
	return ret;
}
static void reset_cmbe(struct ccw_device *cdev)
{
	cmf_generic_reset(cdev);
}

static void *align_cmbe(void *area)
{
	return cmbe_align(area);
}

static struct attribute_group cmf_attr_group_ext;

static struct cmb_operations cmbops_extended = {
	.alloc	    = alloc_cmbe,
	.free	    = free_cmbe,
	.set	    = set_cmbe,
	.read	    = read_cmbe,
	.readall    = readall_cmbe,
	.reset	    = reset_cmbe,
	.align	    = align_cmbe,
	.attr_group = &cmf_attr_group_ext,
};
static ssize_t cmb_show_attr(struct device *dev, char *buf, enum cmb_index idx)
{
	return sprintf(buf, "%lld\n",
		       (unsigned long long) cmf_read(to_ccwdev(dev), idx));
}
static ssize_t cmb_show_avg_sample_interval(struct device *dev,
					    struct device_attribute *attr,
					    char *buf)
{
	struct ccw_device *cdev;
	long interval;
	unsigned long count;
	struct cmb_data *cmb_data;

	cdev = to_ccwdev(dev);
	count = cmf_read(cdev, cmb_sample_count);
	spin_lock_irq(cdev->ccwlock);
	cmb_data = cdev->private->cmb;
	if (count) {
		interval = cmb_data->last_update -
			cdev->private->cmb_start_time;
		interval = (interval * 1000) >> 12;
		interval /= count;
	} else
		interval = -1;
	spin_unlock_irq(cdev->ccwlock);
	return sprintf(buf, "%ld\n", interval);
}
static ssize_t cmb_show_avg_utilization(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct cmbdata data;
	u64 utilization;
	unsigned long t, u;
	int ret;

	ret = cmf_readall(to_ccwdev(dev), &data);
	if (ret == -EAGAIN || ret == -ENODEV)
		/* No data (yet/currently) available to use for calculation. */
		return sprintf(buf, "n/a\n");
	else if (ret)
		return ret;

	utilization = data.device_connect_time +
		      data.function_pending_time +
		      data.device_disconnect_time;

	/* shift to avoid long long division */
	while (-1ul < (data.elapsed_time | utilization)) {
		utilization >>= 8;
		data.elapsed_time >>= 8;
	}

	/* calculate value in 0.1 percent units */
	t = (unsigned long) data.elapsed_time / 1000;
	u = (unsigned long) utilization / t;

	return sprintf(buf, "%02ld.%01ld%%\n", u / 10, u - (u / 10) * 10);
}
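/*
 * Worked example for the 0.1% calculation above (illustrative numbers):
 * with utilization = 250000 ns of busy time over elapsed_time = 1000000 ns,
 * t = 1000000 / 1000 = 1000 and u = 250000 / 1000 = 250 tenths of a percent,
 * which the final sprintf() prints as "25.0%".
 */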
#define cmf_attr(name) \
static ssize_t show_##name(struct device *dev, \
			   struct device_attribute *attr, char *buf)	\
{ return cmb_show_attr((dev), buf, cmb_##name); } \
static DEVICE_ATTR(name, 0444, show_##name, NULL);

#define cmf_attr_avg(name) \
static ssize_t show_avg_##name(struct device *dev, \
			       struct device_attribute *attr, char *buf) \
{ return cmb_show_attr((dev), buf, cmb_##name); } \
static DEVICE_ATTR(avg_##name, 0444, show_avg_##name, NULL);
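/*
 * For illustration, cmf_attr_avg(device_connect_time) expands to roughly:
 *
 *	static ssize_t show_avg_device_connect_time(struct device *dev,
 *			struct device_attribute *attr, char *buf)
 *	{ return cmb_show_attr((dev), buf, cmb_device_connect_time); }
 *	static DEVICE_ATTR(avg_device_connect_time, 0444,
 *			   show_avg_device_connect_time, NULL);
 *
 * i.e. one read-only sysfs attribute per measurement value.
 */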
cmf_attr(ssch_rsch_count);
cmf_attr(sample_count);
cmf_attr_avg(device_connect_time);
cmf_attr_avg(function_pending_time);
cmf_attr_avg(device_disconnect_time);
cmf_attr_avg(control_unit_queuing_time);
cmf_attr_avg(device_active_only_time);
cmf_attr_avg(device_busy_time);
cmf_attr_avg(initial_command_response_time);

static DEVICE_ATTR(avg_sample_interval, 0444, cmb_show_avg_sample_interval,
		   NULL);
static DEVICE_ATTR(avg_utilization, 0444, cmb_show_avg_utilization, NULL);
static struct attribute *cmf_attributes[] = {
	&dev_attr_avg_sample_interval.attr,
	&dev_attr_avg_utilization.attr,
	&dev_attr_ssch_rsch_count.attr,
	&dev_attr_sample_count.attr,
	&dev_attr_avg_device_connect_time.attr,
	&dev_attr_avg_function_pending_time.attr,
	&dev_attr_avg_device_disconnect_time.attr,
	&dev_attr_avg_control_unit_queuing_time.attr,
	&dev_attr_avg_device_active_only_time.attr,
	NULL,
};

static struct attribute_group cmf_attr_group = {
	.name  = "cmf",
	.attrs = cmf_attributes,
};

static struct attribute *cmf_attributes_ext[] = {
	&dev_attr_avg_sample_interval.attr,
	&dev_attr_avg_utilization.attr,
	&dev_attr_ssch_rsch_count.attr,
	&dev_attr_sample_count.attr,
	&dev_attr_avg_device_connect_time.attr,
	&dev_attr_avg_function_pending_time.attr,
	&dev_attr_avg_device_disconnect_time.attr,
	&dev_attr_avg_control_unit_queuing_time.attr,
	&dev_attr_avg_device_active_only_time.attr,
	&dev_attr_avg_device_busy_time.attr,
	&dev_attr_avg_initial_command_response_time.attr,
	NULL,
};

static struct attribute_group cmf_attr_group_ext = {
	.name  = "cmf",
	.attrs = cmf_attributes_ext,
};
static ssize_t cmb_enable_show(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	return sprintf(buf, "%d\n", to_ccwdev(dev)->private->cmb ? 1 : 0);
}

static ssize_t cmb_enable_store(struct device *dev,
				struct device_attribute *attr, const char *buf,
				size_t c)
{
	struct ccw_device *cdev;
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	cdev = to_ccwdev(dev);

	switch (val) {
	case 0:
		ret = disable_cmf(cdev);
		break;
	case 1:
		ret = enable_cmf(cdev);
		break;
	}

	return c;
}

DEVICE_ATTR(cmb_enable, 0644, cmb_enable_show, cmb_enable_store);
int ccw_set_cmf(struct ccw_device *cdev, int enable)
{
	return cmbops->set(cdev, enable ? 2 : 0);
}
/**
 * enable_cmf() - switch on the channel measurement for a specific device
 *  @cdev:	The ccw device to be enabled
 *
 *  Returns %0 for success or a negative error value.
 */
int enable_cmf(struct ccw_device *cdev)
{
	int ret;

	ret = cmbops->alloc(cdev);
	cmbops->reset(cdev);
	if (ret)
		return ret;
	ret = cmbops->set(cdev, 2);
	if (ret) {
		cmbops->free(cdev);
		return ret;
	}
	ret = sysfs_create_group(&cdev->dev.kobj, cmbops->attr_group);
	if (!ret)
		return 0;
	cmbops->set(cdev, 0);  //FIXME: this can fail
	cmbops->free(cdev);
	return ret;
}
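/*
 * Minimal usage sketch, assuming a caller that already holds a valid
 * struct ccw_device (e.g. from a CCW driver's set_online callback); the
 * helper name below is hypothetical and not part of this file:
 *
 *	static int my_driver_start_measuring(struct ccw_device *cdev)
 *	{
 *		int ret = enable_cmf(cdev);
 *		if (ret)
 *			return ret;
 *		// the per-value sysfs attributes and cmf_read()/cmf_readall()
 *		// can be used from here on
 *		return 0;
 *	}
 *
 * disable_cmf(cdev) undoes this once measurement is no longer needed.
 */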
/**
 * disable_cmf() - switch off the channel measurement for a specific device
 *  @cdev:	The ccw device to be disabled
 *
 *  Returns %0 for success or a negative error value.
 */
int disable_cmf(struct ccw_device *cdev)
{
	int ret;

	ret = cmbops->set(cdev, 0);
	if (ret)
		return ret;
	cmbops->free(cdev);
	sysfs_remove_group(&cdev->dev.kobj, cmbops->attr_group);
	return ret;
}
/**
 * cmf_read() - read one value from the current channel measurement block
 * @cdev:	the channel to be read
 * @index:	the index of the value to be read
 *
 * Returns the value read or %0 if the value cannot be read.
 */
u64 cmf_read(struct ccw_device *cdev, int index)
{
	return cmbops->read(cdev, index);
}
/**
 * cmf_readall() - read the current channel measurement block
 * @cdev:	the channel to be read
 * @data:	a pointer to a data block that will be filled
 *
 * Returns %0 on success, a negative error value otherwise.
 */
int cmf_readall(struct ccw_device *cdev, struct cmbdata *data)
{
	return cmbops->readall(cdev, data);
}
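/*
 * Illustrative sketch (not part of this file) of consuming cmf_readall()
 * from a driver; the error conventions follow the readall_cmb()/readall_cmbe()
 * implementations above:
 *
 *	struct cmbdata d;
 *	int ret = cmf_readall(cdev, &d);
 *
 *	if (ret == -EAGAIN)
 *		;	// measurement enabled, but no sample taken yet
 *	else if (!ret)
 *		pr_debug("%llu ssch/rsch, %llu ns connect time\n",
 *			 (unsigned long long) d.ssch_rsch_count,
 *			 (unsigned long long) d.device_connect_time);
 */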
/* Reenable cmf when a disconnected device becomes available again. */
int cmf_reenable(struct ccw_device *cdev)
{
	cmbops->reset(cdev);
	return cmbops->set(cdev, 2);
}
static int __init init_cmf(void)
{
	char *format_string;
	char *detect_string = "parameter";

	/*
	 * If the user did not give a parameter, see if we are running on a
	 * machine supporting extended measurement blocks, otherwise fall back
	 * to basic mode.
	 */
	if (format == CMF_AUTODETECT) {
		if (!css_general_characteristics.ext_mb) {
			format = CMF_BASIC;
		} else {
			format = CMF_EXTENDED;
		}
		detect_string = "autodetected";
	} else {
		detect_string = "parameter";
	}

	switch (format) {
	case CMF_BASIC:
		format_string = "basic";
		cmbops = &cmbops_basic;
		break;
	case CMF_EXTENDED:
		format_string = "extended";
		cmbops = &cmbops_extended;
		break;
	default:
		return 1;
	}
	pr_info("Channel measurement facility initialized using format "
		"%s (mode %s)\n", format_string, detect_string);
	return 0;
}
module_init(init_cmf);
MODULE_AUTHOR("Arnd Bergmann <arndb@de.ibm.com>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("channel measurement facility base driver\n"
		   "Copyright IBM Corp. 2003\n");

EXPORT_SYMBOL_GPL(enable_cmf);
EXPORT_SYMBOL_GPL(disable_cmf);
EXPORT_SYMBOL_GPL(cmf_read);
EXPORT_SYMBOL_GPL(cmf_readall);