drivers/s390/block/dasd_eer.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Character device driver for extended error reporting.
 *
 * Copyright IBM Corp. 2005
 * extended error reporting for DASD ECKD devices
 * Author(s): Stefan Weinhuber <wein@de.ibm.com>
 */

#define KMSG_COMPONENT "dasd-eckd"

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/device.h>
#include <linux/poll.h>
#include <linux/mutex.h>
#include <linux/err.h>
#include <linux/slab.h>

#include <linux/uaccess.h>
#include <linux/atomic.h>
#include <asm/ebcdic.h>

#include "dasd_int.h"
#include "dasd_eckd.h"

#ifdef PRINTK_HEADER
#undef PRINTK_HEADER
#endif /* PRINTK_HEADER */
#define PRINTK_HEADER "dasd(eer):"

/*
 * SECTION: the internal buffer
 */

/*
 * The internal buffer is meant to store opaque blobs of data, so it does
 * not know of higher level concepts like triggers.
 * It consists of a number of pages that are used as a ringbuffer. Each data
 * blob is stored in a simple record that consists of an integer, which
 * contains the size of the following data, and the data bytes themselves.
 *
 * To allow for multiple independent readers we create one internal buffer
 * each time the device is opened and destroy the buffer when the file is
 * closed again. The number of pages used for this buffer is determined by
 * the module parameter eer_pages.
 *
 * One record can be written to a buffer by using the functions
 * - dasd_eer_start_record (one time per record to write the size to the
 *                          buffer and reserve the space for the data)
 * - dasd_eer_write_buffer (one or more times per record to write the data)
 * The data can be written in several steps but you will have to compute
 * the total size up front for the invocation of dasd_eer_start_record.
 * If the ringbuffer is full, dasd_eer_start_record will remove the required
 * number of old records.
 *
 * A record is typically read in two steps, first read the integer that
 * specifies the size of the following data, then read the data.
 * Both can be done by
 * - dasd_eer_read_buffer
 *
 * For all mentioned functions you need to get the bufferlock first and keep
 * it until a complete record is written or read.
 *
 * All information necessary to keep track of an internal buffer is kept in
 * a struct eerbuffer. The buffer specific to a file pointer is stored in
 * the private_data field of that file. To be able to write data to all
 * existing buffers, each buffer is also added to the bufferlist.
 * If the user does not want to read a complete record in one go, we have to
 * keep track of the rest of the record. residual stores the number of bytes
 * that are still to be delivered. If the rest of the record is invalidated
 * between two reads then residual will be set to -1 so that the next read
 * will fail.
 * All entries in the eerbuffer structure are protected with the bufferlock.
 * To avoid races between writing to a buffer on the one side and creating
 * and destroying buffers on the other side, the bufferlock must also be used
 * to protect the bufferlist.
 */
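
/*
 * As an illustration (not a call site in this file), a writer that wants
 * to store a blob of len bytes would, with bufferlock held, roughly do:
 *
 *	if (!dasd_eer_start_record(eerb, len))
 *		dasd_eer_write_buffer(eerb, data, len);
 *
 * which leaves the following record in the ringbuffer:
 *
 *	| int len | len data bytes |
 */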

static int eer_pages = 5;
module_param(eer_pages, int, S_IRUGO|S_IWUSR);
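
/*
 * Note that every open of the character device allocates its own internal
 * buffer of eer_pages pages (see dasd_eer_open below), so this parameter
 * bounds the memory used per reader rather than one global buffer.
 */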

struct eerbuffer {
	struct list_head list;
	char **buffer;
	int buffersize;
	int buffer_page_count;
	int head;
	int tail;
	int residual;
};

static LIST_HEAD(bufferlist);
static DEFINE_SPINLOCK(bufferlock);
static DECLARE_WAIT_QUEUE_HEAD(dasd_eer_read_wait_queue);

/*
 * How many free bytes are available on the buffer.
 * Needs to be called with bufferlock held.
 */
static int dasd_eer_get_free_bytes(struct eerbuffer *eerb)
{
	if (eerb->head < eerb->tail)
		return eerb->tail - eerb->head - 1;
	return eerb->buffersize - eerb->head + eerb->tail - 1;
}

/*
 * How many bytes of buffer space are used.
 * Needs to be called with bufferlock held.
 */
static int dasd_eer_get_filled_bytes(struct eerbuffer *eerb)
{
	if (eerb->head >= eerb->tail)
		return eerb->head - eerb->tail;
	return eerb->buffersize - eerb->tail + eerb->head;
}

/*
 * The dasd_eer_write_buffer function just copies count bytes of data
 * to the buffer. Make sure to call dasd_eer_start_record first, to
 * make sure that enough free space is available.
 * Needs to be called with bufferlock held.
 */
static void dasd_eer_write_buffer(struct eerbuffer *eerb,
				  char *data, int count)
{
	unsigned long headindex, localhead;
	unsigned long rest, len;
	char *nextdata;

	nextdata = data;
	rest = count;
	while (rest > 0) {
		headindex = eerb->head / PAGE_SIZE;
		localhead = eerb->head % PAGE_SIZE;
		len = min(rest, PAGE_SIZE - localhead);
		memcpy(eerb->buffer[headindex] + localhead, nextdata, len);
		nextdata += len;
		rest -= len;
		eerb->head += len;
		if (eerb->head == eerb->buffersize)
			eerb->head = 0; /* wrap around */
		BUG_ON(eerb->head > eerb->buffersize);
	}
}

/*
 * Needs to be called with bufferlock held.
 */
static int dasd_eer_read_buffer(struct eerbuffer *eerb, char *data, int count)
{
	unsigned long tailindex, localtail;
	unsigned long rest, len, finalcount;
	char *nextdata;

	finalcount = min(count, dasd_eer_get_filled_bytes(eerb));
	nextdata = data;
	rest = finalcount;
	while (rest > 0) {
		tailindex = eerb->tail / PAGE_SIZE;
		localtail = eerb->tail % PAGE_SIZE;
		len = min(rest, PAGE_SIZE - localtail);
		memcpy(nextdata, eerb->buffer[tailindex] + localtail, len);
		nextdata += len;
		rest -= len;
		eerb->tail += len;
		if (eerb->tail == eerb->buffersize)
			eerb->tail = 0; /* wrap around */
		BUG_ON(eerb->tail > eerb->buffersize);
	}
	return finalcount;
}

/*
 * Whenever you want to write a blob of data to the internal buffer you
 * have to start by using this function first. It will write the number
 * of bytes that will be written to the buffer. If necessary it will remove
 * old records to make room for the new one.
 * Needs to be called with bufferlock held.
 */
static int dasd_eer_start_record(struct eerbuffer *eerb, int count)
{
	int tailcount;

	if (count + sizeof(count) > eerb->buffersize)
		return -ENOMEM;
	while (dasd_eer_get_free_bytes(eerb) < count + sizeof(count)) {
		if (eerb->residual > 0) {
			eerb->tail += eerb->residual;
			if (eerb->tail >= eerb->buffersize)
				eerb->tail -= eerb->buffersize;
			eerb->residual = -1;
		}
		dasd_eer_read_buffer(eerb, (char *) &tailcount,
				     sizeof(tailcount));
		eerb->tail += tailcount;
		if (eerb->tail >= eerb->buffersize)
			eerb->tail -= eerb->buffersize;
	}
	dasd_eer_write_buffer(eerb, (char *) &count, sizeof(count));

	return 0;
}
/*
 * Release pages that are not used anymore.
 */
static void dasd_eer_free_buffer_pages(char **buf, int no_pages)
{
	int i;

	for (i = 0; i < no_pages; i++)
		free_page((unsigned long) buf[i]);
}

/*
 * Allocate a new set of memory pages.
 */
static int dasd_eer_allocate_buffer_pages(char **buf, int no_pages)
{
	int i;

	for (i = 0; i < no_pages; i++) {
		buf[i] = (char *) get_zeroed_page(GFP_KERNEL);
		if (!buf[i]) {
			dasd_eer_free_buffer_pages(buf, i);
			return -ENOMEM;
		}
	}
	return 0;
}

/*
 * SECTION: The extended error reporting functionality
 */

/*
 * When a DASD device driver wants to report an error, it calls the
 * function dasd_eer_write and gives the respective trigger ID as
 * parameter. Currently there are four kinds of triggers:
 *
 * DASD_EER_FATALERROR:  all kinds of unrecoverable I/O problems
 * DASD_EER_PPRCSUSPEND: PPRC was suspended
 * DASD_EER_NOPATH:      There is no path to the device left.
 * DASD_EER_STATECHANGE: The state of the device has changed.
 *
 * For the first three triggers all required information can be supplied by
 * the caller. For these triggers a record is written by the function
 * dasd_eer_write_standard_trigger.
 *
 * The DASD_EER_STATECHANGE trigger is special since a sense subsystem
 * status ccw needs to be executed to gather the necessary sense data first.
 * The dasd_eer_snss function will queue the SNSS request and the request
 * callback will then call dasd_eer_write with the DASD_EER_STATECHANGE
 * trigger.
 *
 * To avoid memory allocations at runtime, the necessary memory is allocated
 * when the extended error reporting is enabled for a device (by
 * dasd_eer_enable). There is one sense subsystem status request for each
 * eer enabled DASD device. The presence of the cqr in device->eer_cqr
 * indicates that eer is enabled for the device. The use of the snss request
 * is protected by the DASD_FLAG_EER_IN_USE bit. When this flag indicates
 * that the cqr is currently in use, dasd_eer_snss cannot start a second
 * request but sets the DASD_FLAG_EER_SNSS flag instead. The callback of
 * the SNSS request will check the bit and call dasd_eer_snss again.
 */

#define SNSS_DATA_SIZE 44

#define DASD_EER_BUSID_SIZE 10
struct dasd_eer_header {
	__u32 total_size;
	__u32 trigger;
	__u64 tv_sec;
	__u64 tv_usec;
	char busid[DASD_EER_BUSID_SIZE];
} __attribute__ ((packed));
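
/*
 * Taken together, a trigger record in the ringbuffer roughly looks like
 * this (the leading size integer is written by dasd_eer_start_record and
 * consumed by the read path; everything after it is what a reader of the
 * character device actually sees):
 *
 *	| int total_size | struct dasd_eer_header | sense data | "EOR\0" |
 *
 * where the sense data part is 0..n sets of 32 bytes for the standard
 * triggers, or SNSS_DATA_SIZE bytes for a state change trigger.
 */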

/*
 * The following function can be used for those triggers that have
 * all necessary data available when the function is called.
 * If the parameter cqr is not NULL, the chain of requests will be searched
 * for valid sense data, and all valid sense data sets will be added to
 * the trigger's data.
 */
static void dasd_eer_write_standard_trigger(struct dasd_device *device,
					    struct dasd_ccw_req *cqr,
					    int trigger)
{
	struct dasd_ccw_req *temp_cqr;
	int data_size;
	struct timespec64 ts;
	struct dasd_eer_header header;
	unsigned long flags;
	struct eerbuffer *eerb;
	char *sense;

	/* go through cqr chain and count the valid sense data sets */
	data_size = 0;
	for (temp_cqr = cqr; temp_cqr; temp_cqr = temp_cqr->refers)
		if (dasd_get_sense(&temp_cqr->irb))
			data_size += 32;

	header.total_size = sizeof(header) + data_size + 4; /* "EOR" */
	header.trigger = trigger;
	ktime_get_real_ts64(&ts);
	header.tv_sec = ts.tv_sec;
	header.tv_usec = ts.tv_nsec / NSEC_PER_USEC;
	strlcpy(header.busid, dev_name(&device->cdev->dev),
		DASD_EER_BUSID_SIZE);

	spin_lock_irqsave(&bufferlock, flags);
	list_for_each_entry(eerb, &bufferlist, list) {
		dasd_eer_start_record(eerb, header.total_size);
		dasd_eer_write_buffer(eerb, (char *) &header, sizeof(header));
		for (temp_cqr = cqr; temp_cqr; temp_cqr = temp_cqr->refers) {
			sense = dasd_get_sense(&temp_cqr->irb);
			if (sense)
				dasd_eer_write_buffer(eerb, sense, 32);
		}
		dasd_eer_write_buffer(eerb, "EOR", 4);
	}
	spin_unlock_irqrestore(&bufferlock, flags);
	wake_up_interruptible(&dasd_eer_read_wait_queue);
}

/*
 * This function writes a DASD_EER_STATECHANGE trigger.
 */
static void dasd_eer_write_snss_trigger(struct dasd_device *device,
					struct dasd_ccw_req *cqr,
					int trigger)
{
	int data_size;
	int snss_rc;
	struct timespec64 ts;
	struct dasd_eer_header header;
	unsigned long flags;
	struct eerbuffer *eerb;

	snss_rc = (cqr->status == DASD_CQR_DONE) ? 0 : -EIO;
	if (snss_rc)
		data_size = 0;
	else
		data_size = SNSS_DATA_SIZE;

	header.total_size = sizeof(header) + data_size + 4; /* "EOR" */
	header.trigger = DASD_EER_STATECHANGE;
	ktime_get_real_ts64(&ts);
	header.tv_sec = ts.tv_sec;
	header.tv_usec = ts.tv_nsec / NSEC_PER_USEC;
	strlcpy(header.busid, dev_name(&device->cdev->dev),
		DASD_EER_BUSID_SIZE);

	spin_lock_irqsave(&bufferlock, flags);
	list_for_each_entry(eerb, &bufferlist, list) {
		dasd_eer_start_record(eerb, header.total_size);
		dasd_eer_write_buffer(eerb, (char *) &header, sizeof(header));
		if (!snss_rc)
			dasd_eer_write_buffer(eerb, cqr->data, SNSS_DATA_SIZE);
		dasd_eer_write_buffer(eerb, "EOR", 4);
	}
	spin_unlock_irqrestore(&bufferlock, flags);
	wake_up_interruptible(&dasd_eer_read_wait_queue);
}

/*
 * This function is called for all triggers. It calls the appropriate
 * function that writes the actual trigger records.
 */
void dasd_eer_write(struct dasd_device *device, struct dasd_ccw_req *cqr,
		    unsigned int id)
{
	if (!device->eer_cqr)
		return;
	switch (id) {
	case DASD_EER_FATALERROR:
	case DASD_EER_PPRCSUSPEND:
		dasd_eer_write_standard_trigger(device, cqr, id);
		break;
	case DASD_EER_NOPATH:
	case DASD_EER_NOSPC:
		dasd_eer_write_standard_trigger(device, NULL, id);
		break;
	case DASD_EER_STATECHANGE:
		dasd_eer_write_snss_trigger(device, cqr, id);
		break;
	default: /* unknown trigger, so we write it without any sense data */
		dasd_eer_write_standard_trigger(device, NULL, id);
		break;
	}
}
EXPORT_SYMBOL(dasd_eer_write);
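
/*
 * A typical caller reports an unrecoverable request, including the sense
 * data collected along its chain of requests, with:
 *
 *	dasd_eer_write(device, cqr, DASD_EER_FATALERROR);
 *
 * For DASD_EER_NOPATH and DASD_EER_NOSPC the cqr argument is ignored and
 * no sense data is written.
 */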

/*
 * Start a sense subsystem status request.
 * Needs to be called with the device held.
 */
void dasd_eer_snss(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;

	cqr = device->eer_cqr;
	if (!cqr)	/* Device not eer enabled. */
		return;
	if (test_and_set_bit(DASD_FLAG_EER_IN_USE, &device->flags)) {
		/* Sense subsystem status request in use. */
		set_bit(DASD_FLAG_EER_SNSS, &device->flags);
		return;
	}
	/* cdev is already locked, can't use dasd_add_request_head */
	clear_bit(DASD_FLAG_EER_SNSS, &device->flags);
	cqr->status = DASD_CQR_QUEUED;
	list_add(&cqr->devlist, &device->ccw_queue);
	dasd_schedule_device_bh(device);
}

/*
 * Callback function for use with sense subsystem status request.
 */
static void dasd_eer_snss_cb(struct dasd_ccw_req *cqr, void *data)
{
	struct dasd_device *device = cqr->startdev;
	unsigned long flags;

	dasd_eer_write(device, cqr, DASD_EER_STATECHANGE);
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	if (device->eer_cqr == cqr) {
		clear_bit(DASD_FLAG_EER_IN_USE, &device->flags);
		if (test_bit(DASD_FLAG_EER_SNSS, &device->flags))
			/* Another SNSS has been requested in the meantime. */
			dasd_eer_snss(device);
		cqr = NULL;
	}
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
	if (cqr)
		/*
		 * Extended error recovery has been switched off while
		 * the SNSS request was running. It could even have
		 * been switched off and on again in which case there
		 * is a new ccw in device->eer_cqr. Free the "old"
		 * snss request now.
		 */
		dasd_sfree_request(cqr, device);
}

/*
 * Enable error reporting on a given device.
 */
int dasd_eer_enable(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr = NULL;
	unsigned long flags;
	struct ccw1 *ccw;
	int rc = 0;

	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	if (device->eer_cqr)
		goto out;
	else if (!device->discipline ||
		 strcmp(device->discipline->name, "ECKD"))
		rc = -EMEDIUMTYPE;
	else if (test_bit(DASD_FLAG_OFFLINE, &device->flags))
		rc = -EBUSY;

	if (rc)
		goto out;

	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* SNSS */,
				   SNSS_DATA_SIZE, device, NULL);
	if (IS_ERR(cqr)) {
		rc = -ENOMEM;
		cqr = NULL;
		goto out;
	}

	cqr->startdev = device;
	cqr->retries = 255;
	cqr->expires = 10 * HZ;
	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	set_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags);

	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_SNSS;
	ccw->count = SNSS_DATA_SIZE;
	ccw->flags = 0;
	ccw->cda = (__u32)(addr_t) cqr->data;

	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	cqr->callback = dasd_eer_snss_cb;

	if (!device->eer_cqr) {
		device->eer_cqr = cqr;
		cqr = NULL;
	}

out:
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);

	if (cqr)
		dasd_sfree_request(cqr, device);

	return rc;
}

/*
 * Disable error reporting on a given device.
 */
void dasd_eer_disable(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;
	unsigned long flags;
	int in_use;

	if (!device->eer_cqr)
		return;
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	cqr = device->eer_cqr;
	device->eer_cqr = NULL;
	clear_bit(DASD_FLAG_EER_SNSS, &device->flags);
	in_use = test_and_clear_bit(DASD_FLAG_EER_IN_USE, &device->flags);
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
	if (cqr && !in_use)
		dasd_sfree_request(cqr, device);
}

/*
 * SECTION: the device operations
 */

/*
 * On the one side we need a lock to access our internal buffer, on the
 * other side a copy_to_user can sleep. So we need to copy the data we have
 * to transfer in a readbuffer, which is protected by the readbuffer_mutex.
 */
static char readbuffer[PAGE_SIZE];
static DEFINE_MUTEX(readbuffer_mutex);
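
/*
 * User space reads the trigger records through the misc device registered
 * as "dasd_eer" below: each open gets its own eerbuffer, poll signals
 * EPOLLIN when a record is available, and read returns at most one record
 * (or the remaining part of one) per call, blocking unless O_NONBLOCK is
 * set.
 */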

static int dasd_eer_open(struct inode *inp, struct file *filp)
{
	struct eerbuffer *eerb;
	unsigned long flags;

	eerb = kzalloc(sizeof(struct eerbuffer), GFP_KERNEL);
	if (!eerb)
		return -ENOMEM;
	eerb->buffer_page_count = eer_pages;
	if (eerb->buffer_page_count < 1 ||
	    eerb->buffer_page_count > INT_MAX / PAGE_SIZE) {
		kfree(eerb);
		DBF_EVENT(DBF_WARNING, "can't open device since module "
			  "parameter eer_pages is smaller than 1 or"
			  " bigger than %d", (int)(INT_MAX / PAGE_SIZE));
		return -EINVAL;
	}
	eerb->buffersize = eerb->buffer_page_count * PAGE_SIZE;
	eerb->buffer = kmalloc_array(eerb->buffer_page_count, sizeof(char *),
				     GFP_KERNEL);
	if (!eerb->buffer) {
		kfree(eerb);
		return -ENOMEM;
	}
	if (dasd_eer_allocate_buffer_pages(eerb->buffer,
					   eerb->buffer_page_count)) {
		kfree(eerb->buffer);
		kfree(eerb);
		return -ENOMEM;
	}
	filp->private_data = eerb;
	spin_lock_irqsave(&bufferlock, flags);
	list_add(&eerb->list, &bufferlist);
	spin_unlock_irqrestore(&bufferlock, flags);

	return nonseekable_open(inp, filp);
}

static int dasd_eer_close(struct inode *inp, struct file *filp)
{
	struct eerbuffer *eerb;
	unsigned long flags;

	eerb = (struct eerbuffer *) filp->private_data;
	spin_lock_irqsave(&bufferlock, flags);
	list_del(&eerb->list);
	spin_unlock_irqrestore(&bufferlock, flags);
	dasd_eer_free_buffer_pages(eerb->buffer, eerb->buffer_page_count);
	kfree(eerb->buffer);
	kfree(eerb);

	return 0;
}

static ssize_t dasd_eer_read(struct file *filp, char __user *buf,
			     size_t count, loff_t *ppos)
{
	int tc, rc;
	int tailcount, effective_count;
	unsigned long flags;
	struct eerbuffer *eerb;

	eerb = (struct eerbuffer *) filp->private_data;
	if (mutex_lock_interruptible(&readbuffer_mutex))
		return -ERESTARTSYS;

	spin_lock_irqsave(&bufferlock, flags);

	if (eerb->residual < 0) { /* the remainder of this record */
				  /* has been deleted             */
		eerb->residual = 0;
		spin_unlock_irqrestore(&bufferlock, flags);
		mutex_unlock(&readbuffer_mutex);
		return -EIO;
	} else if (eerb->residual > 0) {
		/* OK we still have a second half of a record to deliver */
		effective_count = min(eerb->residual, (int) count);
		eerb->residual -= effective_count;
	} else {
		tc = 0;
		while (!tc) {
			tc = dasd_eer_read_buffer(eerb, (char *) &tailcount,
						  sizeof(tailcount));
			if (!tc) {
				/* no data available */
				spin_unlock_irqrestore(&bufferlock, flags);
				mutex_unlock(&readbuffer_mutex);
				if (filp->f_flags & O_NONBLOCK)
					return -EAGAIN;
				rc = wait_event_interruptible(
					dasd_eer_read_wait_queue,
					eerb->head != eerb->tail);
				if (rc)
					return rc;
				if (mutex_lock_interruptible(&readbuffer_mutex))
					return -ERESTARTSYS;
				spin_lock_irqsave(&bufferlock, flags);
			}
		}
		WARN_ON(tc != sizeof(tailcount));
		effective_count = min(tailcount, (int) count);
		eerb->residual = tailcount - effective_count;
	}

	tc = dasd_eer_read_buffer(eerb, readbuffer, effective_count);
	WARN_ON(tc != effective_count);

	spin_unlock_irqrestore(&bufferlock, flags);

	if (copy_to_user(buf, readbuffer, effective_count)) {
		mutex_unlock(&readbuffer_mutex);
		return -EFAULT;
	}

	mutex_unlock(&readbuffer_mutex);
	return effective_count;
}

static __poll_t dasd_eer_poll(struct file *filp, poll_table *ptable)
{
	__poll_t mask;
	unsigned long flags;
	struct eerbuffer *eerb;

	eerb = (struct eerbuffer *) filp->private_data;
	poll_wait(filp, &dasd_eer_read_wait_queue, ptable);
	spin_lock_irqsave(&bufferlock, flags);
	if (eerb->head != eerb->tail)
		mask = EPOLLIN | EPOLLRDNORM;
	else
		mask = 0;
	spin_unlock_irqrestore(&bufferlock, flags);
	return mask;
}

static const struct file_operations dasd_eer_fops = {
	.open		= &dasd_eer_open,
	.release	= &dasd_eer_close,
	.read		= &dasd_eer_read,
	.poll		= &dasd_eer_poll,
	.owner		= THIS_MODULE,
	.llseek		= noop_llseek,
};

static struct miscdevice *dasd_eer_dev = NULL;

int __init dasd_eer_init(void)
{
	int rc;

	dasd_eer_dev = kzalloc(sizeof(*dasd_eer_dev), GFP_KERNEL);
	if (!dasd_eer_dev)
		return -ENOMEM;

	dasd_eer_dev->minor = MISC_DYNAMIC_MINOR;
	dasd_eer_dev->name  = "dasd_eer";
	dasd_eer_dev->fops  = &dasd_eer_fops;

	rc = misc_register(dasd_eer_dev);
	if (rc) {
		kfree(dasd_eer_dev);
		dasd_eer_dev = NULL;
		DBF_EVENT(DBF_ERR, "%s", "dasd_eer_init could not "
			  "register misc device");
		return rc;
	}

	return 0;
}

void dasd_eer_exit(void)
{
	if (dasd_eer_dev) {
		misc_deregister(dasd_eer_dev);
		kfree(dasd_eer_dev);
		dasd_eer_dev = NULL;
	}
}