/*
 * Copyright 2013 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 */
#include "qxl_drv.h"
#include "qxl_object.h"

#include <drm/drm_crtc_helper.h>
#include <linux/io-mapping.h>
static void qxl_dump_mode(struct qxl_device *qdev, void *p)
{
	struct qxl_mode *m = p;
	DRM_DEBUG_KMS("%d: %dx%d %d bits, stride %d, %dmm x %dmm, orientation %d\n",
		      m->id, m->x_res, m->y_res, m->bits, m->stride, m->x_mili,
		      m->y_mili, m->orientation);
}
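
/* Sanity-check the mapped ROM BAR: verify the QXL magic, log the device and
 * ROM layout details, and pull the mode list that follows the ROM header
 * into qdev->mode_info. */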
static bool qxl_check_device(struct qxl_device *qdev)
{
	struct qxl_rom *rom = qdev->rom;
	int mode_offset;
	int i;

	if (rom->magic != 0x4f525851) {
		DRM_ERROR("bad rom signature %x\n", rom->magic);
		return false;
	}

	DRM_INFO("Device Version %d.%d\n", rom->id, rom->update_id);
	DRM_INFO("Compression level %d log level %d\n", rom->compression_level,
		 rom->log_level);
	DRM_INFO("Currently using mode #%d, list at 0x%x\n",
		 rom->mode, rom->modes_offset);
	DRM_INFO("%d io pages at offset 0x%x\n",
		 rom->num_io_pages, rom->pages_offset);
	DRM_INFO("%d byte draw area at offset 0x%x\n",
		 rom->surface0_area_size, rom->draw_area_offset);

	qdev->vram_size = rom->surface0_area_size;
	DRM_INFO("RAM header offset: 0x%x\n", rom->ram_header_offset);

	mode_offset = rom->modes_offset / 4;
	qdev->mode_info.num_modes = ((u32 *)rom)[mode_offset];
	DRM_INFO("rom modes offset 0x%x for %d modes\n", rom->modes_offset,
		 qdev->mode_info.num_modes);
	qdev->mode_info.modes = (void *)((uint32_t *)rom + mode_offset + 1);
	for (i = 0; i < qdev->mode_info.num_modes; i++)
		qxl_dump_mode(qdev, qdev->mode_info.modes + i);
	return true;
}
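
/* Program one memory slot into the device: the slot's physical range is
 * written into the shared ram header and latched with a memslot-add I/O
 * command. */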
static void setup_hw_slot(struct qxl_device *qdev, int slot_index,
			  struct qxl_memslot *slot)
{
	qdev->ram_header->mem_slot.mem_start = slot->start_phys_addr;
	qdev->ram_header->mem_slot.mem_end = slot->end_phys_addr;
	qxl_io_memslot_add(qdev, slot_index);
}
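
/* Fill in the driver-side bookkeeping for a memory slot and hand it to the
 * hardware. high_bits caches the slot index and generation shifted into the
 * top (slot_id_bits + slot_gen_bits) bits, matching how QXL physical
 * addresses carry the slot they refer to. */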
static uint8_t setup_slot(struct qxl_device *qdev, uint8_t slot_index_offset,
			  unsigned long start_phys_addr, unsigned long end_phys_addr)
{
	uint64_t high_bits;
	struct qxl_memslot *slot;
	uint8_t slot_index;

	slot_index = qdev->rom->slots_start + slot_index_offset;
	slot = &qdev->mem_slots[slot_index];
	slot->start_phys_addr = start_phys_addr;
	slot->end_phys_addr = end_phys_addr;

	setup_hw_slot(qdev, slot_index, slot);

	slot->generation = qdev->rom->slot_generation;
	high_bits = slot_index << qdev->slot_gen_bits;
	high_bits |= slot->generation;
	high_bits <<= (64 - (qdev->slot_gen_bits + qdev->slot_id_bits));
	slot->high_bits = high_bits;
	return slot_index;
}
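
/* Re-program the main and surface memory slots into the hardware, e.g. after
 * a device reset has discarded them; the cached driver-side state in
 * qdev->mem_slots is reused as-is. */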
void qxl_reinit_memslots(struct qxl_device *qdev)
{
	setup_hw_slot(qdev, qdev->main_mem_slot, &qdev->mem_slots[qdev->main_mem_slot]);
	setup_hw_slot(qdev, qdev->surfaces_mem_slot, &qdev->mem_slots[qdev->surfaces_mem_slot]);
}
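
/* Deferred work item: reclaim command releases the device has finished with
 * (see qxl_garbage_collect()). */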
static void qxl_gc_work(struct work_struct *work)
{
	struct qxl_device *qdev = container_of(work, struct qxl_device, gc_work);
	qxl_garbage_collect(qdev);
}
static int qxl_device_init(struct qxl_device *qdev,
			   struct drm_device *ddev,
			   struct pci_dev *pdev,
			   unsigned long flags)
{
	int r, sb;

	qdev->dev = &pdev->dev;
	qdev->ddev = ddev;
	qdev->pdev = pdev;
	qdev->flags = flags;

	mutex_init(&qdev->gem.mutex);
	mutex_init(&qdev->update_area_mutex);
	mutex_init(&qdev->release_mutex);
	mutex_init(&qdev->surf_evict_mutex);
	INIT_LIST_HEAD(&qdev->gem.objects);
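
	/* PCI layout as used below: BAR 0 is the main VRAM/draw area, BAR 2
	 * the ROM, BAR 3 the I/O ports; surface memory comes from the 64-bit
	 * BAR 4 when present, otherwise from the 32-bit BAR 1. */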
	qdev->rom_base = pci_resource_start(pdev, 2);
	qdev->rom_size = pci_resource_len(pdev, 2);
	qdev->vram_base = pci_resource_start(pdev, 0);
	qdev->io_base = pci_resource_start(pdev, 3);

	qdev->vram_mapping = io_mapping_create_wc(qdev->vram_base, pci_resource_len(pdev, 0));
	if (pci_resource_len(pdev, 4) > 0) {
		/* 64bit surface bar present */
		sb = 4;
		qdev->surfaceram_base = pci_resource_start(pdev, sb);
		qdev->surfaceram_size = pci_resource_len(pdev, sb);
		qdev->surface_mapping =
			io_mapping_create_wc(qdev->surfaceram_base,
					     qdev->surfaceram_size);
	}
	if (qdev->surface_mapping == NULL) {
		/* 64bit surface bar not present (or mapping failed) */
		sb = 1;
		qdev->surfaceram_base = pci_resource_start(pdev, sb);
		qdev->surfaceram_size = pci_resource_len(pdev, sb);
		qdev->surface_mapping =
			io_mapping_create_wc(qdev->surfaceram_base,
					     qdev->surfaceram_size);
	}
	DRM_DEBUG_KMS("qxl: vram %llx-%llx(%dM %dk), surface %llx-%llx(%dM %dk, %s)\n",
		      (unsigned long long)qdev->vram_base,
		      (unsigned long long)pci_resource_end(pdev, 0),
		      (int)pci_resource_len(pdev, 0) / 1024 / 1024,
		      (int)pci_resource_len(pdev, 0) / 1024,
		      (unsigned long long)qdev->surfaceram_base,
		      (unsigned long long)pci_resource_end(pdev, sb),
		      (int)qdev->surfaceram_size / 1024 / 1024,
		      (int)qdev->surfaceram_size / 1024,
		      (sb == 4) ? "64bit" : "32bit");
	qdev->rom = ioremap(qdev->rom_base, qdev->rom_size);
	if (!qdev->rom) {
		pr_err("Unable to ioremap ROM\n");
		return -ENOMEM;
	}

	qxl_check_device(qdev);

	r = qxl_bo_init(qdev);
	if (r) {
		DRM_ERROR("bo init failed %d\n", r);
		return r;
	}
	qdev->ram_header = ioremap(qdev->vram_base +
				   qdev->rom->ram_header_offset,
				   sizeof(*qdev->ram_header));
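
	/* Three rings live in the ram header: commands and cursor updates
	 * flow to the device, releases flow back so the driver can reclaim
	 * resources. */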
	qdev->command_ring = qxl_ring_create(&(qdev->ram_header->cmd_ring_hdr),
					     sizeof(struct qxl_command),
					     QXL_COMMAND_RING_SIZE,
					     qdev->io_base + QXL_IO_NOTIFY_CMD,
					     false,
					     &qdev->display_event);

	qdev->cursor_ring = qxl_ring_create(
				&(qdev->ram_header->cursor_ring_hdr),
				sizeof(struct qxl_command),
				QXL_CURSOR_RING_SIZE,
				qdev->io_base + QXL_IO_NOTIFY_CMD,
				false,
				&qdev->cursor_event);
	qdev->release_ring = qxl_ring_create(
				&(qdev->ram_header->release_ring_hdr),
				sizeof(uint64_t),
				QXL_RELEASE_RING_SIZE, 0, true,
				NULL);
	/* TODO - slot initialization should happen on reset. where is our
	 * reset handler? */
	qdev->n_mem_slots = qdev->rom->slots_end;
	qdev->slot_gen_bits = qdev->rom->slot_gen_bits;
	qdev->slot_id_bits = qdev->rom->slot_id_bits;
	qdev->va_slot_mask =
		(~(uint64_t)0) >> (qdev->slot_id_bits + qdev->slot_gen_bits);
	qdev->mem_slots =
		kmalloc(qdev->n_mem_slots * sizeof(struct qxl_memslot),
			GFP_KERNEL);

	idr_init(&qdev->release_idr);
	spin_lock_init(&qdev->release_idr_lock);
	spin_lock_init(&qdev->release_lock);

	idr_init(&qdev->surf_id_idr);
	spin_lock_init(&qdev->surf_id_idr_lock);

	mutex_init(&qdev->async_io_mutex);
	/* reset the device into a known state - no memslots, no primary
	 * created, no surfaces. */
	qxl_io_reset(qdev);

	/* must initialize irq before first async io - slot creation */
	r = qxl_irq_init(qdev);
	if (r)
		return r;
	/*
	 * Note that virtual is surface0. We rely on the single ioremap done
	 * before.
	 */
	qdev->main_mem_slot = setup_slot(qdev, 0,
		(unsigned long)qdev->vram_base,
		(unsigned long)qdev->vram_base + qdev->rom->ram_header_offset);
	qdev->surfaces_mem_slot = setup_slot(qdev, 1,
		(unsigned long)qdev->surfaceram_base,
		(unsigned long)qdev->surfaceram_base + qdev->surfaceram_size);
	DRM_INFO("main mem slot %d [%lx,%x]\n",
		 qdev->main_mem_slot,
		 (unsigned long)qdev->vram_base, qdev->rom->ram_header_offset);
	DRM_INFO("surface mem slot %d [%lx,%lx]\n",
		 qdev->surfaces_mem_slot,
		 (unsigned long)qdev->surfaceram_base,
		 (unsigned long)qdev->surfaceram_size);
	qdev->gc_queue = create_singlethread_workqueue("qxl_gc");
	INIT_WORK(&qdev->gc_work, qxl_gc_work);

	r = qxl_fb_init(qdev);
	if (r)
		return r;

	return 0;
}
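
/* Tear down in roughly the reverse order of qxl_device_init: pending
 * releases, the GC workqueue, the rings, then the mappings. */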
static void qxl_device_fini(struct qxl_device *qdev)
{
	if (qdev->current_release_bo[0])
		qxl_bo_unref(&qdev->current_release_bo[0]);
	if (qdev->current_release_bo[1])
		qxl_bo_unref(&qdev->current_release_bo[1]);
	flush_workqueue(qdev->gc_queue);
	destroy_workqueue(qdev->gc_queue);
	qdev->gc_queue = NULL;

	qxl_ring_free(qdev->command_ring);
	qxl_ring_free(qdev->cursor_ring);
	qxl_ring_free(qdev->release_ring);
	qxl_bo_fini(qdev);
	io_mapping_free(qdev->surface_mapping);
	io_mapping_free(qdev->vram_mapping);
	iounmap(qdev->ram_header);
	iounmap(qdev->rom);
	qdev->rom = NULL;
	qdev->mode_info.modes = NULL;
	qdev->mode_info.num_modes = 0;
	qxl_debugfs_remove_files(qdev);
}
int qxl_driver_unload(struct drm_device *dev)
{
	struct qxl_device *qdev = dev->dev_private;

	if (qdev == NULL)
		return 0;

	drm_vblank_cleanup(dev);

	qxl_modeset_fini(qdev);
	qxl_device_fini(qdev);

	kfree(qdev);
	dev->dev_private = NULL;
	return 0;
}
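
/* DRM driver entry point: allocate the per-device state, run the full
 * device and modeset init, and unwind through qxl_driver_unload() on
 * error. */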
int qxl_driver_load(struct drm_device *dev, unsigned long flags)
{
	struct qxl_device *qdev;
	int r;

	/* require kms */
	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	qdev = kzalloc(sizeof(struct qxl_device), GFP_KERNEL);
	if (qdev == NULL)
		return -ENOMEM;

	dev->dev_private = qdev;

	r = qxl_device_init(qdev, dev, dev->pdev, flags);
	if (r)
		goto out;

	r = drm_vblank_init(dev, 1);
	if (r)
		goto unload;

	r = qxl_modeset_init(qdev);
	if (r)
		goto unload;

	drm_kms_helper_poll_init(qdev->ddev);

	return 0;
unload:
	qxl_driver_unload(dev);

out:
	kfree(qdev);
	return r;
}