drivers/gpu/drm/sti/sti_gdp.c
/*
 * Copyright (C) STMicroelectronics SA 2014
 * Authors: Benjamin Gaignard <benjamin.gaignard@st.com>
 *          Fabien Dessenne <fabien.dessenne@st.com>
 *          for STMicroelectronics.
 * License terms: GNU General Public License (GPL), version 2
 */

#include <linux/clk.h>
#include <linux/dma-mapping.h>

#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_cma_helper.h>

#include "sti_compositor.h"
#include "sti_gdp.h"
#include "sti_plane.h"
#include "sti_vtg.h"
#define ALPHASWITCH     BIT(6)
#define ENA_COLOR_FILL  BIT(8)
#define BIGNOTLITTLE    BIT(23)
#define WAIT_NEXT_VSYNC BIT(31)

/* GDP color formats */
#define GDP_RGB565      0x00
#define GDP_RGB888      0x01
#define GDP_RGB888_32   0x02
#define GDP_XBGR8888    (GDP_RGB888_32 | BIGNOTLITTLE | ALPHASWITCH)
#define GDP_ARGB8565    0x04
#define GDP_ARGB8888    0x05
#define GDP_ABGR8888    (GDP_ARGB8888 | BIGNOTLITTLE | ALPHASWITCH)
#define GDP_ARGB1555    0x06
#define GDP_ARGB4444    0x07
#define GDP_CLUT8       0x0B
#define GDP_YCBR888     0x10
#define GDP_YCBR422R    0x12
#define GDP_AYCBR8888   0x15

#define GAM_GDP_CTL_OFFSET      0x00
#define GAM_GDP_AGC_OFFSET      0x04
#define GAM_GDP_VPO_OFFSET      0x0C
#define GAM_GDP_VPS_OFFSET      0x10
#define GAM_GDP_PML_OFFSET      0x14
#define GAM_GDP_PMP_OFFSET      0x18
#define GAM_GDP_SIZE_OFFSET     0x1C
#define GAM_GDP_NVN_OFFSET      0x24
#define GAM_GDP_KEY1_OFFSET     0x28
#define GAM_GDP_KEY2_OFFSET     0x2C
#define GAM_GDP_PPT_OFFSET      0x34
#define GAM_GDP_CML_OFFSET      0x3C
#define GAM_GDP_MST_OFFSET      0x68

#define GAM_GDP_ALPHARANGE_255  BIT(5)
#define GAM_GDP_AGC_FULL_RANGE  0x00808080
#define GAM_GDP_PPT_IGNORE      (BIT(1) | BIT(0))
#define GAM_GDP_SIZE_MAX        0x7FF

#define GDP_NODE_NB_BANK        2
#define GDP_NODE_PER_FIELD      2
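/*
 * Layout of a GDP processing node as fetched by the hardware: the member
 * offsets below mirror the GAM_GDP_xxx_OFFSET register map (0x00 to 0x3C),
 * with the reservedN members padding the gaps. Two banks of top/bottom
 * field nodes are kept (GDP_NODE_NB_BANK) so that one list can be rebuilt
 * by the driver while the other one is being read by the hardware.
 */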
struct sti_gdp_node {
	u32 gam_gdp_ctl;
	u32 gam_gdp_agc;
	u32 reserved1;
	u32 gam_gdp_vpo;
	u32 gam_gdp_vps;
	u32 gam_gdp_pml;
	u32 gam_gdp_pmp;
	u32 gam_gdp_size;
	u32 reserved2;
	u32 gam_gdp_nvn;
	u32 gam_gdp_key1;
	u32 gam_gdp_key2;
	u32 reserved3;
	u32 gam_gdp_ppt;
	u32 reserved4;
	u32 gam_gdp_cml;
};
struct sti_gdp_node_list {
	struct sti_gdp_node *top_field;
	dma_addr_t top_field_paddr;
	struct sti_gdp_node *btm_field;
	dma_addr_t btm_field_paddr;
};
/**
 * STI GDP structure
 *
 * @plane:              sti_plane structure
 * @dev:                driver device
 * @regs:               gdp registers
 * @clk_pix:            pixel clock for the current gdp
 * @clk_main_parent:    gdp parent clock if main path used
 * @clk_aux_parent:     gdp parent clock if aux path used
 * @vtg_field_nb:       callback for VTG FIELD (top or bottom) notification
 * @is_curr_top:        true if the current node processed is the top field
 * @node_list:          array of node lists
 */
struct sti_gdp {
	struct sti_plane plane;
	struct device *dev;
	void __iomem *regs;
	struct clk *clk_pix;
	struct clk *clk_main_parent;
	struct clk *clk_aux_parent;
	struct notifier_block vtg_field_nb;
	bool is_curr_top;
	struct sti_gdp_node_list node_list[GDP_NODE_NB_BANK];
};
#define to_sti_gdp(x) container_of(x, struct sti_gdp, plane)
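/*
 * DRM formats exposed by the plane. Each entry is expected to have a
 * matching case in sti_gdp_fourcc2format() below.
 */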
static const uint32_t gdp_supported_formats[] = {
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_ABGR8888,
	DRM_FORMAT_ARGB4444,
	DRM_FORMAT_ARGB1555,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_RGB888,
	DRM_FORMAT_AYUV,
	DRM_FORMAT_YUV444,
	DRM_FORMAT_VYUY,
	DRM_FORMAT_C8,
};
static int sti_gdp_fourcc2format(int fourcc)
{
	switch (fourcc) {
	case DRM_FORMAT_XRGB8888:
		return GDP_RGB888_32;
	case DRM_FORMAT_XBGR8888:
		return GDP_XBGR8888;
	case DRM_FORMAT_ARGB8888:
		return GDP_ARGB8888;
	case DRM_FORMAT_ABGR8888:
		return GDP_ABGR8888;
	case DRM_FORMAT_ARGB4444:
		return GDP_ARGB4444;
	case DRM_FORMAT_ARGB1555:
		return GDP_ARGB1555;
	case DRM_FORMAT_RGB565:
		return GDP_RGB565;
	case DRM_FORMAT_RGB888:
		return GDP_RGB888;
	case DRM_FORMAT_AYUV:
		return GDP_AYCBR8888;
	case DRM_FORMAT_YUV444:
		return GDP_YCBR888;
	case DRM_FORMAT_VYUY:
		return GDP_YCBR422R;
	case DRM_FORMAT_C8:
		return GDP_CLUT8;
	}
	return -1;
}
static int sti_gdp_get_alpharange(int format)
{
	switch (format) {
	case GDP_ARGB8565:
	case GDP_ARGB8888:
	case GDP_AYCBR8888:
	case GDP_ABGR8888:
		return GAM_GDP_ALPHARANGE_255;
	}
	return 0;
}
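/*
 * The hardware NVN register holds the physical address of the node that
 * will be fetched next. The two helpers below compare it against the
 * top/bottom node addresses of each bank to tell which list is currently
 * in use by the hardware and which one is free for the driver to update.
 */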
/**
 * sti_gdp_get_free_nodes
 * @gdp: gdp pointer
 *
 * Look for a GDP node list that is not currently read by the HW.
 *
 * RETURNS:
 * Pointer to the free GDP node list
 */
static struct sti_gdp_node_list *sti_gdp_get_free_nodes(struct sti_gdp *gdp)
{
	int hw_nvn;
	unsigned int i;

	hw_nvn = readl(gdp->regs + GAM_GDP_NVN_OFFSET);
	if (!hw_nvn)
		goto end;

	for (i = 0; i < GDP_NODE_NB_BANK; i++)
		if ((hw_nvn != gdp->node_list[i].btm_field_paddr) &&
		    (hw_nvn != gdp->node_list[i].top_field_paddr))
			return &gdp->node_list[i];

	/* in hazardous cases restart with the first node */
	DRM_ERROR("inconsistent NVN for %s: 0x%08X\n",
		  sti_plane_to_str(&gdp->plane), hw_nvn);

end:
	return &gdp->node_list[0];
}
/**
 * sti_gdp_get_current_nodes
 * @gdp: gdp pointer
 *
 * Look for GDP nodes that are currently read by the HW.
 *
 * RETURNS:
 * Pointer to the current GDP node list
 */
static
struct sti_gdp_node_list *sti_gdp_get_current_nodes(struct sti_gdp *gdp)
{
	int hw_nvn;
	unsigned int i;

	hw_nvn = readl(gdp->regs + GAM_GDP_NVN_OFFSET);
	if (!hw_nvn)
		goto end;

	for (i = 0; i < GDP_NODE_NB_BANK; i++)
		if ((hw_nvn == gdp->node_list[i].btm_field_paddr) ||
		    (hw_nvn == gdp->node_list[i].top_field_paddr))
			return &gdp->node_list[i];

end:
	DRM_DEBUG_DRIVER("Warning, NVN 0x%08X for %s does not match any node\n",
			 hw_nvn, sti_plane_to_str(&gdp->plane));

	return NULL;
}
/**
 * sti_gdp_disable
 * @gdp: gdp pointer
 *
 * Disable a GDP.
 */
static void sti_gdp_disable(struct sti_gdp *gdp)
{
	struct drm_plane *drm_plane = &gdp->plane.drm_plane;
	struct sti_mixer *mixer = to_sti_mixer(drm_plane->crtc);
	struct sti_compositor *compo = dev_get_drvdata(gdp->dev);
	unsigned int i;

	DRM_DEBUG_DRIVER("%s\n", sti_plane_to_str(&gdp->plane));

	/* Set the nodes as 'to be ignored on mixer' */
	for (i = 0; i < GDP_NODE_NB_BANK; i++) {
		gdp->node_list[i].top_field->gam_gdp_ppt |= GAM_GDP_PPT_IGNORE;
		gdp->node_list[i].btm_field->gam_gdp_ppt |= GAM_GDP_PPT_IGNORE;
	}

	if (sti_vtg_unregister_client(mixer->id == STI_MIXER_MAIN ?
			compo->vtg_main : compo->vtg_aux, &gdp->vtg_field_nb))
		DRM_DEBUG_DRIVER("Warning: cannot unregister VTG notifier\n");

	if (gdp->clk_pix)
		clk_disable_unprepare(gdp->clk_pix);

	gdp->plane.status = STI_PLANE_DISABLED;
}
/**
 * sti_gdp_field_cb
 * @nb: notifier block
 * @event: event message
 * @data: private data
 *
 * Handle VTG top field and bottom field event.
 *
 * RETURNS:
 * 0 on success.
 */
int sti_gdp_field_cb(struct notifier_block *nb,
		     unsigned long event, void *data)
{
	struct sti_gdp *gdp = container_of(nb, struct sti_gdp, vtg_field_nb);

	if (gdp->plane.status == STI_PLANE_FLUSHING) {
		/* the disable must be synchronized with the vsync event */
		DRM_DEBUG_DRIVER("Vsync event received => disable %s\n",
				 sti_plane_to_str(&gdp->plane));

		sti_gdp_disable(gdp);
	}

	switch (event) {
	case VTG_TOP_FIELD_EVENT:
		gdp->is_curr_top = true;
		break;
	case VTG_BOTTOM_FIELD_EVENT:
		gdp->is_curr_top = false;
		break;
	default:
		DRM_ERROR("unsupported event: %lu\n", event);
		break;
	}

	return 0;
}
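/*
 * sti_gdp_init() allocates the node banks out of a single write-combined
 * DMA area (nodes must be 16-byte aligned, hence the dma_addr & 0xF checks)
 * and, on stih407 compositors, looks up the per-GDP pixel clock and its
 * possible parent clocks.
 */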
static void sti_gdp_init(struct sti_gdp *gdp)
{
	struct device_node *np = gdp->dev->of_node;
	dma_addr_t dma_addr;
	void *base;
	unsigned int i, size;

	/* Allocate all the nodes within a single memory page */
	size = sizeof(struct sti_gdp_node) *
	       GDP_NODE_PER_FIELD * GDP_NODE_NB_BANK;
	base = dma_alloc_writecombine(gdp->dev,
				      size, &dma_addr, GFP_KERNEL | GFP_DMA);

	if (!base) {
		DRM_ERROR("Failed to allocate memory for GDP node\n");
		return;
	}

	memset(base, 0, size);

	for (i = 0; i < GDP_NODE_NB_BANK; i++) {
		if (dma_addr & 0xF) {
			DRM_ERROR("Mem alignment failed\n");
			return;
		}
		gdp->node_list[i].top_field = base;
		gdp->node_list[i].top_field_paddr = dma_addr;

		DRM_DEBUG_DRIVER("node[%d].top_field=%p\n", i, base);
		base += sizeof(struct sti_gdp_node);
		dma_addr += sizeof(struct sti_gdp_node);

		if (dma_addr & 0xF) {
			DRM_ERROR("Mem alignment failed\n");
			return;
		}
		gdp->node_list[i].btm_field = base;
		gdp->node_list[i].btm_field_paddr = dma_addr;
		DRM_DEBUG_DRIVER("node[%d].btm_field=%p\n", i, base);
		base += sizeof(struct sti_gdp_node);
		dma_addr += sizeof(struct sti_gdp_node);
	}

	if (of_device_is_compatible(np, "st,stih407-compositor")) {
		/* each GDP of the STiH407 chip has its own pixel clock */
		char *clk_name;

		switch (gdp->plane.desc) {
		case STI_GDP_0:
			clk_name = "pix_gdp1";
			break;
		case STI_GDP_1:
			clk_name = "pix_gdp2";
			break;
		case STI_GDP_2:
			clk_name = "pix_gdp3";
			break;
		case STI_GDP_3:
			clk_name = "pix_gdp4";
			break;
		default:
			DRM_ERROR("GDP id not recognized\n");
			return;
		}

		gdp->clk_pix = devm_clk_get(gdp->dev, clk_name);
		if (IS_ERR(gdp->clk_pix))
			DRM_ERROR("Cannot get %s clock\n", clk_name);

		gdp->clk_main_parent = devm_clk_get(gdp->dev, "main_parent");
		if (IS_ERR(gdp->clk_main_parent))
			DRM_ERROR("Cannot get main_parent clock\n");

		gdp->clk_aux_parent = devm_clk_get(gdp->dev, "aux_parent");
		if (IS_ERR(gdp->clk_aux_parent))
			DRM_ERROR("Cannot get aux_parent clock\n");
	}
}
static void sti_gdp_atomic_update(struct drm_plane *drm_plane,
				  struct drm_plane_state *oldstate)
{
	struct drm_plane_state *state = drm_plane->state;
	struct sti_plane *plane = to_sti_plane(drm_plane);
	struct sti_gdp *gdp = to_sti_gdp(plane);
	struct drm_crtc *crtc = state->crtc;
	struct sti_compositor *compo = dev_get_drvdata(gdp->dev);
	struct drm_framebuffer *fb = state->fb;
	bool first_prepare = plane->status == STI_PLANE_DISABLED ? true : false;
	struct sti_mixer *mixer;
	struct drm_display_mode *mode;
	int dst_x, dst_y, dst_w, dst_h;
	int src_x, src_y, src_w, src_h;
	struct drm_gem_cma_object *cma_obj;
	struct sti_gdp_node_list *list;
	struct sti_gdp_node_list *curr_list;
	struct sti_gdp_node *top_field, *btm_field;
	u32 dma_updated_top;
	u32 dma_updated_btm;
	int format;
	unsigned int depth, bpp;
	u32 ydo, xdo, yds, xds;
	int res;

	/* Manage the case where crtc is null (disabled) */
	if (!crtc)
		return;

	mixer = to_sti_mixer(crtc);
	mode = &crtc->mode;
	dst_x = state->crtc_x;
	dst_y = state->crtc_y;
	dst_w = clamp_val(state->crtc_w, 0, mode->crtc_hdisplay - dst_x);
	dst_h = clamp_val(state->crtc_h, 0, mode->crtc_vdisplay - dst_y);
	/* src_x are in 16.16 format */
	src_x = state->src_x >> 16;
	src_y = state->src_y >> 16;
	src_w = state->src_w >> 16;
	src_h = state->src_h >> 16;

	DRM_DEBUG_KMS("CRTC:%d (%s) drm plane:%d (%s)\n",
		      crtc->base.id, sti_mixer_to_str(mixer),
		      drm_plane->base.id, sti_plane_to_str(plane));
	DRM_DEBUG_KMS("%s dst=(%dx%d)@(%d,%d) - src=(%dx%d)@(%d,%d)\n",
		      sti_plane_to_str(plane),
		      dst_w, dst_h, dst_x, dst_y,
		      src_w, src_h, src_x, src_y);

	list = sti_gdp_get_free_nodes(gdp);
	top_field = list->top_field;
	btm_field = list->btm_field;

	dev_dbg(gdp->dev, "%s %s top_node:0x%p btm_node:0x%p\n", __func__,
		sti_plane_to_str(plane), top_field, btm_field);

	/* build the top field */
	top_field->gam_gdp_agc = GAM_GDP_AGC_FULL_RANGE;
	top_field->gam_gdp_ctl = WAIT_NEXT_VSYNC;
	format = sti_gdp_fourcc2format(fb->pixel_format);
	if (format == -1) {
		DRM_ERROR("Format not supported by GDP %.4s\n",
			  (char *)&fb->pixel_format);
		return;
	}
	top_field->gam_gdp_ctl |= format;
	top_field->gam_gdp_ctl |= sti_gdp_get_alpharange(format);
	top_field->gam_gdp_ppt &= ~GAM_GDP_PPT_IGNORE;

	cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
	if (!cma_obj) {
		DRM_ERROR("Can't get CMA GEM object for fb\n");
		return;
	}

	DRM_DEBUG_DRIVER("drm FB:%d format:%.4s phys@:0x%lx\n", fb->base.id,
			 (char *)&fb->pixel_format,
			 (unsigned long)cma_obj->paddr);

	/* pixel memory location */
	drm_fb_get_bpp_depth(fb->pixel_format, &depth, &bpp);
	top_field->gam_gdp_pml = (u32)cma_obj->paddr + fb->offsets[0];
	top_field->gam_gdp_pml += src_x * (bpp >> 3);
	top_field->gam_gdp_pml += src_y * fb->pitches[0];

	/* input parameters */
	top_field->gam_gdp_pmp = fb->pitches[0];
	top_field->gam_gdp_size = clamp_val(src_h, 0, GAM_GDP_SIZE_MAX) << 16 |
				  clamp_val(src_w, 0, GAM_GDP_SIZE_MAX);

	/* output parameters */
	ydo = sti_vtg_get_line_number(*mode, dst_y);
	yds = sti_vtg_get_line_number(*mode, dst_y + dst_h - 1);
	xdo = sti_vtg_get_pixel_number(*mode, dst_x);
	xds = sti_vtg_get_pixel_number(*mode, dst_x + dst_w - 1);
	top_field->gam_gdp_vpo = (ydo << 16) | xdo;
	top_field->gam_gdp_vps = (yds << 16) | xds;

	/* Same content and chained together */
	memcpy(btm_field, top_field, sizeof(*btm_field));
	top_field->gam_gdp_nvn = list->btm_field_paddr;
	btm_field->gam_gdp_nvn = list->top_field_paddr;

	/* Interlaced mode */
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		btm_field->gam_gdp_pml = top_field->gam_gdp_pml +
					 fb->pitches[0];

	if (first_prepare) {
		/* Register gdp callback */
		if (sti_vtg_register_client(mixer->id == STI_MIXER_MAIN ?
				compo->vtg_main : compo->vtg_aux,
				&gdp->vtg_field_nb, crtc)) {
			DRM_ERROR("Cannot register VTG notifier\n");
			return;
		}

		/* Set and enable gdp clock */
		if (gdp->clk_pix) {
			struct clk *clkp;
			int rate = mode->clock * 1000;

			/* According to the mixer used, the gdp pixel clock
			 * should have a different parent clock. */
			if (mixer->id == STI_MIXER_MAIN)
				clkp = gdp->clk_main_parent;
			else
				clkp = gdp->clk_aux_parent;

			if (clkp)
				clk_set_parent(gdp->clk_pix, clkp);

			res = clk_set_rate(gdp->clk_pix, rate);
			if (res < 0) {
				DRM_ERROR("Cannot set rate (%dHz) for gdp\n",
					  rate);
				return;
			}

			if (clk_prepare_enable(gdp->clk_pix)) {
				DRM_ERROR("Failed to prepare/enable gdp\n");
				return;
			}
		}
	}

	/* Update the NVN field of the 'right' field of the current GDP node
	 * (being used by the HW) with the address of the updated ('free') top
	 * field GDP node.
	 * - In interlaced mode the 'right' field is the bottom field as we
	 *   update frames starting from their top field
	 * - In progressive mode, we update both bottom and top fields which
	 *   are equal nodes.
	 * At the next VSYNC, the updated node list will be used by the HW.
	 */
	curr_list = sti_gdp_get_current_nodes(gdp);
	dma_updated_top = list->top_field_paddr;
	dma_updated_btm = list->btm_field_paddr;

	dev_dbg(gdp->dev, "Current NVN:0x%X\n",
		readl(gdp->regs + GAM_GDP_NVN_OFFSET));
	dev_dbg(gdp->dev, "Posted buff: %lx current buff: %x\n",
		(unsigned long)cma_obj->paddr,
		readl(gdp->regs + GAM_GDP_PML_OFFSET));

	if (!curr_list) {
		/* First update or invalid node should directly write in the
		 * hw register */
		DRM_DEBUG_DRIVER("%s first update (or invalid node)",
				 sti_plane_to_str(plane));

		writel(gdp->is_curr_top ?
				dma_updated_btm : dma_updated_top,
				gdp->regs + GAM_GDP_NVN_OFFSET);
		goto end;
	}

	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
		if (gdp->is_curr_top) {
			/* Do not update in the middle of the frame, but
			 * postpone the update after the bottom field has
			 * been displayed */
			curr_list->btm_field->gam_gdp_nvn = dma_updated_top;
		} else {
			/* Direct update to avoid one frame delay */
			writel(dma_updated_top,
			       gdp->regs + GAM_GDP_NVN_OFFSET);
		}
	} else {
		/* Direct update for progressive to avoid one frame delay */
		writel(dma_updated_top, gdp->regs + GAM_GDP_NVN_OFFSET);
	}

end:
	plane->status = STI_PLANE_UPDATED;
}
static void sti_gdp_atomic_disable(struct drm_plane *drm_plane,
				   struct drm_plane_state *oldstate)
{
	struct sti_plane *plane = to_sti_plane(drm_plane);
	struct sti_mixer *mixer = to_sti_mixer(drm_plane->crtc);

	if (!drm_plane->crtc) {
		DRM_DEBUG_DRIVER("drm plane:%d not enabled\n",
				 drm_plane->base.id);
		return;
	}

	DRM_DEBUG_DRIVER("CRTC:%d (%s) drm plane:%d (%s)\n",
			 drm_plane->crtc->base.id, sti_mixer_to_str(mixer),
			 drm_plane->base.id, sti_plane_to_str(plane));

	plane->status = STI_PLANE_DISABLING;
}
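/*
 * Note: atomic_disable only marks the plane as STI_PLANE_DISABLING; the
 * actual hardware disable (nodes flagged GAM_GDP_PPT_IGNORE, VTG notifier
 * unregistered, pixel clock stopped) is performed later by sti_gdp_disable()
 * from the VTG field callback, once the plane has reached the FLUSHING
 * state.
 */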
static const struct drm_plane_helper_funcs sti_gdp_helpers_funcs = {
	.atomic_update = sti_gdp_atomic_update,
	.atomic_disable = sti_gdp_atomic_disable,
};
struct drm_plane *sti_gdp_create(struct drm_device *drm_dev,
				 struct device *dev, int desc,
				 void __iomem *baseaddr,
				 unsigned int possible_crtcs,
				 enum drm_plane_type type)
{
	struct sti_gdp *gdp;
	int res;

	gdp = devm_kzalloc(dev, sizeof(*gdp), GFP_KERNEL);
	if (!gdp) {
		DRM_ERROR("Failed to allocate memory for GDP\n");
		return NULL;
	}

	gdp->dev = dev;
	gdp->regs = baseaddr;
	gdp->plane.desc = desc;
	gdp->plane.status = STI_PLANE_DISABLED;

	gdp->vtg_field_nb.notifier_call = sti_gdp_field_cb;

	sti_gdp_init(gdp);

	res = drm_universal_plane_init(drm_dev, &gdp->plane.drm_plane,
				       possible_crtcs,
				       &sti_plane_helpers_funcs,
				       gdp_supported_formats,
				       ARRAY_SIZE(gdp_supported_formats),
				       type);
	if (res) {
		DRM_ERROR("Failed to initialize universal plane\n");
		goto err;
	}

	drm_plane_helper_add(&gdp->plane.drm_plane, &sti_gdp_helpers_funcs);

	sti_plane_init_property(&gdp->plane, type);

	return &gdp->plane.drm_plane;

err:
	devm_kfree(dev, gdp);
	return NULL;
}