/*
 * VVC motion vector decoder
 *
 * Copyright (C) 2023 Nuo Mi
 * Copyright (C) 2022 Xu Mu
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#define IS_SAME_MV(a, b) (AV_RN64A(a) == AV_RN64A(b))

//check if the two luma locations belong to the same motion estimation region
static av_always_inline int is_same_mer(const VVCFrameContext *fc, const int xN, const int yN, const int xP, const int yP)
{
    const uint8_t plevel = fc->ps.sps->log2_parallel_merge_level;

    return xN >> plevel == xP >> plevel &&
        yN >> plevel == yP >> plevel;
}

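/*
 * Note: two neighbouring positions that fall into the same merge estimation
 * region (MER, controlled by log2_parallel_merge_level) are treated as
 * unavailable for one another, so merge candidates inside the same MER are
 * never inherited; see the check_mer path in check_available().
 */
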
//return true if we have same mvs and ref_idxs
static av_always_inline int compare_mv_ref_idx(const MvField *n, const MvField *o)
{
    if (!o || n->pred_flag != o->pred_flag)
        return 0;
    for (int i = 0; i < 2; i++) {
        PredFlag mask = i + 1;
        if (n->pred_flag & mask) {
            const int same_ref_idx = n->ref_idx[i] == o->ref_idx[i];
            const int same_mv      = IS_SAME_MV(n->mv + i, o->mv + i);
            if (!same_ref_idx || !same_mv)
                return 0;
        }
    }
    return 1;
}

// 8.5.2.15 Temporal motion buffer compression process for collocated motion vectors
static av_always_inline void mv_compression(Mv *motion)
{
    int mv[2] = {motion->x, motion->y};
    for (int i = 0; i < 2; i++) {
        const int s     = mv[i] >> 17;
        const int f     = av_log2((mv[i] ^ s) | 31) - 4;
        const int mask  = (-1 * (1 << f)) >> 1;
        const int round = (1 << f) >> 2;
        mv[i] = (mv[i] + round) & mask;
    }
    motion->x = mv[0];
    motion->y = mv[1];
}

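/*
 * The compression above stores each component in a floating-point-like form:
 * small magnitudes (|v| < 32) are kept exact, larger ones are rounded to a
 * multiple of 2^(f-1), which keeps roughly a 6-bit mantissa plus sign.
 * Worked example: v = 1000 gives f = av_log2(1000 | 31) - 4 = 5,
 * round = 8, mask = ~15, so v becomes (1000 + 8) & ~15 = 1008.
 */
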
void ff_vvc_mv_scale(Mv *dst, const Mv *src, int td, int tb)
{
    int tx, scale_factor;

    td = av_clip_int8(td);
    tb = av_clip_int8(tb);
    tx = (0x4000 + (abs(td) >> 1)) / td;
    scale_factor = av_clip_intp2((tb * tx + 32) >> 6, 12);
    dst->x = av_clip_intp2((scale_factor * src->x + 127 +
        (scale_factor * src->x < 0)) >> 8, 17);
    dst->y = av_clip_intp2((scale_factor * src->y + 127 +
        (scale_factor * src->y < 0)) >> 8, 17);
}

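/*
 * td/tb are the clipped POC distances of the collocated and current picture
 * pairs; the scale factor approximates tb/td with a 14-bit reciprocal
 * (0x4000 / td) clipped to 12 bits, the same scaling scheme HEVC uses.
 */
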
//part of 8.5.2.12 Derivation process for collocated motion vectors
static int check_mvset(Mv *mvLXCol, Mv *mvCol,
                       int colPic, int poc,
                       const RefPicList *refPicList, int X, int refIdxLx,
                       const RefPicList *refPicList_col, int listCol, int refidxCol)
{
    int cur_lt = refPicList[X].refs[refIdxLx].is_lt;
    int col_lt = refPicList_col[listCol].refs[refidxCol].is_lt;
    int col_poc_diff, cur_poc_diff;

    if (cur_lt != col_lt) {
        mvLXCol->x = 0;
        mvLXCol->y = 0;
        return 0;
    }

    col_poc_diff = colPic - refPicList_col[listCol].refs[refidxCol].poc;
    cur_poc_diff = poc - refPicList[X].refs[refIdxLx].poc;

    mv_compression(mvCol);
    if (cur_lt || col_poc_diff == cur_poc_diff) {
        mvLXCol->x = av_clip_intp2(mvCol->x, 17);
        mvLXCol->y = av_clip_intp2(mvCol->y, 17);
    } else {
        ff_vvc_mv_scale(mvLXCol, mvCol, col_poc_diff, cur_poc_diff);
    }
    return 1;
}

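/*
 * A collocated MV is usable only when the current and collocated references
 * agree on long-term vs short-term; it is copied (after compression and
 * clipping) when the reference is long-term or the POC distances match,
 * and scaled with ff_vvc_mv_scale() otherwise.
 */
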
#define CHECK_MVSET(l)                                  \
    check_mvset(mvLXCol, temp_col.mv + l,               \
                colPic, fc->ps.ph.poc,                  \
                refPicList, X, refIdxLx,                \
                refPicList_col, L ## l, temp_col.ref_idx[l])

//derive NoBackwardPredFlag
int ff_vvc_no_backward_pred_flag(const VVCLocalContext *lc)
{
    int check_diffpicount = 0;
    int i, j;
    const RefPicList *rpl = lc->sc->rpl;

    for (j = 0; j < 2; j++) {
        for (i = 0; i < lc->sc->sh.r->num_ref_idx_active[j]; i++) {
            if (rpl[j].refs[i].poc > lc->fc->ps.ph.poc) {
                check_diffpicount++;
                break;
            }
        }
    }
    return !check_diffpicount;
}

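/* NoBackwardPredFlag is 1 when no active reference picture in either list has
 * a POC greater than the current picture, i.e. all references precede it in
 * output order. */
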
//8.5.2.12 Derivation process for collocated motion vectors
static int derive_temporal_colocated_mvs(const VVCLocalContext *lc, MvField temp_col,
                                         int refIdxLx, Mv *mvLXCol, int X,
                                         int colPic, const RefPicList *refPicList_col, int sb_flag)
{
    const VVCFrameContext *fc = lc->fc;
    const SliceContext *sc    = lc->sc;
    RefPicList *refPicList    = sc->rpl;

    if (temp_col.pred_flag == PF_INTRA)
        return 0;

    if (sb_flag) {
        if (X == 0) {
            if (temp_col.pred_flag & PF_L0)
                return CHECK_MVSET(0);
            else if (ff_vvc_no_backward_pred_flag(lc) && (temp_col.pred_flag & PF_L1))
                return CHECK_MVSET(1);
        } else {
            if (temp_col.pred_flag & PF_L1)
                return CHECK_MVSET(1);
            else if (ff_vvc_no_backward_pred_flag(lc) && (temp_col.pred_flag & PF_L0))
                return CHECK_MVSET(0);
        }
    } else {
        if (!(temp_col.pred_flag & PF_L0))
            return CHECK_MVSET(1);
        else if (temp_col.pred_flag == PF_L0)
            return CHECK_MVSET(0);
        else if (temp_col.pred_flag == PF_BI) {
            if (ff_vvc_no_backward_pred_flag(lc)) {
                if (X == 0)
                    return CHECK_MVSET(0);
                else
                    return CHECK_MVSET(1);
            }
            if (!lc->sc->sh.r->sh_collocated_from_l0_flag)
                return CHECK_MVSET(0);
            else
                return CHECK_MVSET(1);
        }
    }
    return 0;
}

#define TAB_MVF(x, y)                                                   \
    tab_mvf[((y) >> MIN_PU_LOG2) * min_pu_width + ((x) >> MIN_PU_LOG2)]

#define TAB_MVF_PU(v)                                                   \
    TAB_MVF(x ## v, y ## v)

#define TAB_CP_MV(lx, x, y)                                             \
    fc->tab.cp_mv[lx][(((y) >> min_cb_log2_size) * min_cb_width + ((x) >> min_cb_log2_size)) * MAX_CONTROL_POINTS]

#define DERIVE_TEMPORAL_COLOCATED_MVS(sb_flag)                          \
    derive_temporal_colocated_mvs(lc, temp_col,                         \
                                  refIdxLx, mvLXCol, X, colPic,         \
                                  ff_vvc_get_ref_list(fc, ref, x, y), sb_flag)

//8.5.2.11 Derivation process for temporal luma motion vector prediction
static int temporal_luma_motion_vector(const VVCLocalContext *lc,
    const int refIdxLx, Mv *mvLXCol, const int X, int check_center, int sb_flag)
{
    const VVCFrameContext *fc = lc->fc;
    const VVCSPS *sps         = fc->ps.sps;
    const VVCPPS *pps         = fc->ps.pps;
    const CodingUnit *cu      = lc->cu;
    const int subpic_idx      = lc->sc->sh.r->curr_subpic_idx;
    int x, y, x_end, y_end, colPic, availableFlagLXCol = 0;
    int min_pu_width = fc->ps.pps->min_pu_width;
    VVCFrame *ref    = fc->ref->collocated_ref;
    MvField *tab_mvf = NULL;
    MvField temp_col;

    memset(mvLXCol, 0, sizeof(*mvLXCol));
    if (!ref)
        return 0;

    if (!fc->ps.ph.r->ph_temporal_mvp_enabled_flag || (cu->cb_width * cu->cb_height <= 32))
        return 0;

    tab_mvf = ref->tab_dmvr_mvf;
    colPic  = ref->poc;

    //bottom right collocated motion vector
    x = cu->x0 + cu->cb_width;
    y = cu->y0 + cu->cb_height;

    x_end = pps->subpic_x[subpic_idx] + pps->subpic_width[subpic_idx];
    y_end = pps->subpic_y[subpic_idx] + pps->subpic_height[subpic_idx];

    if (tab_mvf &&
        (cu->y0 >> sps->ctb_log2_size_y) == (y >> sps->ctb_log2_size_y) &&
        x < x_end && y < y_end) {
        x                  &= ~7;
        y                  &= ~7;
        temp_col            = TAB_MVF(x, y);
        availableFlagLXCol  = DERIVE_TEMPORAL_COLOCATED_MVS(sb_flag);
    }
    if (check_center) {
        // derive center collocated motion vector
        if (tab_mvf && !availableFlagLXCol) {
            x                  = cu->x0 + (cu->cb_width  >> 1);
            y                  = cu->y0 + (cu->cb_height >> 1);
            x                 &= ~7;
            y                 &= ~7;
            temp_col           = TAB_MVF(x, y);
            availableFlagLXCol = DERIVE_TEMPORAL_COLOCATED_MVS(sb_flag);
        }
    }
    return availableFlagLXCol;
}

void ff_vvc_set_mvf(const VVCLocalContext *lc, const int x0, const int y0, const int w, const int h, const MvField *mvf)
{
    const VVCFrameContext *fc = lc->fc;
    MvField *tab_mvf          = fc->tab.mvf;
    const int min_pu_width    = fc->ps.pps->min_pu_width;
    const int min_pu_size     = 1 << MIN_PU_LOG2;
    for (int dy = 0; dy < h; dy += min_pu_size) {
        for (int dx = 0; dx < w; dx += min_pu_size) {
            const int x = x0 + dx;
            const int y = y0 + dy;
            TAB_MVF(x, y) = *mvf;
        }
    }
}

void ff_vvc_set_intra_mvf(const VVCLocalContext *lc, const int dmvr)
{
    const VVCFrameContext *fc = lc->fc;
    const CodingUnit *cu      = lc->cu;
    MvField *tab_mvf          = dmvr ? fc->ref->tab_dmvr_mvf : fc->tab.mvf;
    const int min_pu_width    = fc->ps.pps->min_pu_width;
    const int min_pu_size     = 1 << MIN_PU_LOG2;
    for (int dy = 0; dy < cu->cb_height; dy += min_pu_size) {
        for (int dx = 0; dx < cu->cb_width; dx += min_pu_size) {
            const int x = cu->x0 + dx;
            const int y = cu->y0 + dy;
            TAB_MVF(x, y).pred_flag = PF_INTRA;
        }
    }
}

//cbProfFlagLX from 8.5.5.9 Derivation process for motion vector arrays from affine control point motion vectors
static int derive_cb_prof_flag_lx(const VVCLocalContext *lc, const PredictionUnit *pu, int lx, int is_fallback)
{
    const MotionInfo *mi = &pu->mi;
    const Mv *cp_mv      = &mi->mv[lx][0];

    if (lc->fc->ps.ph.r->ph_prof_disabled_flag || is_fallback)
        return 0;
    if (mi->motion_model_idc == MOTION_4_PARAMS_AFFINE) {
        if (IS_SAME_MV(cp_mv, cp_mv + 1))
            return 0;
    }
    if (mi->motion_model_idc == MOTION_6_PARAMS_AFFINE) {
        if (IS_SAME_MV(cp_mv, cp_mv + 1) && IS_SAME_MV(cp_mv, cp_mv + 2))
            return 0;
    }
    if (lc->sc->rpl[lx].refs[mi->ref_idx[lx]].is_scaled)
        return 0;
    return 1;
}

typedef struct SubblockParams {
    int d_hor_x;
    int d_ver_x;
    int d_hor_y;
    int d_ver_y;
    int mv_scale_hor;
    int mv_scale_ver;
    int is_fallback;

    int cb_width;
    int cb_height;
} SubblockParams;

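/*
 * The fallback check below mirrors the bandwidth constraint of 8.5.5.9: it
 * bounds the reference area that a group of affine subblocks may touch.
 * When the bound is exceeded, is_fallback is set and every subblock reuses a
 * single MV evaluated at the CU centre (see the x_pos_cb/y_pos_cb selection
 * in ff_vvc_store_sb_mvs()).
 */
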
static int is_fallback_mode(const SubblockParams *sp, const PredFlag pred_flag)
{
    const int a = 4 * (2048 + sp->d_hor_x);
    const int b = 4 * sp->d_hor_y;
    const int c = 4 * (2048 + sp->d_ver_y);
    const int d = 4 * sp->d_ver_x;
    if (pred_flag == PF_BI) {
        const int max_w4 = FFMAX(0, FFMAX(a, FFMAX(b, a + b)));
        const int min_w4 = FFMIN(0, FFMIN(a, FFMIN(b, a + b)));
        const int max_h4 = FFMAX(0, FFMAX(c, FFMAX(d, c + d)));
        const int min_h4 = FFMIN(0, FFMIN(c, FFMIN(d, c + d)));
        const int bx_wx4 = ((max_w4 - min_w4) >> 11) + 9;
        const int bx_hx4 = ((max_h4 - min_h4) >> 11) + 9;
        return bx_wx4 * bx_hx4 > 225;
    } else {
        const int bx_wxh = (FFABS(a) >> 11) + 9;
        const int bx_hxh = (FFABS(d) >> 11) + 9;
        const int bx_wxv = (FFABS(b) >> 11) + 9;
        const int bx_hxv = (FFABS(c) >> 11) + 9;
        if (bx_wxh * bx_hxh <= 165 && bx_wxv * bx_hxv <= 165)
            return 0;
    }
    return 1;
}

static void init_subblock_params(SubblockParams *sp, const MotionInfo *mi,
    const int cb_width, const int cb_height, const int lx)
{
    const int log2_cbw  = av_log2(cb_width);
    const int log2_cbh  = av_log2(cb_height);
    const Mv *cp_mv     = mi->mv[lx];
    const int num_cp_mv = mi->motion_model_idc + 1;
    sp->d_hor_x = (cp_mv[1].x - cp_mv[0].x) * (1 << (MAX_CU_DEPTH - log2_cbw));
    sp->d_ver_x = (cp_mv[1].y - cp_mv[0].y) * (1 << (MAX_CU_DEPTH - log2_cbw));
    if (num_cp_mv == 3) {
        sp->d_hor_y = (cp_mv[2].x - cp_mv[0].x) * (1 << (MAX_CU_DEPTH - log2_cbh));
        sp->d_ver_y = (cp_mv[2].y - cp_mv[0].y) * (1 << (MAX_CU_DEPTH - log2_cbh));
    } else {
        sp->d_hor_y = -sp->d_ver_x;
        sp->d_ver_y = sp->d_hor_x;
    }
    sp->mv_scale_hor = (cp_mv[0].x) * (1 << MAX_CU_DEPTH);
    sp->mv_scale_ver = (cp_mv[0].y) * (1 << MAX_CU_DEPTH);
    sp->cb_width     = cb_width;
    sp->cb_height    = cb_height;
    sp->is_fallback  = is_fallback_mode(sp, mi->pred_flag);
}

static void derive_subblock_diff_mvs(const VVCLocalContext *lc, PredictionUnit *pu, const SubblockParams *sp, const int lx)
{
    pu->cb_prof_flag[lx] = derive_cb_prof_flag_lx(lc, pu, lx, sp->is_fallback);
    if (pu->cb_prof_flag[lx]) {
        const int dmv_limit    = 1 << 5;
        const int pos_offset_x = 6 * (sp->d_hor_x + sp->d_hor_y);
        const int pos_offset_y = 6 * (sp->d_ver_x + sp->d_ver_y);
        for (int x = 0; x < AFFINE_MIN_BLOCK_SIZE; x++) {
            for (int y = 0; y < AFFINE_MIN_BLOCK_SIZE; y++) {
                LOCAL_ALIGNED_8(Mv, diff, [1]);
                diff->x = x * (sp->d_hor_x * (1 << 2)) + y * (sp->d_hor_y * (1 << 2)) - pos_offset_x;
                diff->y = x * (sp->d_ver_x * (1 << 2)) + y * (sp->d_ver_y * (1 << 2)) - pos_offset_y;
                ff_vvc_round_mv(diff, 0, 8);
                pu->diff_mv_x[lx][AFFINE_MIN_BLOCK_SIZE * y + x] = av_clip(diff->x, -dmv_limit + 1, dmv_limit - 1);
                pu->diff_mv_y[lx][AFFINE_MIN_BLOCK_SIZE * y + x] = av_clip(diff->y, -dmv_limit + 1, dmv_limit - 1);
            }
        }
    }
}

static void store_cp_mv(const VVCLocalContext *lc, const MotionInfo *mi, const int lx)
{
    VVCFrameContext *fc        = lc->fc;
    const CodingUnit *cu       = lc->cu;
    const int log2_min_cb_size = fc->ps.sps->min_cb_log2_size_y;
    const int min_cb_size      = fc->ps.sps->min_cb_size_y;
    const int min_cb_width     = fc->ps.pps->min_cb_width;
    const int num_cp_mv        = mi->motion_model_idc + 1;

    for (int dy = 0; dy < cu->cb_height; dy += min_cb_size) {
        for (int dx = 0; dx < cu->cb_width; dx += min_cb_size) {
            const int x_cb   = (cu->x0 + dx) >> log2_min_cb_size;
            const int y_cb   = (cu->y0 + dy) >> log2_min_cb_size;
            const int offset = (y_cb * min_cb_width + x_cb) * MAX_CONTROL_POINTS;

            memcpy(&fc->tab.cp_mv[lx][offset], mi->mv[lx], sizeof(Mv) * num_cp_mv);
        }
    }
}

//8.5.5.9 Derivation process for motion vector arrays from affine control point motion vectors
void ff_vvc_store_sb_mvs(const VVCLocalContext *lc, PredictionUnit *pu)
{
    const CodingUnit *cu = lc->cu;
    const MotionInfo *mi = &pu->mi;
    const int sbw        = cu->cb_width / mi->num_sb_x;
    const int sbh        = cu->cb_height / mi->num_sb_y;
    SubblockParams params[2];
    MvField mvf = {0};

    mvf.pred_flag   = mi->pred_flag;
    mvf.bcw_idx     = mi->bcw_idx;
    mvf.hpel_if_idx = mi->hpel_if_idx;
    for (int i = 0; i < 2; i++) {
        const PredFlag mask = i + 1;
        if (mi->pred_flag & mask) {
            store_cp_mv(lc, mi, i);
            init_subblock_params(params + i, mi, cu->cb_width, cu->cb_height, i);
            derive_subblock_diff_mvs(lc, pu, params + i, i);
            mvf.ref_idx[i] = mi->ref_idx[i];
        }
    }

    for (int sby = 0; sby < mi->num_sb_y; sby++) {
        for (int sbx = 0; sbx < mi->num_sb_x; sbx++) {
            const int x0 = cu->x0 + sbx * sbw;
            const int y0 = cu->y0 + sby * sbh;
            for (int i = 0; i < 2; i++) {
                const PredFlag mask = i + 1;
                if (mi->pred_flag & mask) {
                    const SubblockParams *sp = params + i;
                    const int x_pos_cb = sp->is_fallback ? (cu->cb_width  >> 1) : (2 + (sbx << MIN_CU_LOG2));
                    const int y_pos_cb = sp->is_fallback ? (cu->cb_height >> 1) : (2 + (sby << MIN_CU_LOG2));
                    Mv *mv = mvf.mv + i;

                    mv->x = sp->mv_scale_hor + sp->d_hor_x * x_pos_cb + sp->d_hor_y * y_pos_cb;
                    mv->y = sp->mv_scale_ver + sp->d_ver_x * x_pos_cb + sp->d_ver_y * y_pos_cb;
                    ff_vvc_round_mv(mv, 0, MAX_CU_DEPTH);
                    ff_vvc_clip_mv(mv);
                }
            }
            ff_vvc_set_mvf(lc, x0, y0, sbw, sbh, &mvf);
        }
    }
}

void ff_vvc_store_gpm_mvf(const VVCLocalContext *lc, const PredictionUnit *pu)
{
    const CodingUnit *cu     = lc->cu;
    const int angle_idx      = ff_vvc_gpm_angle_idx[pu->gpm_partition_idx];
    const int distance_idx   = ff_vvc_gpm_distance_idx[pu->gpm_partition_idx];
    const int displacement_x = ff_vvc_gpm_distance_lut[angle_idx];
    const int displacement_y = ff_vvc_gpm_distance_lut[(angle_idx + 8) % 32];
    const int is_flip        = angle_idx >= 13 && angle_idx <= 27;
    const int shift_hor      = (angle_idx % 16 == 8 || (angle_idx % 16 && cu->cb_height >= cu->cb_width)) ? 0 : 1;
    const int sign           = angle_idx < 16 ? 1 : -1;
    const int block_size     = 4;
    int offset_x = (-cu->cb_width) >> 1;
    int offset_y = (-cu->cb_height) >> 1;

    if (!shift_hor)
        offset_y += sign * ((distance_idx * cu->cb_height) >> 3);
    else
        offset_x += sign * ((distance_idx * cu->cb_width) >> 3);

    for (int y = 0; y < cu->cb_height; y += block_size) {
        for (int x = 0; x < cu->cb_width; x += block_size) {
            const int motion_idx = (((x + offset_x) * (1 << 1)) + 5) * displacement_x +
                (((y + offset_y) * (1 << 1)) + 5) * displacement_y;
            const int s_type     = FFABS(motion_idx) < 32 ? 2 : (motion_idx <= 0 ? (1 - is_flip) : is_flip);
            const int pred_flag  = pu->gpm_mv[0].pred_flag | pu->gpm_mv[1].pred_flag;
            const int x0         = cu->x0 + x;
            const int y0         = cu->y0 + y;

            if (!s_type)
                ff_vvc_set_mvf(lc, x0, y0, block_size, block_size, pu->gpm_mv + 0);
            else if (s_type == 1 || (s_type == 2 && pred_flag != PF_BI))
                ff_vvc_set_mvf(lc, x0, y0, block_size, block_size, pu->gpm_mv + 1);
            else {
                MvField mvf        = pu->gpm_mv[0];
                const MvField *mv1 = &pu->gpm_mv[1];
                const int lx       = mv1->pred_flag - PF_L0;
                mvf.pred_flag   = PF_BI;
                mvf.ref_idx[lx] = mv1->ref_idx[lx];
                mvf.mv[lx]      = mv1->mv[lx];
                ff_vvc_set_mvf(lc, x0, y0, block_size, block_size, &mvf);
            }
        }
    }
}

void ff_vvc_store_mvf(const VVCLocalContext *lc, const MvField *mvf)
{
    const CodingUnit *cu = lc->cu;
    ff_vvc_set_mvf(lc, cu->x0, cu->y0, cu->cb_width, cu->cb_height, mvf);
}

void ff_vvc_store_mv(const VVCLocalContext *lc, const MotionInfo *mi)
{
    const CodingUnit *cu = lc->cu;
    MvField mvf = {0};

    mvf.hpel_if_idx = mi->hpel_if_idx;
    mvf.bcw_idx     = mi->bcw_idx;
    mvf.pred_flag   = mi->pred_flag;

    for (int i = 0; i < 2; i++) {
        const PredFlag mask = i + 1;
        if (mvf.pred_flag & mask) {
            mvf.mv[i]      = mi->mv[i][0];
            mvf.ref_idx[i] = mi->ref_idx[i];
        }
    }
    ff_vvc_set_mvf(lc, cu->x0, cu->y0, cu->cb_width, cu->cb_height, &mvf);
}

typedef enum NeighbourIdx {
    A0,
    A1,
    A2,
    B0,
    B1,
    B2,
    B3,
    NUM_NBS,
    NB_IDX_NONE = NUM_NBS,
} NeighbourIdx;

typedef struct Neighbour {
    int x;
    int y;

    int checked;
    int available;
} Neighbour;

typedef struct NeighbourContext {
    Neighbour neighbours[NUM_NBS];
    const VVCLocalContext *lc;
} NeighbourContext;

static int is_available(const VVCFrameContext *fc, const int x0, const int y0)
{
    const VVCSPS *sps      = fc->ps.sps;
    const int x            = x0 >> sps->min_cb_log2_size_y;
    const int y            = y0 >> sps->min_cb_log2_size_y;
    const int min_cb_width = fc->ps.pps->min_cb_width;

    return SAMPLE_CTB(fc->tab.cb_width[0], x, y) != 0;
}

static int is_a0_available(const VVCLocalContext *lc, const CodingUnit *cu)
{
    const VVCFrameContext *fc = lc->fc;
    const VVCSPS *sps         = fc->ps.sps;
    const int x0b             = av_zero_extend(cu->x0, sps->ctb_log2_size_y);
    int cand_bottom_left;

    if (!x0b && !lc->ctb_left_flag) {
        cand_bottom_left = 0;
    } else {
        const int max_y = FFMIN(fc->ps.pps->height, ((cu->y0 >> sps->ctb_log2_size_y) + 1) << sps->ctb_log2_size_y);
        if (cu->y0 + cu->cb_height >= max_y)
            cand_bottom_left = 0;
        else
            cand_bottom_left = is_available(fc, cu->x0 - 1, cu->y0 + cu->cb_height);
    }
    return cand_bottom_left;
}

static void init_neighbour_context(NeighbourContext *ctx, const VVCLocalContext *lc)
{
    const CodingUnit *cu         = lc->cu;
    const NeighbourAvailable *na = &lc->na;
    const int x0                 = cu->x0;
    const int y0                 = cu->y0;
    const int cb_width           = cu->cb_width;
    const int cb_height          = cu->cb_height;
    const int a0_available       = is_a0_available(lc, cu);

    Neighbour neighbours[NUM_NBS] = {
        { x0 - 1,            y0 + cb_height,     !a0_available      }, //A0
        { x0 - 1,            y0 + cb_height - 1, !na->cand_left     }, //A1
        { x0 - 1,            y0,                 !na->cand_left     }, //A2
        { x0 + cb_width,     y0 - 1,             !na->cand_up_right }, //B0
        { x0 + cb_width - 1, y0 - 1,             !na->cand_up       }, //B1
        { x0 - 1,            y0 - 1,             !na->cand_up_left  }, //B2
        { x0,                y0 - 1,             !na->cand_up       }, //B3
    };

    memcpy(ctx->neighbours, neighbours, sizeof(neighbours));
    ctx->lc = lc;
}

static av_always_inline PredMode pred_flag_to_mode(PredFlag pred)
{
    return pred == PF_IBC ? MODE_IBC : (pred == PF_INTRA ? MODE_INTRA : MODE_INTER);
}

static int check_available(Neighbour *n, const VVCLocalContext *lc, const int check_mer)
{
    const VVCFrameContext *fc = lc->fc;
    const VVCSPS *sps         = fc->ps.sps;
    const CodingUnit *cu      = lc->cu;
    const MvField *tab_mvf    = fc->tab.mvf;
    const int min_pu_width    = fc->ps.pps->min_pu_width;

    if (!n->checked) {
        n->checked   = 1;
        n->available = !sps->r->sps_entropy_coding_sync_enabled_flag || ((n->x >> sps->ctb_log2_size_y) <= (cu->x0 >> sps->ctb_log2_size_y));
        n->available = n->available && is_available(fc, n->x, n->y) && cu->pred_mode == pred_flag_to_mode(TAB_MVF(n->x, n->y).pred_flag);
        if (check_mer)
            n->available = n->available && !is_same_mer(fc, n->x, n->y, cu->x0, cu->y0);
    }
    return n->available;
}

static const MvField *mv_merge_candidate(const VVCLocalContext *lc, const int x_cand, const int y_cand)
{
    const VVCFrameContext *fc = lc->fc;
    const int min_pu_width    = fc->ps.pps->min_pu_width;
    const MvField *tab_mvf    = fc->tab.mvf;
    const MvField *mvf        = &TAB_MVF(x_cand, y_cand);

    return mvf;
}

static const MvField *mv_merge_from_nb(NeighbourContext *ctx, const NeighbourIdx nb)
{
    const VVCLocalContext *lc = ctx->lc;
    Neighbour *n              = &ctx->neighbours[nb];

    if (check_available(n, lc, 1))
        return mv_merge_candidate(lc, n->x, n->y);
    return NULL;
}

#define MV_MERGE_FROM_NB(nb) mv_merge_from_nb(&nctx, nb)

//8.5.2.3 Derivation process for spatial merging candidates
static int mv_merge_spatial_candidates(const VVCLocalContext *lc, const int merge_idx,
    const MvField **nb_list, MvField *cand_list, int *nb_merge_cand)
{
    const MvField *cand;
    int num_cands = 0;
    NeighbourContext nctx;

    static NeighbourIdx nbs[][2] = {
        { A1, NB_IDX_NONE },
        { B1, A1 },
        { B0, B1 },
        { A0, A1 },
    };

    init_neighbour_context(&nctx, lc);
    for (int i = 0; i < FF_ARRAY_ELEMS(nbs); i++) {
        NeighbourIdx nb  = nbs[i][0];
        NeighbourIdx old = nbs[i][1];
        cand = nb_list[nb] = MV_MERGE_FROM_NB(nb);
        if (cand && !compare_mv_ref_idx(cand, nb_list[old])) {
            cand_list[num_cands] = *cand;
            if (merge_idx == num_cands)
                return 1;
            num_cands++;
        }
    }
    if (num_cands != 4) {
        cand = MV_MERGE_FROM_NB(B2);
        if (cand && !compare_mv_ref_idx(cand, nb_list[A1])
            && !compare_mv_ref_idx(cand, nb_list[B1])) {
            cand_list[num_cands] = *cand;
            if (merge_idx == num_cands)
                return 1;
            num_cands++;
        }
    }
    *nb_merge_cand = num_cands;
    return 0;
}

static int mv_merge_temporal_candidate(const VVCLocalContext *lc, MvField *cand)
{
    const VVCFrameContext *fc = lc->fc;
    const CodingUnit *cu      = lc->cu;

    memset(cand, 0, sizeof(*cand));
    if (fc->ps.ph.r->ph_temporal_mvp_enabled_flag && (cu->cb_width * cu->cb_height > 32)) {
        int available_l0 = temporal_luma_motion_vector(lc, 0, cand->mv + 0, 0, 1, 0);
        int available_l1 = IS_B(lc->sc->sh.r) ?
            temporal_luma_motion_vector(lc, 0, cand->mv + 1, 1, 1, 0) : 0;
        cand->pred_flag = available_l0 + (available_l1 << 1);
    }
    return cand->pred_flag;
}

//8.5.2.6 Derivation process for history-based merging candidates
static int mv_merge_history_candidates(const VVCLocalContext *lc, const int merge_idx,
    const MvField **nb_list, MvField *cand_list, int *num_cands)
{
    const VVCSPS *sps    = lc->fc->ps.sps;
    const EntryPoint *ep = lc->ep;
    for (int i = 1; i <= ep->num_hmvp && (*num_cands < sps->max_num_merge_cand - 1); i++) {
        const MvField *h      = &ep->hmvp[ep->num_hmvp - i];
        const int same_motion = i <= 2 && (compare_mv_ref_idx(h, nb_list[A1]) || compare_mv_ref_idx(h, nb_list[B1]));
        if (!same_motion) {
            cand_list[*num_cands] = *h;
            if (merge_idx == *num_cands)
                return 1;
            (*num_cands)++;
        }
    }
    return 0;
}

720 static int mv_merge_pairwise_candidate(MvField
*cand_list
, const int num_cands
, const int is_b
)
723 const int num_ref_rists
= is_b
? 2 : 1;
724 const MvField
* p0
= cand_list
+ 0;
725 const MvField
* p1
= cand_list
+ 1;
726 MvField
* cand
= cand_list
+ num_cands
;
729 for (int i
= 0; i
< num_ref_rists
; i
++) {
730 PredFlag mask
= i
+ 1;
731 if (p0
->pred_flag
& mask
) {
732 cand
->pred_flag
|= mask
;
733 cand
->ref_idx
[i
] = p0
->ref_idx
[i
];
734 if (p1
->pred_flag
& mask
) {
735 Mv
*mv
= cand
->mv
+ i
;
736 mv
->x
= p0
->mv
[i
].x
+ p1
->mv
[i
].x
;
737 mv
->y
= p0
->mv
[i
].y
+ p1
->mv
[i
].y
;
738 ff_vvc_round_mv(mv
, 0, 1);
740 cand
->mv
[i
] = p0
->mv
[i
];
742 } else if (p1
->pred_flag
& mask
) {
743 cand
->pred_flag
|= mask
;
744 cand
->mv
[i
] = p1
->mv
[i
];
745 cand
->ref_idx
[i
] = p1
->ref_idx
[i
];
748 if (cand
->pred_flag
) {
749 cand
->hpel_if_idx
= p0
->hpel_if_idx
== p1
->hpel_if_idx
? p0
->hpel_if_idx
: 0;
//8.5.2.5 Derivation process for zero motion vector merging candidates
static void mv_merge_zero_motion_candidate(const VVCLocalContext *lc, const int merge_idx,
    MvField *cand_list, int num_cands)
{
    const VVCSPS *sps             = lc->fc->ps.sps;
    const H266RawSliceHeader *rsh = lc->sc->sh.r;
    const int num_ref_idx         = IS_P(rsh) ?
        rsh->num_ref_idx_active[L0] : FFMIN(rsh->num_ref_idx_active[L0], rsh->num_ref_idx_active[L1]);
    int zero_idx = 0;

    while (num_cands < sps->max_num_merge_cand) {
        MvField *cand = cand_list + num_cands;

        cand->pred_flag  = PF_L0 + (IS_B(rsh) << 1);
        AV_ZERO64(cand->mv + 0);
        AV_ZERO64(cand->mv + 1);
        cand->ref_idx[0]  = zero_idx < num_ref_idx ? zero_idx : 0;
        cand->ref_idx[1]  = zero_idx < num_ref_idx ? zero_idx : 0;
        cand->bcw_idx     = 0;
        cand->hpel_if_idx = 0;
        if (merge_idx == num_cands)
            return;
        num_cands++;
        zero_idx++;
    }
}

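/*
 * Regular merge list construction (8.5.2.2): spatial candidates, then the
 * temporal candidate, then history-based (HMVP) candidates, then the pairwise
 * average, and finally zero-MV candidates until max_num_merge_cand is
 * reached. Each helper signals once merge_idx has been reached, so the list
 * is only built up to the requested candidate.
 */
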
static void mv_merge_mode(const VVCLocalContext *lc, const int merge_idx, MvField *cand_list)
{
    int num_cands = 0;
    const MvField *nb_list[NUM_NBS + 1] = { NULL };

    if (mv_merge_spatial_candidates(lc, merge_idx, nb_list, cand_list, &num_cands))
        return;

    if (mv_merge_temporal_candidate(lc, &cand_list[num_cands])) {
        if (merge_idx == num_cands)
            return;
        num_cands++;
    }

    if (mv_merge_history_candidates(lc, merge_idx, nb_list, cand_list, &num_cands))
        return;

    if (mv_merge_pairwise_candidate(cand_list, num_cands, IS_B(lc->sc->sh.r))) {
        if (merge_idx == num_cands)
            return;
        num_cands++;
    }

    mv_merge_zero_motion_candidate(lc, merge_idx, cand_list, num_cands);
}

//8.5.2.2 Derivation process for luma motion vectors for merge mode
void ff_vvc_luma_mv_merge_mode(VVCLocalContext *lc, const int merge_idx, const int ciip_flag, MvField *mv)
{
    const CodingUnit *cu = lc->cu;
    MvField cand_list[MRG_MAX_NUM_CANDS];

    ff_vvc_set_neighbour_available(lc, cu->x0, cu->y0, cu->cb_width, cu->cb_height);
    mv_merge_mode(lc, merge_idx, cand_list);
    *mv = cand_list[merge_idx];
    //the ciip flag is not inheritable
    mv->ciip_flag = ciip_flag;
}

//8.5.4.2 Derivation process for luma motion vectors for geometric partitioning merge mode
void ff_vvc_luma_mv_merge_gpm(VVCLocalContext *lc, const int merge_gpm_idx[2], MvField *mv)
{
    const CodingUnit *cu = lc->cu;
    MvField cand_list[MRG_MAX_NUM_CANDS];

    const int idx[] = { merge_gpm_idx[0], merge_gpm_idx[1] + (merge_gpm_idx[1] >= merge_gpm_idx[0]) };

    ff_vvc_set_neighbour_available(lc, cu->x0, cu->y0, cu->cb_width, cu->cb_height);
    mv_merge_mode(lc, FFMAX(idx[0], idx[1]), cand_list);
    memset(mv, 0, 2 * sizeof(*mv));
    for (int i = 0; i < 2; i++) {
        int lx        = idx[i] & 1;
        int mask      = lx + PF_L0;
        MvField *cand = cand_list + idx[i];
        if (!(cand->pred_flag & mask)) {
            lx   = !lx;
            mask = lx + PF_L0;
        }
        mv[i].pred_flag   = mask;
        mv[i].ref_idx[lx] = cand->ref_idx[lx];
        mv[i].mv[lx]      = cand->mv[lx];
    }
}

851 static void affine_cps_from_nb(const VVCLocalContext
*lc
,
852 const int x_nb
, int y_nb
, const int nbw
, const int nbh
, const int lx
,
853 Mv
*cps
, int num_cps
)
855 const VVCFrameContext
*fc
= lc
->fc
;
856 const CodingUnit
*cu
= lc
->cu
;
857 const int x0
= cu
->x0
;
858 const int y0
= cu
->y0
;
859 const int cb_width
= cu
->cb_width
;
860 const int cb_height
= cu
->cb_height
;
861 const MvField
* tab_mvf
= fc
->tab
.mvf
;
862 const int min_cb_log2_size
= fc
->ps
.sps
->min_cb_log2_size_y
;
863 const int min_cb_width
= fc
->ps
.pps
->min_cb_width
;
865 const int log2_nbw
= ff_log2(nbw
);
866 const int log2_nbh
= ff_log2(nbh
);
867 const int is_ctb_boundary
= !((y_nb
+ nbh
) % fc
->ps
.sps
->ctb_size_y
) && (y_nb
+ nbh
== y0
);
869 int mv_scale_hor
, mv_scale_ver
, d_hor_x
, d_ver_x
, d_hor_y
, d_ver_y
, motion_model_idc_nb
;
870 if (is_ctb_boundary
) {
871 const int min_pu_width
= fc
->ps
.pps
->min_pu_width
;
872 l
= &TAB_MVF(x_nb
, y_nb
+ nbh
- 1).mv
[lx
];
873 r
= &TAB_MVF(x_nb
+ nbw
- 1, y_nb
+ nbh
- 1).mv
[lx
];
875 const int x
= x_nb
>> min_cb_log2_size
;
876 const int y
= y_nb
>> min_cb_log2_size
;
877 motion_model_idc_nb
= SAMPLE_CTB(fc
->tab
.mmi
, x
, y
);
879 l
= &TAB_CP_MV(lx
, x_nb
, y_nb
);
880 r
= &TAB_CP_MV(lx
, x_nb
+ nbw
- 1, y_nb
) + 1;
882 mv_scale_hor
= l
->x
* (1 << 7);
883 mv_scale_ver
= l
->y
* (1 << 7);
884 d_hor_x
= (r
->x
- l
->x
) * (1 << (7 - log2_nbw
));
885 d_ver_x
= (r
->y
- l
->y
) * (1 << (7 - log2_nbw
));
886 if (!is_ctb_boundary
&& motion_model_idc_nb
== MOTION_6_PARAMS_AFFINE
) {
887 const Mv
* lb
= &TAB_CP_MV(lx
, x_nb
, y_nb
+ nbh
- 1) + 2;
888 d_hor_y
= (lb
->x
- l
->x
) * (1 << (7 - log2_nbh
));
889 d_ver_y
= (lb
->y
- l
->y
) * (1 << (7 - log2_nbh
));
895 if (is_ctb_boundary
) {
898 cps
[0].x
= mv_scale_hor
+ d_hor_x
* (x0
- x_nb
) + d_hor_y
* (y0
- y_nb
);
899 cps
[0].y
= mv_scale_ver
+ d_ver_x
* (x0
- x_nb
) + d_ver_y
* (y0
- y_nb
);
900 cps
[1].x
= mv_scale_hor
+ d_hor_x
* (x0
+ cb_width
- x_nb
) + d_hor_y
* (y0
- y_nb
);
901 cps
[1].y
= mv_scale_ver
+ d_ver_x
* (x0
+ cb_width
- x_nb
) + d_ver_y
* (y0
- y_nb
);
903 cps
[2].x
= mv_scale_hor
+ d_hor_x
* (x0
- x_nb
) + d_hor_y
* (y0
+ cb_height
- y_nb
);
904 cps
[2].y
= mv_scale_ver
+ d_ver_x
* (x0
- x_nb
) + d_ver_y
* (y0
+ cb_height
- y_nb
);
906 for (int i
= 0; i
< num_cps
; i
++) {
907 ff_vvc_round_mv(cps
+ i
, 0, 7);
908 ff_vvc_clip_mv(cps
+ i
);
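/*
 * The control-point MVs above are extrapolated from the neighbouring affine
 * block: from its stored control points (TAB_CP_MV) in the general case, or
 * from the translational MVs of its bottom row when the neighbour lies in the
 * CTU row above (is_ctb_boundary), which is all that is kept for that row.
 */
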
//derive affine neighbour's position, width and height
static int affine_neighbour_cb(const VVCFrameContext *fc, const int x_nb, const int y_nb, int *x_cb, int *y_cb, int *cbw, int *cbh)
{
    const int log2_min_cb_size = fc->ps.sps->min_cb_log2_size_y;
    const int min_cb_width     = fc->ps.pps->min_cb_width;
    const int x                = x_nb >> log2_min_cb_size;
    const int y                = y_nb >> log2_min_cb_size;
    const int motion_model_idc = SAMPLE_CTB(fc->tab.mmi, x, y);
    if (motion_model_idc) {
        *x_cb = SAMPLE_CTB(fc->tab.cb_pos_x[0], x, y);
        *y_cb = SAMPLE_CTB(fc->tab.cb_pos_y[0], x, y);
        *cbw  = SAMPLE_CTB(fc->tab.cb_width[0], x, y);
        *cbh  = SAMPLE_CTB(fc->tab.cb_height[0], x, y);
    }
    return motion_model_idc;
}

//part of 8.5.5.2 Derivation process for motion vectors and reference indices in subblock merge mode
static int affine_merge_candidate(const VVCLocalContext *lc, const int x_cand, const int y_cand, MotionInfo *mi)
{
    const VVCFrameContext *fc = lc->fc;
    int x, y, w, h, motion_model_idc;

    motion_model_idc = affine_neighbour_cb(fc, x_cand, y_cand, &x, &y, &w, &h);
    if (motion_model_idc) {
        const int min_pu_width = fc->ps.pps->min_pu_width;
        const MvField *tab_mvf = fc->tab.mvf;
        const MvField *mvf     = &TAB_MVF(x, y);

        mi->bcw_idx   = mvf->bcw_idx;
        mi->pred_flag = mvf->pred_flag;
        for (int i = 0; i < 2; i++) {
            PredFlag mask = i + 1;
            if (mi->pred_flag & mask) {
                affine_cps_from_nb(lc, x, y, w, h, i, &mi->mv[i][0], motion_model_idc + 1);
            }
            mi->ref_idx[i] = mvf->ref_idx[i];
        }
        mi->motion_model_idc = motion_model_idc;
    }
    return motion_model_idc;
}

static int affine_merge_from_nbs(NeighbourContext *ctx, const NeighbourIdx *nbs, const int num_nbs, MotionInfo *cand)
{
    const VVCLocalContext *lc = ctx->lc;
    for (int i = 0; i < num_nbs; i++) {
        Neighbour *n = &ctx->neighbours[nbs[i]];
        if (check_available(n, lc, 1) && affine_merge_candidate(lc, n->x, n->y, cand))
            return 1;
    }
    return 0;
}

#define AFFINE_MERGE_FROM_NBS(nbs) affine_merge_from_nbs(&nctx, nbs, FF_ARRAY_ELEMS(nbs), mi)

static const MvField *derive_corner_mvf(NeighbourContext *ctx, const NeighbourIdx *neighbour, const int num_neighbour)
{
    const VVCFrameContext *fc = ctx->lc->fc;
    const MvField *tab_mvf    = fc->tab.mvf;
    const int min_pu_width    = fc->ps.pps->min_pu_width;
    for (int i = 0; i < num_neighbour; i++) {
        Neighbour *n = &ctx->neighbours[neighbour[i]];
        if (check_available(n, ctx->lc, 1)) {
            return &TAB_MVF(n->x, n->y);
        }
    }
    return NULL;
}

#define DERIVE_CORNER_MV(nbs) derive_corner_mvf(nctx, nbs, FF_ARRAY_ELEMS(nbs))

// check if the pred_flags and ref_idx are the same between A, B (and C when given)
static av_always_inline int compare_pf_ref_idx(const MvField *A, const struct MvField *B, const struct MvField *C, const int lx)
{
    const PredFlag mask = (lx + 1) & A->pred_flag;
    if (!(B->pred_flag & mask))
        return 0;
    if (A->ref_idx[lx] != B->ref_idx[lx])
        return 0;
    if (C) {
        if (!(C->pred_flag & mask))
            return 0;
        if (A->ref_idx[lx] != C->ref_idx[lx])
            return 0;
    }
    return 1;
}

static av_always_inline void sb_clip_location(const VVCLocalContext *lc,
    const int x_ctb, const int y_ctb, const Mv *temp_mv, int *x, int *y)
{
    const VVCFrameContext *fc = lc->fc;
    const VVCPPS *pps         = fc->ps.pps;
    const int ctb_log2_size   = fc->ps.sps->ctb_log2_size_y;
    const int subpic_idx      = lc->sc->sh.r->curr_subpic_idx;
    const int x_end           = pps->subpic_x[subpic_idx] + pps->subpic_width[subpic_idx];
    const int y_end           = pps->subpic_y[subpic_idx] + pps->subpic_height[subpic_idx];

    *x = av_clip(*x + temp_mv->x, x_ctb, FFMIN(x_end - 1, x_ctb + (1 << ctb_log2_size) + 3)) & ~7;
    *y = av_clip(*y + temp_mv->y, y_ctb, FFMIN(y_end - 1, y_ctb + (1 << ctb_log2_size) - 1)) & ~7;
}

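/* The collocated fetch position is clamped to the subpicture and to the
 * current CTU row (allowing only a few samples into the CTU column to the
 * right), then aligned to the 8x8 grid of the compressed temporal MV field. */
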
static void sb_temporal_luma_motion(const VVCLocalContext *lc,
    const int x_ctb, const int y_ctb, const Mv *temp_mv,
    int x, int y, uint8_t *pred_flag, Mv *mv)
{
    MvField temp_col;
    Mv *mvLXCol;
    const int refIdxLx        = 0;
    const VVCFrameContext *fc = lc->fc;
    const VVCSH *sh           = &lc->sc->sh;
    const int min_pu_width    = fc->ps.pps->min_pu_width;
    VVCFrame *ref             = fc->ref->collocated_ref;
    MvField *tab_mvf          = ref->tab_dmvr_mvf;
    int colPic                = ref->poc;
    int X                     = 0;

    sb_clip_location(lc, x_ctb, y_ctb, temp_mv, &x, &y);

    temp_col   = TAB_MVF(x, y);
    mvLXCol    = mv + 0;
    *pred_flag = DERIVE_TEMPORAL_COLOCATED_MVS(1);
    if (IS_B(sh->r)) {
        X       = 1;
        mvLXCol = mv + 1;
        *pred_flag |= (DERIVE_TEMPORAL_COLOCATED_MVS(1)) << 1;
    }
}

//8.5.5.4 Derivation process for subblock-based temporal merging base motion data
static int sb_temporal_luma_motion_data(const VVCLocalContext *lc, const MvField *a1,
    const int x_ctb, const int y_ctb, MvField *ctr_mvf, Mv *temp_mv)
{
    const VVCFrameContext *fc = lc->fc;
    const RefPicList *rpl     = lc->sc->rpl;
    const CodingUnit *cu      = lc->cu;
    const int x               = cu->x0 + cu->cb_width / 2;
    const int y               = cu->y0 + cu->cb_height / 2;
    const VVCFrame *ref       = fc->ref->collocated_ref;
    int colPic;

    memset(temp_mv, 0, sizeof(*temp_mv));

    if (!ref) {
        memset(ctr_mvf, 0, sizeof(*ctr_mvf));
        return 0;
    }

    colPic = ref->poc;

    if (a1) {
        if ((a1->pred_flag & PF_L0) && colPic == rpl[L0].refs[a1->ref_idx[L0]].poc)
            *temp_mv = a1->mv[0];
        else if ((a1->pred_flag & PF_L1) && colPic == rpl[L1].refs[a1->ref_idx[L1]].poc)
            *temp_mv = a1->mv[1];
        ff_vvc_round_mv(temp_mv, 0, 4);
    }
    sb_temporal_luma_motion(lc, x_ctb, y_ctb, temp_mv, x, y, &ctr_mvf->pred_flag, ctr_mvf->mv);

    return ctr_mvf->pred_flag;
}

//8.5.5.3 Derivation process for subblock-based temporal merging candidates
static int sb_temporal_merge_candidate(const VVCLocalContext *lc, NeighbourContext *nctx, PredictionUnit *pu)
{
    const VVCFrameContext *fc = lc->fc;
    const CodingUnit *cu      = lc->cu;
    const VVCSPS *sps         = fc->ps.sps;
    const VVCPH *ph           = &fc->ps.ph;
    MotionInfo *mi            = &pu->mi;
    const int ctb_log2_size   = sps->ctb_log2_size_y;
    const int x0              = cu->x0;
    const int y0              = cu->y0;
    const NeighbourIdx n      = A1;
    const MvField *a1;
    MvField ctr_mvf;
    LOCAL_ALIGNED_8(Mv, temp_mv, [1]);
    const int x_ctb = (x0 >> ctb_log2_size) << ctb_log2_size;
    const int y_ctb = (y0 >> ctb_log2_size) << ctb_log2_size;

    if (!ph->r->ph_temporal_mvp_enabled_flag ||
        !sps->r->sps_sbtmvp_enabled_flag ||
        (cu->cb_width < 8 && cu->cb_height < 8))
        return 0;

    mi->num_sb_x = cu->cb_width >> 3;
    mi->num_sb_y = cu->cb_height >> 3;

    a1 = derive_corner_mvf(nctx, &n, 1);
    if (sb_temporal_luma_motion_data(lc, a1, x_ctb, y_ctb, &ctr_mvf, temp_mv)) {
        const int sbw = cu->cb_width / mi->num_sb_x;
        const int sbh = cu->cb_height / mi->num_sb_y;
        MvField mvf = {0};
        for (int sby = 0; sby < mi->num_sb_y; sby++) {
            for (int sbx = 0; sbx < mi->num_sb_x; sbx++) {
                int x = x0 + sbx * sbw;
                int y = y0 + sby * sbh;
                sb_temporal_luma_motion(lc, x_ctb, y_ctb, temp_mv, x + sbw / 2, y + sbh / 2, &mvf.pred_flag, mvf.mv);
                if (!mvf.pred_flag) {
                    mvf.pred_flag = ctr_mvf.pred_flag;
                    memcpy(mvf.mv, ctr_mvf.mv, sizeof(mvf.mv));
                }
                ff_vvc_set_mvf(lc, x, y, sbw, sbh, &mvf);
            }
        }
        return 1;
    }
    return 0;
}

*c0
, const MvField
*c1
, const MvField
*c2
, MotionInfo
*mi
)
1129 if (c0
&& c1
&& c2
) {
1131 for (int i
= 0; i
< 2; i
++) {
1132 PredFlag mask
= i
+ 1;
1133 if (compare_pf_ref_idx(c0
, c1
, c2
, i
)) {
1134 mi
->pred_flag
|= mask
;
1135 mi
->ref_idx
[i
] = c0
->ref_idx
[i
];
1136 mi
->mv
[i
][0] = c0
->mv
[i
];
1137 mi
->mv
[i
][1] = c1
->mv
[i
];
1138 mi
->mv
[i
][2] = c2
->mv
[i
];
1141 if (mi
->pred_flag
) {
1142 if (mi
->pred_flag
== PF_BI
)
1143 mi
->bcw_idx
= c0
->bcw_idx
;
1144 mi
->motion_model_idc
= MOTION_6_PARAMS_AFFINE
;
static int affine_merge_const2(const MvField *c0, const MvField *c1, const MvField *c3, MotionInfo *mi)
{
    if (c0 && c1 && c3) {
        mi->pred_flag = 0;
        for (int i = 0; i < 2; i++) {
            PredFlag mask = i + 1;
            if (compare_pf_ref_idx(c0, c1, c3, i)) {
                mi->pred_flag |= mask;
                mi->ref_idx[i] = c0->ref_idx[i];
                mi->mv[i][0]   = c0->mv[i];
                mi->mv[i][1]   = c1->mv[i];
                mi->mv[i][2].x = c3->mv[i].x + c0->mv[i].x - c1->mv[i].x;
                mi->mv[i][2].y = c3->mv[i].y + c0->mv[i].y - c1->mv[i].y;
                ff_vvc_clip_mv(&mi->mv[i][2]);
            }
        }
        if (mi->pred_flag) {
            mi->bcw_idx = mi->pred_flag == PF_BI ? c0->bcw_idx : 0;
            mi->motion_model_idc = MOTION_6_PARAMS_AFFINE;
            return 1;
        }
    }
    return 0;
}

static int affine_merge_const3(const MvField *c0, const MvField *c2, const MvField *c3, MotionInfo *mi)
{
    if (c0 && c2 && c3) {
        mi->pred_flag = 0;
        for (int i = 0; i < 2; i++) {
            PredFlag mask = i + 1;
            if (compare_pf_ref_idx(c0, c2, c3, i)) {
                mi->pred_flag |= mask;
                mi->ref_idx[i] = c0->ref_idx[i];
                mi->mv[i][0]   = c0->mv[i];
                mi->mv[i][1].x = c3->mv[i].x + c0->mv[i].x - c2->mv[i].x;
                mi->mv[i][1].y = c3->mv[i].y + c0->mv[i].y - c2->mv[i].y;
                ff_vvc_clip_mv(&mi->mv[i][1]);
                mi->mv[i][2]   = c2->mv[i];
            }
        }
        if (mi->pred_flag) {
            mi->bcw_idx = mi->pred_flag == PF_BI ? c0->bcw_idx : 0;
            mi->motion_model_idc = MOTION_6_PARAMS_AFFINE;
            return 1;
        }
    }
    return 0;
}

static int affine_merge_const4(const MvField *c1, const MvField *c2, const MvField *c3, MotionInfo *mi)
{
    if (c1 && c2 && c3) {
        mi->pred_flag = 0;
        for (int i = 0; i < 2; i++) {
            PredFlag mask = i + 1;
            if (compare_pf_ref_idx(c1, c2, c3, i)) {
                mi->pred_flag |= mask;
                mi->ref_idx[i] = c1->ref_idx[i];
                mi->mv[i][0].x = c1->mv[i].x + c2->mv[i].x - c3->mv[i].x;
                mi->mv[i][0].y = c1->mv[i].y + c2->mv[i].y - c3->mv[i].y;
                ff_vvc_clip_mv(&mi->mv[i][0]);
                mi->mv[i][1]   = c1->mv[i];
                mi->mv[i][2]   = c2->mv[i];
            }
        }
        if (mi->pred_flag) {
            mi->bcw_idx = mi->pred_flag == PF_BI ? c1->bcw_idx : 0;
            mi->motion_model_idc = MOTION_6_PARAMS_AFFINE;
            return 1;
        }
    }
    return 0;
}

static int affine_merge_const5(const MvField *c0, const MvField *c1, MotionInfo *mi)
{
    if (c0 && c1) {
        mi->pred_flag = 0;
        for (int i = 0; i < 2; i++) {
            PredFlag mask = i + 1;
            if (compare_pf_ref_idx(c0, c1, NULL, i)) {
                mi->pred_flag |= mask;
                mi->ref_idx[i] = c0->ref_idx[i];
                mi->mv[i][0]   = c0->mv[i];
                mi->mv[i][1]   = c1->mv[i];
            }
        }
        if (mi->pred_flag) {
            if (mi->pred_flag == PF_BI)
                mi->bcw_idx = c0->bcw_idx;
            mi->motion_model_idc = MOTION_4_PARAMS_AFFINE;
            return 1;
        }
    }
    return 0;
}

static int affine_merge_const6(const MvField *c0, const MvField *c2, const int cb_width, const int cb_height, MotionInfo *mi)
{
    if (c0 && c2) {
        const int shift = 7 + av_log2(cb_width) - av_log2(cb_height);
        mi->pred_flag = 0;
        for (int i = 0; i < 2; i++) {
            PredFlag mask = i + 1;
            if (compare_pf_ref_idx(c0, c2, NULL, i)) {
                mi->pred_flag |= mask;
                mi->ref_idx[i] = c0->ref_idx[i];
                mi->mv[i][0]   = c0->mv[i];
                mi->mv[i][1].x = (c0->mv[i].x * (1 << 7)) + ((c2->mv[i].y - c0->mv[i].y) * (1 << shift));
                mi->mv[i][1].y = (c0->mv[i].y * (1 << 7)) - ((c2->mv[i].x - c0->mv[i].x) * (1 << shift));
                ff_vvc_round_mv(&mi->mv[i][1], 0, 7);
                ff_vvc_clip_mv(&mi->mv[i][1]);
            }
        }
        if (mi->pred_flag) {
            if (mi->pred_flag == PF_BI)
                mi->bcw_idx = c0->bcw_idx;
            mi->motion_model_idc = MOTION_4_PARAMS_AFFINE;
            return 1;
        }
    }
    return 0;
}

static void affine_merge_zero_motion(const VVCLocalContext *lc, MotionInfo *mi)
{
    const CodingUnit *cu = lc->cu;

    memset(mi, 0, sizeof(*mi));
    mi->pred_flag        = PF_L0 + (IS_B(lc->sc->sh.r) << 1);
    mi->motion_model_idc = MOTION_4_PARAMS_AFFINE;
    mi->num_sb_x         = cu->cb_width >> MIN_PU_LOG2;
    mi->num_sb_y         = cu->cb_height >> MIN_PU_LOG2;
}

//8.5.5.6 Derivation process for constructed affine control point motion vector merging candidates
static int affine_merge_const_candidates(const VVCLocalContext *lc, MotionInfo *mi,
    NeighbourContext *nctx, const int merge_subblock_idx, int num_cands)
{
    const VVCFrameContext *fc = lc->fc;
    const CodingUnit *cu      = lc->cu;
    const NeighbourIdx tl[]   = { B2, B3, A2 };
    const NeighbourIdx tr[]   = { B1, B0 };
    const NeighbourIdx bl[]   = { A1, A0 };
    const MvField *c0, *c1, *c2;

    c0 = DERIVE_CORNER_MV(tl);
    c1 = DERIVE_CORNER_MV(tr);
    c2 = DERIVE_CORNER_MV(bl);

    if (fc->ps.sps->r->sps_6param_affine_enabled_flag) {
        MvField corner3, *c3 = NULL;

        if (affine_merge_const1(c0, c1, c2, mi)) {
            if (merge_subblock_idx == num_cands)
                return 1;
            num_cands++;
        }

        memset(&corner3, 0, sizeof(corner3));
        if (fc->ps.ph.r->ph_temporal_mvp_enabled_flag){
            const int available_l0 = temporal_luma_motion_vector(lc, 0, corner3.mv + 0, 0, 0, 0);
            const int available_l1 = (lc->sc->sh.r->sh_slice_type == VVC_SLICE_TYPE_B) ?
                temporal_luma_motion_vector(lc, 0, corner3.mv + 1, 1, 0, 0) : 0;

            corner3.pred_flag = available_l0 + (available_l1 << 1);
            if (corner3.pred_flag)
                c3 = &corner3;
        }

        if (affine_merge_const2(c0, c1, c3, mi)) {
            if (merge_subblock_idx == num_cands)
                return 1;
            num_cands++;
        }

        if (affine_merge_const3(c0, c2, c3, mi)) {
            if (merge_subblock_idx == num_cands)
                return 1;
            num_cands++;
        }

        if (affine_merge_const4(c1, c2, c3, mi)) {
            if (merge_subblock_idx == num_cands)
                return 1;
            num_cands++;
        }
    }

    if (affine_merge_const5(c0, c1, mi)) {
        if (merge_subblock_idx == num_cands)
            return 1;
        num_cands++;
    }

    if (affine_merge_const6(c0, c2, cu->cb_width, cu->cb_height, mi)) {
        if (merge_subblock_idx == num_cands)
            return 1;
        num_cands++;
    }
    return 0;
}

//8.5.5.2 Derivation process for motion vectors and reference indices in subblock merge mode
//return 1 if candidate is SbCol
static int sb_mv_merge_mode(const VVCLocalContext *lc, const int merge_subblock_idx, PredictionUnit *pu)
{
    const VVCSPS *sps    = lc->fc->ps.sps;
    const CodingUnit *cu = lc->cu;
    MotionInfo *mi       = &pu->mi;
    int num_cands        = 0;
    NeighbourContext nctx;

    init_neighbour_context(&nctx, lc);

    if (sb_temporal_merge_candidate(lc, &nctx, pu)) {
        if (merge_subblock_idx == num_cands)
            return 1;
        num_cands++;
    }

    pu->inter_affine_flag = 1;
    mi->num_sb_x = cu->cb_width >> MIN_PU_LOG2;
    mi->num_sb_y = cu->cb_height >> MIN_PU_LOG2;

    if (sps->r->sps_affine_enabled_flag) {
        const NeighbourIdx ak[] = { A0, A1 };
        const NeighbourIdx bk[] = { B0, B1, B2 };

        if (AFFINE_MERGE_FROM_NBS(ak)) {
            if (merge_subblock_idx == num_cands)
                return 0;
            num_cands++;
        }

        if (AFFINE_MERGE_FROM_NBS(bk)) {
            if (merge_subblock_idx == num_cands)
                return 0;
            num_cands++;
        }

        if (affine_merge_const_candidates(lc, mi, &nctx, merge_subblock_idx, num_cands))
            return 0;
    }
    affine_merge_zero_motion(lc, mi);
    return 0;
}

void ff_vvc_sb_mv_merge_mode(VVCLocalContext *lc, const int merge_subblock_idx, PredictionUnit *pu)
{
    const CodingUnit *cu = lc->cu;

    ff_vvc_set_neighbour_available(lc, cu->x0, cu->y0, cu->cb_width, cu->cb_height);
    if (!sb_mv_merge_mode(lc, merge_subblock_idx, pu)) {
        ff_vvc_store_sb_mvs(lc, pu);
    }
}

static int mvp_candidate(const VVCLocalContext *lc, const int x_cand, const int y_cand,
    const int lx, const int8_t *ref_idx, Mv *mv)
{
    const VVCFrameContext *fc = lc->fc;
    const RefPicList *rpl     = lc->sc->rpl;
    const int min_pu_width    = fc->ps.pps->min_pu_width;
    const MvField *tab_mvf    = fc->tab.mvf;
    const MvField *mvf        = &TAB_MVF(x_cand, y_cand);
    const PredFlag maskx      = lx + 1;
    const int poc             = rpl[lx].refs[ref_idx[lx]].poc;
    int available             = 0;

    if ((mvf->pred_flag & maskx) && rpl[lx].refs[mvf->ref_idx[lx]].poc == poc) {
        available = 1;
        *mv = mvf->mv[lx];
    } else {
        const int ly         = !lx;
        const PredFlag masky = ly + 1;
        if ((mvf->pred_flag & masky) && rpl[ly].refs[mvf->ref_idx[ly]].poc == poc) {
            available = 1;
            *mv = mvf->mv[ly];
        }
    }

    return available;
}

static int affine_mvp_candidate(const VVCLocalContext *lc,
    const int x_cand, const int y_cand, const int lx, const int8_t *ref_idx,
    Mv *cps, const int num_cp)
{
    const VVCFrameContext *fc = lc->fc;
    int x_nb, y_nb, nbw, nbh, motion_model_idc, available = 0;

    motion_model_idc = affine_neighbour_cb(fc, x_cand, y_cand, &x_nb, &y_nb, &nbw, &nbh);
    if (motion_model_idc) {
        const int min_pu_width = fc->ps.pps->min_pu_width;
        const MvField *tab_mvf = fc->tab.mvf;
        const MvField *mvf     = &TAB_MVF(x_nb, y_nb);
        RefPicList *rpl        = lc->sc->rpl;
        const PredFlag maskx   = lx + 1;
        const int poc          = rpl[lx].refs[ref_idx[lx]].poc;

        if ((mvf->pred_flag & maskx) && rpl[lx].refs[mvf->ref_idx[lx]].poc == poc) {
            available = 1;
            affine_cps_from_nb(lc, x_nb, y_nb, nbw, nbh, lx, cps, num_cp);
        } else {
            const int ly         = !lx;
            const PredFlag masky = ly + 1;
            if ((mvf->pred_flag & masky) && rpl[ly].refs[mvf->ref_idx[ly]].poc == poc) {
                available = 1;
                affine_cps_from_nb(lc, x_nb, y_nb, nbw, nbh, ly, cps, num_cp);
            }
        }
    }
    return available;
}

static int mvp_from_nbs(NeighbourContext *ctx,
    const NeighbourIdx *nbs, const int num_nbs, const int lx, const int8_t *ref_idx, const int amvr_shift,
    Mv *cps, const int num_cps)
{
    const VVCLocalContext *lc = ctx->lc;
    int available             = 0;

    for (int i = 0; i < num_nbs; i++) {
        Neighbour *n = &ctx->neighbours[nbs[i]];
        if (check_available(n, lc, 0)) {
            if (num_cps > 1)
                available = affine_mvp_candidate(lc, n->x, n->y, lx, ref_idx, cps, num_cps);
            else
                available = mvp_candidate(lc, n->x, n->y, lx, ref_idx, cps);
            if (available) {
                for (int c = 0; c < num_cps; c++)
                    ff_vvc_round_mv(cps + c, amvr_shift, amvr_shift);
                return 1;
            }
        }
    }
    return 0;
}

//get mvp from neighbours
#define AFFINE_MVP_FROM_NBS(nbs) \
    mvp_from_nbs(&nctx, nbs, FF_ARRAY_ELEMS(nbs), lx, ref_idx, amvr_shift, cps, num_cp)

#define MVP_FROM_NBS(nbs) \
    mvp_from_nbs(&nctx, nbs, FF_ARRAY_ELEMS(nbs), lx, ref_idx, amvr_shift, mv, 1)

static int mvp_spatial_candidates(const VVCLocalContext *lc,
    const int mvp_lx_flag, const int lx, const int8_t *ref_idx, const int amvr_shift,
    Mv *mv, int *nb_merge_cand)
{
    const NeighbourIdx ak[] = { A0, A1 };
    const NeighbourIdx bk[] = { B0, B1, B2 };
    NeighbourContext nctx;
    int available_a, num_cands = 0;
    LOCAL_ALIGNED_8(Mv, mv_a, [1]);

    init_neighbour_context(&nctx, lc);

    available_a = MVP_FROM_NBS(ak);
    if (available_a) {
        if (mvp_lx_flag == num_cands)
            return 1;
        num_cands++;
        *mv_a = *mv;
    }

    if (MVP_FROM_NBS(bk)) {
        if (!available_a || !IS_SAME_MV(mv_a, mv)) {
            if (mvp_lx_flag == num_cands)
                return 1;
            num_cands++;
        }
    }

    *nb_merge_cand = num_cands;
    return 0;
}

static int mvp_temporal_candidates(const VVCLocalContext *lc,
    const int mvp_lx_flag, const int lx, const int8_t *ref_idx, const int amvr_shift,
    Mv *mv, int *num_cands)
{
    if (temporal_luma_motion_vector(lc, ref_idx[lx], mv, lx, 1, 0)) {
        if (mvp_lx_flag == *num_cands) {
            ff_vvc_round_mv(mv, amvr_shift, amvr_shift);
            return 1;
        }
        (*num_cands)++;
    }
    return 0;
}

*lc
,
1552 const int mvp_lx_flag
, const int lx
, const int8_t ref_idx
, const int amvr_shift
,
1553 Mv
*mv
, int num_cands
)
1555 const EntryPoint
* ep
= lc
->ep
;
1556 const RefPicList
* rpl
= lc
->sc
->rpl
;
1557 const int poc
= rpl
[lx
].refs
[ref_idx
].poc
;
1559 if (ep
->num_hmvp
== 0)
1561 for (int i
= 1; i
<= FFMIN(4, ep
->num_hmvp
); i
++) {
1562 const MvField
* h
= &ep
->hmvp
[i
- 1];
1563 for (int j
= 0; j
< 2; j
++) {
1564 const int ly
= (j
? !lx
: lx
);
1565 PredFlag mask
= PF_L0
+ ly
;
1566 if ((h
->pred_flag
& mask
) && poc
== rpl
[ly
].refs
[h
->ref_idx
[ly
]].poc
) {
1567 if (mvp_lx_flag
== num_cands
) {
1569 ff_vvc_round_mv(mv
, amvr_shift
, amvr_shift
);
//8.5.2.8 Derivation process for luma motion vector prediction
static void mvp(const VVCLocalContext *lc, const int mvp_lx_flag, const int lx,
    const int8_t *ref_idx, const int amvr_shift, Mv *mv)
{
    int num_cands = 0;

    if (mvp_spatial_candidates(lc, mvp_lx_flag, lx, ref_idx, amvr_shift, mv, &num_cands))
        return;

    if (mvp_temporal_candidates(lc, mvp_lx_flag, lx, ref_idx, amvr_shift, mv, &num_cands))
        return;

    if (mvp_history_candidates(lc, mvp_lx_flag, lx, ref_idx[lx], amvr_shift, mv, num_cands))
        return;

    memset(mv, 0, sizeof(*mv));
}

void ff_vvc_mvp(VVCLocalContext *lc, const int *mvp_lx_flag, const int amvr_shift, MotionInfo *mi)
{
    const CodingUnit *cu = lc->cu;

    mi->num_sb_x = 1;
    mi->num_sb_y = 1;

    ff_vvc_set_neighbour_available(lc, cu->x0, cu->y0, cu->cb_width, cu->cb_height);
    if (mi->pred_flag != PF_L1)
        mvp(lc, mvp_lx_flag[L0], L0, mi->ref_idx, amvr_shift, &mi->mv[L0][0]);
    if (mi->pred_flag != PF_L0)
        mvp(lc, mvp_lx_flag[L1], L1, mi->ref_idx, amvr_shift, &mi->mv[L1][0]);
}

static int ibc_spatial_candidates(const VVCLocalContext *lc, const int merge_idx, Mv *const cand_list, int *nb_merge_cand)
{
    const CodingUnit *cu      = lc->cu;
    const VVCFrameContext *fc = lc->fc;
    const int min_pu_width    = fc->ps.pps->min_pu_width;
    const MvField *tab_mvf    = fc->tab.mvf;
    const int is_gt4by4       = (cu->cb_width * cu->cb_height) > 16;
    int num_cands             = 0;

    NeighbourContext nctx;
    Neighbour *a1 = &nctx.neighbours[A1];
    Neighbour *b1 = &nctx.neighbours[B1];

    if (!is_gt4by4) {
        *nb_merge_cand = 0;
        return 0;
    }

    init_neighbour_context(&nctx, lc);

    if (check_available(a1, lc, 0)) {
        cand_list[num_cands++] = TAB_MVF(a1->x, a1->y).mv[L0];
        if (num_cands > merge_idx)
            return 1;
    }
    if (check_available(b1, lc, 0)) {
        const MvField *mvf = &TAB_MVF(b1->x, b1->y);
        if (!num_cands || !IS_SAME_MV(&cand_list[0], mvf->mv)) {
            cand_list[num_cands++] = mvf->mv[L0];
            if (num_cands > merge_idx)
                return 1;
        }
    }

    *nb_merge_cand = num_cands;
    return 0;
}

static int ibc_history_candidates(const VVCLocalContext *lc,
    const int merge_idx, Mv *cand_list, int *nb_merge_cand)
{
    const CodingUnit *cu = lc->cu;
    const EntryPoint *ep = lc->ep;
    const int is_gt4by4  = (cu->cb_width * cu->cb_height) > 16;
    int num_cands        = *nb_merge_cand;

    for (int i = 1; i <= ep->num_hmvp_ibc; i++) {
        int same_motion    = 0;
        const MvField *mvf = &ep->hmvp_ibc[ep->num_hmvp_ibc - i];
        for (int j = 0; j < *nb_merge_cand; j++) {
            same_motion = is_gt4by4 && i == 1 && IS_SAME_MV(&mvf->mv[L0], &cand_list[j]);
            if (same_motion)
                break;
        }
        if (!same_motion) {
            cand_list[num_cands++] = mvf->mv[L0];
            if (num_cands > merge_idx)
                return 1;
        }
    }

    *nb_merge_cand = num_cands;
    return 0;
}

#define IBC_SHIFT(v) ((v) >= (1 << (MV_BITS - 1)) ? ((v) - (1 << MV_BITS)) : (v))

static inline void ibc_add_mvp(Mv *mv, Mv *mvp, const int amvr_shift)
{
    ff_vvc_round_mv(mv, amvr_shift, 0);
    ff_vvc_round_mv(mvp, amvr_shift, amvr_shift);
    mv->x = IBC_SHIFT(mv->x + mvp->x);
    mv->y = IBC_SHIFT(mv->y + mvp->y);
}

*lc
, const int merge_idx
, Mv
*mv
)
1688 const CodingUnit
*cu
= lc
->cu
;
1689 LOCAL_ALIGNED_8(Mv
, cand_list
, [MRG_MAX_NUM_CANDS
]);
1692 ff_vvc_set_neighbour_available(lc
, cu
->x0
, cu
->y0
, cu
->cb_width
, cu
->cb_height
);
1693 if (ibc_spatial_candidates(lc
, merge_idx
, cand_list
, &nb_cands
) ||
1694 ibc_history_candidates(lc
, merge_idx
, cand_list
, &nb_cands
)) {
1695 *mv
= cand_list
[merge_idx
];
1700 memset(mv
, 0, sizeof(*mv
));
1703 static int ibc_check_mv(VVCLocalContext
*lc
, Mv
*mv
)
1705 const VVCFrameContext
*fc
= lc
->fc
;
1706 const VVCSPS
*sps
= lc
->fc
->ps
.sps
;
1707 const CodingUnit
*cu
= lc
->cu
;
1708 const Mv
*bv
= &cu
->pu
.mi
.mv
[L0
][0];
1710 if (sps
->ctb_size_y
< ((cu
->y0
+ (bv
->y
>> 4)) & (sps
->ctb_size_y
- 1)) + cu
->cb_height
) {
1711 av_log(fc
->log_ctx
, AV_LOG_ERROR
, "IBC region spans multiple CTBs.\n");
1712 return AVERROR_INVALIDDATA
;
1718 int ff_vvc_mvp_ibc(VVCLocalContext
*lc
, const int mvp_l0_flag
, const int amvr_shift
, Mv
*mv
)
1720 LOCAL_ALIGNED_8(Mv
, mvp
, [1]);
1722 ibc_merge_candidates(lc
, mvp_l0_flag
, mvp
);
1723 ibc_add_mvp(mv
, mvp
, amvr_shift
);
1724 return ibc_check_mv(lc
, mv
);
int ff_vvc_luma_mv_merge_ibc(VVCLocalContext *lc, const int merge_idx, Mv *mv)
{
    ibc_merge_candidates(lc, merge_idx, mv);
    return ibc_check_mv(lc, mv);
}

static int affine_mvp_constructed_cp(NeighbourContext *ctx,
    const NeighbourIdx *neighbour, const int num_neighbour,
    const int lx, const int8_t ref_idx, const int amvr_shift, Mv *cp)
{
    const VVCLocalContext *lc = ctx->lc;
    const VVCFrameContext *fc = lc->fc;
    const MvField *tab_mvf    = fc->tab.mvf;
    const int min_pu_width    = fc->ps.pps->min_pu_width;
    const RefPicList *rpl     = lc->sc->rpl;
    int available             = 0;

    for (int i = 0; i < num_neighbour; i++) {
        Neighbour *n = &ctx->neighbours[neighbour[i]];
        if (check_available(n, ctx->lc, 0)) {
            const PredFlag maskx = lx + 1;
            const MvField *mvf   = &TAB_MVF(n->x, n->y);
            const int poc        = rpl[lx].refs[ref_idx].poc;
            if ((mvf->pred_flag & maskx) && rpl[lx].refs[mvf->ref_idx[lx]].poc == poc) {
                available = 1;
                *cp = mvf->mv[lx];
            } else {
                const int ly         = !lx;
                const PredFlag masky = ly + 1;
                if ((mvf->pred_flag & masky) && rpl[ly].refs[mvf->ref_idx[ly]].poc == poc) {
                    available = 1;
                    *cp = mvf->mv[ly];
                }
            }
            if (available) {
                ff_vvc_round_mv(cp, amvr_shift, amvr_shift);
                return 1;
            }
        }
    }
    return 0;
}

#define AFFINE_MVP_CONSTRUCTED_CP(cands, cp)                                    \
    affine_mvp_constructed_cp(nctx, cands, FF_ARRAY_ELEMS(cands), lx, ref_idx,  \
                              amvr_shift, cp)

//8.5.5.8 Derivation process for constructed affine control point motion vector prediction candidates
static int affine_mvp_const1(NeighbourContext *nctx,
    const int lx, const int8_t ref_idx, const int amvr_shift,
    Mv *cps, int *available)
{
    const NeighbourIdx tl[] = { B2, B3, A2 };
    const NeighbourIdx tr[] = { B1, B0 };
    const NeighbourIdx bl[] = { A1, A0 };

    available[0] = AFFINE_MVP_CONSTRUCTED_CP(tl, cps + 0);
    available[1] = AFFINE_MVP_CONSTRUCTED_CP(tr, cps + 1);
    available[2] = AFFINE_MVP_CONSTRUCTED_CP(bl, cps + 2);
    return available[0] && available[1];
}

static void affine_mvp_const2(const int idx, Mv *cps, const int num_cp)
{
    const Mv mv = cps[idx];
    for (int j = 0; j < num_cp; j++)
        cps[j] = mv;
}

//8.5.5.7 Derivation process for luma affine control point motion vector predictors
static void affine_mvp(const VVCLocalContext *lc,
    const int mvp_lx_flag, const int lx, const int8_t *ref_idx, const int amvr_shift,
    MotionModelIdc motion_model_idc, Mv *cps)
{
    const NeighbourIdx ak[] = { A0, A1 };
    const NeighbourIdx bk[] = { B0, B1, B2 };
    const int num_cp        = motion_model_idc + 1;
    NeighbourContext nctx;
    int available[MAX_CONTROL_POINTS];
    int num_cands = 0;

    init_neighbour_context(&nctx, lc);

    if (AFFINE_MVP_FROM_NBS(ak)) {
        if (mvp_lx_flag == num_cands)
            return;
        num_cands++;
    }

    if (AFFINE_MVP_FROM_NBS(bk)) {
        if (mvp_lx_flag == num_cands)
            return;
        num_cands++;
    }

    if (affine_mvp_const1(&nctx, lx, ref_idx[lx], amvr_shift, cps, available)) {
        if (available[2] || motion_model_idc == MOTION_4_PARAMS_AFFINE) {
            if (mvp_lx_flag == num_cands)
                return;
            num_cands++;
        }
    }

    for (int i = 2; i >= 0; i--) {
        if (available[i]) {
            if (mvp_lx_flag == num_cands) {
                affine_mvp_const2(i, cps, num_cp);
                return;
            }
            num_cands++;
        }
    }

    if (temporal_luma_motion_vector(lc, ref_idx[lx], cps, lx, 1, 0)) {
        if (mvp_lx_flag == num_cands) {
            ff_vvc_round_mv(cps, amvr_shift, amvr_shift);
            for (int i = 1; i < num_cp; i++)
                cps[i] = cps[0];
            return;
        }
        num_cands++;
    }

    memset(cps, 0, num_cp * sizeof(Mv));
}

void ff_vvc_affine_mvp(VVCLocalContext *lc, const int *mvp_lx_flag, const int amvr_shift, MotionInfo *mi)
{
    const CodingUnit *cu = lc->cu;

    mi->num_sb_x = cu->cb_width >> MIN_PU_LOG2;
    mi->num_sb_y = cu->cb_height >> MIN_PU_LOG2;

    ff_vvc_set_neighbour_available(lc, cu->x0, cu->y0, cu->cb_width, cu->cb_height);
    if (mi->pred_flag != PF_L1)
        affine_mvp(lc, mvp_lx_flag[L0], L0, mi->ref_idx, amvr_shift, mi->motion_model_idc, &mi->mv[L0][0]);
    if (mi->pred_flag != PF_L0)
        affine_mvp(lc, mvp_lx_flag[L1], L1, mi->ref_idx, amvr_shift, mi->motion_model_idc, &mi->mv[L1][0]);
}

//8.5.2.14 Rounding process for motion vectors
void ff_vvc_round_mv(Mv *mv, const int lshift, const int rshift)
{
    if (rshift) {
        const int offset = 1 << (rshift - 1);
        mv->x = ((mv->x + offset - (mv->x >= 0)) >> rshift) * (1 << lshift);
        mv->y = ((mv->y + offset - (mv->y >= 0)) >> rshift) * (1 << lshift);
    } else {
        mv->x = mv->x * (1 << lshift);
        mv->y = mv->y * (1 << lshift);
    }
}

*mv
)
1885 mv
->x
= av_clip(mv
->x
, -(1 << 17), (1 << 17) - 1);
1886 mv
->y
= av_clip(mv
->y
, -(1 << 17), (1 << 17) - 1);
//8.5.2.1 Derivation process for motion vector components and reference indices
static av_always_inline int is_greater_mer(const VVCFrameContext *fc, const int x0, const int y0, const int x0_br, const int y0_br)
{
    const uint8_t plevel = fc->ps.sps->log2_parallel_merge_level;

    return x0_br >> plevel > x0 >> plevel &&
        y0_br >> plevel > y0 >> plevel;
}

static void update_hmvp(MvField *hmvp, int *num_hmvp, const MvField *mvf,
    int (*compare)(const MvField *n, const MvField *o))
{
    int i;
    for (i = 0; i < *num_hmvp; i++) {
        if (compare(mvf, hmvp + i)) {
            (*num_hmvp)--;
            break;
        }
    }
    if (i == MAX_NUM_HMVP_CANDS) {
        (*num_hmvp)--;
        i = 0;
    }

    memmove(hmvp + i, hmvp + i + 1, (*num_hmvp - i) * sizeof(MvField));
    hmvp[(*num_hmvp)++] = *mvf;
}

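/* The HMVP list behaves as a FIFO with duplicate pruning: an entry equal to
 * the new motion (according to *compare) is removed first, the oldest entry
 * is dropped when the list already holds MAX_NUM_HMVP_CANDS entries, and the
 * new motion is appended as the most recent candidate. */
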
static int compare_l0_mv(const MvField *n, const MvField *o)
{
    return IS_SAME_MV(&n->mv[L0], &o->mv[L0]);
}

//8.6.2.4 Derivation process for IBC history-based block vector candidates
//8.5.2.16 Updating process for the history-based motion vector predictor candidate list
void ff_vvc_update_hmvp(VVCLocalContext *lc, const MotionInfo *mi)
{
    const VVCFrameContext *fc = lc->fc;
    const CodingUnit *cu      = lc->cu;
    const int min_pu_width    = fc->ps.pps->min_pu_width;
    const MvField *tab_mvf    = fc->tab.mvf;
    EntryPoint *ep            = lc->ep;

    if (cu->pred_mode == MODE_IBC) {
        if (cu->cb_width * cu->cb_height <= 16)
            return;
        update_hmvp(ep->hmvp_ibc, &ep->num_hmvp_ibc, &TAB_MVF(cu->x0, cu->y0), compare_l0_mv);
    } else {
        if (!is_greater_mer(fc, cu->x0, cu->y0, cu->x0 + cu->cb_width, cu->y0 + cu->cb_height))
            return;
        update_hmvp(ep->hmvp, &ep->num_hmvp, &TAB_MVF(cu->x0, cu->y0), compare_mv_ref_idx);
    }
}

MvField *ff_vvc_get_mvf(const VVCFrameContext *fc, const int x0, const int y0)
{
    const int min_pu_width = fc->ps.pps->min_pu_width;
    MvField *tab_mvf       = fc->tab.mvf;

    return &TAB_MVF(x0, y0);
}