hardware/intel/intel-driver
Revisão | 32de8cf8314d6ae2377230f9863078de0e9a3635 (tree) |
---|---|
Hora | 2014-11-11 19:57:19 |
Autor | Gwenole Beauchesne <gwenole.beauchesne@inte...> |
Committer | Gwenole Beauchesne |
vpp: enable advanced video scaling in VPP pipelines too.
Honour advanced video scaling, i.e. propagate vaPutSurface() scaling
flags, but also VPP filter flags. Also enable the sharp 8x8 filter for
high-quality scaling options, while adaptive video scaling is disabled
(bypassed) for now.
Signed-off-by: Gwenole Beauchesne <gwenole.beauchesne@intel.com>
@@ -1236,12 +1236,13 @@ int hsw_veb_post_format_convert(VADriverContextP ctx, | ||
1236 | 1236 | vpp_surface_convert(ctx, obj_surface, proc_ctx->surface_output_object); |
1237 | 1237 | |
1238 | 1238 | } else if(proc_ctx->format_convert_flags & POST_SCALING_CONVERT) { |
1239 | + VAProcPipelineParameterBuffer * const pipe = proc_ctx->pipeline_param; | |
1239 | 1240 | /* scaling, convert and copy NV12 to YV12/IMC3/IMC2/RGBA output*/ |
1240 | 1241 | assert(obj_surface->fourcc == VA_FOURCC_NV12); |
1241 | 1242 | |
1242 | 1243 | /* first step :surface scaling */ |
1243 | 1244 | vpp_surface_scaling(ctx, obj_surface, |
1244 | - proc_ctx->surface_output_scaled_object, 0); | |
1245 | + proc_ctx->surface_output_scaled_object, pipe->filter_flags); | |
1245 | 1246 | |
1246 | 1247 | /* second step: color format convert and copy to output */ |
1247 | 1248 | obj_surface = proc_ctx->surface_output_object; |
@@ -895,7 +895,7 @@ gen8_pp_plx_avs_initialize(VADriverContextP ctx, struct i965_post_processing_con | ||
895 | 895 | |
896 | 896 | sx = (float)dst_rect->width / src_rect->width; |
897 | 897 | sy = (float)dst_rect->height / src_rect->height; |
898 | - avs_update_coefficients(avs, sx, sy, 0); | |
898 | + avs_update_coefficients(avs, sx, sy, pp_context->filter_flags); | |
899 | 899 | |
900 | 900 | assert(avs->config->num_phases == 16); |
901 | 901 | for (i = 0; i <= 16; i++) { |
@@ -962,7 +962,8 @@ gen8_pp_plx_avs_initialize(VADriverContextP ctx, struct i965_post_processing_con | ||
962 | 962 | intel_format_convert(coeffs->uv_k_v[3], 1, 6, 1); |
963 | 963 | } |
964 | 964 | |
965 | - sampler_8x8->dw152.default_sharpness_level = 0; | |
965 | + sampler_8x8->dw152.default_sharpness_level = | |
966 | + -avs_is_needed(pp_context->filter_flags); | |
966 | 967 | sampler_8x8->dw153.adaptive_filter_for_all_channel = 1; |
967 | 968 | sampler_8x8->dw153.bypass_y_adaptive_filtering = 1; |
968 | 969 | sampler_8x8->dw153.bypass_x_adaptive_filtering = 1; |
@@ -2483,7 +2483,7 @@ pp_nv12_avs_initialize(VADriverContextP ctx, struct i965_post_processing_context | ||
2483 | 2483 | |
2484 | 2484 | sx = (float)dst_rect->width / src_rect->width; |
2485 | 2485 | sy = (float)dst_rect->height / src_rect->height; |
2486 | - avs_update_coefficients(avs, sx, sy, 0); | |
2486 | + avs_update_coefficients(avs, sx, sy, pp_context->filter_flags); | |
2487 | 2487 | |
2488 | 2488 | assert(avs->config->num_phases == 16); |
2489 | 2489 | for (i = 0; i <= 16; i++) { |
@@ -2545,7 +2545,8 @@ pp_nv12_avs_initialize(VADriverContextP ctx, struct i965_post_processing_context | ||
2545 | 2545 | /* Adaptive filter for all channels (DW4.15) */ |
2546 | 2546 | sampler_8x8_state->coefficients[0].dw4.table_1x_filter_c1 = 1U << 7; |
2547 | 2547 | |
2548 | - sampler_8x8_state->dw136.default_sharpness_level = 0; | |
2548 | + sampler_8x8_state->dw136.default_sharpness_level = | |
2549 | + -avs_is_needed(pp_context->filter_flags); | |
2549 | 2550 | sampler_8x8_state->dw137.ilk.bypass_y_adaptive_filtering = 1; |
2550 | 2551 | sampler_8x8_state->dw137.ilk.bypass_x_adaptive_filtering = 1; |
2551 | 2552 | dri_bo_unmap(pp_context->sampler_state_table.bo_8x8); |
@@ -2838,7 +2839,7 @@ gen7_pp_plx_avs_initialize(VADriverContextP ctx, struct i965_post_processing_con | ||
2838 | 2839 | |
2839 | 2840 | sx = (float)dst_rect->width / src_rect->width; |
2840 | 2841 | sy = (float)dst_rect->height / src_rect->height; |
2841 | - avs_update_coefficients(avs, sx, sy, 0); | |
2842 | + avs_update_coefficients(avs, sx, sy, pp_context->filter_flags); | |
2842 | 2843 | |
2843 | 2844 | assert(avs->config->num_phases == 16); |
2844 | 2845 | for (i = 0; i <= 16; i++) { |
@@ -2897,7 +2898,8 @@ gen7_pp_plx_avs_initialize(VADriverContextP ctx, struct i965_post_processing_con | ||
2897 | 2898 | intel_format_convert(coeffs->uv_k_v[3], 1, 6, 1); |
2898 | 2899 | } |
2899 | 2900 | |
2900 | - sampler_8x8_state->dw136.default_sharpness_level = 0; | |
2901 | + sampler_8x8_state->dw136.default_sharpness_level = | |
2902 | + -avs_is_needed(pp_context->filter_flags); | |
2901 | 2903 | if (IS_HASWELL(i965->intel.device_info)) { |
2902 | 2904 | sampler_8x8_state->dw137.hsw.adaptive_filter_for_all_channel = 1; |
2903 | 2905 | sampler_8x8_state->dw137.hsw.bypass_y_adaptive_filtering = 1; |
@@ -4809,6 +4811,8 @@ i965_scaling_processing( | ||
4809 | 4811 | if (HAS_VPP(i965)) { |
4810 | 4812 | struct i965_surface src_surface; |
4811 | 4813 | struct i965_surface dst_surface; |
4814 | + struct i965_post_processing_context *pp_context; | |
4815 | + unsigned int filter_flags; | |
4812 | 4816 | |
4813 | 4817 | _i965LockMutex(&i965->pp_mutex); |
4814 | 4818 |
@@ -4819,10 +4823,16 @@ i965_scaling_processing( | ||
4819 | 4823 | dst_surface.type = I965_SURFACE_TYPE_SURFACE; |
4820 | 4824 | dst_surface.flags = I965_SURFACE_FLAG_FRAME; |
4821 | 4825 | |
4822 | - va_status = i965_post_processing_internal(ctx, i965->pp_context, | |
4826 | + pp_context = i965->pp_context; | |
4827 | + filter_flags = pp_context->filter_flags; | |
4828 | + pp_context->filter_flags = va_flags; | |
4829 | + | |
4830 | + va_status = i965_post_processing_internal(ctx, pp_context, | |
4823 | 4831 | &src_surface, src_rect, &dst_surface, dst_rect, |
4824 | 4832 | avs_is_needed(va_flags) ? PP_NV12_AVS : PP_NV12_SCALING, NULL); |
4825 | 4833 | |
4834 | + pp_context->filter_flags = filter_flags; | |
4835 | + | |
4826 | 4836 | _i965UnlockMutex(&i965->pp_mutex); |
4827 | 4837 | } |
4828 | 4838 |
@@ -4849,6 +4859,7 @@ i965_post_processing( | ||
4849 | 4859 | VAStatus status; |
4850 | 4860 | struct i965_surface src_surface; |
4851 | 4861 | struct i965_surface dst_surface; |
4862 | + struct i965_post_processing_context *pp_context; | |
4852 | 4863 | |
4853 | 4864 | /* Currently only support post processing for NV12 surface */ |
4854 | 4865 | if (obj_surface->fourcc != VA_FOURCC_NV12) |
@@ -4856,6 +4867,8 @@ i965_post_processing( | ||
4856 | 4867 | |
4857 | 4868 | _i965LockMutex(&i965->pp_mutex); |
4858 | 4869 | |
4870 | + pp_context = i965->pp_context; | |
4871 | + pp_context->filter_flags = va_flags; | |
4859 | 4872 | if (avs_is_needed(va_flags)) { |
4860 | 4873 | struct i965_render_state *render_state = &i965->render_state; |
4861 | 4874 | struct intel_region *dest_region = render_state->draw_region; |
@@ -4877,13 +4890,13 @@ i965_post_processing( | ||
4877 | 4890 | obj_surface = SURFACE(out_surface_id); |
4878 | 4891 | assert(obj_surface); |
4879 | 4892 | i965_check_alloc_surface_bo(ctx, obj_surface, 0, VA_FOURCC_NV12, SUBSAMPLE_YUV420); |
4880 | - i965_vpp_clear_surface(ctx, i965->pp_context, obj_surface, 0); | |
4893 | + i965_vpp_clear_surface(ctx, pp_context, obj_surface, 0); | |
4881 | 4894 | |
4882 | 4895 | dst_surface.base = (struct object_base *)obj_surface; |
4883 | 4896 | dst_surface.type = I965_SURFACE_TYPE_SURFACE; |
4884 | 4897 | dst_surface.flags = I965_SURFACE_FLAG_FRAME; |
4885 | 4898 | |
4886 | - i965_post_processing_internal(ctx, i965->pp_context, | |
4899 | + i965_post_processing_internal(ctx, pp_context, | |
4887 | 4900 | &src_surface, |
4888 | 4901 | src_rect, |
4889 | 4902 | &dst_surface, |
@@ -5652,6 +5665,7 @@ i965_proc_picture(VADriverContextP ctx, | ||
5652 | 5665 | NULL); |
5653 | 5666 | } else { |
5654 | 5667 | |
5668 | + proc_context->pp_context.filter_flags = pipeline_param->filter_flags; | |
5655 | 5669 | i965_post_processing_internal(ctx, &proc_context->pp_context, |
5656 | 5670 | &src_surface, |
5657 | 5671 | &src_rect, |
@@ -491,6 +491,15 @@ struct i965_post_processing_context | ||
491 | 491 | struct pp_dn_context pp_dn_context; |
492 | 492 | void *private_context; /* pointer to the current private context */ |
493 | 493 | void *pipeline_param; /* pointer to the pipeline parameter */ |
494 | + /** | |
495 | + * \ref Extra filter flags used as a fast path. | |
496 | + * | |
497 | + * This corresponds to vaPutSurface() flags, for direct rendering, | |
498 | + * or to VAProcPipelineParameterBuffer.filter_flags when the VPP | |
499 | + * interfaces are used. In the latter case, this is just a copy of | |
500 | + * that field. | |
501 | + */ | |
502 | + unsigned int filter_flags; | |
494 | 503 | |
495 | 504 | int (*pp_x_steps)(void *private_context); |
496 | 505 | int (*pp_y_steps)(void *private_context); |