diff --git a/drivers/media/platform/cix/Kconfig b/drivers/media/platform/cix/Kconfig index 6f9eeea57e57..2a8d56bea1c2 100644 --- a/drivers/media/platform/cix/Kconfig +++ b/drivers/media/platform/cix/Kconfig @@ -1,2 +1,4 @@ # SPDX-License-Identifier: GPL-2.0-only source "drivers/media/platform/cix/linlon-aeu/Kconfig" + +source "drivers/media/platform/cix/cix_vpu/Kconfig" diff --git a/drivers/media/platform/cix/Makefile b/drivers/media/platform/cix/Makefile index 150bb04e13d3..53512b2cd619 100755 --- a/drivers/media/platform/cix/Makefile +++ b/drivers/media/platform/cix/Makefile @@ -1,2 +1,4 @@ # SPDX-License-Identifier: GPL-2.0 obj-$(CONFIG_LINLON_AEU) += linlon-aeu/ + +obj-$(CONFIG_VIDEO_LINLON) += cix_vpu/ diff --git a/drivers/media/platform/cix/cix_vpu/Kbuild b/drivers/media/platform/cix/cix_vpu/Kbuild new file mode 100755 index 000000000000..3f5396ac5b3e --- /dev/null +++ b/drivers/media/platform/cix/cix_vpu/Kbuild @@ -0,0 +1,52 @@ +########################################################### +# Set the include-path according to the defined interface. +########################################################### + +ccflags-y += -I$(src) -I$(src)/if -I$(src)/dev -I$(src)/if/v4l2 -I$(src)/external +ccflags-$(CONFIG_VIDEO_LINLON_FTRACE) += -DMVX_LOG_FTRACE_ENABLE +ccflags-$(CONFIG_VIDEO_LINLON_PRINT_FILE) += -DMVX_LOG_PRINT_FILE_ENABLE +ccflags-y += $(EXTRA_CCFLAGS) + +########################################################### +# Define build targets and what files to include. +########################################################### + +# Amvx module +obj-$(CONFIG_VIDEO_LINLON) := amvx.o + +# Add objects for if module. +if-y := if/mvx_if.o \ + if/mvx_buffer.o \ + if/mvx_firmware_cache.o \ + if/mvx_firmware.o \ + if/mvx_firmware_v2.o \ + if/mvx_firmware_v3.o \ + if/mvx_mmu.o \ + if/mvx_secure.o \ + if/mvx_session.o + +# Add external interface. 
+if-y += if/v4l2/mvx_ext_v4l2.o \ + if/v4l2/mvx_v4l2_buffer.o \ + if/v4l2/mvx_v4l2_session.o \ + if/v4l2/mvx_v4l2_vidioc.o \ + if/v4l2/mvx_v4l2_fops.o \ + if/v4l2/mvx_v4l2_ctrls.o + +# Add objects for dev module. +dev-y := dev/mvx_dev.o \ + dev/mvx_hwreg.o \ + dev/mvx_hwreg_v500.o \ + dev/mvx_hwreg_v550.o \ + dev/mvx_hwreg_v61.o \ + dev/mvx_hwreg_v52_v76.o \ + dev/mvx_lsid.o \ + dev/mvx_scheduler.o \ + mvx_pm_runtime.o + +# Add driver objects. +amvx-y := mvx_driver.o \ + mvx_seq.o \ + mvx_log.o \ + mvx_log_group.o \ + $(if-y) $(dev-y) diff --git a/drivers/media/platform/cix/cix_vpu/Kconfig b/drivers/media/platform/cix/cix_vpu/Kconfig new file mode 100755 index 000000000000..9a8b0ae2f02a --- /dev/null +++ b/drivers/media/platform/cix/cix_vpu/Kconfig @@ -0,0 +1,20 @@ +config VIDEO_LINLON + tristate "Linlon VPU support." + depends on VIDEO_DEV + default m + help + This enables support for the Linlon VPU family. + +config VIDEO_LINLON_FTRACE + depends on VIDEO_LINLON + bool "Send kernel space logs to ftrace." + default n + help + Send kernel space logs to ftrace. + +config VIDEO_LINLON_PRINT_FILE + depends on VIDEO_LINLON + bool "Append file and line number to kernel space log messages." + default y + help + Append file and line number to kernel space log messages. diff --git a/drivers/media/platform/cix/cix_vpu/Makefile b/drivers/media/platform/cix/cix_vpu/Makefile new file mode 100755 index 000000000000..7861be84b1bf --- /dev/null +++ b/drivers/media/platform/cix/cix_vpu/Makefile @@ -0,0 +1,11 @@ +all: mono_v4l2 + +mono_v4l2: + @env CONFIG_VIDEO_LINLON=m CONFIG_VIDEO_LINLON_MONO=y CONFIG_VIDEO_LINLON_IF_V4L2=y $(MAKE) -C $(KDIR) M=$(CURDIR) modules + +clean: + @rm -rf *.ko + @find . 
-type f -name '*.o' -delete + @rm -rf *.mod.c + @rm -f Module.symvers + @rm -f modules.order diff --git a/drivers/media/platform/cix/cix_vpu/dev/mvx_dev.c b/drivers/media/platform/cix/cix_vpu/dev/mvx_dev.c new file mode 100755 index 000000000000..ec4367b40768 --- /dev/null +++ b/drivers/media/platform/cix/cix_vpu/dev/mvx_dev.c @@ -0,0 +1,1280 @@ +/* + * The confidential and proprietary information contained in this file may + * only be used by a person authorised under and to the extent permitted + * by a subsisting licensing agreement from Arm Technology (China) Co., Ltd. + * + * (C) COPYRIGHT 2021-2021 Arm Technology (China) Co., Ltd. + * ALL RIGHTS RESERVED + * + * This entire notice must be reproduced on all copies of this file + * and copies of this file may only be made by a person if such person is + * permitted to do so under the terms of a subsisting license agreement + * from Arm Technology (China) Co., Ltd. + * + * SPDX-License-Identifier: GPL-2.0-only + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ * + */ + +/**************************************************************************** + * Includes + ****************************************************************************/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mvx_bitops.h" +#include "mvx_dev.h" +#include "mvx_hwreg.h" +#include "mvx_if.h" +#include "mvx_scheduler.h" +#include "mvx_session.h" +#include "mvx_log_group.h" +#include "mvx_pm_runtime.h" + +/**************************************************************************** + * Defines + ****************************************************************************/ + +/** + * Name of the MVx dev device. + */ +#define MVX_DEV_NAME "amvx_dev" + +#define MVX_PCI_VENDOR 0x13b5 +#define MVX_PCI_DEVICE 0x0001 + +#define MVE_CLK_NAME "vpu_clk" +#define MVE_RST_NAME "vpu_reset" +#define MVE_RCSU_RST_NAME "vpu_rcsu_reset" + +#define MVX_MAX_NUMBER_OF_PMDOMAINS 5 + +#define VPU_CORE_ACPI_REF_POWERSOURCE 1 +#define VPU_CORE_ACPI_NAME_PREFIX "CRE" +#define VPU_CORE_ACPI_MEMREPAIR_FUNC "REPR" +#define to_acpi_device(d) container_of(d, struct acpi_device, dev) + +static uint busctrl_ref = MVE_BUSTCTRL_REF_DEFAULT; +static uint busctrl_split = MVE_BUSTCTRL_SPLIT_512; +module_param(busctrl_ref, uint, 0660); +module_param(busctrl_split, uint, 0660); + +static bool disable_dfs = 0; +module_param(disable_dfs, bool, 0660); + +/**************************************************************************** + * Types + ****************************************************************************/ + +struct mvx_freq_table { + unsigned int cores; + unsigned long load; + unsigned long freq; +}; + +/** + * struct mvx_dev_ctx - Private context for the MVx dev device. 
+ */ +struct mvx_dev_ctx { + struct device *dev; + struct device *pmdomains[MVX_MAX_NUMBER_OF_PMDOMAINS]; + unsigned int pmdomains_cnt; + struct clk *clk; + struct reset_control *rstc; + struct mvx_if_ops *if_ops; + struct mvx_client_ops client_ops; + struct mvx_hwreg hwreg; + struct mvx_sched scheduler; + unsigned int irq; + struct workqueue_struct *work_queue; + struct work_struct work; + unsigned long irqve; + struct dentry *dentry; + + struct device *opp_pmdomain; + struct device_link *opp_dl; + struct devfreq_dev_profile devfreq_profile; + struct devfreq *devfreq; + unsigned long target_freq; +}; + +/** + * struct mvx_client_session - Device session. + * + * When the if module registers a session this structure is returned. + */ +struct mvx_client_session { + struct mvx_dev_ctx *ctx; + struct mvx_sched_session session; +}; + +/**************************************************************************** + * Static variables and functions + ****************************************************************************/ + +const char * const vpu_pmdomains[MVX_MAX_NUMBER_OF_PMDOMAINS] = { + "vpu_top", "vpu_core0", "vpu_core1", "vpu_core2", "vpu_core3"}; + +static const struct mvx_freq_table sky1_mvx_freq_table[] = { + {4, 486000, 150000000}, // 1080P@60 + {4, 972000, 300000000}, // 1080P@120 + {4, 1458000, 480000000}, // 1080P@180 + {4, 2073600, 600000000}, // 4K@60 + {4, 4147200, 800000000}, // 4K@120 + {4, 8294400, 1200000000}, // 8K@60 +}; + +static struct mvx_dev_ctx *client_ops_to_ctx(struct mvx_client_ops *client) +{ + return container_of(client, struct mvx_dev_ctx, client_ops); +} + +static void get_hw_ver(struct mvx_client_ops *client, + struct mvx_hw_ver *hw_ver) +{ + struct mvx_dev_ctx *ctx = client_ops_to_ctx(client); + + if (IS_ERR_OR_NULL(hw_ver)) { + MVX_LOG_PRINT(&mvx_log_dev, MVX_LOG_ERROR, "hw_ver pointer is invalid."); + return; + } + mvx_hwreg_get_hw_ver(&ctx->hwreg, hw_ver); +} + +static void get_formats(struct mvx_client_ops *client, + enum 
mvx_direction direction, + uint64_t *formats) +{ + struct mvx_dev_ctx *ctx = client_ops_to_ctx(client); + uint32_t fuses; + *formats = 0; + + ctx->hwreg.ops.get_formats(direction, formats); + + /* Remove formats based on fuses. */ + fuses = mvx_hwreg_get_fuse(&ctx->hwreg); + + if (fuses & MVX_HWREG_FUSE_DISABLE_AFBC) { + mvx_clear_bit(MVX_FORMAT_YUV420_AFBC_8, formats); + mvx_clear_bit(MVX_FORMAT_YUV420_AFBC_10, formats); + mvx_clear_bit(MVX_FORMAT_YUV422_AFBC_8, formats); + mvx_clear_bit(MVX_FORMAT_YUV422_AFBC_10, formats); + mvx_clear_bit(MVX_FORMAT_Y_AFBC_8, formats); + mvx_clear_bit(MVX_FORMAT_Y_AFBC_10, formats); + } + + if (fuses & MVX_HWREG_FUSE_DISABLE_REAL) + mvx_clear_bit(MVX_FORMAT_RV, formats); + + if (fuses & MVX_HWREG_FUSE_DISABLE_VPX) { + mvx_clear_bit(MVX_FORMAT_VP8, formats); + mvx_clear_bit(MVX_FORMAT_VP9, formats); + } + + if (fuses & MVX_HWREG_FUSE_DISABLE_HEVC) + mvx_clear_bit(MVX_FORMAT_HEVC, formats); +} + +static unsigned int get_core_mask(struct mvx_client_ops *client) +{ + struct mvx_dev_ctx *ctx = client_ops_to_ctx(client); + + return mvx_hwreg_get_core_mask(&ctx->hwreg); +} + + +static int update_freq(struct mvx_dev_ctx *ctx) +{ + int active_ncores; + unsigned long mbs_per_sec; + struct mvx_freq_table highest = sky1_mvx_freq_table[ARRAY_SIZE(sky1_mvx_freq_table) - 1]; + int freq = 0; + int ret; + int i, j; + + ret = mvx_sched_calculate_load(&ctx->scheduler, &mbs_per_sec); + if (ret != 0) + return ret; + + if (mbs_per_sec > highest.load) { + freq = highest.freq; + } else { + active_ncores = mvx_hwreg_get_ncores(&ctx->hwreg); + for (i = 0; i < ARRAY_SIZE(sky1_mvx_freq_table); i++) { + if (sky1_mvx_freq_table[i].load >= mbs_per_sec) { + freq = sky1_mvx_freq_table[i].freq * sky1_mvx_freq_table[i].cores / active_ncores; + if (active_ncores == sky1_mvx_freq_table[i].cores) + break; + if (freq > highest.freq) { + freq = highest.freq; + break; + } + for (j = 0; j < ARRAY_SIZE(sky1_mvx_freq_table); j++) { + if (freq <= 
sky1_mvx_freq_table[j].freq) { + freq = sky1_mvx_freq_table[j].freq; + break; + } + } + break; + } + } + } + + ctx->target_freq = freq; + + return ret; +} + +static int update_load(struct mvx_client_session *csession) +{ + struct mvx_dev_ctx *ctx = csession->ctx; + int ret; + + if (disable_dfs) + return 0; + + ret = update_freq(ctx); + if (ret != 0) + return ret; + + if(!mutex_trylock(&ctx->devfreq->lock)) + return ret; + + ret = update_devfreq(ctx->devfreq); + mutex_unlock(&ctx->devfreq->lock); + + return ret; +} + +static struct mvx_client_session *register_session( + struct mvx_client_ops *client, + struct mvx_if_session *isession) +{ + struct mvx_dev_ctx *ctx = client_ops_to_ctx(client); + struct mvx_client_session *csession; + int ret; + + csession = devm_kzalloc(ctx->dev, sizeof(*csession), GFP_KERNEL); + if (csession == NULL) + return ERR_PTR(-ENOMEM); + + csession->ctx = ctx; + + ret = mvx_pm_runtime_get_sync(ctx->dev); + if (ret < 0) + goto free_session; + + ret = mvx_sched_session_construct(&csession->session, isession); + if (ret != 0) + goto runtime_put; + + ret = mvx_sched_add_session(&ctx->scheduler, &csession->session.session); + if (ret != 0) + goto destruct_session; + update_load(csession); + + MVX_LOG_PRINT(&mvx_log_dev, MVX_LOG_INFO, + "Register client session. csession=0x%px, isession=0x%px.", + csession, isession); + + return csession; + +destruct_session: + mvx_sched_session_destruct(&csession->session); +runtime_put: + mvx_pm_runtime_put_sync(csession->ctx->dev); +free_session: + devm_kfree(ctx->dev, csession); + + return ERR_PTR(ret); +} + +static void unregister_session(struct mvx_client_session *csession) +{ + MVX_LOG_PRINT(&mvx_log_dev, MVX_LOG_INFO, + "%px Unregister client session. 
csession=0x%px.", + mvx_if_session_to_session(csession->session.isession), + csession); + + mvx_sched_remove_session(&csession->ctx->scheduler, &csession->session.session); + mvx_sched_terminate(&csession->ctx->scheduler, &csession->session); + mvx_sched_session_destruct(&csession->session); + update_load(csession); + + mvx_pm_runtime_put_sync(csession->ctx->dev); + + devm_kfree(csession->ctx->dev, csession); +} + +static int switch_in(struct mvx_client_session *csession) +{ + struct mvx_dev_ctx *ctx = csession->ctx; + int ret; + + ret = mvx_sched_switch_in(&ctx->scheduler, &csession->session); + + return ret; +} + +static int switch_out_rsp(struct mvx_client_session *csession) +{ + return mvx_sched_switch_out_rsp(&csession->ctx->scheduler, &csession->session); +} + +static void terminate(struct mvx_client_session *csession) +{ + return mvx_sched_terminate(&csession->ctx->scheduler, &csession->session); +} + +static void reset_priority(struct mvx_client_session *csession) +{ + return mvx_sched_reset_priority(&csession->ctx->scheduler, &csession->session); +} + +static int send_irq(struct mvx_client_session *csession) +{ + struct mvx_dev_ctx *ctx = csession->ctx; + int ret; + + ret = mvx_sched_send_irq(&ctx->scheduler, &csession->session); + + return ret; +} + +int soft_irq(struct mvx_client_session *csession) +{ + struct mvx_dev_ctx *ctx = csession->ctx; + int ret; + + MVX_LOG_PRINT(&mvx_log_dev, MVX_LOG_INFO, + "%px soft trigger irq. 
csession=0x%px.", + mvx_if_session_to_session(csession->session.isession), + csession); + + ret = mvx_sched_trigger_irq(&ctx->scheduler, &csession->session); + + return ret; +} + +static int flush_mmu(struct mvx_client_session *csession) +{ + struct mvx_dev_ctx *ctx = csession->ctx; + int ret; + + ret = mvx_sched_flush_mmu(&ctx->scheduler, &csession->session); + + return ret; +} + +static void print_debug(struct mvx_client_session *csession) +{ + struct mvx_dev_ctx *ctx = csession->ctx; + + mvx_sched_print_debug(&ctx->scheduler, &csession->session); +} + +static struct mvx_dev_ctx *work_to_ctx(struct work_struct *work) +{ + return container_of(work, struct mvx_dev_ctx, work); +} + +/** + * irq_bottom() - Handle IRQ bottom. + * @work: Work struct that is part of the context structure. + * + * This function is called from a work queue and id doing the actual work of + * handling the interrupt. + */ +static void irq_bottom(struct work_struct *work) +{ + struct mvx_dev_ctx *ctx = work_to_ctx(work); + uint32_t nlsid; + uint32_t i; + + nlsid = mvx_hwreg_get_nlsid(&ctx->hwreg); + for (i = 0; i < nlsid; i++) + if (test_and_clear_bit(i, &ctx->irqve)) + mvx_sched_handle_irq(&ctx->scheduler, i); +} + +/** + * irq_top() - Handle IRQ top. + * @irq: IRQ number. + * @dev_id: Pointer to context. + * + * This function is called in interrupt context. It should be short and must not + * block. + * + * Return: IRQ status if the IRQ was handled or not. 
+ */ +static irqreturn_t irq_top(int irq, + void *dev_id) +{ + struct mvx_dev_ctx *ctx = dev_id; + uint32_t nlsid; + uint32_t irqve; + int ret = IRQ_NONE; + + nlsid = mvx_hwreg_get_nlsid(&ctx->hwreg); + irqve = mvx_hwreg_read(&ctx->hwreg, MVX_HWREG_IRQVE); + while (nlsid-- > 0) + if ((irqve >> nlsid) & 0x1) { + mvx_hwreg_write_lsid(&ctx->hwreg, + nlsid, + MVX_HWREG_LIRQVE, + 0); + mb(); + set_bit(nlsid, &ctx->irqve); + ret = IRQ_HANDLED; + } + + queue_work(ctx->work_queue, &ctx->work); + + return ret; +} + +static int mvx_devfreq_target(struct device *dev, unsigned long *freq, u32 flags) +{ + struct mvx_dev_ctx *ctx = dev_get_drvdata(dev); + struct dev_pm_opp *opp; + unsigned long pre_freq; + unsigned long target_freq = *freq; + int ret; + + opp = devfreq_recommended_opp(dev, freq, flags); + if (IS_ERR(opp)) { + dev_err(dev, "Failed to get recommended opp instance\n"); + ret = PTR_ERR(opp); + return ret; + } + dev_pm_opp_put(opp); + pre_freq = scmi_device_get_freq(ctx->opp_pmdomain); + ret = scmi_device_set_freq(ctx->opp_pmdomain, *freq); + atomic_set(&mvx_log_perf.freq, *freq); + + MVX_LOG_PRINT(&mvx_log_dev, MVX_LOG_DEBUG, "%s() target=%ld, previous=%ld, current=%ld.", + __func__, target_freq, pre_freq, *freq); + + return ret; +} + +static int mvx_devfreq_get_cur_freq(struct device *dev, unsigned long *freq) +{ + struct mvx_dev_ctx *ctx = dev_get_drvdata(dev); + + *freq = scmi_device_get_freq(ctx->opp_pmdomain); + MVX_LOG_PRINT(&mvx_log_dev, MVX_LOG_DEBUG, "%s() %ld", __func__, *freq); + + return 0; +} + +static int mvx_devfreq_get_dev_status(struct device *dev, + struct devfreq_dev_status *stat) +{ + struct mvx_dev_ctx *ctx = dev_get_drvdata(dev); + + update_freq(ctx); + stat->current_frequency = scmi_device_get_freq(ctx->opp_pmdomain); + stat->busy_time = ctx->target_freq; + stat->total_time = stat->current_frequency; + + return 0; +} + +static int mvx_devfreq_init(struct mvx_dev_ctx *ctx) +{ + struct dev_pm_opp *opp; + struct devfreq_dev_profile *profile; + 
unsigned long freq; + int opp_count; + struct devfreq_simple_ondemand_data *ondemand_data; + int i; + int ret; + + MVX_LOG_PRINT(&mvx_log_dev, MVX_LOG_INFO, "%s()", __func__); + + if (disable_dfs) + return 0; + + if (!ctx) + return -EINVAL; + + ondemand_data = devm_kzalloc(ctx->dev, sizeof(*ondemand_data), GFP_KERNEL); + if (!ondemand_data) + return -ENOMEM; + + profile = &ctx->devfreq_profile; + +#ifdef CONFIG_ARM_SCMI_SUPPORT_DT_ACPI + ctx->opp_pmdomain = fwnode_dev_pm_domain_attach_by_name(ctx->dev, "perf"); +#else + ctx->opp_pmdomain = dev_pm_domain_attach_by_name(ctx->dev, "perf"); +#endif + + if (IS_ERR_OR_NULL(ctx->opp_pmdomain)) { + MVX_LOG_PRINT(&mvx_log_dev, MVX_LOG_ERROR, "Failed to get perf domain"); + return -EFAULT; + } + ctx->opp_dl = device_link_add(ctx->dev, ctx->opp_pmdomain, + DL_FLAG_RPM_ACTIVE | + DL_FLAG_PM_RUNTIME | + DL_FLAG_STATELESS); + if (IS_ERR_OR_NULL(ctx->opp_dl)) { + ret = -ENODEV; + goto detach_opp; + } + + /* Add opps to opp power domain. */ + ret = scmi_device_opp_table_parse(ctx->opp_pmdomain); + if (ret) { + MVX_LOG_PRINT(&mvx_log_dev, MVX_LOG_ERROR, "Failed to add opps to the device"); + ret = -ENODEV; + goto unlink_opp; + } + opp_count = dev_pm_opp_get_opp_count(ctx->opp_pmdomain); + if (opp_count <= 0) { + MVX_LOG_PRINT(&mvx_log_dev, MVX_LOG_ERROR, "Failed to get opps count."); + ret = -EINVAL; + goto unlink_opp; + } + profile->freq_table = kmalloc_array(opp_count, sizeof(unsigned long), GFP_KERNEL); + for (i = 0, freq = 0; i < opp_count; i++, freq++) { + opp = dev_pm_opp_find_freq_ceil(ctx->opp_pmdomain, &freq); + if (IS_ERR(opp)) + break; + dev_pm_opp_put(opp); + profile->freq_table[i] = freq; + + /* Add opps to ctx->dev, since register devfreq device as ctx->dev */ + ret = dev_pm_opp_add(ctx->dev, freq, 0); + if (ret) { + MVX_LOG_PRINT(&mvx_log_dev, MVX_LOG_ERROR, "Failed to add opp %lu Hz", freq); + while (i-- > 0) { + dev_pm_opp_remove(ctx->dev, profile->freq_table[i]); + } + ret = -ENODEV; + goto free_table; + } + } + + 
profile->max_state = i; + profile->polling_ms = 100; + profile->target = mvx_devfreq_target; + profile->get_dev_status = mvx_devfreq_get_dev_status; + profile->get_cur_freq = mvx_devfreq_get_cur_freq; + ondemand_data->downdifferential = 1; + ondemand_data->upthreshold = 100; + ctx->devfreq = devm_devfreq_add_device(ctx->dev, profile, DEVFREQ_GOV_SIMPLE_ONDEMAND, ondemand_data); + if (IS_ERR(ctx->devfreq)) { + MVX_LOG_PRINT(&mvx_log_dev, MVX_LOG_ERROR, "Failed to add devfreq device"); + ret = PTR_ERR(ctx->devfreq); + goto remove_table; + } + + ret = devm_devfreq_register_opp_notifier(ctx->dev, ctx->devfreq); + if (ret < 0) { + MVX_LOG_PRINT(&mvx_log_dev, MVX_LOG_ERROR, "Failed to register opp notifier"); + goto remove_device; + } + + return ret; + +remove_device: + devm_devfreq_remove_device(ctx->dev, ctx->devfreq); + ctx->devfreq = NULL; +remove_table: + dev_pm_opp_remove_table(ctx->dev); + profile->max_state = 0; +free_table: + kfree(profile->freq_table); + profile->freq_table = NULL; +unlink_opp: + device_link_del(ctx->opp_dl); + ctx->opp_dl = NULL; +detach_opp: + dev_pm_domain_detach(ctx->opp_pmdomain, true); + devm_kfree(ctx->dev, ondemand_data); + + return ret; +} + +static int mvx_devfreq_remove(struct mvx_dev_ctx *ctx) +{ + MVX_LOG_PRINT(&mvx_log_dev, MVX_LOG_INFO, "%s()", __func__); + + if (disable_dfs) + return 0; + + if (ctx->devfreq) { + devm_devfreq_unregister_opp_notifier(ctx->dev, ctx->devfreq); + devm_kfree(ctx->dev, ctx->devfreq->data); + devm_devfreq_remove_device(ctx->dev, ctx->devfreq); + ctx->devfreq = NULL; + } + if (ctx->devfreq_profile.max_state > 0) { + dev_pm_opp_remove_table(ctx->dev); + ctx->devfreq_profile.max_state = 0; + } + if (ctx->devfreq_profile.freq_table) { + kfree(ctx->devfreq_profile.freq_table); + ctx->devfreq_profile.freq_table = NULL; + } + if (ctx->opp_dl) { + device_link_del(ctx->opp_dl); + ctx->opp_dl = NULL; + } + dev_pm_domain_detach(ctx->opp_pmdomain, true); + + return 0; +} + +#ifdef CONFIG_ACPI +#if 
VPU_CORE_ACPI_REF_POWERSOURCE +static void acpi_dev_pm_detach(struct device *dev, bool power_off) +{ + dev_pm_domain_set(dev, NULL); +} + +static struct dev_pm_domain acpi_vpu_pm_domain = { + .ops = { + SET_RUNTIME_PM_OPS(acpi_subsys_runtime_suspend, acpi_subsys_runtime_resume, NULL) +#ifdef CONFIG_PM_SLEEP + SET_SYSTEM_SLEEP_PM_OPS(acpi_subsys_runtime_suspend, acpi_subsys_runtime_resume) +#endif + }, + .detach = acpi_dev_pm_detach +}; +#endif +#endif + +static int mvx_dev_probe(struct device *dev, + struct resource *rcsu_res, + struct resource *res, + int irq) +{ + struct mvx_dev_ctx *ctx; + int ret; + int i; + struct device *pd; +#ifdef CONFIG_ACPI + struct fwnode_handle *child; +#endif + + /* Create device context and store pointer in device private data. */ + ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL); + if (ctx == NULL) + return -EINVAL; + + ctx->dev = dev; + dev_set_drvdata(dev, ctx); + + /* Setup client ops callbacks. */ + ctx->client_ops.get_hw_ver = get_hw_ver; + ctx->client_ops.get_formats = get_formats; + ctx->client_ops.get_core_mask = get_core_mask; + ctx->client_ops.register_session = register_session; + ctx->client_ops.unregister_session = unregister_session; + ctx->client_ops.switch_in = switch_in; + ctx->client_ops.switch_out_rsp = switch_out_rsp; + ctx->client_ops.send_irq = send_irq; + ctx->client_ops.soft_irq = soft_irq; + ctx->client_ops.flush_mmu = flush_mmu; + ctx->client_ops.print_debug = print_debug; + ctx->client_ops.update_load = update_load; + ctx->client_ops.terminate = terminate; + ctx->client_ops.reset_priority = reset_priority; + + /* Create if context. 
*/ + ctx->if_ops = mvx_if_create(dev, &ctx->client_ops, ctx); + if (IS_ERR(ctx->if_ops)) { + ret = -EINVAL; + goto free_ctx; + } + + /* Create debugfs entry */ + if (IS_ENABLED(CONFIG_DEBUG_FS)) { + char name[20]; + + scnprintf(name, sizeof(name), "%s%u", MVX_DEV_NAME, dev->id); + ctx->dentry = debugfs_create_dir(name, NULL); + if (IS_ERR_OR_NULL(ctx->dentry)) { + ret = -EINVAL; + goto destroy_if; + } + } + + /* Construct hw register context. */ + ret = mvx_hwreg_construct(&ctx->hwreg, dev, rcsu_res, res, ctx->dentry); + if (ret != 0) + goto destruct_dentry; + + ctx->clk = devm_clk_get_optional(dev, MVE_CLK_NAME); + if (IS_ERR_OR_NULL(ctx->clk)) { + MVX_LOG_PRINT(&mvx_log_dev, MVX_LOG_ERROR, "Failed to get clock."); + ret = -EFAULT; + goto destruct_hwreg; + } + ctx->rstc = devm_reset_control_get(dev, MVE_RST_NAME); + if (IS_ERR(ctx->rstc)) { + MVX_LOG_PRINT(&mvx_log_dev, MVX_LOG_ERROR, "Failed to get reset_control, %s.", + MVE_RST_NAME); + ret = -EFAULT; + goto destruct_hwreg; + } + + /* Request IRQ handler. */ + ctx->irq = irq; + irq_set_status_flags(ctx->irq, IRQ_DISABLE_UNLAZY); + ret = request_irq(ctx->irq, irq_top, IRQF_SHARED, dev_name(dev), ctx); + if (ret != 0) { + MVX_LOG_PRINT(&mvx_log_dev, MVX_LOG_ERROR, + "Failed to request IRQ. 
irq=%u, ret=%d.", + ctx->irq, ret); + goto destruct_hwreg; + } + disable_irq(ctx->irq); + + if (has_acpi_companion(dev)) { +#ifdef CONFIG_ACPI + ctx->pmdomains[0] = dev; + i = 1; + fwnode_for_each_child_node(dev->fwnode, child) { + if (is_acpi_data_node(child)) { + continue; + } + if (!strncmp(acpi_device_bid(to_acpi_device_node(child)), + VPU_CORE_ACPI_NAME_PREFIX, ACPI_NAMESEG_SIZE - 1)) { + if (i >= MVX_MAX_NUMBER_OF_PMDOMAINS) { + MVX_LOG_PRINT(&mvx_log_dev, MVX_LOG_ERROR, "pmDomains more than limits, Num:limits=[%d:%d].", + i + 1, MVX_MAX_NUMBER_OF_PMDOMAINS); + ret = -EFAULT; + goto irq_free; + } + ACPI_COMPANION_SET(&(to_acpi_device_node(child)->dev), to_acpi_device_node(child)); + ctx->pmdomains[i] = &(to_acpi_device_node(child)->dev); + to_acpi_device_node(child)->power.flags.ignore_parent = 1; +#if VPU_CORE_ACPI_REF_POWERSOURCE + pm_runtime_enable(ctx->pmdomains[i]); + dev_pm_domain_set(ctx->pmdomains[i], &acpi_vpu_pm_domain); +#endif + i++; + } + } + ctx->pmdomains_cnt = i; +#endif + } else { + ctx->pmdomains_cnt = of_count_phandle_with_args(dev->of_node, "power-domains", "#power-domain-cells"); + /*Ignore the latest opp_pmdomain which handled by devfreq */ + if (ctx->pmdomains_cnt > 1) + ctx->pmdomains_cnt -= 1; + if (ctx->pmdomains_cnt < 0 || ctx->pmdomains_cnt > MVX_MAX_NUMBER_OF_PMDOMAINS) { + MVX_LOG_PRINT(&mvx_log_dev, MVX_LOG_ERROR, "Failed to get pmdomains count %d", + ctx->pmdomains_cnt); + ret = -EFAULT; + goto irq_free; + } + for (i = 0; i < ctx->pmdomains_cnt; i++) { + pd = dev_pm_domain_attach_by_name(dev, vpu_pmdomains[i]); + if (IS_ERR_OR_NULL(pd)) { + ret = -EFAULT; + goto irq_free; + } + ctx->pmdomains[i] = pd; + } + } + pm_runtime_set_autosuspend_delay(ctx->pmdomains[0], 1000); + pm_runtime_use_autosuspend(ctx->pmdomains[0]); + + pm_runtime_enable(dev); + ret = pm_runtime_resume_and_get(dev); + if (ret) { + pm_runtime_set_suspended(dev); + goto runtime_disable; + } + + ret = mvx_sched_construct(&ctx->scheduler, dev, ctx->if_ops, + 
&ctx->hwreg, ctx->dentry); + if (ret != 0) + goto runtime_put; + + /* Create work queue for IRQ handler. */ + ctx->work_queue = alloc_workqueue(dev_name(dev), WQ_UNBOUND, 1); + if (ctx->work_queue == NULL) { + MVX_LOG_PRINT(&mvx_log_dev, MVX_LOG_ERROR, + "Failed to create work queue."); + ret = -EINVAL; + goto destruct_sched; + } + + INIT_WORK(&ctx->work, irq_bottom); + + ret = mvx_devfreq_init(ctx); + if (ret) + goto workqueue_destroy; + + if (mvx_hwreg_get_core_mask(&ctx->hwreg) <= 0) { + MVX_LOG_PRINT(&mvx_log_dev, MVX_LOG_ERROR, "No vpu cores available"); + ret = -ENODEV; + goto devfreq_remove; + } + + MVX_LOG_PRINT(&mvx_log_dev, MVX_LOG_WARNING, + "Linlon v%x identified. cores=%u, nlsid=%u, id=%u.", + mvx_hwreg_get_hw_id(&ctx->hwreg), + mvx_hwreg_get_ncores(&ctx->hwreg), + mvx_hwreg_get_nlsid(&ctx->hwreg), + dev->id); + + mvx_pm_runtime_put_sync(ctx->dev); + return 0; + +devfreq_remove: + mvx_devfreq_remove(ctx); + +workqueue_destroy: + destroy_workqueue(ctx->work_queue); + +destruct_sched: + mvx_sched_destruct(&ctx->scheduler); + +runtime_put: + pm_runtime_put_sync(dev); + +runtime_disable: + if (has_acpi_companion(dev)) { +#if VPU_CORE_ACPI_REF_POWERSOURCE + for (i = 1; i < ctx->pmdomains_cnt; i++) { + pm_runtime_disable(ctx->pmdomains[i]); + } +#endif + } + pm_runtime_disable(dev); + + for (i = 0; i < ctx->pmdomains_cnt; i++) + dev_pm_domain_detach(ctx->pmdomains[i], true); + +irq_free: + free_irq(ctx->irq, ctx); + +destruct_hwreg: + mvx_hwreg_destruct(&ctx->hwreg); + +destruct_dentry: + if (IS_ENABLED(CONFIG_DEBUG_FS)) + debugfs_remove_recursive(ctx->dentry); + +destroy_if: + mvx_if_destroy(ctx->if_ops); + +free_ctx: + devm_kfree(dev, ctx); + return ret; +} + +static int mvx_dev_remove(struct mvx_dev_ctx *ctx) +{ + int i; + + free_irq(ctx->irq, ctx); + mvx_devfreq_remove(ctx); + if (has_acpi_companion(ctx->dev)) { +#if VPU_CORE_ACPI_REF_POWERSOURCE + for (i = 1; i < ctx->pmdomains_cnt; i++) { + pm_runtime_disable(ctx->pmdomains[i]); + } +#endif + } + 
pm_runtime_disable(ctx->dev); + for (i = 0; i < ctx->pmdomains_cnt; i++) + dev_pm_domain_detach(ctx->pmdomains[i], true); + + MVX_LOG_PRINT(&mvx_log_dev, MVX_LOG_INFO, "remove"); + mvx_if_destroy(ctx->if_ops); + destroy_workqueue(ctx->work_queue); + mvx_sched_destruct(&ctx->scheduler); + mvx_hwreg_destruct(&ctx->hwreg); + dev_set_drvdata(ctx->dev, NULL); + if (IS_ENABLED(CONFIG_DEBUG_FS)) + debugfs_remove_recursive(ctx->dentry); + devm_kfree(ctx->dev, ctx); + return 0; +} + +/**************************************************************************** + * Platform driver + ****************************************************************************/ + +static int mvx_pdev_probe(struct platform_device *pdev) +{ + struct resource *rcsu_res = NULL; + struct resource *res = NULL; + int irq = 0; + + MVX_LOG_PRINT(&mvx_log_dev, MVX_LOG_INFO, "probe"); + rcsu_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + res = platform_get_resource(pdev, IORESOURCE_MEM, 1); + if (IS_ERR_OR_NULL(rcsu_res) || IS_ERR_OR_NULL(res)) { + MVX_LOG_PRINT(&mvx_log_dev, MVX_LOG_ERROR, "Failed to get address of resource."); + return -ENXIO; + } + irq = platform_get_irq(pdev, 0); + if (irq < 0) { + MVX_LOG_PRINT(&mvx_log_dev, MVX_LOG_ERROR, "Failed to get IRQ resource."); + return -ENXIO; + } + + return mvx_dev_probe(&pdev->dev, rcsu_res, res, irq); +} + +static int mvx_pdev_remove(struct platform_device *pdev) +{ + struct mvx_dev_ctx *ctx = platform_get_drvdata(pdev); + + MVX_LOG_PRINT(&mvx_log_dev, MVX_LOG_INFO, "pdev remove"); + + return mvx_dev_remove(ctx); +} + +static int mvx_hw_init(struct device *dev) +{ + struct mvx_dev_ctx *ctx = dev_get_drvdata(dev); + + if (IS_ERR_OR_NULL(ctx->hwreg.dev)) + return 0; + + MVX_LOG_PRINT(&mvx_log_dev, MVX_LOG_INFO, "hardware init"); + mvx_hwreg_write(&ctx->hwreg, MVX_HWREG_BUSCTRL, + busctrl_ref << MVE_BUSTCTRL_REF_SHIFT | + busctrl_split); + return 0; +} + +static int mvx_switch_enpwoff(struct mvx_dev_ctx *ctx, bool enable) +{ + uint32_t reg; + 
uint32_t val; + uint32_t core_mask; + + core_mask = mvx_hwreg_get_core_mask(&ctx->hwreg); + if (enable) + val = MVX_RCSU_HWREG_ENPWOFF_MASK; + else + val = ~core_mask & MVX_RCSU_HWREG_ENPWOFF_MASK; + reg = mvx_hwreg_read_rcsu(&ctx->hwreg, MVX_RCSU_HWREG_STRAP_PIN0); + reg = (reg & ((1 << MVX_RCSU_HWREG_ENPWOFF_SHIFT) - 1)) | (val << MVX_RCSU_HWREG_ENPWOFF_SHIFT); + mvx_hwreg_write_rcsu(&ctx->hwreg, MVX_RCSU_HWREG_STRAP_PIN0, reg); + + /* MVE soft reset. */ + mvx_hwreg_write(&ctx->hwreg, MVX_HWREG_RESET, 1); + /* Clear CLKFORCE, then vpu can automatically power off core if ENPWOFF is enable. */ + if (mvx_log_perf.enabled & MVX_LOG_PERF_UTILIZATION) { + MVX_LOG_PRINT(&mvx_log_dev, MVX_LOG_INFO, + "Force enable core scheduler clock for performance profiling."); + mvx_hwreg_write(&ctx->hwreg, MVX_HWREG_CLKFORCE, 1 << MVE_CLKFORCE_SCHED_CLK_SHIFT); + if (!enable && mvx_log_perf.drain && mvx_log_perf.drain->reset) + mvx_log_perf.drain->reset(mvx_log_perf.drain); + } else { + mvx_hwreg_write(&ctx->hwreg, MVX_HWREG_CLKFORCE, 0); + } + + return 0; +} + +static int mvx_switch_qchannel_clock_gating(struct mvx_dev_ctx *ctx, bool enable) +{ + uint32_t reg; + + reg = mvx_hwreg_read_rcsu(&ctx->hwreg, MVX_RCSU_HWREG_PGCTRL); + reg = enable ? 
(reg | MVX_RCSU_HWREG_CLOCK_QCHANNEL_ENABLE) : + (reg & ~(MVX_RCSU_HWREG_CLOCK_QCHANNEL_ENABLE)); + mvx_hwreg_write_rcsu(&ctx->hwreg, MVX_RCSU_HWREG_PGCTRL, reg); + + MVX_LOG_PRINT(&mvx_log_dev, MVX_LOG_INFO, "%s enable=%d", + __func__, enable); + + return 0; +} + +#ifdef CONFIG_PM +static int mvx_pm_runtime_suspend(struct device *dev) +{ + int ret = 0; + struct mvx_dev_ctx *ctx = dev_get_drvdata(dev); + int i; + uint64_t mask = mvx_hwreg_get_core_mask(&ctx->hwreg) << 1; + + MVX_LOG_PRINT(&mvx_log_dev, MVX_LOG_INFO, "mvx_pm_runtime_suspend"); + + mvx_if_flush_work(ctx->if_ops); + ret = mvx_sched_suspend(&ctx->scheduler); + disable_irq(ctx->irq); + + /** + * There could be called by unregister_session() from irq_bottom() -- ctx->work, + * hence, do not use cancel_work_sync() to avoid deadlock, and cancle_work() + * is safe in this case. + */ + if (current->flags & PF_WQ_WORKER) + cancel_work(&ctx->work); + else + cancel_work_sync(&ctx->work); + mvx_sched_cancel_work(&ctx->scheduler); + + mvx_switch_qchannel_clock_gating(ctx, false); + + if (ctx->hwreg.hw_ver.svn_revision == MVE_SVN_ENPWOFF) { + /* Ensure enpwoff take effect, hw reset is needed. 
*/ + reset_control_assert(ctx->rstc); + usleep_range(10, 20); + reset_control_deassert(ctx->rstc); + mvx_switch_enpwoff(ctx, true); + } + + if (!IS_ERR_OR_NULL(ctx->clk)) + clk_disable_unprepare(ctx->clk); + + for (i = 1; i < ctx->pmdomains_cnt; i++) { + if (mvx_test_bit(i, &mask)) { + if (has_acpi_companion(ctx->pmdomains[i])) { +#if VPU_CORE_ACPI_REF_POWERSOURCE + pm_runtime_put_sync(ctx->pmdomains[i]); +#endif + } + else { + pm_runtime_put_sync(ctx->pmdomains[i]); + } + } + } + + if (!has_acpi_companion(ctx->pmdomains[0])) { + pm_runtime_put_autosuspend(ctx->pmdomains[0]); + } + + return ret; +} + +static int mvx_pm_runtime_resume(struct device *dev) +{ + int ret = 0; + struct mvx_dev_ctx *ctx = dev_get_drvdata(dev); + int i; + uint64_t mask; + + MVX_LOG_PRINT(&mvx_log_dev, MVX_LOG_INFO, "mvx_pm_runtime_resume"); + if (!has_acpi_companion(dev)) { + pm_runtime_get_sync(ctx->pmdomains[0]); + } + + ret = clk_prepare_enable(ctx->clk); + if (ret) { + MVX_LOG_PRINT(&mvx_log_dev, MVX_LOG_ERROR, "Failed to enable clock, %d.", ret); + return ret; + } + + enable_irq(ctx->irq); + + reset_control_assert(ctx->rstc); + usleep_range(10, 20); + reset_control_deassert(ctx->rstc); + + mvx_switch_qchannel_clock_gating(ctx, true); + + /*Initialize hwreg when vpu_top power on for the first time. */ + if (ctx->hwreg.hw_ver.revision == 0) { + ret = mvx_hwreg_init(&ctx->hwreg); + if (ret) + return ret; + } + + mask = mvx_hwreg_get_core_mask(&ctx->hwreg) << 1; + for (i = 1; i < ctx->pmdomains_cnt; i++) { + if (mvx_test_bit(i, &mask)) { + if (has_acpi_companion(ctx->pmdomains[i])) { +#if VPU_CORE_ACPI_REF_POWERSOURCE + pm_runtime_get_sync(ctx->pmdomains[i]); +#else + acpi_evaluate_object(to_acpi_device(ctx->pmdomains[i])->handle, + VPU_CORE_ACPI_MEMREPAIR_FUNC, NULL, NULL); +#endif + } else { + pm_runtime_get_sync(ctx->pmdomains[i]); + } + } + } + + if (ctx->hwreg.hw_ver.svn_revision == MVE_SVN_ENPWOFF) { + /* + * Memory repair is needed after each power on. 
VPU core will be powered off when hwreset is asserted
mvx_pci_probe(struct pci_dev *pdev, + const struct pci_device_id *id) +{ + static unsigned int dev_id; + + pdev->dev.id = dev_id++; + return mvx_dev_probe(&pdev->dev, NULL, &pdev->resource[1], pdev->irq); +} + +static void mvx_pci_remove(struct pci_dev *pdev) +{ + struct mvx_dev_ctx *ctx = pci_get_drvdata(pdev); + + mvx_dev_remove(ctx); +} + +static struct pci_device_id mvx_pci_device_id[] = { + { PCI_DEVICE(MVX_PCI_VENDOR, + MVX_PCI_DEVICE) }, + { 0, } +}; + +MODULE_DEVICE_TABLE(pci, mvx_pci_device_id); + +static struct pci_driver mvx_pci_driver = { + .name = MVX_DEV_NAME, + .id_table = mvx_pci_device_id, + .probe = mvx_pci_probe, + .remove = mvx_pci_remove +}; +/* LCOV_EXCL_STOP */ + +/**************************************************************************** + * Exported variables and functions + ****************************************************************************/ + +int mvx_dev_init(void) +{ + int ret; + + ret = platform_driver_register(&mvx_dev_driver); + if (ret != 0) { + pr_err("mvx_dev: Failed to register driver.\n"); + return ret; + } + + /* LCOV_EXCL_START */ + ret = pci_register_driver(&mvx_pci_driver); + if (ret != 0) { + pr_err("mvx_dev: Failed to register PCI driver.\n"); + goto unregister_driver; + } + + /* LCOV_EXCL_STOP */ + + return 0; + +unregister_driver: + platform_driver_unregister(&mvx_dev_driver); /* LCOV_EXCL_LINE */ + + return ret; +} + +void mvx_dev_exit(void) +{ + pci_unregister_driver(&mvx_pci_driver); /* LCOV_EXCL_LINE */ + platform_driver_unregister(&mvx_dev_driver); +} diff --git a/drivers/media/platform/cix/cix_vpu/dev/mvx_dev.h b/drivers/media/platform/cix/cix_vpu/dev/mvx_dev.h new file mode 100755 index 000000000000..70ddc959e0f3 --- /dev/null +++ b/drivers/media/platform/cix/cix_vpu/dev/mvx_dev.h @@ -0,0 +1,49 @@ +/* + * The confidential and proprietary information contained in this file may + * only be used by a person authorised under and to the extent permitted + * by a subsisting licensing agreement from Arm 
Technology (China) Co., Ltd. + * + * (C) COPYRIGHT 2021-2021 Arm Technology (China) Co., Ltd. + * ALL RIGHTS RESERVED + * + * This entire notice must be reproduced on all copies of this file + * and copies of this file may only be made by a person if such person is + * permitted to do so under the terms of a subsisting license agreement + * from Arm Technology (China) Co., Ltd. + * + * SPDX-License-Identifier: GPL-2.0-only + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + * + */ + +#ifndef _MVX_DEV_H_ +#define _MVX_DEV_H_ + +/**************************************************************************** + * Exported functions + ****************************************************************************/ + +/** + * mvx_dev_init() - Initialize the dev device. + */ +int mvx_dev_init(void); + +/** + * mvx_dev_exit() - Remove and exit the dev device. 
+ */ +void mvx_dev_exit(void); + +#endif /* _MVX_DEV_H_ */ diff --git a/drivers/media/platform/cix/cix_vpu/dev/mvx_hwreg.c b/drivers/media/platform/cix/cix_vpu/dev/mvx_hwreg.c new file mode 100755 index 000000000000..a78958b22c83 --- /dev/null +++ b/drivers/media/platform/cix/cix_vpu/dev/mvx_hwreg.c @@ -0,0 +1,594 @@ +/* + * The confidential and proprietary information contained in this file may + * only be used by a person authorised under and to the extent permitted + * by a subsisting licensing agreement from Arm Technology (China) Co., Ltd. + * + * (C) COPYRIGHT 2021-2021 Arm Technology (China) Co., Ltd. + * ALL RIGHTS RESERVED + * + * This entire notice must be reproduced on all copies of this file + * and copies of this file may only be made by a person if such person is + * permitted to do so under the terms of a subsisting license agreement + * from Arm Technology (China) Co., Ltd. + * + * SPDX-License-Identifier: GPL-2.0-only + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ * + */ + +/**************************************************************************** + * Includes + ****************************************************************************/ + +#include +#include +#include +#include "mvx_log_group.h" +#include "mvx_hwreg.h" +#include "mvx_hwreg_v500.h" +#include "mvx_hwreg_v550.h" +#include "mvx_hwreg_v61.h" +#include "mvx_hwreg_v52_v76.h" +#include "mvx_pm_runtime.h" + +static uint hw_ncores = MVX_NUMBER_OF_CORES; +module_param(hw_ncores, uint, 0660); + +/**************************************************************************** + * Static functions + ****************************************************************************/ + +static unsigned int get_offset(enum mvx_hwreg_what what) +{ + switch (what) { + case MVX_HWREG_HARDWARE_ID: + return 0x0; + case MVX_HWREG_ENABLE: + return 0x4; + case MVX_HWREG_NCORES: + return 0x8; + case MVX_HWREG_NLSID: + return 0xc; + case MVX_HWREG_CORELSID: + return 0x10; + case MVX_HWREG_JOBQUEUE: + return 0x14; + case MVX_HWREG_IRQVE: + return 0x18; + case MVX_HWREG_CLKFORCE: + return 0x24; + case MVX_HWREG_SVNREV: + return 0x30; + case MVX_HWREG_FUSE: + return 0x34; + case MVX_HWREG_PROTCTRL: + return 0x40; + case MVX_HWREG_BUSCTRL: + return 0x44; + case MVX_HWREG_RESET: + return 0x50; + default: + return 0; + } +} + +static unsigned int get_lsid_offset(unsigned int lsid, + enum mvx_hwreg_lsid what) +{ + unsigned int offset = 0x0200 + 0x40 * lsid; + + switch (what) { + case MVX_HWREG_CTRL: + offset += 0x0; + break; + case MVX_HWREG_MMU_CTRL: + offset += 0x4; + break; + case MVX_HWREG_NPROT: + offset += 0x8; + break; + case MVX_HWREG_ALLOC: + offset += 0xc; + break; + case MVX_HWREG_FLUSH_ALL: + offset += 0x10; + break; + case MVX_HWREG_SCHED: + offset += 0x14; + break; + case MVX_HWREG_TERMINATE: + offset += 0x18; + break; + case MVX_HWREG_LIRQVE: + offset += 0x1c; + break; + case MVX_HWREG_IRQHOST: + offset += 0x20; + break; + case MVX_HWREG_INTSIG: + offset += 0x24; + break; + case 
MVX_HWREG_STREAMID: + offset += 0x2c; + break; + case MVX_HWREG_BUSATTR_0: + offset += 0x30; + break; + case MVX_HWREG_BUSATTR_1: + offset += 0x34; + break; + case MVX_HWREG_BUSATTR_2: + offset += 0x38; + break; + case MVX_HWREG_BUSATTR_3: + offset += 0x3c; + break; + default: + return 0; + } + + return offset; +} + +static unsigned int get_rcsu_offset(enum mvx_rcsu_hwreg_what what) +{ + switch (what) { + case MVX_RCSU_HWREG_PGCTRL: + return 0x21c; + case MVX_RCSU_HWREG_STRAP_PIN0: + return 0x300; + case MVX_RCSU_HWREG_STRAP_PIN2: + return 0x308; + default: + return 0; + } +} + +static int mvx_hwreg_hw_ver_construct(struct mvx_hwreg *hwreg) +{ + uint32_t value; + + value = readl(hwreg->registers); + + switch (value >> 16) { + case 0x5650: + hwreg->hw_ver.id = MVE_v500; + break; + case 0x5655: + hwreg->hw_ver.id = MVE_v550; + break; + case 0x5660: + case 0x5661: + hwreg->hw_ver.id = MVE_v61; + break; + case 0x5662: + case 0x5663: + case 0x5664: + hwreg->hw_ver.id = MVE_v52_v76; + break; + default: + MVX_LOG_PRINT(&mvx_log_dev, MVX_LOG_ERROR, + "Unknown hardware version. 
version=0x%08x.", + value); + return -EINVAL; + } + + hwreg->hw_ver.revision = (value >> 8) & 0xff; + hwreg->hw_ver.patch = value & 0xff; + hwreg->hw_ver.svn_revision = mvx_hwreg_read(hwreg, MVX_HWREG_SVNREV); + + return 0; +} + +static int regs_show(struct seq_file *s, + void *v) +{ + struct mvx_hwreg *hwreg = (struct mvx_hwreg *)s->private; + int ret; + + ret = mvx_pm_runtime_get_sync(hwreg->dev); + if (ret < 0) + return 0; + + seq_printf(s, "HARDWARE_ID = 0x%08x\n", + mvx_hwreg_read(hwreg, MVX_HWREG_HARDWARE_ID)); + seq_printf(s, "ENABLE = 0x%08x\n", + mvx_hwreg_read(hwreg, MVX_HWREG_ENABLE)); + seq_printf(s, "NCORES = 0x%08x\n", + mvx_hwreg_read(hwreg, MVX_HWREG_NCORES)); + seq_printf(s, "NLSID = 0x%08x\n", + mvx_hwreg_read(hwreg, MVX_HWREG_NLSID)); + seq_printf(s, "CORELSID = 0x%08x\n", + mvx_hwreg_read(hwreg, MVX_HWREG_CORELSID)); + seq_printf(s, "JOBQUEUE = 0x%08x\n", + mvx_hwreg_read(hwreg, MVX_HWREG_JOBQUEUE)); + seq_printf(s, "IRQVE = 0x%08x\n", + mvx_hwreg_read(hwreg, MVX_HWREG_IRQVE)); + seq_printf(s, "CLKFORCE = 0x%08x\n", + mvx_hwreg_read(hwreg, MVX_HWREG_CLKFORCE)); + seq_printf(s, "SVNREV = 0x%08x\n", + mvx_hwreg_read(hwreg, MVX_HWREG_SVNREV)); + seq_printf(s, "FUSE = 0x%08x\n", + mvx_hwreg_read(hwreg, MVX_HWREG_FUSE)); + seq_printf(s, "PROTCTRL = 0x%08x\n", + mvx_hwreg_read(hwreg, MVX_HWREG_PROTCTRL)); + seq_printf(s, "RESET = 0x%08x\n", + mvx_hwreg_read(hwreg, MVX_HWREG_RESET)); + seq_puts(s, "\n"); + + mvx_pm_runtime_put_sync(hwreg->dev); + + return 0; +} + +static int regs_open(struct inode *inode, + struct file *file) +{ + return single_open(file, regs_show, inode->i_private); +} + +static const struct file_operations regs_fops = { + .open = regs_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release +}; + +static int regs_debugfs_init(struct mvx_hwreg *hwreg, + struct dentry *parent) +{ + struct dentry *dentry; + + dentry = debugfs_create_file("regs", 0400, parent, hwreg, + ®s_fops); + if (IS_ERR_OR_NULL(dentry)) + return 
-ENOMEM; + + return 0; +} + +static int lsid_regs_show(struct seq_file *s, + void *v) +{ + struct mvx_lsid_hwreg *lsid_hwreg = (struct mvx_lsid_hwreg *)s->private; + struct mvx_hwreg *hwreg = lsid_hwreg->hwreg; + int lsid = lsid_hwreg->lsid; + int ret; + + ret = mvx_pm_runtime_get_sync(hwreg->dev); + if (ret < 0) + return 0; + + seq_printf(s, "CTRL = 0x%08x\n", + mvx_hwreg_read_lsid(hwreg, lsid, MVX_HWREG_CTRL)); + seq_printf(s, "MMU_CTRL = 0x%08x\n", + mvx_hwreg_read_lsid(hwreg, lsid, MVX_HWREG_MMU_CTRL)); + seq_printf(s, "NPROT = 0x%08x\n", + mvx_hwreg_read_lsid(hwreg, lsid, MVX_HWREG_NPROT)); + seq_printf(s, "ALLOC = 0x%08x\n", + mvx_hwreg_read_lsid(hwreg, lsid, MVX_HWREG_ALLOC)); + seq_printf(s, "FLUSH_ALL = 0x%08x\n", + mvx_hwreg_read_lsid(hwreg, lsid, MVX_HWREG_FLUSH_ALL)); + seq_printf(s, "SCHED = 0x%08x\n", + mvx_hwreg_read_lsid(hwreg, lsid, MVX_HWREG_SCHED)); + seq_printf(s, "TERMINATE = 0x%08x\n", + mvx_hwreg_read_lsid(hwreg, lsid, MVX_HWREG_TERMINATE)); + seq_printf(s, "LIRQVE = 0x%08x\n", + mvx_hwreg_read_lsid(hwreg, lsid, MVX_HWREG_LIRQVE)); + seq_printf(s, "IRQHOST = 0x%08x\n", + mvx_hwreg_read_lsid(hwreg, lsid, MVX_HWREG_IRQHOST)); + seq_printf(s, "INTSIG = 0x%08x\n", + mvx_hwreg_read_lsid(hwreg, lsid, MVX_HWREG_INTSIG)); + seq_printf(s, "STREAMID = 0x%08x\n", + mvx_hwreg_read_lsid(hwreg, lsid, MVX_HWREG_STREAMID)); + seq_printf(s, "BUSATTR_0 = 0x%08x\n", + mvx_hwreg_read_lsid(hwreg, lsid, MVX_HWREG_BUSATTR_0)); + seq_printf(s, "BUSATTR_1 = 0x%08x\n", + mvx_hwreg_read_lsid(hwreg, lsid, MVX_HWREG_BUSATTR_1)); + seq_printf(s, "BUSATTR_2 = 0x%08x\n", + mvx_hwreg_read_lsid(hwreg, lsid, MVX_HWREG_BUSATTR_2)); + seq_printf(s, "BUSATTR_3 = 0x%08x\n", + mvx_hwreg_read_lsid(hwreg, lsid, MVX_HWREG_BUSATTR_3)); + seq_puts(s, "\n"); + + mvx_pm_runtime_put_sync(hwreg->dev); + + return 0; +} + +static int lsid_regs_open(struct inode *inode, + struct file *file) +{ + return single_open(file, lsid_regs_show, inode->i_private); +} + +static const struct 
file_operations lsid_regs_fops = { + .open = lsid_regs_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release +}; + +static int lsid_regs_debugfs_init(struct mvx_lsid_hwreg *lsid_hwreg, + struct dentry *parent) +{ + struct dentry *dentry; + char name[20]; + + scnprintf(name, sizeof(name), "lsid%u_regs", lsid_hwreg->lsid); + + dentry = debugfs_create_file(name, 0400, parent, lsid_hwreg, + &lsid_regs_fops); + if (IS_ERR_OR_NULL(dentry)) + return -ENOMEM; + + return 0; +} + +int debugfs_init(struct mvx_hwreg *hwreg, + struct dentry *parent) +{ + int ret; + + if (IS_ENABLED(CONFIG_DEBUG_FS)) { + int lsid; + + ret = regs_debugfs_init(hwreg, parent); + if (ret != 0) + return ret; + + for (lsid = 0; lsid < MVX_LSID_MAX; ++lsid) { + ret = lsid_regs_debugfs_init(&hwreg->lsid_hwreg[lsid], + parent); + if (ret != 0) + return ret; + } + } + + return 0; +} + +static int mvx_hwreg_ops_init(struct mvx_hwreg *hwreg) +{ + enum mvx_hw_id hw_id; + + hw_id = mvx_hwreg_get_hw_id(hwreg); + switch (hw_id) { + case MVE_v500: + hwreg->ops.get_formats = mvx_hwreg_get_formats_v500; + break; + case MVE_v550: + hwreg->ops.get_formats = mvx_hwreg_get_formats_v550; + break; + case MVE_v61: + hwreg->ops.get_formats = mvx_hwreg_get_formats_v61; + break; + case MVE_v52_v76: + hwreg->ops.get_formats = mvx_hwreg_get_formats_v52_v76; + break; + default: + return -EINVAL; + } + + return 0; +} + +static int mvx_hwreg_verify_core_mask(struct mvx_hwreg *hwreg) +{ + uint32_t bit; + uint32_t core_mask = 0; + uint32_t ncores = 0; + long unsigned int mask = (long unsigned int)hwreg->core_mask; + int active_ncores = mvx_hwreg_get_ncores(hwreg); + + /* Make sure # of cores in mask doesn't exceed # of active cores*/ + for_each_set_bit(bit, &mask, hwreg->ncores) { + core_mask |= (1 << bit); + if (++ncores >= active_ncores) + break; + } + return core_mask; +} + +/**************************************************************************** + * Exported functions + 
****************************************************************************/ + +int mvx_hwreg_construct(struct mvx_hwreg *hwreg, + struct device *dev, + struct resource *rcsu_res, + struct resource *res, + struct dentry *parent) +{ + char const *name = dev_name(dev); + int ret; + int lsid; + + hwreg->dev = dev; + + hwreg->rcsu_res = request_mem_region(rcsu_res->start, resource_size(rcsu_res), name); + if (hwreg->rcsu_res == NULL) { + MVX_LOG_PRINT(&mvx_log_dev, MVX_LOG_ERROR, + "Failed to request rcsu mem region. start=0x%llx, size=0x%llx.", + rcsu_res->start, resource_size(rcsu_res)); + return -EINVAL; + } + + hwreg->rcsu_registers = ioremap(rcsu_res->start, resource_size(rcsu_res)); + if (hwreg->rcsu_registers == NULL) { + MVX_LOG_PRINT(&mvx_log_dev, MVX_LOG_ERROR, + "Failed to iomap region. start=0x%llx, size=0x%llx.", + rcsu_res->start, resource_size(rcsu_res)); + ret = -ENOMEM; + goto release_rcsu_mem; + } + + hwreg->res = request_mem_region(res->start, resource_size(res), name); + if (hwreg->res == NULL) { + MVX_LOG_PRINT(&mvx_log_dev, MVX_LOG_ERROR, + "Failed to request mem region. start=0x%llx, size=0x%llx.", + res->start, resource_size(res)); + ret = -ENOMEM; + goto unmap_rcsu_io; + } + + hwreg->registers = ioremap(res->start, resource_size(res)); + if (hwreg->registers == NULL) { + MVX_LOG_PRINT(&mvx_log_dev, MVX_LOG_ERROR, + "Failed to iomap region. 
start=0x%llx, size=0x%llx.", + res->start, resource_size(res)); + ret = -ENOMEM; + goto release_mem; + } + + for (lsid = 0; lsid < MVX_LSID_MAX; ++lsid) { + hwreg->lsid_hwreg[lsid].hwreg = hwreg; + hwreg->lsid_hwreg[lsid].lsid = lsid; + } + + if (IS_ENABLED(CONFIG_DEBUG_FS)) { + ret = debugfs_init(hwreg, parent); + if (ret != 0) + goto unmap_io; + } + + init_waitqueue_head(&hwreg->wait_queue); + + return 0; + +unmap_io: + iounmap(hwreg->registers); +release_mem: + release_mem_region(res->start, resource_size(res)); +unmap_rcsu_io: + iounmap(hwreg->rcsu_registers); +release_rcsu_mem: + release_mem_region(rcsu_res->start, resource_size(rcsu_res)); + + return ret; +} + +void mvx_hwreg_destruct(struct mvx_hwreg *hwreg) +{ + iounmap(hwreg->rcsu_registers); + release_mem_region(hwreg->rcsu_res->start, resource_size(hwreg->rcsu_res)); + iounmap(hwreg->registers); + release_mem_region(hwreg->res->start, resource_size(hwreg->res)); +} + +uint32_t mvx_hwreg_read(struct mvx_hwreg *hwreg, + enum mvx_hwreg_what what) +{ + unsigned int offset = get_offset(what); + + return readl(hwreg->registers + offset); +} + +void mvx_hwreg_write(struct mvx_hwreg *hwreg, + enum mvx_hwreg_what what, + uint32_t value) +{ + unsigned int offset = get_offset(what); + + writel(value, hwreg->registers + offset); +} + +uint32_t mvx_hwreg_read_lsid(struct mvx_hwreg *hwreg, + unsigned int lsid, + enum mvx_hwreg_lsid what) +{ + unsigned int offset = get_lsid_offset(lsid, what); + + return readl(hwreg->registers + offset); +} + +void mvx_hwreg_write_lsid(struct mvx_hwreg *hwreg, + unsigned int lsid, + enum mvx_hwreg_lsid what, + uint32_t value) +{ + unsigned int offset = get_lsid_offset(lsid, what); + + writel(value, hwreg->registers + offset); +} + +uint32_t mvx_hwreg_read_rcsu(struct mvx_hwreg *hwreg, + enum mvx_rcsu_hwreg_what what) +{ + unsigned int offset = get_rcsu_offset(what); + + return readl(hwreg->rcsu_registers + offset); +} + +void mvx_hwreg_write_rcsu(struct mvx_hwreg *hwreg, + enum 
mvx_rcsu_hwreg_what what, + uint32_t value) +{ + unsigned int offset = get_rcsu_offset(what); + + writel(value, hwreg->rcsu_registers + offset); +} + +enum mvx_hw_id mvx_hwreg_get_hw_id(struct mvx_hwreg *hwreg) +{ + return hwreg->hw_ver.id; +} + +int mvx_hwreg_init(struct mvx_hwreg *hwreg) +{ + int ret; + + ret = mvx_hwreg_hw_ver_construct(hwreg); + if (ret) + return ret; + hwreg->fuse = mvx_hwreg_read(hwreg, MVX_HWREG_FUSE); + hwreg->ncores = mvx_hwreg_read(hwreg, MVX_HWREG_NCORES); + hwreg->nlsid = mvx_hwreg_read(hwreg, MVX_HWREG_NLSID); + hwreg->core_mask = ((~mvx_hwreg_read_rcsu(hwreg, MVX_RCSU_HWREG_STRAP_PIN2)) + >> MVX_RCSU_HWREG_HARVESTING_SHIFT) & MVX_RCSU_HWREG_HARVESTING_MASK; + hwreg->core_mask = mvx_hwreg_verify_core_mask(hwreg); + return mvx_hwreg_ops_init(hwreg); +} + +void mvx_hwreg_get_hw_ver(struct mvx_hwreg *hwreg, struct mvx_hw_ver *hw_ver) +{ + *hw_ver = hwreg->hw_ver; +} + +uint32_t mvx_hwreg_get_fuse(struct mvx_hwreg *hwreg) +{ + return hwreg->fuse; +} + +uint32_t mvx_hwreg_get_ncores(struct mvx_hwreg *hwreg) +{ + if (hwreg->ncores > hw_ncores) { + MVX_LOG_PRINT(&mvx_log_dev, MVX_LOG_INFO, + "Downscale hw cores to %d.", hw_ncores); + return hw_ncores; + } + return hwreg->ncores; +} + +uint32_t mvx_hwreg_get_nlsid(struct mvx_hwreg *hwreg) +{ + return hwreg->nlsid; +} + +uint32_t mvx_hwreg_get_core_mask(struct mvx_hwreg *hwreg) +{ + return mvx_hwreg_verify_core_mask(hwreg); +} diff --git a/drivers/media/platform/cix/cix_vpu/dev/mvx_hwreg.h b/drivers/media/platform/cix/cix_vpu/dev/mvx_hwreg.h new file mode 100755 index 000000000000..728103af7f69 --- /dev/null +++ b/drivers/media/platform/cix/cix_vpu/dev/mvx_hwreg.h @@ -0,0 +1,383 @@ +/* + * The confidential and proprietary information contained in this file may + * only be used by a person authorised under and to the extent permitted + * by a subsisting licensing agreement from Arm Technology (China) Co., Ltd. + * + * (C) COPYRIGHT 2021-2021 Arm Technology (China) Co., Ltd. 
+ * ALL RIGHTS RESERVED + * + * This entire notice must be reproduced on all copies of this file + * and copies of this file may only be made by a person if such person is + * permitted to do so under the terms of a subsisting license agreement + * from Arm Technology (China) Co., Ltd. + * + * SPDX-License-Identifier: GPL-2.0-only + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ * + */ + +#ifndef _MVX_HW_REG_ +#define _MVX_HW_REG_ + +/**************************************************************************** + * Includes + ****************************************************************************/ + +#include +#include +#include +#include "mvx_if.h" +#include "mvx_lsid.h" + +/**************************************************************************** + * Defines + ****************************************************************************/ + +#define MVX_HWREG_FUSE_DISABLE_AFBC (1 << 0) +#define MVX_HWREG_FUSE_DISABLE_REAL (1 << 1) +#define MVX_HWREG_FUSE_DISABLE_VPX (1 << 2) +#define MVX_HWREG_FUSE_DISABLE_HEVC (1 << 3) + +#define MVE_JOBQUEUE_JOB_BITS 8 +#define MVE_JOBQUEUE_JOB_MASK ((1 << MVE_JOBQUEUE_JOB_BITS) - 1) +#define MVE_JOBQUEUE_JOB_INVALID 0xf +#define MVE_JOBQUEUE_JOB_OBSOLETED 0xff +#define MVE_JOBQUEUE_NJOBS 4 +#define MVE_JOBQUEUE_LSID_SHIFT 0 +#define MVE_JOBQUEUE_LSID_BITS 4 +#define MVE_JOBQUEUE_LSID_MASK ((1 << MVE_JOBQUEUE_LSID_BITS) - 1) +#define MVE_JOBQUEUE_NCORES_SHIFT 4 +#define MVE_JOBQUEUE_NCORES_BITS 4 + +#define MVE_CORELSID_LSID_BITS 4 +#define MVX_CORELSID_LSID_MASK ((1 << MVE_CORELSID_LSID_BITS) - 1) + +#define MVE_CTRL_DISALLOW_SHIFT 0 +#define MVE_CTRL_DISALLOW_BITS 8 +#define MVE_CTRL_DISALLOW_MASK ((1 << MVE_CTRL_DISALLOW_BITS) - 1) +#define MVE_CTRL_MAXCORES_SHIFT 8 +#define MVE_CTRL_MAXCORES_BITS 4 +#define MVE_CTRL_MAXCORES_MASK ((1 << MVE_CTRL_MAXCORES_BITS) - 1) + +#define MVE_ALLOC_FREE 0 +#define MVE_ALLOC_NON_PROTECTED 1 +#define MVE_ALLOC_PROTECTED 2 + +#define MVE_BUSTCTRL_REF_SHIFT 2 + +/* These bits reset to zero and should be zero for normal operation. 
*/ +#define MVE_CLKFORCE_CORE_CLK_SHIFT 0 +#define MVE_CLKFORCE_CORE_CLK_BITS 8 +#define MVE_CLKFORCE_SCHED_CLK_SHIFT 8 +#define MVE_CLKFORCE_SCHED_CLK_BITS 1 +#define MVE_CLKFORCE_CORE_PWR_SHIFT 9 +#define MVE_CLKFORCE_CORE_PWR_BITS 1 + +/* RCUS pgctrl register distribution */ +#define MVX_RCSU_HWREG_CLOCK_QCHANNEL_ENABLE (1 << 0) + +/* RCUS strap-pin0 register distribution */ +#define MVX_RCSU_HWREG_DISABLE_AFBC (1 << 0) +#define MVX_RCSU_HWREG_DISABLE_REAL (1 << 1) +#define MVX_RCSU_HWREG_DISABLE_VPX (1 << 2) +#define MVX_RCSU_HWREG_DISABLE_HEVC (1 << 3) +#define MVX_RCSU_HWREG_PSO_D_M_PWON_SHIFT 4 +#define MVX_RCSU_HWREG_PSO_D_M_PWON_BITS 8 +#define MVX_RCSU_HWREG_PSO_D_M_PWON_MASK ((1 << MVX_RCSU_HWREG_PSO_D_M_ASSERT_BITS) - 1) +#define MVX_RCSU_HWREG_PSO_ACK_ISO_PWON_SHIFT 12 +#define MVX_RCSU_HWREG_PSO_ACK_ISO_PWON_BITS 8 +#define MVX_RCSU_HWREG_PSO_ACK_ISO_PWON_MASK ((1 << MVX_RCSU_HWREG_PSO_ACK_ISO_PWON_BITS) - 1) +#define MVX_RCSU_HWREG_PSO_D_M_PWOFF_SHIFT 20 +#define MVX_RCSU_HWREG_PSO_D_M_PWOFF_BITS 8 +#define MVX_RCSU_HWREG_PSO_D_M_PWOFF_MASK ((1 << MVX_RCSU_HWREG_PSO_D_M_PWOFF_BITS) - 1) +#define MVX_RCSU_HWREG_ENPWOFF_SHIFT 28 +#define MVX_RCSU_HWREG_ENPWOFF_BITS 4 +#define MVX_RCSU_HWREG_ENPWOFF_MASK ((1 << MVX_RCSU_HWREG_ENPWOFF_BITS) - 1) + + +/* RCUS strap-pin2 register distribution */ +#define MVX_RCSU_HWREG_HARVESTING_SHIFT 4 +#define MVX_RCSU_HWREG_HARVESTING_BITS 4 +#define MVX_RCSU_HWREG_HARVESTING_MASK ((1 << MVX_RCSU_HWREG_HARVESTING_BITS) - 1) + +#define MVX_NUMBER_OF_LSID 4 +#define MVX_NUMBER_OF_CORES 4 + +/* Timeout(ms) of wait VPU clear terminate register */ +#define MVX_HWREG_TERMINATE_TIMEOUT 100 + +/**************************************************************************** + * Types + ****************************************************************************/ + +struct device; + +/** + * enum mvx_hwreg_what - Hardware registers that can be read or written. 
+ */ +enum mvx_hwreg_what { + MVX_HWREG_HARDWARE_ID, + MVX_HWREG_ENABLE, + MVX_HWREG_NCORES, + MVX_HWREG_NLSID, + MVX_HWREG_CORELSID, + MVX_HWREG_JOBQUEUE, + MVX_HWREG_IRQVE, + MVX_HWREG_CLKFORCE, + MVX_HWREG_SVNREV, + MVX_HWREG_FUSE, + MVX_HWREG_PROTCTRL, + MVX_HWREG_BUSCTRL, + MVX_HWREG_RESET, + MVX_HWREG_WHAT_MAX +}; + +/** + * enum mvx_rcsu_hwreg_what - Hardware rcsu registers that can be read or written. + */ +enum mvx_rcsu_hwreg_what { + MVX_RCSU_HWREG_PGCTRL, + MVX_RCSU_HWREG_STRAP_PIN0, + MVX_RCSU_HWREG_STRAP_PIN1, + MVX_RCSU_HWREG_STRAP_PIN2, + MVX_RCSU_HWREG_WHAT_MAX +}; + +/** + * enum mvx_hwreg_lsid - Hardware registers per LSID. + */ +enum mvx_hwreg_lsid { + MVX_HWREG_CTRL, + MVX_HWREG_MMU_CTRL, + MVX_HWREG_NPROT, + MVX_HWREG_ALLOC, + MVX_HWREG_FLUSH_ALL, + MVX_HWREG_SCHED, + MVX_HWREG_TERMINATE, + MVX_HWREG_LIRQVE, + MVX_HWREG_IRQHOST, + MVX_HWREG_INTSIG, + MVX_HWREG_STREAMID, + MVX_HWREG_BUSATTR_0, + MVX_HWREG_BUSATTR_1, + MVX_HWREG_BUSATTR_2, + MVX_HWREG_BUSATTR_3, + MVX_HWREG_LSID_MAX +}; + +/** + * enum mvx_busctrl_ref_type + * MVE_BUSTCTRL_REF_DEFAULT : 128-byte bursts are used for reference pixel data. + * MVE_BUSTCTRL_REF_MIX : a mix of 64-byte and 128-byte bursts are used for + * reference pixel data. + */ +enum mvx_busctrl_ref_type { + MVE_BUSTCTRL_REF_DEFAULT, + MVE_BUSTCTRL_REF_MIX +}; + +/** + * enum mvx_busctrl_split_type for configures how the video processor performs + * AXI burst accesses. + */ +enum mvx_busctrl_split_type { + MVE_BUSTCTRL_SPLIT_128, + MVE_BUSTCTRL_SPLIT_256, + MVE_BUSTCTRL_SPLIT_512 +}; + +struct mvx_hwreg; + +/** + * struct mvx_lsid_hwreg - Helper struct used for debugfs reading of lsid + * dependent registers. + */ +struct mvx_lsid_hwreg { + struct mvx_hwreg *hwreg; + unsigned int lsid; +}; + +/** + * struct mvx_hwreg - Context class for the hardware register interface. 
+ */ +struct mvx_hwreg { + struct device *dev; + struct resource *rcsu_res; + void *rcsu_registers; + struct resource *res; + void *registers; + struct mvx_lsid_hwreg lsid_hwreg[MVX_LSID_MAX]; + struct mvx_hw_ver hw_ver; + uint32_t fuse; + uint32_t ncores; + uint32_t nlsid; + uint32_t core_mask; + wait_queue_head_t wait_queue; + + struct { + void (*get_formats)(enum mvx_direction direction, + uint64_t *formats); + } ops; +}; + +/**************************************************************************** + * Exported functions + ****************************************************************************/ + +/** + * mvx_hwreg_construct() - Construct the hardware register object. + * @hwreg: Pointer to hwreg object. + * @dev: Pointer to device struct. + * @rcsu_res: RCSU memory resource. + * @res: Memory resource. + * @parent: Parent debugfs directory entry. + * + * Return: 0 on success, else error code. + */ +int mvx_hwreg_construct(struct mvx_hwreg *hwreg, + struct device *dev, + struct resource *rcsu_res, + struct resource *res, + struct dentry *parent); + +/** + * mvx_hwreg_init() - Initialize hardware register object. + * @hwreg: Pointer to hwreg object. + * + * Return: 0 on success, else error code. + */ +int mvx_hwreg_init(struct mvx_hwreg *hwreg); + +/** + * mvx_hwreg_destruct() - Destroy the hardware register object. + * @hwreg: Pointer to hwreg object. + */ +void mvx_hwreg_destruct(struct mvx_hwreg *hwreg); + +/** + * mvx_hwreg_read() - Read hardware register. + * @hwreg: Pointer to hwreg object. + * @what: Which register to read. + * + * Return: Value of register. + */ +uint32_t mvx_hwreg_read(struct mvx_hwreg *hwreg, + enum mvx_hwreg_what what); + +/** + * mvx_hwreg_write() - Write hardware register. + * @hwreg: Pointer to hwreg object. + * @what: Which register to write. + * @value: Value to write. 
+ */ +void mvx_hwreg_write(struct mvx_hwreg *hwreg, + enum mvx_hwreg_what what, + uint32_t value); + +/** + * mvx_hwreg_read_lsid() - Read LSID hardware register. + * @hwreg: Pointer to hwreg object. + * @lsid: LSID register index. + * @what: Which register to read. + * + * Return: Value of register. + */ +uint32_t mvx_hwreg_read_lsid(struct mvx_hwreg *hwreg, + unsigned int lsid, + enum mvx_hwreg_lsid what); + +/** + * mvx_hwreg_write_lsid() - Write LSID hardware register. + * @hwreg: Pointer to hwreg object. + * @lsid: LSID register index. + * @what: Which register to write. + * @value: Value to write. + */ +void mvx_hwreg_write_lsid(struct mvx_hwreg *hwreg, + unsigned int lsid, + enum mvx_hwreg_lsid what, + uint32_t value); + + +/** + * mvx_hwreg_read_rcsu() - Read rcsu hardware register. + * @hwreg: Pointer to rcsu hwreg object. + * @what: Which register to read. + * + * Return: Value of register. + */ +uint32_t mvx_hwreg_read_rcsu(struct mvx_hwreg *hwreg, + enum mvx_rcsu_hwreg_what what); + +/** + * mvx_hwreg_write_rcsu() - Write rcsu hardware register. + * @hwreg: Pointer to rcsu hwreg object. + * @what: Which register to write. + * @value: Value to write. + */ +void mvx_hwreg_write_rcsu(struct mvx_hwreg *hwreg, + enum mvx_rcsu_hwreg_what what, + uint32_t value); + +/** + * mvx_hwreg_get_hw_id() - Get hardware id. + * @hwreg: Pointer to hwreg object. + * + * Return: Hardware id. + */ +enum mvx_hw_id mvx_hwreg_get_hw_id(struct mvx_hwreg *hwreg); + +/** + * mvx_hwreg_get_hw_ver() - Get hardware id. + * @hwreg: Pointer to hwreg object. + * @hw_ver: Pointer to destination mvx_hw_ver object. + * + */ +void mvx_hwreg_get_hw_ver(struct mvx_hwreg *hwreg, struct mvx_hw_ver *hw_ver); + +/** + * mvx_hwreg_get_fuse() - Get hardware fuse. + * @hwreg: Pointer to hwreg object. + * + * Return: Value of hardware fuse. + */ +uint32_t mvx_hwreg_get_fuse(struct mvx_hwreg *hwreg); + +/** + * mvx_hwreg_get_ncores() - Get hardware core number. + * @hwreg: Pointer to hwreg object. 
+ * + * Return: Value of hardware core number. + */ +uint32_t mvx_hwreg_get_ncores(struct mvx_hwreg *hwreg); + +/** + * mvx_hwreg_get_nlsid() - Get hardware lsid number. + * @hwreg: Pointer to hwreg object. + * + * Return: Value of hardware lsid number. + */ +uint32_t mvx_hwreg_get_nlsid(struct mvx_hwreg *hwreg); + +/** + * mvx_hwreg_get_core_mask() - Get hardware core mask according harvesting. + * @hwreg: Pointer to hwreg object. + * + * Return: Value of hardware core mask. + */ +uint32_t mvx_hwreg_get_core_mask(struct mvx_hwreg *hwreg); + +#endif /* _MVX_HW_REG_ */ diff --git a/drivers/media/platform/cix/cix_vpu/dev/mvx_hwreg_v500.c b/drivers/media/platform/cix/cix_vpu/dev/mvx_hwreg_v500.c new file mode 100755 index 000000000000..195f8adc03d8 --- /dev/null +++ b/drivers/media/platform/cix/cix_vpu/dev/mvx_hwreg_v500.c @@ -0,0 +1,65 @@ +/* + * The confidential and proprietary information contained in this file may + * only be used by a person authorised under and to the extent permitted + * by a subsisting licensing agreement from Arm Technology (China) Co., Ltd. + * + * (C) COPYRIGHT 2021-2021 Arm Technology (China) Co., Ltd. + * ALL RIGHTS RESERVED + * + * This entire notice must be reproduced on all copies of this file + * and copies of this file may only be made by a person if such person is + * permitted to do so under the terms of a subsisting license agreement + * from Arm Technology (China) Co., Ltd. + * + * SPDX-License-Identifier: GPL-2.0-only + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ */
+
+/****************************************************************************
+ * Includes
+ ****************************************************************************/
+
+#include "mvx_bitops.h"
+#include "mvx_hwreg_v500.h"
+
+/****************************************************************************
+ * Exported functions
+ ****************************************************************************/
+
+/**
+ * mvx_hwreg_get_formats_v500() - Report formats supported by V500 hardware.
+ * @direction:	MVX_DIR_INPUT selects the input-port format set; any other
+ *		direction selects the output-port format set.
+ * @formats:	Bitmask indexed by enum mvx_format. Each supported format's
+ *		bit is set via mvx_set_bit(); bits already set are preserved.
+ *
+ * NOTE(review): input/output here are port directions, not decode/encode
+ * roles — confirm against callers in mvx_hwreg.c.
+ */
+void mvx_hwreg_get_formats_v500(enum mvx_direction direction,
+				uint64_t *formats)
+{
+	if (direction == MVX_DIR_INPUT) {
+		/* Compressed bitstream and raw YUV formats accepted on input. */
+		mvx_set_bit(MVX_FORMAT_H263, formats);
+		mvx_set_bit(MVX_FORMAT_H264, formats);
+		mvx_set_bit(MVX_FORMAT_MPEG2, formats);
+		mvx_set_bit(MVX_FORMAT_MPEG4, formats);
+		mvx_set_bit(MVX_FORMAT_RV, formats);
+		mvx_set_bit(MVX_FORMAT_VC1, formats);
+		mvx_set_bit(MVX_FORMAT_VP8, formats);
+		mvx_set_bit(MVX_FORMAT_YUV420_I420, formats);
+		mvx_set_bit(MVX_FORMAT_YUV420_NV12, formats);
+	} else {
+		/* Formats the V500 can produce on its output port. */
+		mvx_set_bit(MVX_FORMAT_H264, formats);
+		mvx_set_bit(MVX_FORMAT_HEVC, formats);
+		mvx_set_bit(MVX_FORMAT_VP8, formats);
+		mvx_set_bit(MVX_FORMAT_YUV420_AFBC_8, formats);
+		mvx_set_bit(MVX_FORMAT_YUV422_AFBC_8, formats);
+		mvx_set_bit(MVX_FORMAT_YUV420_I420, formats);
+		mvx_set_bit(MVX_FORMAT_YUV420_NV12, formats);
+	}
+}
diff --git a/drivers/media/platform/cix/cix_vpu/dev/mvx_hwreg_v500.h b/drivers/media/platform/cix/cix_vpu/dev/mvx_hwreg_v500.h
new file mode 100755
index 000000000000..8b442aff346c
--- /dev/null
+++ b/drivers/media/platform/cix/cix_vpu/dev/mvx_hwreg_v500.h
@@ -0,0 +1,48 @@
+/*
+ * The confidential and proprietary information contained in this file may
+ * only be used by a person authorised under and to the extent permitted
+ * by a subsisting licensing agreement from Arm Technology (China) Co., Ltd.
+ * + * (C) COPYRIGHT 2021-2021 Arm Technology (China) Co., Ltd. + * ALL RIGHTS RESERVED + * + * This entire notice must be reproduced on all copies of this file + * and copies of this file may only be made by a person if such person is + * permitted to do so under the terms of a subsisting license agreement + * from Arm Technology (China) Co., Ltd. + * + * SPDX-License-Identifier: GPL-2.0-only + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ * + */ + +#ifndef _MVX_HWREG_V500_H_ +#define _MVX_HWREG_V500_H_ + +/**************************************************************************** + * Includes + ****************************************************************************/ + +#include "mvx_if.h" + +/**************************************************************************** + * Exported functions + ****************************************************************************/ + +void mvx_hwreg_get_formats_v500(enum mvx_direction direction, + uint64_t *formats); + +#endif /* _MVX_HWREG_V500_H_ */ diff --git a/drivers/media/platform/cix/cix_vpu/dev/mvx_hwreg_v52_v76.c b/drivers/media/platform/cix/cix_vpu/dev/mvx_hwreg_v52_v76.c new file mode 100755 index 000000000000..50999544cb86 --- /dev/null +++ b/drivers/media/platform/cix/cix_vpu/dev/mvx_hwreg_v52_v76.c @@ -0,0 +1,120 @@ +/* + * The confidential and proprietary information contained in this file may + * only be used by a person authorised under and to the extent permitted + * by a subsisting licensing agreement from Arm Technology (China) Co., Ltd. + * + * (C) COPYRIGHT 2021-2021 Arm Technology (China) Co., Ltd. + * ALL RIGHTS RESERVED + * + * This entire notice must be reproduced on all copies of this file + * and copies of this file may only be made by a person if such person is + * permitted to do so under the terms of a subsisting license agreement + * from Arm Technology (China) Co., Ltd. + * + * SPDX-License-Identifier: GPL-2.0-only + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ */
+
+/****************************************************************************
+ * Includes
+ ****************************************************************************/
+
+#include <linux/types.h> /* NOTE(review): header name lost in extraction; reconstructed — confirm against original patch */
+#include "mvx_bitops.h"
+#include "mvx_hwreg_v52_v76.h"
+
+/****************************************************************************
+ * Exported functions
+ ****************************************************************************/
+
+/**
+ * mvx_hwreg_get_formats_v52_v76() - Report formats supported by V52/V76.
+ * @direction:	MVX_DIR_INPUT selects the input-port format set; any other
+ *		direction selects the output-port format set.
+ * @formats:	Bitmask indexed by enum mvx_format. Each supported format's
+ *		bit is set via mvx_set_bit(); bits already set are preserved.
+ */
+void mvx_hwreg_get_formats_v52_v76(enum mvx_direction direction,
+				   uint64_t *formats)
+{
+	if (direction == MVX_DIR_INPUT) {
+		/* Compressed bitstream formats accepted on input. */
+		mvx_set_bit(MVX_FORMAT_AVS, formats);
+		mvx_set_bit(MVX_FORMAT_AVS2, formats);
+		mvx_set_bit(MVX_FORMAT_H263, formats);
+		mvx_set_bit(MVX_FORMAT_H264, formats);
+		mvx_set_bit(MVX_FORMAT_HEVC, formats);
+		mvx_set_bit(MVX_FORMAT_JPEG, formats);
+		mvx_set_bit(MVX_FORMAT_MPEG2, formats);
+		mvx_set_bit(MVX_FORMAT_MPEG4, formats);
+		mvx_set_bit(MVX_FORMAT_RV, formats);
+		mvx_set_bit(MVX_FORMAT_VC1, formats);
+		mvx_set_bit(MVX_FORMAT_VP8, formats);
+		mvx_set_bit(MVX_FORMAT_VP9, formats);
+		mvx_set_bit(MVX_FORMAT_AV1, formats);
+		/* Raw AFBC and uncompressed pixel formats accepted on input. */
+		mvx_set_bit(MVX_FORMAT_YUV420_AFBC_8, formats);
+		mvx_set_bit(MVX_FORMAT_YUV420_AFBC_10, formats);
+		mvx_set_bit(MVX_FORMAT_YUV422_AFBC_8, formats);
+		mvx_set_bit(MVX_FORMAT_YUV422_AFBC_10, formats);
+		mvx_set_bit(MVX_FORMAT_YUV420_I420, formats);
+		mvx_set_bit(MVX_FORMAT_YUV420_NV12, formats);
+		mvx_set_bit(MVX_FORMAT_YUV420_NV21, formats);
+		mvx_set_bit(MVX_FORMAT_YUV420_P010, formats);
+		mvx_set_bit(MVX_FORMAT_YUV420_Y0L2, formats);
+		mvx_set_bit(MVX_FORMAT_YUV420_AQB1, formats);
+		mvx_set_bit(MVX_FORMAT_YUV422_YUY2, formats);
+		mvx_set_bit(MVX_FORMAT_YUV422_UYVY, formats);
+		mvx_set_bit(MVX_FORMAT_YUV422_Y210, formats);
+		mvx_set_bit(MVX_FORMAT_RGBA_8888, formats);
+		mvx_set_bit(MVX_FORMAT_BGRA_8888, formats);
+		mvx_set_bit(MVX_FORMAT_ARGB_8888, formats);
+		mvx_set_bit(MVX_FORMAT_ABGR_8888, formats);
+
+		mvx_set_bit(MVX_FORMAT_RGB_888_3P, formats);
+		mvx_set_bit(MVX_FORMAT_Y, formats);
+		mvx_set_bit(MVX_FORMAT_YUV444, formats);
+		mvx_set_bit(MVX_FORMAT_YUV420_I420_10, formats);
+		mvx_set_bit(MVX_FORMAT_YUV420_2P_10, formats);
+		mvx_set_bit(MVX_FORMAT_YUV422_1P_10, formats);
+		mvx_set_bit(MVX_FORMAT_Y_10, formats);
+		mvx_set_bit(MVX_FORMAT_YUV444_10, formats);
+
+	} else {
+		/* Formats the V52/V76 can produce on its output port. */
+		mvx_set_bit(MVX_FORMAT_H264, formats);
+		mvx_set_bit(MVX_FORMAT_HEVC, formats);
+		mvx_set_bit(MVX_FORMAT_JPEG, formats);
+		mvx_set_bit(MVX_FORMAT_VP8, formats);
+		mvx_set_bit(MVX_FORMAT_VP9, formats);
+		mvx_set_bit(MVX_FORMAT_YUV420_AFBC_8, formats);
+		mvx_set_bit(MVX_FORMAT_YUV420_AFBC_10, formats);
+		mvx_set_bit(MVX_FORMAT_YUV422_AFBC_8, formats);
+		mvx_set_bit(MVX_FORMAT_YUV422_AFBC_10, formats);
+		mvx_set_bit(MVX_FORMAT_Y_AFBC_8, formats);
+		mvx_set_bit(MVX_FORMAT_Y_AFBC_10, formats);
+		mvx_set_bit(MVX_FORMAT_YUV420_I420, formats);
+		mvx_set_bit(MVX_FORMAT_YUV420_NV12, formats);
+		mvx_set_bit(MVX_FORMAT_YUV420_NV21, formats);
+		mvx_set_bit(MVX_FORMAT_YUV420_P010, formats);
+		mvx_set_bit(MVX_FORMAT_YUV420_Y0L2, formats);
+		mvx_set_bit(MVX_FORMAT_YUV420_AQB1, formats);
+		mvx_set_bit(MVX_FORMAT_YUV422_YUY2, formats);
+		mvx_set_bit(MVX_FORMAT_YUV422_UYVY, formats);
+		mvx_set_bit(MVX_FORMAT_YUV422_Y210, formats);
+		mvx_set_bit(MVX_FORMAT_RGB_888, formats);
+		mvx_set_bit(MVX_FORMAT_BGR_888, formats);
+		mvx_set_bit(MVX_FORMAT_RGB_888_3P, formats);
+		mvx_set_bit(MVX_FORMAT_Y, formats);
+		mvx_set_bit(MVX_FORMAT_Y_10, formats);
+		mvx_set_bit(MVX_FORMAT_YUV444, formats);
+		mvx_set_bit(MVX_FORMAT_YUV444_10, formats);
+		mvx_set_bit(MVX_FORMAT_YUV420_2P_10, formats);
+		mvx_set_bit(MVX_FORMAT_YUV422_1P_10, formats);
+
+	}
+}
diff --git a/drivers/media/platform/cix/cix_vpu/dev/mvx_hwreg_v52_v76.h b/drivers/media/platform/cix/cix_vpu/dev/mvx_hwreg_v52_v76.h
new file mode
100755 index 000000000000..5bf066fcb829 --- /dev/null +++ b/drivers/media/platform/cix/cix_vpu/dev/mvx_hwreg_v52_v76.h @@ -0,0 +1,49 @@ +/* + * The confidential and proprietary information contained in this file may + * only be used by a person authorised under and to the extent permitted + * by a subsisting licensing agreement from Arm Technology (China) Co., Ltd. + * + * (C) COPYRIGHT 2021-2021 Arm Technology (China) Co., Ltd. + * ALL RIGHTS RESERVED + * + * This entire notice must be reproduced on all copies of this file + * and copies of this file may only be made by a person if such person is + * permitted to do so under the terms of a subsisting license agreement + * from Arm Technology (China) Co., Ltd. + * + * SPDX-License-Identifier: GPL-2.0-only + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ * + */ + +#ifndef _MVX_HWREG_V52_V76_H_ +#define _MVX_HWREG_V52_V76_H_ + +/**************************************************************************** + * Includes + ****************************************************************************/ + +#include "mvx_hwreg.h" +#include "mvx_if.h" + +/**************************************************************************** + * Exported functions + ****************************************************************************/ + +void mvx_hwreg_get_formats_v52_v76(enum mvx_direction direction, + uint64_t *formats); + +#endif /* _MVX_HWREG_V52_V76_H_ */ diff --git a/drivers/media/platform/cix/cix_vpu/dev/mvx_hwreg_v550.c b/drivers/media/platform/cix/cix_vpu/dev/mvx_hwreg_v550.c new file mode 100755 index 000000000000..46eda4410198 --- /dev/null +++ b/drivers/media/platform/cix/cix_vpu/dev/mvx_hwreg_v550.c @@ -0,0 +1,82 @@ +/* + * The confidential and proprietary information contained in this file may + * only be used by a person authorised under and to the extent permitted + * by a subsisting licensing agreement from Arm Technology (China) Co., Ltd. + * + * (C) COPYRIGHT 2021-2021 Arm Technology (China) Co., Ltd. + * ALL RIGHTS RESERVED + * + * This entire notice must be reproduced on all copies of this file + * and copies of this file may only be made by a person if such person is + * permitted to do so under the terms of a subsisting license agreement + * from Arm Technology (China) Co., Ltd. + * + * SPDX-License-Identifier: GPL-2.0-only + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ */
+
+/****************************************************************************
+ * Includes
+ ****************************************************************************/
+
+#include "mvx_bitops.h"
+#include "mvx_hwreg_v550.h"
+
+/****************************************************************************
+ * Exported functions
+ ****************************************************************************/
+
+/**
+ * mvx_hwreg_get_formats_v550() - Report formats supported by V550 hardware.
+ * @direction:	MVX_DIR_INPUT selects the input-port format set; any other
+ *		direction selects the output-port format set.
+ * @formats:	Bitmask indexed by enum mvx_format. Each supported format's
+ *		bit is set via mvx_set_bit(); bits already set are preserved.
+ */
+void mvx_hwreg_get_formats_v550(enum mvx_direction direction,
+				uint64_t *formats)
+{
+	if (direction == MVX_DIR_INPUT) {
+		/* Compressed bitstream and raw pixel formats accepted on input. */
+		mvx_set_bit(MVX_FORMAT_H263, formats);
+		mvx_set_bit(MVX_FORMAT_H264, formats);
+		mvx_set_bit(MVX_FORMAT_HEVC, formats);
+		mvx_set_bit(MVX_FORMAT_JPEG, formats);
+		mvx_set_bit(MVX_FORMAT_MPEG2, formats);
+		mvx_set_bit(MVX_FORMAT_MPEG4, formats);
+		mvx_set_bit(MVX_FORMAT_RV, formats);
+		mvx_set_bit(MVX_FORMAT_VC1, formats);
+		mvx_set_bit(MVX_FORMAT_VP8, formats);
+		mvx_set_bit(MVX_FORMAT_YUV420_I420, formats);
+		mvx_set_bit(MVX_FORMAT_YUV420_NV12, formats);
+		mvx_set_bit(MVX_FORMAT_YUV420_NV21, formats);
+		mvx_set_bit(MVX_FORMAT_YUV420_P010, formats);
+		mvx_set_bit(MVX_FORMAT_YUV420_Y0L2, formats);
+		mvx_set_bit(MVX_FORMAT_YUV420_AQB1, formats);
+		mvx_set_bit(MVX_FORMAT_YUV422_YUY2, formats);
+		mvx_set_bit(MVX_FORMAT_YUV422_UYVY, formats);
+		mvx_set_bit(MVX_FORMAT_YUV422_Y210, formats);
+	} else {
+		/* Formats the V550 can produce on its output port. */
+		mvx_set_bit(MVX_FORMAT_H264, formats);
+		mvx_set_bit(MVX_FORMAT_VP8, formats);
+		mvx_set_bit(MVX_FORMAT_YUV420_AFBC_8, formats);
+		mvx_set_bit(MVX_FORMAT_YUV420_AFBC_10, formats);
+		mvx_set_bit(MVX_FORMAT_YUV422_AFBC_8, formats);
+		mvx_set_bit(MVX_FORMAT_YUV422_AFBC_10, formats);
+		mvx_set_bit(MVX_FORMAT_YUV420_I420, formats);
+		mvx_set_bit(MVX_FORMAT_YUV420_NV12, formats);
+		mvx_set_bit(MVX_FORMAT_YUV420_NV21, formats);
+		mvx_set_bit(MVX_FORMAT_YUV420_P010, formats);
+		mvx_set_bit(MVX_FORMAT_YUV420_Y0L2, formats);
+		mvx_set_bit(MVX_FORMAT_YUV420_AQB1, formats);
+		mvx_set_bit(MVX_FORMAT_YUV422_YUY2, formats);
+		mvx_set_bit(MVX_FORMAT_YUV422_UYVY, formats);
+		mvx_set_bit(MVX_FORMAT_YUV422_Y210, formats);
+	}
+}
diff --git a/drivers/media/platform/cix/cix_vpu/dev/mvx_hwreg_v550.h b/drivers/media/platform/cix/cix_vpu/dev/mvx_hwreg_v550.h
new file mode 100755
index 000000000000..1133b8c340aa
--- /dev/null
+++ b/drivers/media/platform/cix/cix_vpu/dev/mvx_hwreg_v550.h
@@ -0,0 +1,48 @@
+/*
+ * The confidential and proprietary information contained in this file may
+ * only be used by a person authorised under and to the extent permitted
+ * by a subsisting licensing agreement from Arm Technology (China) Co., Ltd.
+ *
+ * (C) COPYRIGHT 2021-2021 Arm Technology (China) Co., Ltd.
+ * ALL RIGHTS RESERVED
+ *
+ * This entire notice must be reproduced on all copies of this file
+ * and copies of this file may only be made by a person if such person is
+ * permitted to do so under the terms of a subsisting license agreement
+ * from Arm Technology (China) Co., Ltd.
+ *
+ * SPDX-License-Identifier: GPL-2.0-only
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + * + */ + +#ifndef _MVX_HWREG_V550_H_ +#define _MVX_HWREG_V550_H_ + +/**************************************************************************** + * Includes + ****************************************************************************/ + +#include "mvx_if.h" + +/**************************************************************************** + * Exported functions + ****************************************************************************/ + +void mvx_hwreg_get_formats_v550(enum mvx_direction direction, + uint64_t *formats); + +#endif /* _MVX_HWREG_V550_H_ */ diff --git a/drivers/media/platform/cix/cix_vpu/dev/mvx_hwreg_v61.c b/drivers/media/platform/cix/cix_vpu/dev/mvx_hwreg_v61.c new file mode 100755 index 000000000000..408f686091ae --- /dev/null +++ b/drivers/media/platform/cix/cix_vpu/dev/mvx_hwreg_v61.c @@ -0,0 +1,95 @@ +/* + * The confidential and proprietary information contained in this file may + * only be used by a person authorised under and to the extent permitted + * by a subsisting licensing agreement from Arm Technology (China) Co., Ltd. + * + * (C) COPYRIGHT 2021-2021 Arm Technology (China) Co., Ltd. + * ALL RIGHTS RESERVED + * + * This entire notice must be reproduced on all copies of this file + * and copies of this file may only be made by a person if such person is + * permitted to do so under the terms of a subsisting license agreement + * from Arm Technology (China) Co., Ltd. + * + * SPDX-License-Identifier: GPL-2.0-only + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. 
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ */
+
+/****************************************************************************
+ * Includes
+ ****************************************************************************/
+
+#include <linux/types.h> /* NOTE(review): header name lost in extraction; reconstructed — confirm against original patch */
+#include "mvx_bitops.h"
+#include "mvx_hwreg_v61.h"
+
+/****************************************************************************
+ * Exported functions
+ ****************************************************************************/
+
+/**
+ * mvx_hwreg_get_formats_v61() - Report formats supported by V61 hardware.
+ * @direction:	MVX_DIR_INPUT selects the input-port format set; any other
+ *		direction selects the output-port format set.
+ * @formats:	Bitmask indexed by enum mvx_format. Each supported format's
+ *		bit is set via mvx_set_bit(); bits already set are preserved.
+ */
+void mvx_hwreg_get_formats_v61(enum mvx_direction direction,
+			       uint64_t *formats)
+{
+	if (direction == MVX_DIR_INPUT) {
+		/* Compressed bitstream formats accepted on input. */
+		mvx_set_bit(MVX_FORMAT_H263, formats);
+		mvx_set_bit(MVX_FORMAT_H264, formats);
+		mvx_set_bit(MVX_FORMAT_HEVC, formats);
+		mvx_set_bit(MVX_FORMAT_JPEG, formats);
+		mvx_set_bit(MVX_FORMAT_MPEG2, formats);
+		mvx_set_bit(MVX_FORMAT_MPEG4, formats);
+		mvx_set_bit(MVX_FORMAT_RV, formats);
+		mvx_set_bit(MVX_FORMAT_VC1, formats);
+		mvx_set_bit(MVX_FORMAT_VP8, formats);
+		mvx_set_bit(MVX_FORMAT_VP9, formats);
+		/* Raw AFBC and uncompressed pixel formats accepted on input. */
+		mvx_set_bit(MVX_FORMAT_YUV420_AFBC_8, formats);
+		mvx_set_bit(MVX_FORMAT_YUV420_AFBC_10, formats);
+		mvx_set_bit(MVX_FORMAT_YUV422_AFBC_8, formats);
+		mvx_set_bit(MVX_FORMAT_YUV422_AFBC_10, formats);
+		mvx_set_bit(MVX_FORMAT_YUV420_I420, formats);
+		mvx_set_bit(MVX_FORMAT_YUV420_NV12, formats);
+		mvx_set_bit(MVX_FORMAT_YUV420_NV21, formats);
+		mvx_set_bit(MVX_FORMAT_YUV420_P010, formats);
+		mvx_set_bit(MVX_FORMAT_YUV420_Y0L2, formats);
+		mvx_set_bit(MVX_FORMAT_YUV420_AQB1, formats);
+		mvx_set_bit(MVX_FORMAT_YUV422_YUY2, formats);
+		mvx_set_bit(MVX_FORMAT_YUV422_UYVY, formats);
+		mvx_set_bit(MVX_FORMAT_YUV422_Y210, formats);
+		mvx_set_bit(MVX_FORMAT_RGBA_8888, formats);
+		mvx_set_bit(MVX_FORMAT_BGRA_8888, formats);
+		mvx_set_bit(MVX_FORMAT_ARGB_8888, formats);
+		mvx_set_bit(MVX_FORMAT_ABGR_8888, formats);
+	} else {
+		/* Formats the V61 can produce on its output port. */
+		mvx_set_bit(MVX_FORMAT_H264, formats);
+		mvx_set_bit(MVX_FORMAT_HEVC, formats);
+		mvx_set_bit(MVX_FORMAT_JPEG, formats);
+		mvx_set_bit(MVX_FORMAT_VP8, formats);
+		mvx_set_bit(MVX_FORMAT_VP9, formats);
+		mvx_set_bit(MVX_FORMAT_YUV420_AFBC_8, formats);
+		mvx_set_bit(MVX_FORMAT_YUV420_AFBC_10, formats);
+		mvx_set_bit(MVX_FORMAT_YUV422_AFBC_8, formats);
+		mvx_set_bit(MVX_FORMAT_YUV422_AFBC_10, formats);
+		mvx_set_bit(MVX_FORMAT_YUV420_I420, formats);
+		mvx_set_bit(MVX_FORMAT_YUV420_NV12, formats);
+		mvx_set_bit(MVX_FORMAT_YUV420_NV21, formats);
+		mvx_set_bit(MVX_FORMAT_YUV420_P010, formats);
+		mvx_set_bit(MVX_FORMAT_YUV420_Y0L2, formats);
+		mvx_set_bit(MVX_FORMAT_YUV420_AQB1, formats);
+		mvx_set_bit(MVX_FORMAT_YUV422_YUY2, formats);
+		mvx_set_bit(MVX_FORMAT_YUV422_UYVY, formats);
+		mvx_set_bit(MVX_FORMAT_YUV422_Y210, formats);
+	}
+}
diff --git a/drivers/media/platform/cix/cix_vpu/dev/mvx_hwreg_v61.h b/drivers/media/platform/cix/cix_vpu/dev/mvx_hwreg_v61.h
new file mode 100755
index 000000000000..7ef544534df5
--- /dev/null
+++ b/drivers/media/platform/cix/cix_vpu/dev/mvx_hwreg_v61.h
@@ -0,0 +1,49 @@
+/*
+ * The confidential and proprietary information contained in this file may
+ * only be used by a person authorised under and to the extent permitted
+ * by a subsisting licensing agreement from Arm Technology (China) Co., Ltd.
+ *
+ * (C) COPYRIGHT 2021-2021 Arm Technology (China) Co., Ltd.
+ * ALL RIGHTS RESERVED
+ *
+ * This entire notice must be reproduced on all copies of this file
+ * and copies of this file may only be made by a person if such person is
+ * permitted to do so under the terms of a subsisting license agreement
+ * from Arm Technology (China) Co., Ltd.
+ * + * SPDX-License-Identifier: GPL-2.0-only + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + * + */ + +#ifndef _MVX_HWREG_V61_H_ +#define _MVX_HWREG_V61_H_ + +/**************************************************************************** + * Includes + ****************************************************************************/ + +#include "mvx_hwreg.h" +#include "mvx_if.h" + +/**************************************************************************** + * Exported functions + ****************************************************************************/ + +void mvx_hwreg_get_formats_v61(enum mvx_direction direction, + uint64_t *formats); + +#endif /* _MVX_HWREG_V61_H_ */ diff --git a/drivers/media/platform/cix/cix_vpu/dev/mvx_lsid.c b/drivers/media/platform/cix/cix_vpu/dev/mvx_lsid.c new file mode 100755 index 000000000000..9b8289fb6b48 --- /dev/null +++ b/drivers/media/platform/cix/cix_vpu/dev/mvx_lsid.c @@ -0,0 +1,359 @@ +/* + * The confidential and proprietary information contained in this file may + * only be used by a person authorised under and to the extent permitted + * by a subsisting licensing agreement from Arm Technology (China) Co., Ltd. + * + * (C) COPYRIGHT 2021-2021 Arm Technology (China) Co., Ltd. 
+ * ALL RIGHTS RESERVED
+ *
+ * This entire notice must be reproduced on all copies of this file
+ * and copies of this file may only be made by a person if such person is
+ * permitted to do so under the terms of a subsisting license agreement
+ * from Arm Technology (China) Co., Ltd.
+ *
+ * SPDX-License-Identifier: GPL-2.0-only
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ */
+
+/****************************************************************************
+ * Includes
+ ****************************************************************************/
+
+/*
+ * NOTE(review): the three angle-bracket header names below were lost in
+ * extraction; reconstructed from the APIs used in this file
+ * (module_param_array, device_property_read_u32_array,
+ * wait_event_interruptible_timeout) — confirm against the original patch.
+ */
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/wait.h>
+#include "mvx_if.h"
+#include "mvx_hwreg.h"
+#include "mvx_lsid.h"
+#include "mvx_log_group.h"
+#include "mvx_session.h"
+
+/*
+ * In CIX SKY1, bus_attributes must be 0x33; in particular, bus_attribute[0]
+ * ARCACHE must be 0x3 (Normal Non-cacheable Bufferable). Otherwise, VPU
+ * firmware loading may fail or other unexpected crashes may occur.
+ *
+ * Todo: remove this debug param in formal release.
+ */ +static uint bus_attributes[4] = {0x33, 0x22, 0x33, 0x33}; +module_param_array(bus_attributes, uint, NULL, 0660); +MODULE_PARM_DESC(bus_attributes, "Bus attributes."); + +/**************************************************************************** + * Private functions + ****************************************************************************/ + +static bool is_alloc(struct mvx_lsid *lsid) +{ + uint32_t alloc; + + alloc = mvx_hwreg_read_lsid(lsid->hwreg, lsid->lsid, + MVX_HWREG_ALLOC); + + return alloc != MVE_ALLOC_FREE; +} + +static uint32_t get_core_lsid(uint32_t reg, + unsigned int core) +{ + return (reg >> (MVE_CORELSID_LSID_BITS * core)) & + MVX_CORELSID_LSID_MASK; +} + +static uint32_t get_jobqueue_job(uint32_t reg, + unsigned int nr) +{ + return (reg >> (MVE_JOBQUEUE_JOB_BITS * nr)) & MVE_JOBQUEUE_JOB_MASK; +} + +static uint32_t set_jobqueue_job(uint32_t reg, + unsigned int nr, + uint32_t job) +{ + reg &= ~(MVE_JOBQUEUE_JOB_MASK << (nr * MVE_JOBQUEUE_JOB_BITS)); + reg |= job << (MVE_JOBQUEUE_JOB_BITS * nr); + return reg; +} + +static uint32_t get_jobqueue_lsid(uint32_t reg, + unsigned int nr) +{ + return (reg >> (MVE_JOBQUEUE_JOB_BITS * nr + MVE_JOBQUEUE_LSID_SHIFT)) & + MVE_JOBQUEUE_LSID_MASK; +} + +static uint32_t set_lsid_ncores(uint32_t reg, + unsigned int nr, + unsigned int lsid, + unsigned int ncores) +{ + reg &= ~(MVE_JOBQUEUE_JOB_MASK << (nr * MVE_JOBQUEUE_JOB_BITS)); + reg |= ((lsid << MVE_JOBQUEUE_LSID_SHIFT) | + ((ncores - 1) << MVE_JOBQUEUE_NCORES_SHIFT)) << + (nr * MVE_JOBQUEUE_JOB_BITS); + + return reg; +} + +/**************************************************************************** + * Exported functions + ****************************************************************************/ + +int mvx_lsid_construct(struct mvx_lsid *lsid, + struct device *dev, + struct mvx_hwreg *hwreg, + unsigned int id) +{ + lsid->dev = dev; + lsid->hwreg = hwreg; + lsid->session = NULL; + lsid->lsid = id; + + return 0; +} + +void 
mvx_lsid_destruct(struct mvx_lsid *lsid) +{} + +int mvx_lsid_map(struct mvx_lsid *lsid, + struct mvx_lsid_pcb *pcb) +{ + struct mvx_hwreg *hwreg = lsid->hwreg; + uint32_t alloc; + uint32_t busattr[4]; + int i; + int ret; + + /* Check that the LSID is not already allocated. */ + if (is_alloc(lsid)) { + MVX_LOG_PRINT(&mvx_log_dev, MVX_LOG_WARNING, + "Failed to map session to LSID. LSID already allocated. lsid=%u.", + lsid->lsid); + return -EFAULT; + } + + /* Allocate LSID. */ + alloc = pcb->nprot == 0 ? MVE_ALLOC_PROTECTED : MVE_ALLOC_NON_PROTECTED; + mvx_hwreg_write_lsid(hwreg, lsid->lsid, MVX_HWREG_ALLOC, alloc); + + mvx_hwreg_write_lsid(hwreg, lsid->lsid, MVX_HWREG_TERMINATE, 1); + do { + ret = mvx_hwreg_read_lsid(hwreg, lsid->lsid, + MVX_HWREG_TERMINATE); + } while (ret != 0); + + /* Configure number of cores to use and which to cores to disable. */ + mvx_hwreg_write_lsid(hwreg, lsid->lsid, MVX_HWREG_CTRL, + pcb->ctrl); + + /* Configure MMU L0 entry and flush MMU tables. */ + mvx_hwreg_write_lsid(hwreg, lsid->lsid, MVX_HWREG_MMU_CTRL, + pcb->mmu_ctrl); + mvx_hwreg_write_lsid(hwreg, lsid->lsid, MVX_HWREG_FLUSH_ALL, 0); + + if (device_property_read_u32_array(lsid->dev, "busattr", busattr, + ARRAY_SIZE(busattr))) { + MVX_LOG_PRINT(&mvx_log_dev, MVX_LOG_DEBUG, + "busattr in of_node is not available."); + + /* We apply module params in this case for debug purpose. */ + memcpy(busattr, bus_attributes, sizeof(busattr)); + } + for (i = 0; i < ARRAY_SIZE(busattr); i++) + MVX_LOG_PRINT(&mvx_log_dev, MVX_LOG_VERBOSE, + "busattr[%d] = 0x%x.", i, busattr[i]); + + mvx_hwreg_write_lsid(hwreg, lsid->lsid, MVX_HWREG_BUSATTR_0, + busattr[0]); + mvx_hwreg_write_lsid(hwreg, lsid->lsid, MVX_HWREG_BUSATTR_1, + busattr[1]); + mvx_hwreg_write_lsid(hwreg, lsid->lsid, MVX_HWREG_BUSATTR_2, + busattr[2]); + mvx_hwreg_write_lsid(hwreg, lsid->lsid, MVX_HWREG_BUSATTR_3, + busattr[3]); + + /* Restore interrupt registers. 
*/ + mvx_hwreg_write_lsid(hwreg, lsid->lsid, MVX_HWREG_LIRQVE, 0); + mvx_hwreg_write_lsid(hwreg, lsid->lsid, MVX_HWREG_IRQHOST, + pcb->irqhost); + + /* + * Make sure all register writes have completed before scheduling is + * enabled. + */ + wmb(); + + /* Enable scheduling. */ + mvx_hwreg_write_lsid(hwreg, lsid->lsid, MVX_HWREG_SCHED, 1); + + return 0; +} + +void mvx_lsid_unmap(struct mvx_lsid *lsid, + struct mvx_lsid_pcb *pcb) +{ + struct mvx_hwreg *hwreg = lsid->hwreg; + + if (!is_alloc(lsid)) { + MVX_LOG_PRINT(&mvx_log_dev, MVX_LOG_WARNING, + "LSID was not allocated. lsid=%u.", + lsid->lsid); + return; + } + + /* Disable scheduling. */ + mvx_hwreg_write_lsid(hwreg, lsid->lsid, MVX_HWREG_SCHED, 0); + + /* Store registers in process control block. */ + pcb->ctrl = mvx_hwreg_read_lsid(hwreg, lsid->lsid, MVX_HWREG_CTRL); + pcb->mmu_ctrl = mvx_hwreg_read_lsid(hwreg, lsid->lsid, + MVX_HWREG_MMU_CTRL); + pcb->irqhost = mvx_hwreg_read_lsid(hwreg, lsid->lsid, + MVX_HWREG_IRQHOST); + pcb->nprot = mvx_hwreg_read_lsid(hwreg, lsid->lsid, MVX_HWREG_NPROT); + + /* Deallocate LSID. */ + mvx_hwreg_write_lsid(hwreg, lsid->lsid, MVX_HWREG_ALLOC, + MVE_ALLOC_FREE); +} + +int mvx_lsid_jobqueue_add(struct mvx_lsid *lsid, + unsigned int ncores, + uint32_t (* sort)(uint32_t jobqueue, + struct mvx_lsid lsid[])) +{ + struct mvx_hwreg *hwreg = lsid->hwreg; + uint32_t jobqueue; + int i; + + /* Disable scheduling. */ + mvx_hwreg_write(hwreg, MVX_HWREG_ENABLE, 0); + + jobqueue = mvx_hwreg_read(hwreg, MVX_HWREG_JOBQUEUE); + + /* Search if the LSID is already in the job queue. */ + for (i = 0; i < MVE_JOBQUEUE_NJOBS; i++) + if (get_jobqueue_lsid(jobqueue, i) == lsid->lsid) + goto jobqueue_enable; + + /* Search for a free slot in the job queue. 
*/ + for (i = 0; i < MVE_JOBQUEUE_NJOBS; i++) + if (get_jobqueue_lsid(jobqueue, i) == + MVE_JOBQUEUE_JOB_INVALID) { + jobqueue = set_lsid_ncores(jobqueue, i, lsid->lsid, + ncores); + jobqueue = sort(jobqueue, lsid - lsid->lsid); + mvx_hwreg_write(hwreg, MVX_HWREG_JOBQUEUE, jobqueue); + break; + } + +jobqueue_enable: + /* Reenable scheduling. */ + mvx_hwreg_write(hwreg, MVX_HWREG_ENABLE, 1); + + return i < MVE_JOBQUEUE_NJOBS ? 0 : -EAGAIN; +} + +void mvx_lsid_send_irq(struct mvx_lsid *lsid) +{ + struct mvx_hwreg *hwreg = lsid->hwreg; + + mvx_hwreg_write_lsid(hwreg, lsid->lsid, MVX_HWREG_IRQHOST, 1); +} + +void mvx_lsid_trigger_irqve(struct mvx_lsid *lsid) +{ + struct mvx_hwreg *hwreg = lsid->hwreg; + + mvx_hwreg_write_lsid(hwreg, lsid->lsid, MVX_HWREG_LIRQVE, 1); +} + +void mvx_lsid_flush_mmu(struct mvx_lsid *lsid) +{ + struct mvx_hwreg *hwreg = lsid->hwreg; + + mvx_hwreg_write_lsid(hwreg, lsid->lsid, MVX_HWREG_FLUSH_ALL, 0); +} + +void mvx_lsid_terminate(struct mvx_lsid *lsid) +{ + struct mvx_hwreg *hwreg = lsid->hwreg; + uint32_t ret; + + mvx_hwreg_write_lsid(hwreg, lsid->lsid, MVX_HWREG_TERMINATE, 1); + + ret = wait_event_interruptible_timeout(hwreg->wait_queue, + mvx_hwreg_read_lsid(hwreg, lsid->lsid, MVX_HWREG_TERMINATE) == 0, + msecs_to_jiffies(MVX_HWREG_TERMINATE_TIMEOUT)); + if (!ret) + MVX_LOG_PRINT(&mvx_log_dev, MVX_LOG_ERROR, + "Wait LSID[%d] termiate timeout", lsid->lsid); +} + +void mvx_lsid_jobqueue_remove(struct mvx_lsid *lsid) +{ + struct mvx_hwreg *hwreg = lsid->hwreg; + uint32_t jobqueue; + int i; + int j; + + /* Disable scheduling. */ + mvx_hwreg_write(hwreg, MVX_HWREG_ENABLE, 0); + + jobqueue = mvx_hwreg_read(hwreg, MVX_HWREG_JOBQUEUE); + + /* Copy job entries that do not match the LSID to be removed. */ + for (i = 0, j = 0; i < MVE_JOBQUEUE_NJOBS; i++) + if (get_jobqueue_lsid(jobqueue, i) != lsid->lsid) + jobqueue = set_jobqueue_job( + jobqueue, j++, get_jobqueue_job(jobqueue, i)); + + /* Blank out remaining job entries. 
*/ + for (; j < MVE_JOBQUEUE_NJOBS; j++) + jobqueue = set_lsid_ncores(jobqueue, j, + MVE_JOBQUEUE_JOB_INVALID, 0); + + mvx_hwreg_write(hwreg, MVX_HWREG_JOBQUEUE, jobqueue); + + /* Reenable scheduling. */ + mvx_hwreg_write(hwreg, MVX_HWREG_ENABLE, 1); +} + +bool mvx_lsid_idle(struct mvx_lsid *lsid) +{ + struct mvx_hwreg *hwreg = lsid->hwreg; + uint32_t jobqueue; + uint32_t corelsid; + uint32_t ncores; + uint32_t i; + + jobqueue = mvx_hwreg_read(hwreg, MVX_HWREG_JOBQUEUE); + corelsid = mvx_hwreg_read(hwreg, MVX_HWREG_CORELSID); + ncores = mvx_hwreg_read(hwreg, MVX_HWREG_NCORES); + + /* Check if LSID is found in job queue. */ + for (i = 0; i < MVE_JOBQUEUE_NJOBS; i++) + if (get_jobqueue_lsid(jobqueue, i) == lsid->lsid) + return false; + + /* Check if LSID is found in core lsid. */ + for (i = 0; i < ncores; i++) + if (get_core_lsid(corelsid, i) == lsid->lsid) + return false; + + return true; +} diff --git a/drivers/media/platform/cix/cix_vpu/dev/mvx_lsid.h b/drivers/media/platform/cix/cix_vpu/dev/mvx_lsid.h new file mode 100755 index 000000000000..bd1d1a956ee9 --- /dev/null +++ b/drivers/media/platform/cix/cix_vpu/dev/mvx_lsid.h @@ -0,0 +1,175 @@ +/* + * The confidential and proprietary information contained in this file may + * only be used by a person authorised under and to the extent permitted + * by a subsisting licensing agreement from Arm Technology (China) Co., Ltd. + * + * (C) COPYRIGHT 2021-2021 Arm Technology (China) Co., Ltd. + * ALL RIGHTS RESERVED + * + * This entire notice must be reproduced on all copies of this file + * and copies of this file may only be made by a person if such person is + * permitted to do so under the terms of a subsisting license agreement + * from Arm Technology (China) Co., Ltd. 
+ * + * SPDX-License-Identifier: GPL-2.0-only + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + * + */ + +#ifndef _MVX_LSID_H_ +#define _MVX_LSID_H_ + +/**************************************************************************** + * Includes + ****************************************************************************/ + +#include + +/**************************************************************************** + * Defines + ****************************************************************************/ + +#define MVX_LSID_MAX 4 + +/**************************************************************************** + * Types + ****************************************************************************/ + +struct device; +struct mvx_hwreg; +struct mvx_sched_session; + +/** + * struct mvx_lsid_pcb - LSID process control block. + * + * This structure is used to store the register map when a session is unmapped + * from a LSID, so it can be restored again when the session is remapped. + */ +struct mvx_lsid_pcb { + uint32_t ctrl; + uint32_t mmu_ctrl; + uint32_t irqhost; + uint32_t nprot; +}; + +/** + * struct mvx_lsid - LSID class. 
+ */ +struct mvx_lsid { + struct device *dev; + struct mvx_hwreg *hwreg; + struct mvx_sched_session *session; + unsigned int lsid; +}; + +/**************************************************************************** + * Exported functions + ****************************************************************************/ + +/** + * mvx_lsid_construct() - Construct the LSID object. + * @lsid: Pointer to LSID object. + * @dev: Pointer to device. + * @hwreg: Pointer to hwreg object. + * @id: LSID number. + * + * Return: 0 on success, else error code. + */ +int mvx_lsid_construct(struct mvx_lsid *lsid, + struct device *dev, + struct mvx_hwreg *hwreg, + unsigned int id); + +/** + * mvx_lsid_destruct() - Destruct the LSID object. + * @lsid: Pointer to LSID object. + */ +void mvx_lsid_destruct(struct mvx_lsid *lsid); + +/** + * mvx_lsid_map() - Map a session to this LSID. + * @lsid: Pointer to LSID object. + * @pcb: Process control block to be restored. + * + * Return: 0 on success, else error code. + */ +int mvx_lsid_map(struct mvx_lsid *lsid, + struct mvx_lsid_pcb *pcb); + +/** + * mvx_lsid_unmap() - Unmap session from LSID. + * @lsid: Pointer to LSID object. + * @pcb: Process control block where the registers are stored. + * + * A LSID must not be unmapped if it is present in the job queue or core LSID. + * It is the responsibility of the scheduler to guarantee that the LSID is idle + * before it is unmapped. + */ +void mvx_lsid_unmap(struct mvx_lsid *lsid, + struct mvx_lsid_pcb *pcb); + +/** + * mvx_lsid_jobqueue_add() - Add LSID to job queue. + * @lsid: Pointer to LSID object. + * @ncores: Number of cores to request. + * @sort: Callback function to sort to new job in job queue. + * + * Return: 0 on success, else error code. + */ +int mvx_lsid_jobqueue_add(struct mvx_lsid *lsid, + unsigned int ncores, + uint32_t (* sort)(uint32_t jobqueue, + struct mvx_lsid lsid[])); + +/** + * mvx_lsid_send_irq() - Send IRQ to firmware. + * @lsid: Pointer to LSID object. 
+ */ +void mvx_lsid_send_irq(struct mvx_lsid *lsid); + +/** + * mvx_lsid_trigger_irqve() - Send trigger IRQVE. + * @lsid: Pointer to LSID object. + */ +void mvx_lsid_trigger_irqve(struct mvx_lsid *lsid); + +/** + * mvx_lsid_flush_mmu() - Flush MMU tables. + * @lsid: Pointer to LSID object. + */ +void mvx_lsid_flush_mmu(struct mvx_lsid *lsid); + +/** + * mvx_lsid_terminate() - Terminate the LSID. + * @lsid: Pointer to LSID object. + */ +void mvx_lsid_terminate(struct mvx_lsid *lsid); + +/** + * mvx_lsid_jobqueue_remove() - Remove LSID from job queue. + * @lsid: Pointer to LSID object. + */ +void mvx_lsid_jobqueue_remove(struct mvx_lsid *lsid); + +/** + * mvx_lsid_idle() - Check if LSID is idle. + * @lsid: Pointer to LSID object. + * + * Return: true if LSID is idle, else false. + */ +bool mvx_lsid_idle(struct mvx_lsid *lsid); + +#endif /* _MVX_LSID_H_ */ diff --git a/drivers/media/platform/cix/cix_vpu/dev/mvx_scheduler.c b/drivers/media/platform/cix/cix_vpu/dev/mvx_scheduler.c new file mode 100755 index 000000000000..3b4d67182c79 --- /dev/null +++ b/drivers/media/platform/cix/cix_vpu/dev/mvx_scheduler.c @@ -0,0 +1,1158 @@ +/* + * The confidential and proprietary information contained in this file may + * only be used by a person authorised under and to the extent permitted + * by a subsisting licensing agreement from Arm Technology (China) Co., Ltd. + * + * (C) COPYRIGHT 2021-2021 Arm Technology (China) Co., Ltd. + * ALL RIGHTS RESERVED + * + * This entire notice must be reproduced on all copies of this file + * and copies of this file may only be made by a person if such person is + * permitted to do so under the terms of a subsisting license agreement + * from Arm Technology (China) Co., Ltd. 
+ * + * SPDX-License-Identifier: GPL-2.0-only + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + * + */ + +/**************************************************************************** + * Includes + ****************************************************************************/ + +#include +#include +#include +#include +#include +#include "mvx_if.h" +#include "mvx_hwreg.h" +#include "mvx_mmu.h" +#include "mvx_scheduler.h" +#include "mvx_session.h" +#include "mvx_seq.h" +#include "mvx_pm_runtime.h" +#include "mvx_log_group.h" + +/**************************************************************************** + * Private variables + ****************************************************************************/ + +static int wait_scheduler_timeout = 1000; +module_param(wait_scheduler_timeout, int, 0660); + +/**************************************************************************** + * Static functions + ****************************************************************************/ + +static struct mvx_lsid *find_free_lsid(struct mvx_sched *sched) +{ + unsigned int i; + + for (i = 0; i < sched->nlsid; i++) + if (sched->lsid[i].session == NULL) + return &sched->lsid[i]; + + return NULL; +} + +static struct mvx_lsid *find_idle_lsid(struct mvx_sched *sched) +{ + unsigned int i; + + for (i = 0; i < 
sched->nlsid; i++) { + bool idle; + + idle = mvx_lsid_idle(&sched->lsid[i]); + if (idle != false) + return &sched->lsid[i]; + } + + return NULL; +} + +static int map_session(struct mvx_sched *sched, + struct mvx_sched_session *session, + struct mvx_lsid *lsid) +{ + int ret; + + MVX_LOG_PRINT(&mvx_log_dev, MVX_LOG_INFO, + "%px Map LSID. lsid=%u, jobqueue=%08x, corelsid=%08x.", + mvx_if_session_to_session(session->isession), + lsid->lsid, + mvx_hwreg_read(sched->hwreg, MVX_HWREG_JOBQUEUE), + mvx_hwreg_read(sched->hwreg, MVX_HWREG_CORELSID)); + + ret = mvx_lsid_map(lsid, &session->pcb); + if (ret != 0) + return ret; + + session->lsid = lsid; + lsid->session = session; + + return 0; +} + +static void unmap_session(struct mvx_sched *sched, + struct mvx_sched_session *session) +{ + struct mvx_lsid *lsid = session->lsid; + + if (lsid == NULL) + return; + + MVX_LOG_PRINT(&mvx_log_dev, MVX_LOG_INFO, + "%px Unmap LSID. lsid=%u, jobqueue=%08x, corelsid=%08x.", + mvx_if_session_to_session(session->isession), + lsid->lsid, + mvx_hwreg_read(sched->hwreg, MVX_HWREG_JOBQUEUE), + mvx_hwreg_read(sched->hwreg, MVX_HWREG_CORELSID)); + + mvx_lsid_unmap(lsid, &session->pcb); + session->lsid = NULL; + lsid->session = NULL; +} + +static struct list_head *list_find_node(struct list_head *list, + struct list_head *node) +{ + struct list_head *i; + + list_for_each(i, list) { + if (i == node) + return i; + } + + return NULL; +} + +static void set_sched_state(struct mvx_sched *sched, int state) +{ + MVX_LOG_PRINT(&mvx_log_dev, MVX_LOG_INFO, "switch scheduler state, %d -> %d", + sched->state, state); + sched->state = state; +} + +static uint32_t sort_jobs(uint32_t jobqueue, + struct mvx_lsid lsid[MVX_LSID_MAX]) +{ + int i; + uint32_t new_jobqueue = 0x0F0F0F0F; + uint32_t new_job = 0, next_job = 0; + uint32_t id, ncores; + struct mvx_lsid *new = NULL, *next = NULL; + + /* Find the new job to be sorted */ + for (i = 24; i >= 0; i -= 8) { + new_job = (jobqueue >> i) & 0xFF; + if (new_job != 
MVE_JOBQUEUE_JOB_INVALID) { + if (new_job == MVE_JOBQUEUE_JOB_OBSOLETED) + MVX_LOG_PRINT(&mvx_log_dev, MVX_LOG_INFO, + "Found obsoleted job in the end of jobqueue, remove it"); + else + break; + } + } + id = new_job & 0x0F; + if (id < MVX_LSID_MAX) { + new = &lsid[id]; + } else { + MVX_LOG_PRINT(&mvx_log_dev, MVX_LOG_ERROR, + "Invalid job lsid %d in jobqueue %08x", id, jobqueue); + return jobqueue; // Invalid job, return original queue + } + + /* Insert the new job into the job queue */ + for (i -= 8; i >= 0; i -= 8) { + next_job = (jobqueue >> i) & 0xFF; + id = next_job & 0x0F; + if (id < MVX_LSID_MAX) { + next = &lsid[id]; + } else { + if (next_job == MVE_JOBQUEUE_JOB_OBSOLETED) { + MVX_LOG_PRINT(&mvx_log_dev, MVX_LOG_INFO, + "Found obsoleted job in jobqueue, remove it"); + continue; + } else { + MVX_LOG_PRINT(&mvx_log_dev, MVX_LOG_ERROR, + "Invalid job lsid %d in jobqueue %08x", id, jobqueue); + return jobqueue; // Invalid job, return original queue + } + } + + ncores = (next_job >> 4) + 1; + if ((new->session->priority >= next->session->priority_in_queue) || + (i == 0 && ncores < next->session->isession->ncores)) + break; + + new_jobqueue = (new_jobqueue << 8) | next_job; + if (next->session->priority_in_queue > 1 && + new->session->priority > 0) + next->session->priority_in_queue--; + } + new_jobqueue = (new_jobqueue << 8) | new_job; + + /* Add the remaining jobs */ + for (; i >= 0; i -= 8) { + next_job = (jobqueue >> i) & 0xFF; + new_jobqueue = (new_jobqueue << 8) | next_job; + } + + return new_jobqueue; +} + +/** + * pending list is only updated when sched is locked. 
+ * a session can only be added once + * + * notify_list = [] + * lock_sched + * for pending in pending_list: + * if is_mapped(pending): + * jobqueue.add(pending) + * pending_list.remove(pending) + * continue + * + * l = free_lsid + * if l is Nul: + * l = idle_lsid + * if l is Nul: + * break + * if is_mapped(l): + * s = session[l] + * unmap(s) + * notify_list.add(s) + * + * map(pending) + * jobqueue.add(pending) + * pending_list.remove(pending) + * unlock_sched + * + * for s in notify_list: + * session_notify(s) + * notify_list.remove(s) + */ +static void sched_task(struct work_struct *ws) +{ + struct mvx_sched *sched = + container_of(ws, struct mvx_sched, sched_task); + struct mvx_sched_session *pending; + struct mvx_sched_session *unmapped; + struct mvx_sched_session *tmp; + LIST_HEAD(notify_list); + int ret; + + ret = mutex_lock_interruptible(&sched->mutex); + if (ret != 0) + return; + + if (sched->state == MVX_SCHED_STATE_IDLE) { + if (list_empty_careful(&sched->pending) == false) + set_sched_state(sched, MVX_SCHED_STATE_RUNNING); + } else if (sched->state == MVX_SCHED_STATE_SUSPEND) { + mutex_unlock(&sched->mutex); + return; + } + + /* + * Try to map sessions from pending queue while possible. + */ + list_for_each_entry_safe(pending, tmp, &sched->pending, pending) { + struct mvx_lsid *lsid; + + /* + * This session is already mapped to LSID. + * Just make sure it is scheduled. + */ + if (pending->lsid != NULL) { + ret = mvx_lsid_jobqueue_add(pending->lsid, + pending->isession->ncores, + sort_jobs); + if (ret != 0) { + MVX_LOG_PRINT(&mvx_log_dev, MVX_LOG_WARNING, + "Cannot add pending session to job queue. csession=%px, mvx_session=%px", + pending, + mvx_if_session_to_session( + pending->isession)); + continue; + } + + pending->in_pending = false; + list_del(&pending->pending); + continue; + } + + /* Find LSID to be used for the pending session. 
*/ + lsid = find_free_lsid(sched); + if (lsid == NULL) + lsid = find_idle_lsid(sched); + + if (lsid == NULL) + break; + + /* + * This LSID was mapped to some session. We have to notify + * the session about an irq in case there are messages in + * a message queue. + * + * Notifications are done after pending list is processed. + */ + if (lsid->session != NULL) { + struct mvx_sched_session *unmapped = lsid->session; + + unmap_session(sched, unmapped); + + /* + * If the reference count is 0, then the session is + * about to be removed and should be ignored. + */ + ret = kref_get_unless_zero(&unmapped->isession->kref); + if (ret != 0) { + if (list_find_node(¬ify_list, + &unmapped->notify)) + /* + * Consider a situation when a session + * that was unmapped from LSID and added + * notify_list was also present in the + * pending_list. It is possible that + * such a session will be mapped to the + * new LSID, executed by the hardware + * and switched to idle state while + * this function is still looping + * through pending list. + * + * If it happens, then this session + * might be unmapped again in order to + * make a room for another pending + * session. As a result we will try to + * add this session to notify_list + * again. This will break notify list + * and could lead to crashes or hangs. + * + * However, it is safe just to skip + * adding the session to notify_list if + * it is already there, because it will + * be processed anyway. + */ + kref_put(&unmapped->isession->kref, + unmapped->isession->release); + else + list_add_tail(&unmapped->notify, + ¬ify_list); + } else { + MVX_LOG_PRINT(&mvx_log_dev, MVX_LOG_WARNING, + "Ref is zero. csession=%px", + unmapped); + } + } + + ret = map_session(sched, pending, lsid); + if (ret != 0) { + MVX_LOG_PRINT(&mvx_log_dev, MVX_LOG_WARNING, + "Cannot map pending session. 
csession=%px, mvx_session=%px", + pending, + mvx_if_session_to_session( + pending->isession)); + break; + } + + ret = mvx_lsid_jobqueue_add(lsid, pending->isession->ncores, sort_jobs); + if (ret != 0) { + MVX_LOG_PRINT(&mvx_log_dev, MVX_LOG_WARNING, + "Cannot add pending session to job queue. csession=%px, mvx_session=%px", + pending, + mvx_if_session_to_session( + pending->isession)); + continue; + } + + pending->in_pending = false; + list_del(&pending->pending); + } + + /* + * It is important that the scheduler mutex is released before the + * callbacks to the if-module are invoked. The if-module may issue + * requests to the dev-module (for example switch_in()) that would + * otherwise deadlock. + */ + mutex_unlock(&sched->mutex); + + list_for_each_entry_safe(unmapped, tmp, ¬ify_list, notify) { + struct mvx_if_session *iunmapped = unmapped->isession; + + list_del(&unmapped->notify); + + mutex_lock(iunmapped->mutex); + sched->if_ops->irq(iunmapped); + ret = kref_put(&iunmapped->kref, iunmapped->release); + if (ret == 0) + mutex_unlock(iunmapped->mutex); + } +} + +static void sched_session_print(struct seq_file *s, + struct mvx_sched_session *session, + struct mvx_hwreg *hwreg, + int ind) +{ + struct mvx_lsid *lsid; + + if (session == NULL) + return; + + mvx_seq_printf(s, "Client session", ind, "%px\n", session->isession); + mvx_seq_printf(s, "Dev session", ind, "%px\n", session); + mvx_seq_printf(s, "MVX session", ind, "%px\n", + mvx_if_session_to_session(session->isession)); + + lsid = session->lsid; + if (lsid == NULL) + return; + + mvx_seq_printf(s, "IRQ host", ind, "%d\n", + mvx_hwreg_read_lsid(hwreg, lsid->lsid, + MVX_HWREG_IRQHOST)); + mvx_seq_printf(s, "IRQ MVE", ind, "%d\n", + mvx_hwreg_read_lsid(hwreg, lsid->lsid, + MVX_HWREG_LIRQVE)); +} + +static int sched_show(struct seq_file *s, + void *v) +{ + struct mvx_sched *sched = (struct mvx_sched *)s->private; + struct mvx_hwreg *hwreg = sched->hwreg; + struct mvx_sched_session *session; + int i; + int ret; + 
+ ret = mvx_pm_runtime_get_sync(hwreg->dev); + if (ret < 0) + return 0; + + ret = mutex_lock_interruptible(&sched->mutex); + if (ret != 0) { + mvx_pm_runtime_put_sync(hwreg->dev); + return ret; + } + + mvx_seq_printf(s, "Core LSID", 0, "%08x\n", + mvx_hwreg_read(hwreg, MVX_HWREG_CORELSID)); + mvx_seq_printf(s, "Job queue", 0, "%08x\n", + mvx_hwreg_read(hwreg, MVX_HWREG_JOBQUEUE)); + seq_puts(s, "\n"); + + seq_puts(s, "scheduled:\n"); + for (i = 0; i < sched->nlsid; ++i) { + mvx_seq_printf(s, "LSID", 1, "%d\n", i); + session = sched->lsid[i].session; + sched_session_print(s, session, hwreg, 2); + } + + seq_puts(s, "pending:\n"); + i = 0; + list_for_each_entry(session, &sched->pending, pending) { + char tmp[10]; + + scnprintf(tmp, sizeof(tmp), "%d", i++); + mvx_seq_printf(s, tmp, 1, "\n"); + sched_session_print(s, session, hwreg, 2); + } + + mutex_unlock(&sched->mutex); + mvx_pm_runtime_put_sync(hwreg->dev); + + return 0; +} + +static int sched_open(struct inode *inode, + struct file *file) +{ + return single_open(file, sched_show, inode->i_private); +} + +static const struct file_operations sched_fops = { + .open = sched_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release +}; + +int sched_debugfs_init(struct mvx_sched *sched, + struct dentry *parent) +{ + struct dentry *dentry; + + dentry = debugfs_create_file("sched", 0400, parent, sched, + &sched_fops); + if (IS_ERR_OR_NULL(dentry)) + return -ENOMEM; + + return 0; +} + +/**************************************************************************** + * Exported functions + ****************************************************************************/ + +int mvx_sched_construct(struct mvx_sched *sched, + struct device *dev, + struct mvx_if_ops *if_ops, + struct mvx_hwreg *hwreg, + struct dentry *parent) +{ + unsigned int lsid; + int ret; + + sched->dev = dev; + sched->hwreg = hwreg; + sched->if_ops = if_ops; + sched->state = MVX_SCHED_STATE_IDLE; + init_completion(&sched->cmp); + 
mutex_init(&sched->mutex); + INIT_LIST_HEAD(&sched->pending); + INIT_LIST_HEAD(&sched->sessions); + mvx_log_perf.sessions = &sched->sessions; + mutex_init(&sched->sessions_mutex); + INIT_WORK(&sched->sched_task, sched_task); + sched->sched_queue = create_singlethread_workqueue("mvx_sched"); + if (!sched->sched_queue) { + MVX_LOG_PRINT(&mvx_log_dev, MVX_LOG_WARNING, + "Cannot create work queue"); + return -EINVAL; + } + + sched->nlsid = mvx_hwreg_get_nlsid(hwreg); + + for (lsid = 0; lsid < sched->nlsid; lsid++) { + ret = mvx_lsid_construct(&sched->lsid[lsid], dev, hwreg, lsid); + if (ret != 0) + goto destruct_lsid; + } + + if (IS_ENABLED(CONFIG_DEBUG_FS)) { + ret = sched_debugfs_init(sched, parent); + if (ret != 0) + goto destruct_lsid; + } + + return 0; + +destruct_lsid: + while (lsid-- > 0) + mvx_lsid_destruct(&sched->lsid[lsid]); + + return ret; +} + +void mvx_sched_destruct(struct mvx_sched *sched) +{ + destroy_workqueue(sched->sched_queue); + + while (sched->nlsid-- > 0) + mvx_lsid_destruct(&sched->lsid[sched->nlsid]); +} + +int mvx_sched_session_construct(struct mvx_sched_session *session, + struct mvx_if_session *isession) +{ + uint32_t disallow; + uint32_t maxcores; + struct mvx_session *s = mvx_if_session_to_session(isession); + + session->isession = isession; + INIT_LIST_HEAD(&session->pending); + INIT_LIST_HEAD(&session->notify); + session->lsid = NULL; + session->in_pending = false; + + memset(&session->pcb, 0, sizeof(session->pcb)); + + disallow = (~isession->core_mask) & MVE_CTRL_DISALLOW_MASK; + maxcores = isession->ncores & MVE_CTRL_MAXCORES_MASK; + session->pcb.ctrl = (disallow << MVE_CTRL_DISALLOW_SHIFT) | + (maxcores << MVE_CTRL_MAXCORES_SHIFT); + + session->pcb.mmu_ctrl = isession->l0_pte; + session->pcb.nprot = isession->securevideo == false; + session->priority = s->priority; + session->priority_in_queue = session->priority_pending = session->priority; + + return 0; +} + +void mvx_sched_session_destruct(struct mvx_sched_session *session) +{} + 
+void mvx_sched_list_insert_by_priority(struct mvx_sched *sched, + struct mvx_sched_session *session) +{ + struct mvx_sched_session *tmp; + + /* To minimize impact on no priority case, use reverse iteration */ + list_for_each_entry_reverse(tmp, &sched->pending, pending) { + if (session->priority >= tmp->priority_pending) { + list_add(&session->pending, &tmp->pending); + return; + } else if (tmp->priority_pending > 1 && session->priority > 0) { + tmp->priority_pending--; + } + } + + list_add(&session->pending, &sched->pending); +} + +int mvx_sched_switch_in(struct mvx_sched *sched, + struct mvx_sched_session *session) +{ + int ret; + + MVX_LOG_PRINT(&mvx_log_dev, MVX_LOG_INFO, + "%px Switch in session. jobqueue=%08x, coreslid=%08x.", + mvx_if_session_to_session(session->isession), + mvx_hwreg_read(sched->hwreg, MVX_HWREG_JOBQUEUE), + mvx_hwreg_read(sched->hwreg, MVX_HWREG_CORELSID)); + + ret = mutex_lock_interruptible(&sched->mutex); + if (ret != 0) + return ret; + + if (session->in_pending) { + MVX_LOG_PRINT(&mvx_log_dev, MVX_LOG_VERBOSE, + "Session is already in pending state."); + goto unlock_mutex; + } + + session->in_pending = true; + session->priority_pending = session->priority; + mvx_sched_list_insert_by_priority(sched, session); + queue_work(sched->sched_queue, &sched->sched_task); + +unlock_mutex: + mutex_unlock(&sched->mutex); + return 0; +} + +int mvx_sched_switch_out_rsp(struct mvx_sched *sched, + struct mvx_sched_session *session) +{ + int i; + int ret; + bool all_lsid_idle = true; + + ret = mutex_lock_interruptible(&sched->mutex); + if (ret != 0) { + MVX_LOG_PRINT(&mvx_log_dev, MVX_LOG_WARNING, "get scheduler lock fail."); + return ret; + } + + for (i = 0; i < sched->nlsid; i++) + all_lsid_idle &= mvx_lsid_idle(&sched->lsid[i]); + + for (i = 0; i < sched->nlsid; i++) { + struct mvx_sched_session *ss = sched->lsid[i].session; + struct mvx_session *ls = NULL; + + if (ss != NULL) { + ls = mvx_if_session_to_session(ss->isession); + all_lsid_idle &= ls ? 
!ls->switched_in : true; + } + } + + if (sched->state == MVX_SCHED_STATE_SUSPEND) { + if (all_lsid_idle == false) + goto end; + + complete(&sched->cmp); + } else if (sched->state == MVX_SCHED_STATE_RUNNING) { + if (list_empty_careful(&sched->pending) && all_lsid_idle == true) + set_sched_state(sched, MVX_SCHED_STATE_IDLE); + } + +end: + mutex_unlock(&sched->mutex); + return ret; +} + + +int mvx_sched_send_irq(struct mvx_sched *sched, + struct mvx_sched_session *session) +{ + mutex_lock(&sched->mutex); + + MVX_LOG_PRINT(&mvx_log_dev, MVX_LOG_DEBUG, + "%px Send irq. lsid=%d, jobqueue=%08x, corelsid=%08x.", + mvx_if_session_to_session(session->isession), + session->lsid == NULL ? -1 : session->lsid->lsid, + mvx_hwreg_read(sched->hwreg, MVX_HWREG_JOBQUEUE), + mvx_hwreg_read(sched->hwreg, MVX_HWREG_CORELSID)); + + if (session->lsid == NULL) + session->pcb.irqhost = 1; + else + mvx_lsid_send_irq(session->lsid); + + mutex_unlock(&sched->mutex); + + return 0; +} + +int mvx_sched_trigger_irq(struct mvx_sched *sched, + struct mvx_sched_session *session) +{ + mutex_lock(&sched->mutex); + + if (session->lsid == NULL) { + mutex_unlock(&sched->mutex); + return 0; + } + + mvx_lsid_trigger_irqve(session->lsid); + mvx_lsid_jobqueue_add(session->lsid, session->lsid->session->isession->ncores, sort_jobs); + mvx_lsid_send_irq(session->lsid); + + mutex_unlock(&sched->mutex); + + return 0; +} + +int mvx_sched_flush_mmu(struct mvx_sched *sched, + struct mvx_sched_session *session) +{ + mutex_lock(&sched->mutex); + + if (session->lsid != NULL) + mvx_lsid_flush_mmu(session->lsid); + + mutex_unlock(&sched->mutex); + + return 0; +} + +static void print_session(struct mvx_sched *sched, + struct mvx_sched_session *session, + struct mvx_session *s) +{ + int lsid = -1; + uint32_t irqve = 0; + uint32_t irqhost = 0; + + if (session != NULL && session->lsid != NULL) { + struct mvx_hwreg *hwreg = sched->hwreg; + + lsid = session->lsid->lsid; + irqve = mvx_hwreg_read_lsid(hwreg, lsid, 
MVX_HWREG_LIRQVE); + irqhost = mvx_hwreg_read_lsid(hwreg, lsid, MVX_HWREG_IRQHOST); + } + + MVX_LOG_PRINT(&mvx_log_dev, MVX_LOG_WARNING, + "%px session=%px, lsid=%d, irqve=%08x, irqhost=%08x", + s, mvx_if_session_to_session(session->isession), lsid, + irqve, irqhost); +} + +void mvx_sched_print_debug(struct mvx_sched *sched, + struct mvx_sched_session *session) +{ + struct mvx_hwreg *hwreg = sched->hwreg; + struct mvx_sched_session *pending; + struct mvx_sched_session *tmp; + struct mvx_session *s = mvx_if_session_to_session(session->isession); + unsigned int i; + int ret; + + mvx_pm_runtime_get_sync(sched->dev); + + ret = mutex_lock_interruptible(&sched->mutex); + if (ret != 0) { + mvx_pm_runtime_put_sync(sched->dev); + return; + } + + MVX_LOG_PRINT(&mvx_log_dev, MVX_LOG_WARNING, "%px Current session:", s); + print_session(sched, session, s); + + MVX_LOG_PRINT(&mvx_log_dev, MVX_LOG_WARNING, "%px Pending queue:", s); + list_for_each_entry_safe(pending, tmp, &sched->pending, pending) { + print_session(sched, pending, s); + } + + MVX_LOG_PRINT(&mvx_log_dev, MVX_LOG_WARNING, "%px Print register:", s); + + MVX_LOG_PRINT(&mvx_log_dev, MVX_LOG_WARNING, + "%px jobqueue=%08x, corelsid=%08x, irqve=%08x", + s, + mvx_hwreg_read(hwreg, MVX_HWREG_JOBQUEUE), + mvx_hwreg_read(hwreg, MVX_HWREG_CORELSID), + mvx_hwreg_read(hwreg, MVX_HWREG_IRQVE)); + + for (i = 0; i < sched->nlsid; i++) { + struct mvx_sched_session *ss = sched->lsid[i].session; + struct mvx_session *ls = NULL; + + if (ss != NULL) + ls = mvx_if_session_to_session(ss->isession); + + MVX_LOG_PRINT( + &mvx_log_dev, MVX_LOG_WARNING, + "%px lsid=%u, session=%px, irqve=%08x, irqhost=%08x", + s, i, ls, + mvx_hwreg_read_lsid(hwreg, i, MVX_HWREG_LIRQVE), + mvx_hwreg_read_lsid(hwreg, i, MVX_HWREG_IRQHOST)); + } + + mutex_unlock(&sched->mutex); + + mvx_pm_runtime_put_sync(sched->dev); +} + +void mvx_sched_handle_irq(struct mvx_sched *sched, + unsigned int lsid) +{ + struct mvx_sched_session *session; + struct mvx_if_session 
*isession = NULL; + int ret; + + ret = mutex_lock_interruptible(&sched->mutex); + if (ret != 0) + return; + + /* + * If a session has been terminated/unmapped just before the IRQ bottom + * handler has been executed, then the session pointer will be NULL or + * may even point at a different session. This is an unharmful + * situation. + * + * If the reference count is 0, then the session is about to be removed + * and should be ignored. + */ + session = sched->lsid[lsid].session; + if (session != NULL) { + ret = kref_get_unless_zero(&session->isession->kref); + if (ret != 0) + isession = session->isession; + } else { + MVX_LOG_PRINT(&mvx_log_dev, MVX_LOG_INFO, + "lsid[%d].session has been terminated or unmapped", lsid); + } + + /* + * It is important that the scheduler mutex is released before the + * callbacks to the if-module are invoked. The if-module may issue + * requests to the dev-module (for example switch_in()) that would + * otherwise deadlock. + */ + mutex_unlock(&sched->mutex); + + /* Inform if-session that an IRQ was received. 
*/ + if (isession != NULL) { + mutex_lock(isession->mutex); + sched->if_ops->irq(isession); + ret = kref_put(&isession->kref, isession->release); + + if (ret == 0) + mutex_unlock(isession->mutex); + } + + queue_work(sched->sched_queue, &sched->sched_task); +} + +void mvx_sched_terminate(struct mvx_sched *sched, + struct mvx_sched_session *session) +{ + struct list_head *head; + struct list_head *tmp; + + mutex_lock(&sched->mutex); + + if (session->lsid != NULL) { + mvx_lsid_jobqueue_remove(session->lsid); + mvx_lsid_terminate(session->lsid); + unmap_session(sched, session); + } + + list_for_each_safe(head, tmp, &sched->pending) { + if (head == &session->pending) { + list_del(head); + break; + } + } + + mutex_unlock(&sched->mutex); +} + +void mvx_sched_reset_priority(struct mvx_sched *sched, + struct mvx_sched_session *session) +{ + mutex_lock(&sched->mutex); + session->priority_in_queue = session->priority; + mutex_unlock(&sched->mutex); +} + +static int mvx_sched_cancel_session_work(struct mvx_sched *sched) +{ + struct mvx_sched_session *session; + struct mvx_sched_session *tmp; + + mutex_lock(&sched->sessions_mutex); + + list_for_each_entry_safe(session, tmp, &sched->sessions, session) { + if (session && session->isession) + mvx_session_cancel_work(mvx_if_session_to_session(session->isession)); + } + + mutex_unlock(&sched->sessions_mutex); + + return 0; +} + +int mvx_sched_suspend(struct mvx_sched *sched) +{ + int ret; + int i; + bool wait_suspend = false; + + for (i = 0; i < sched->nlsid; i++) { + struct mvx_sched_session *ss = sched->lsid[i].session; + struct mvx_session *ls = NULL; + + if (ss != NULL) { + ls = mvx_if_session_to_session(ss->isession); + if (ls && ls->job_frames == 0 && ls->switched_in) { + MVX_LOG_PRINT(&mvx_log_dev, MVX_LOG_INFO, "lsid[%d] insert switch-out when suspend.", i); + mvx_session_switch_out(ls); + } + } + } + + reinit_completion(&sched->cmp); + ret = mutex_lock_interruptible(&sched->mutex); + if (ret != 0) { + 
MVX_LOG_PRINT(&mvx_log_dev, MVX_LOG_WARNING, "Get scheduler lock fail."); + return -EBUSY; + } + + if (sched->state == MVX_SCHED_STATE_RUNNING) + wait_suspend = true; + set_sched_state(sched, MVX_SCHED_STATE_SUSPEND); + mutex_unlock(&sched->mutex); + + if (wait_suspend) { + MVX_LOG_PRINT(&mvx_log_dev, MVX_LOG_INFO, "Waiting scheduler idle."); + ret = wait_for_completion_timeout(&sched->cmp, msecs_to_jiffies(wait_scheduler_timeout)); + if (!ret) + MVX_LOG_PRINT(&mvx_log_dev, MVX_LOG_ERROR, "Waiting scheduler idle timeout."); + } + + ret = mutex_lock_interruptible(&sched->mutex); + if (ret != 0) { + MVX_LOG_PRINT(&mvx_log_dev, MVX_LOG_WARNING, "Get scheduler lock fail."); + return -EBUSY; + } + + for (i = 0; i < MVX_LSID_MAX; i++) + if (sched->lsid[i].session) + unmap_session(sched, sched->lsid[i].session); + + mutex_unlock(&sched->mutex); + + return ret; +} + +int mvx_sched_resume(struct mvx_sched *sched) +{ + int ret = 0; + + if (IS_ERR_OR_NULL(sched->dev)) + return ret; + + ret = mutex_lock_interruptible(&sched->mutex); + if (ret != 0) { + MVX_LOG_PRINT(&mvx_log_dev, MVX_LOG_WARNING, "get scheduler lock fail."); + return ret; + } + set_sched_state(sched, MVX_SCHED_STATE_IDLE); + mutex_unlock(&sched->mutex); + + queue_work(sched->sched_queue, &sched->sched_task); + + return ret; +} + +static unsigned long calculate_session_load(struct mvx_session *session) +{ + unsigned long mbs; + struct mvx_session_port *port_in = &session->port[MVX_DIR_INPUT]; + struct mvx_session_port *port_out = &session->port[MVX_DIR_OUTPUT]; + uint32_t fps; + uint32_t buf_fps; + struct timespec64 now; + struct timespec64 delta; + + + if (session->fw_state == MVX_FW_STATE_STOPPED) + return 0; + + mbs = (ALIGN(session->orig_width, 16) / 16) * (ALIGN(session->orig_height, 16) / 16); + + ktime_get_real_ts64(&now); + delta = timespec64_sub(now, session->last_timespec); + + /* + * If actual fps exceeds 20% of the setting, assume current setting fps is low, + * update work load base on actual fps 
to meet performance requirement. + * The sampling interval is 0.5s. + * It is usually used in DFS simple_ondemand mode. + */ + if (timespec64_to_ns(&delta) / NSEC_PER_MSEC > 500) { + buf_fps = port_in->buffers_in_window * NSEC_PER_SEC / timespec64_to_ns(&delta); + fps = (buf_fps > ((session->fps_n / session->fps_d) * 120 / 100)) ? + buf_fps : (session->fps_n / session->fps_d); + + port_in->buffers_in_window = 0; + port_out->buffers_in_window = 0; + session->last_timespec = now; + session->last_fps = fps; + } else { + fps = max(session->last_fps, session->fps_n / session->fps_d); + } + + // The performance of encode is half that of decode, we use decode as the benchmark. + if (session->is_encoder) { + fps *= 2; + if (port_out->format == MVX_FORMAT_VP8) + fps = fps * 4 / 3; // VP8 encode is 1.33x slower + } else { + if (MVX_IS_LEGACY_FORMAT(port_in->format)) + fps = fps * 8 / 3; // Legacy formats are 2.67x slower + } + + return mbs * fps; +} + +int mvx_sched_calculate_load(struct mvx_sched *sched, unsigned long *mbs_per_sec) +{ + struct mvx_sched_session *session; + struct mvx_sched_session *tmp; + + if (IS_ERR_OR_NULL(&sched->sessions)) + return -EINVAL; + + if (!mutex_trylock(&sched->sessions_mutex)) + return -EBUSY; + + *mbs_per_sec = 0; + list_for_each_entry_safe(session, tmp, &sched->sessions, session) { + if (session && session->isession) + *mbs_per_sec += calculate_session_load(mvx_if_session_to_session(session->isession)); + } + + mutex_unlock(&sched->sessions_mutex); + + return 0; +} + +static void update_session_job_frames(struct mvx_sched *sched, uint32_t job_frames) +{ + struct mvx_sched_session *session; + struct mvx_sched_session *tmp; + + list_for_each_entry_safe(session, tmp, &sched->sessions, session) { + if (session && session->isession) { + struct mvx_session * s = mvx_if_session_to_session(session->isession); + if (s->job_frames == 0 && job_frames == 1) + s->pending_switch_out = true; + s->job_frames = job_frames; + } + } +} + +int 
mvx_sched_add_session(struct mvx_sched *sched, struct list_head *session) +{ + int ret = 0; + + ret = mutex_lock_interruptible(&sched->sessions_mutex); + if (ret != 0) { + MVX_LOG_PRINT(&mvx_log_dev, MVX_LOG_WARNING, "get scheduler lock fail."); + return ret; + } + + list_add_tail(session, &sched->sessions); + sched->session_count++; + if (sched->session_count <= 2 ) { + uint32_t job_frames = sched->session_count == 1 ? 0 : 1; + update_session_job_frames(sched, job_frames); + } + + mutex_unlock(&sched->sessions_mutex); + + return 0; +} + +int mvx_sched_remove_session(struct mvx_sched *sched, struct list_head *session) +{ + mutex_lock(&sched->sessions_mutex); + + if (session->prev != NULL && session->next != NULL && sched->session_count > 0) { + list_del(session); + sched->session_count--; + if (sched->session_count == 1) + update_session_job_frames(sched, 0); + } + + mutex_unlock(&sched->sessions_mutex); + + return 0; +} + +bool mvx_sched_sessions_empty(struct mvx_sched *sched) +{ + return list_empty_careful(&sched->sessions); +} + +int mvx_sched_cancel_work(struct mvx_sched *sched) +{ + int ret; + + ret = mvx_sched_cancel_session_work(sched); + if (ret != 0) + return ret; + + cancel_work_sync(&sched->sched_task); + + return ret; +} + +void mvx_sched_get_realtime_fps(struct list_head *sessions) +{ + struct mvx_sched_session *session; + struct mvx_sched_session *tmp; + struct mvx_sched *sched = + container_of(sessions, struct mvx_sched, sessions); + + mutex_lock(&sched->sessions_mutex); + + list_for_each_entry_safe(session, tmp, sessions, session) { + if (session && session->isession) { + struct mvx_session *s = mvx_if_session_to_session(session->isession); + mvx_session_update_realtime_fps(s); + } + } + + mutex_unlock(&sched->sessions_mutex); +} diff --git a/drivers/media/platform/cix/cix_vpu/dev/mvx_scheduler.h b/drivers/media/platform/cix/cix_vpu/dev/mvx_scheduler.h new file mode 100755 index 000000000000..c1d48fc59576 --- /dev/null +++ 
b/drivers/media/platform/cix/cix_vpu/dev/mvx_scheduler.h @@ -0,0 +1,290 @@ +/* + * The confidential and proprietary information contained in this file may + * only be used by a person authorised under and to the extent permitted + * by a subsisting licensing agreement from Arm Technology (China) Co., Ltd. + * + * (C) COPYRIGHT 2021-2021 Arm Technology (China) Co., Ltd. + * ALL RIGHTS RESERVED + * + * This entire notice must be reproduced on all copies of this file + * and copies of this file may only be made by a person if such person is + * permitted to do so under the terms of a subsisting license agreement + * from Arm Technology (China) Co., Ltd. + * + * SPDX-License-Identifier: GPL-2.0-only + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ * + */ + +#ifndef _MVX_SCHEDULER_H_ +#define _MVX_SCHEDULER_H_ + +/**************************************************************************** + * Includes + ****************************************************************************/ + +#include +#include +#include +#include "mvx_lsid.h" + +/**************************************************************************** + * Types + ****************************************************************************/ + +struct mvx_if_ops; +struct mvx_hwreg; + +enum mvx_sched_state { + MVX_SCHED_STATE_IDLE, + MVX_SCHED_STATE_RUNNING, + MVX_SCHED_STATE_SUSPEND +}; + +/** + * struct mvx_sched - Scheduler class. + * @dev: Pointer to device. + * @if_ops: Pointer to if module operations. + * @hwreg: Pointer to hwreg. + * @mutex: Mutex protecting the scheduler. + * @pending: List if sessions pending scheduling. + * @nlsid: Number of LSID. + * @lsid: Array of LSID instances. + */ +struct mvx_sched { + struct device *dev; + struct mvx_if_ops *if_ops; + struct mvx_hwreg *hwreg; + struct mutex mutex; + struct list_head pending; + struct list_head sessions; + struct mutex sessions_mutex; + unsigned int nlsid; + struct mvx_lsid lsid[MVX_LSID_MAX]; + struct work_struct sched_task; + struct workqueue_struct *sched_queue; + enum mvx_sched_state state; + struct completion cmp; + unsigned int session_count; +}; + +/** + * struct mvx_sched_session - Client session class. + * @isession: Pointer to if session. + * @head: List head used to insert session into scheduler pending list. + * @lsid: Pointer to LSID the session is mapped to. + * @pcb: LSID pcb. + * + * This struct is used to keep track of sessions specific information. 
+ */ +struct mvx_sched_session { + struct mvx_if_session *isession; + struct list_head pending; + struct list_head notify; + struct list_head session; + struct mvx_lsid *lsid; + struct mvx_lsid_pcb pcb; + bool in_pending; + uint32_t priority; + uint32_t priority_pending; + uint32_t priority_in_queue; +}; + +/**************************************************************************** + * Exported functions + ****************************************************************************/ + +/** + * mvx_sched_construct() - Construct the scheduler object. + * @sched: Pointer to scheduler object. + * @dev: Pointer to device. + * @if_ops: Pointer to if ops. + * @hwreg: Pointer to hwreg. + * @parent: Pointer to parent debugfs directory entry. + * + * Return: 0 on success, else errorr code. + */ +int mvx_sched_construct(struct mvx_sched *sched, + struct device *dev, + struct mvx_if_ops *if_ops, + struct mvx_hwreg *hwreg, + struct dentry *parent); + +/** + * mvx_sched_destruct() - Destruct the scheduler object. + * @sched: Pointer to scheduler object. + */ +void mvx_sched_destruct(struct mvx_sched *sched); + +/** + * mvx_sched_session_construct() - Construct the scheduler session object. + * @if_ops: If module operations. + * @session: Pointer to session object. + * @isession: Pointer to if session. + * + * Return: 0 on success, else error code. + */ +int mvx_sched_session_construct(struct mvx_sched_session *session, + struct mvx_if_session *isession); + +/** + * mvx_sched_session_destruct() - Destruct the scheduler session object. + * @session: Pointer to session object. + * + * The client must make sure the session is terminated before the destructor + * is called. + */ +void mvx_sched_session_destruct(struct mvx_sched_session *session); + +/** + * mvx_sched_switch_in() - Switch in a session. + * @sched: Pointer to scheduler object. + * @session: Pointer to session object. + * + * Map a session to a LSID and schedule session for execution. 
If no LSID + * is available the session is placed in the pending queue. + * + * Return: 0 on success, else error code. + */ +int mvx_sched_switch_in(struct mvx_sched *sched, + struct mvx_sched_session *session); + +/** + * mvx_sched_switch_out_rsp() - Handle Switch out response for a session. + * @sched: Pointer to scheduler object. + * @session: Pointer to session object. + * + * Switch scheduler state, and acknowledge all LSID idle when suspend. + * + * Return: 0 on success, else error code. + */ +int mvx_sched_switch_out_rsp(struct mvx_sched *sched, + struct mvx_sched_session *session); + +/** + * mvx_sched_send_irq() - Send IRQ to session. + * @sched: Pointer to scheduler object. + * @session: Pointer to session object. + * + * Return: 0 on success, else error code. + */ +int mvx_sched_send_irq(struct mvx_sched *sched, + struct mvx_sched_session *session); + +/** + * mvx_sched_send_irq() - Soft trigger IRQVE and IRQHOST. + * @sched: Pointer to scheduler object. + * @session: Pointer to session object. + * + * Return: 0 on success, else error code. + */ +int mvx_sched_trigger_irq(struct mvx_sched *sched, + struct mvx_sched_session *session); + +/** + * mvx_sched_flush_mmu() - Flush MMU tables. + * @sched: Pointer to scheduler object. + * @session: Pointer to session object. + * + * Return: 0 on success, else error code. + */ +int mvx_sched_flush_mmu(struct mvx_sched *sched, + struct mvx_sched_session *session); + +/** + * mvx_sched_handle_irq() - Handle interrupt for a LSID. + * @sched: Pointer to scheduler object. + * @lsid: LSID number. + */ +void mvx_sched_handle_irq(struct mvx_sched *sched, + unsigned int lsid); + +/** + * mvx_sched_terminate() - Terminate a session. + * @sched: Pointer to scheduler object. + * @session: Pointer to session object. + */ +void mvx_sched_terminate(struct mvx_sched *sched, + struct mvx_sched_session *session); + +/** + * mvx_sched_reset_priority() - Reset priority of a session. + * @sched: Pointer to scheduler object. 
+ * @session: Pointer to session object. + */ +void mvx_sched_reset_priority(struct mvx_sched *sched, + struct mvx_sched_session *session); + +/** + * mvx_sched_print_debug() - Print debug information. + * @sched: Pointer to scheduler object. + * @session: Pointer to session object. + */ +void mvx_sched_print_debug(struct mvx_sched *sched, + struct mvx_sched_session *session); + +/** + * mvx_sched_suspend() - Handle device pm suspend. + * @sched: Pointer to scheduler object. + */ +int mvx_sched_suspend(struct mvx_sched *sched); + +/** + * mvx_sched_resume() - Handle device pm resume. + * @sched: Pointer to scheduler object. + */ +int mvx_sched_resume(struct mvx_sched *sched); + +/** + * mvx_sched_calculate_load() - calculate current work loading. + * @sched: Pointer to scheduler object. + * @mbs_per_sec: Current work loading. + */ +int mvx_sched_calculate_load(struct mvx_sched *sched, unsigned long *mbs_per_sec); + +/** + * mvx_sched_add_session() - add session to list. + * @sched: Pointer to scheduler object. + * @session: The session to add. + */ +int mvx_sched_add_session(struct mvx_sched *sched, struct list_head *session); + +/** + * mvx_sched_remove_session() - remove session from list. + * @sched: Pointer to scheduler object. + * @session: The session to remove. + */ +int mvx_sched_remove_session(struct mvx_sched *sched, struct list_head *session); + +/** + * mvx_sched_sessions_empty() - whether session list is empty. + * @sched: Pointer to scheduler object. + */ +bool mvx_sched_sessions_empty(struct mvx_sched *sched); + +/** + * mvx_sched_cancel_work() - cancel mvx_sched works. + * @sched: Pointer to scheduler object. + */ +int mvx_sched_cancel_work(struct mvx_sched *sched); + +/** + * mvx_sched_get_realtime_fps() - get average fps of each session. + * @sessions: Pointer to session list. 
+ */ +void mvx_sched_get_realtime_fps(struct list_head *sessions); + +#endif /* _MVX_SCHEDULER_H_ */ diff --git a/drivers/media/platform/cix/cix_vpu/external/fw_v2/mve_protocol_def.h b/drivers/media/platform/cix/cix_vpu/external/fw_v2/mve_protocol_def.h new file mode 100755 index 000000000000..f5c7eddebcf9 --- /dev/null +++ b/drivers/media/platform/cix/cix_vpu/external/fw_v2/mve_protocol_def.h @@ -0,0 +1,2049 @@ +/* + * The confidential and proprietary information contained in this file may + * only be used by a person authorised under and to the extent permitted + * by a subsisting licensing agreement from Arm Technology (China) Co., Ltd. + * + * (C) COPYRIGHT 2021-2021 Arm Technology (China) Co., Ltd. + * ALL RIGHTS RESERVED + * + * This entire notice must be reproduced on all copies of this file + * and copies of this file may only be made by a person if such person is + * permitted to do so under the terms of a subsisting license agreement + * from Arm Technology (China) Co., Ltd. + * + * SPDX-License-Identifier: GPL-2.0-only + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ * + */ + +/* + * Copyright: + * ---------------------------------------------------------------------------- + * This confidential and proprietary software may be used only as authorized + * by a licensing agreement from Arm Technology (China) Co., Ltd. + * (C) COPYRIGHT 2021-2021 Arm Technology (China) Co., Ltd. + * The entire notice above must be reproduced on all authorized copies and + * copies may only be made to the extent permitted by a licensing agreement + * from Arm Technology (China) Co., Ltd. + * ---------------------------------------------------------------------------- + */ +#ifndef __FW_INCLUDE__MVE_PROTOCOL_DEF_H__ +#define __FW_INCLUDE__MVE_PROTOCOL_DEF_H__ + +#ifdef __cplusplus +extern "C" { +#endif + +#ifdef __KERNEL__ +#include +#else +#include +#endif + +/***************************************************************************** + * + * Communication protocol between the host/driver and the MVE firmware, + * the 'host interface'. + * + * MVE == LINLON Video Engine + * + * Protocol version 2.5 + * + * Note: Message structs may be expanded in the future; the host should + * use the 'size' of the message to determine how many bytes to + * read from the message queue, rather than a sizeof(struct). + * + ****************************************************************************/ + + + + + + + + + + + + + + + + + +/***************************************************************************** + * + * Virtual memory regions + * + * ..._ADDR_BEGIN gives the starting virtual address of the region, + * and ..._ADDR_END the (non-inclusive) ending address, such that + * the size of the region is obtained with the subtraction + * (..._ADDR_END - ..._ADDR_BEGIN). 
+ * + ****************************************************************************/ + +/* Memory region for first firmware instance */ +#define MVE_MEM_REGION_FW_INSTANCE0_ADDR_BEGIN (0x00000000u) +#define MVE_MEM_REGION_FW_INSTANCE0_ADDR_END (0x000FFFFFu + 1) + +/* + * Areas for communication between host and MVE are placed in the interval + * 0x10079000 - 0x1007FFFF, see special defines further down. + */ + +/* PROTECTED virtual memory region */ +#define MVE_MEM_REGION_PROTECTED_ADDR_BEGIN (0x20000000u) +#define MVE_MEM_REGION_PROTECTED_ADDR_END (0x4FFFFFFFu + 1) + +/* FRAMEBUF virtual memory region */ +#define MVE_MEM_REGION_FRAMEBUF_ADDR_BEGIN (0x50000000u) +#define MVE_MEM_REGION_FRAMEBUF_ADDR_END (0x7FFFFFFFu + 1) + +/* Memory regions for other firmware instances */ +#define MVE_MEM_REGION_FW_INSTANCE1_ADDR_BEGIN (0x80000000u) +#define MVE_MEM_REGION_FW_INSTANCE1_ADDR_END \ + (MVE_MEM_REGION_FW_INSTANCE1_ADDR_BEGIN + MVE_MEM_REGION_FW_INSTANCE0_ADDR_END) + +#define MVE_MEM_REGION_FW_INSTANCE2_ADDR_BEGIN (0x90000000u) +#define MVE_MEM_REGION_FW_INSTANCE2_ADDR_END \ + (MVE_MEM_REGION_FW_INSTANCE2_ADDR_BEGIN + MVE_MEM_REGION_FW_INSTANCE0_ADDR_END) + +#define MVE_MEM_REGION_FW_INSTANCE3_ADDR_BEGIN (0xA0000000u) +#define MVE_MEM_REGION_FW_INSTANCE3_ADDR_END \ + (MVE_MEM_REGION_FW_INSTANCE3_ADDR_BEGIN + MVE_MEM_REGION_FW_INSTANCE0_ADDR_END) + +#define MVE_MEM_REGION_FW_INSTANCE4_ADDR_BEGIN (0xB0000000u) +#define MVE_MEM_REGION_FW_INSTANCE4_ADDR_END \ + (MVE_MEM_REGION_FW_INSTANCE4_ADDR_BEGIN + MVE_MEM_REGION_FW_INSTANCE0_ADDR_END) + +#define MVE_MEM_REGION_FW_INSTANCE5_ADDR_BEGIN (0xC0000000u) +#define MVE_MEM_REGION_FW_INSTANCE5_ADDR_END \ + (MVE_MEM_REGION_FW_INSTANCE5_ADDR_BEGIN + MVE_MEM_REGION_FW_INSTANCE0_ADDR_END) + +#define MVE_MEM_REGION_FW_INSTANCE6_ADDR_BEGIN (0xD0000000u) +#define MVE_MEM_REGION_FW_INSTANCE6_ADDR_END \ + (MVE_MEM_REGION_FW_INSTANCE6_ADDR_BEGIN + MVE_MEM_REGION_FW_INSTANCE0_ADDR_END) + +#define MVE_MEM_REGION_FW_INSTANCE7_ADDR_BEGIN 
(0xE0000000u) +#define MVE_MEM_REGION_FW_INSTANCE7_ADDR_END \ + (MVE_MEM_REGION_FW_INSTANCE7_ADDR_BEGIN + MVE_MEM_REGION_FW_INSTANCE0_ADDR_END) + +/* 0xF0000000 - 0xFFFFFFFF is used internally in MVE */ + + + + + + + + + + + + + + + + +/***************************************************************************** + * + * Communication queues between HOST/DRIVER and MVE + * + * Address for queue for messages in to MVE, + * one struct mve_comm_area_host located here + * + ****************************************************************************/ + +#define MVE_COMM_MSG_INQ_ADDR (0x10079000u) + +/* Address for queue for messages out from MVE, + * one struct mve_comm_area_mve located here + */ +#define MVE_COMM_MSG_OUTQ_ADDR (0x1007A000u) + +/* Address for queue for input buffers in to MVE, + * one struct mve_comm_area_host located here + */ +#define MVE_COMM_BUF_INQ_ADDR (0x1007B000u) + +/* Address for queue for input buffers returned from MVE, + * one struct mve_comm_area_mve located here + */ +#define MVE_COMM_BUF_INRQ_ADDR (0x1007C000u) + +/* Address for queue for output buffers in to MVE, + * one struct mve_comm_area_host located here + */ +#define MVE_COMM_BUF_OUTQ_ADDR (0x1007D000u) + +/* Address for queue for output buffers returned from MVE, + * one struct mve_comm_area_mve located here + */ +#define MVE_COMM_BUF_OUTRQ_ADDR (0x1007E000u) + +/* One struct mve_rpc_communication_area located here */ +#define MVE_COMM_RPC_ADDR (0x1007F000u) + +/* Address for ram_print buffer in FW */ +#define MVE_FW_PRINT_RAM_ADDR (0x10100000u) +#define MVE_FW_PRINT_RAM_SIZE (0x80000u) + +/* One page of memory (4 kB) is used for each queue, + * so maximum 1024 words, but need room for some counters as well, + * see structs mve_comm_area_mve and mve_comm_area_host below. + */ +#define MVE_COMM_QUEUE_SIZE_IN_WORDS 1020 + +/* This is the part of the message area that is written by host. 
*/ +struct mve_comm_area_host +{ + volatile uint16_t out_rpos; + volatile uint16_t in_wpos; + volatile uint32_t reserved[ 3 ]; + /* + * Queue of messages to MVE, each block of data prefixed with + * a mve_msg_header + */ + volatile uint32_t in_data[ MVE_COMM_QUEUE_SIZE_IN_WORDS ]; +}; + +/* This is the part of the message area that is written by MVE. */ +struct mve_comm_area_mve +{ + volatile uint16_t out_wpos; + volatile uint16_t in_rpos; + volatile uint32_t reserved[ 3 ]; + /* + * Queue of messages to host, each block of data prefixed with + * a mve_msg_header + */ + volatile uint32_t out_data[ MVE_COMM_QUEUE_SIZE_IN_WORDS ]; +}; + +#define MVE_RPC_AREA_SIZE_IN_WORDS 256 +#define MVE_RPC_DATA_SIZE_IN_WORDS (MVE_RPC_AREA_SIZE_IN_WORDS - 3) +union mve_rpc_params +{ + volatile uint32_t data[ MVE_RPC_DATA_SIZE_IN_WORDS ]; + struct + { + char string[ MVE_RPC_DATA_SIZE_IN_WORDS * 4 ]; + } debug_print; + struct + { + uint32_t size; + uint32_t max_size; + uint8_t region; /* Memory region selection */ + #define MVE_MEM_REGION_PROTECTED (0) + #define MVE_MEM_REGION_OUTBUF (1) + #define MVE_MEM_REGION_FRAMEBUF (MVE_MEM_REGION_OUTBUF) + + /* The newly allocated memory must be placed + * on (at least) a 2^(log2_alignment) boundary + */ + uint8_t log2_alignment; + } mem_alloc; + struct + { + uint32_t ve_pointer; + uint32_t new_size; + } mem_resize; + struct + { + uint32_t ve_pointer; + } mem_free; +}; + +struct mve_rpc_communication_area +{ + volatile uint32_t state; + #define MVE_RPC_STATE_FREE (0) + #define MVE_RPC_STATE_PARAM (1) + #define MVE_RPC_STATE_RETURN (2) + volatile uint32_t call_id; + #define MVE_RPC_FUNCTION_DEBUG_PRINTF (1) + #define MVE_RPC_FUNCTION_MEM_ALLOC (2) + #define MVE_RPC_FUNCTION_MEM_RESIZE (3) + #define MVE_RPC_FUNCTION_MEM_FREE (4) + volatile uint32_t size; + union mve_rpc_params params; +}; + +struct mve_fw_ram_print_head_aera +{ + volatile uint32_t rd_cnt; + volatile uint32_t reserved0[15]; + + volatile uint32_t flag; + volatile uint32_t index; + 
volatile uint32_t wr_cnt; + volatile uint32_t reserved1[13]; +}; + + + + + + + + + + + + + + + +/********************************************************************* + * + * Message codes + * + *********************************************************************/ + +/* Messages consist of one struct mve_msg_header, possibly followed + * by extra data. + */ +struct mve_msg_header +{ + uint16_t code; + /* REQUESTs are messages from the + * host/driver to the firmware: Code: Extra data in message: */ + #define MVE_REQUEST_CODE_GO (1001) /* no extra data */ + #define MVE_REQUEST_CODE_STOP (1002) /* no extra data */ + #define MVE_REQUEST_CODE_INPUT_FLUSH (1003) /* no extra data */ + #define MVE_REQUEST_CODE_OUTPUT_FLUSH (1004) /* no extra data */ + #define MVE_REQUEST_CODE_SWITCH (1005) /* no extra data */ + #define MVE_REQUEST_CODE_PING (1006) /* no extra data */ + #define MVE_REQUEST_CODE_DUMP (1008) /* no extra data */ + #define MVE_REQUEST_CODE_JOB (1009) /* struct mve_request_job */ + #define MVE_REQUEST_CODE_SET_OPTION (1010) /* struct mve_request_set_option (variable size) */ + #define MVE_REQUEST_CODE_RELEASE_REF_FRAME (1011) /* struct mve_request_release_ref_frame */ + #define MVE_REQUEST_CODE_IDLE_ACK (1012) /* no extra data */ + #define MVE_REQUEST_CODE_DEBUG (1013) /* level: 0 for disable, refer to fw_log_level */ + /* RESPONSEs are messages from + * the firmware to the host: */ + #define MVE_RESPONSE_CODE_SWITCHED_IN (2001) /* struct mve_response_switched_in */ + #define MVE_RESPONSE_CODE_SWITCHED_OUT (2002) /* struct mve_response_switched_out */ + #define MVE_RESPONSE_CODE_SET_OPTION_CONFIRM (2003) /* no extra data */ + #define MVE_RESPONSE_CODE_JOB_DEQUEUED (2004) /* struct mve_response_job_dequeued */ + #define MVE_RESPONSE_CODE_INPUT (2005) /* no extra data, but buffer placed in buffer queue */ + #define MVE_RESPONSE_CODE_OUTPUT (2006) /* no extra data, but buffer placed in buffer queue */ + #define MVE_RESPONSE_CODE_INPUT_FLUSHED (2007) /* no extra 
data */ + #define MVE_RESPONSE_CODE_OUTPUT_FLUSHED (2008) /* no extra data */ + #define MVE_RESPONSE_CODE_PONG (2009) /* no extra data */ + #define MVE_RESPONSE_CODE_ERROR (2010) /* struct mve_response_error */ + #define MVE_RESPONSE_CODE_STATE_CHANGE (2011) /* struct mve_response_state_change */ + #define MVE_RESPONSE_CODE_DUMP (2012) /* no extra data */ + #define MVE_RESPONSE_CODE_IDLE (2013) /* no extra data */ + #define MVE_RESPONSE_CODE_FRAME_ALLOC_PARAM (2014) /* struct mve_response_frame_alloc_parameters */ + #define MVE_RESPONSE_CODE_SEQUENCE_PARAMETERS (2015) /* struct mve_response_sequence_parameters */ + #define MVE_RESPONSE_CODE_EVENT (2016) /* struct mve_response_event (variable size) */ + #define MVE_RESPONSE_CODE_SET_OPTION_FAIL (2017) /* struct mve_response_set_option_failed */ + #define MVE_RESPONSE_CODE_REF_FRAME_UNUSED (2018) /* struct mve_response_ref_frame_unused */ + #define MVE_RESPONSE_CODE_DEBUG (2019) /* no extra data */ + #define MVE_RESPONSE_CODE_TRACE (2050) /* msg_header(code, size), counter, sum */ + /* BUFFERs are sent from host to firmware, + * and then return at some time: */ + #define MVE_BUFFER_CODE_FRAME (3001) /* struct mve_buffer_frame */ + #define MVE_BUFFER_CODE_BITSTREAM (3002) /* struct mve_buffer_bitstream */ + #define MVE_BUFFER_CODE_PARAM (3003) /* struct mve_buffer_param */ + #define MVE_BUFFER_CODE_GENERAL (3004) /* struct mve_buffer_general */ + + uint16_t size; /* size in bytes of trailing data, 0 if none */ +}; + + + + + + + + + + + + + + + + +/********************************************************************* + * + * REQUESTs are messages from the host to the firmware + * + * Some of the MVE_REQUEST_CODE_ codes are followed by one of the + * structs below. 
+ * + *********************************************************************/ + +struct mve_request_job +{ + uint16_t cores; /* >= 1, number of cores to use, must match request to HW scheduler */ + uint16_t frames; /* number of frames to process, zero means infinite */ + uint32_t flags; /* can be zero */ + #define MVE_JOB_FLAG_DISABLE_BNDMGR (0x01) +}; + +struct mve_response_trace +{ + struct mve_msg_header msg_hdr; + uint32_t sum; + uint32_t count; +}; + +struct mve_request_set_option +{ + uint32_t index; + #define MVE_SET_OPT_INDEX_NALU_FORMAT (1) /* see arg, MVE_OPT_NALU_FORMAT_ */ + #define MVE_SET_OPT_INDEX_STREAM_ESCAPING (2) /* arg=1 to enable (default), arg=0 to disable */ + #define MVE_SET_OPT_INDEX_PROFILE_LEVEL (3) /* data.profile_level */ + #define MVE_SET_OPT_INDEX_HOST_PROTOCOL_PRINTS (4) /* arg=1 to enable, arg=0 to disable (default) */ + #define MVE_SET_OPT_INDEX_PROFILING (5) /* arg=1 to enable, arg=0 to disable (default) */ + #define MVE_SET_OPT_INDEX_DISABLE_FEATURES (6) /* see arg, MVE_OPT_DISABLE_FEATURE_ */ + #define MVE_SET_OPT_INDEX_IGNORE_STREAM_HEADERS (7) /* decode, arg=1 to enable, + * arg=0 to disable (default) */ + #define MVE_SET_OPT_INDEX_FRAME_REORDERING (8) /* decode, arg=1 to enable (default), + * arg=0 to disable */ + #define MVE_SET_OPT_INDEX_INTBUF_SIZE (9) /* decode, arg = suggested limit of intermediate + * buffer allocation */ + #define MVE_SET_OPT_INDEX_ENC_P_FRAMES (16) /* encode, arg = nPFrames */ + #define MVE_SET_OPT_INDEX_ENC_B_FRAMES (17) /* encode, arg = number of B frames */ + #define MVE_SET_OPT_INDEX_GOP_TYPE (18) /* encode, see arg */ + #define MVE_SET_OPT_INDEX_INTRA_MB_REFRESH (19) /* encode, arg */ + #define MVE_SET_OPT_INDEX_ENC_CONSTR_IPRED (20) /* encode, arg = 0 or 1 */ + #define MVE_SET_OPT_INDEX_ENC_ENTROPY_SYNC (21) /* encode, arg = 0 or 1 */ + #define MVE_SET_OPT_INDEX_ENC_TEMPORAL_MVP (22) /* encode, arg = 0 or 1 */ + #define MVE_SET_OPT_INDEX_TILES (23) /* encode, data.tiles */ + #define 
MVE_SET_OPT_INDEX_ENC_MIN_LUMA_CB_SIZE (24) /* HEVC encode, arg = 8 or 16, + * for sizes 8x8 or 16x16 */ + #define MVE_SET_OPT_INDEX_ENC_MB_TYPE_ENABLE (25) /* encode, see arg */ + #define MVE_SET_OPT_INDEX_ENC_MB_TYPE_DISABLE (26) /* encode, see arg */ + #define MVE_SET_OPT_INDEX_ENC_H264_CABAC (27) /* encode, arg = 0 or 1, enabled by default */ + #define MVE_SET_OPT_INDEX_ENC_SLICE_SPACING (28) /* encode, arg = suggested number of + * CTUs/macroblocks in a slice */ + #define MVE_SET_OPT_INDEX_ENC_VP9_PROB_UPDATE (30) /* VP9 encode, see arg */ + #define MVE_SET_OPT_INDEX_RESYNC_INTERVAL (31) /* JPEG encode, arg = nRestartInterval + * = nResynchMarkerSpacing */ + #define MVE_SET_OPT_INDEX_HUFFMAN_TABLE (32) /* JPEG encode, data.huffman_table */ + #define MVE_SET_OPT_INDEX_QUANT_TABLE (33) /* JPEG encode, data.quant_table */ + #define MVE_SET_OPT_INDEX_ENC_EXPOSE_REF_FRAMES (34) /* encode only, disabled by default */ + #define MVE_SET_OPT_INDEX_MBINFO_OUTPUT (35) /* encode, arg=1 to enable, + * arg=0 to disable (default) */ + #define MVE_SET_OPT_INDEX_MV_SEARCH_RANGE (36) /* encode, data,motion_vector_search_range */ + #define MVE_SET_OPT_INDEX_ENC_STREAM_BITDEPTH (38) /* encode, data.bitdepth, to set other bitdepth + * of encoded stream than of input frames */ + #define MVE_SET_OPT_INDEX_ENC_STREAM_CHROMA_FORMAT (39) /* encode, arg, to set other chroma format of + * encoded stream than of input frames */ + #define MVE_SET_OPT_INDEX_ENC_RGB_TO_YUV_MODE (40) /* encode, arg, select which way RGB is converted + * to YUV before encoding */ + #define MVE_SET_OPT_INDEX_ENC_BANDWIDTH_LIMIT (41) /* encode, arg, the maxium bandwidth limit defined + * by host */ + #define MVE_SET_OPT_INDEX_WATCHDOG_TIMEOUT (42) /* arg=timeout, arg=0 to disable */ + #define MVE_SET_OPT_INDEX_ENC_CABAC_INIT_IDC (43) /* encode, arg; 0,1,2 for H264; 0,1 for HEVC */ + #define MVE_SET_OPT_INDEX_ENC_ADPTIVE_QUANTISATION (44) /* encode (h264 and hevc) */ + #define MVE_SET_OPT_INDEX_QP_DELTA_I_P (45) 
+ #define MVE_SET_OPT_INDEX_QP_DELTA_I_B_REF (46) + #define MVE_SET_OPT_INDEX_QP_DELTA_I_B_NONREF (47) + #define MVE_SET_OPT_INDEX_CB_QP_OFFSET (48) + #define MVE_SET_OPT_INDEX_CR_QP_OFFSET (49) + #define MVE_SET_OPT_INDEX_LAMBDA_SCALE (50) /* encode, data.lambda_scale */ + #define MVE_SET_OPT_INDEX_ENC_MAX_NUM_CORES (51) /* maximum number of cores */ + #define MVE_SET_OPT_INDEX_ENC_EXTRA_REFS (52) /* configure number of extra ref buffers */ + #define MVE_SET_OPT_INDEX_QP_DELTA_RAW_I_P (53) + #define MVE_SET_OPT_INDEX_QP_DELTA_RAW_I_B_REF (54) + #define MVE_SET_OPT_INDEX_QP_DELTA_RAW_I_B_NONREF (55) + #define MVE_SET_OPT_INDEX_ENC_FIXED_QP (56) + /* ARBITRARY_DOWNSCALE */ + #define MVE_SET_OPT_INDEX_DEC_DOWNSCALE (57) /* decode, set downscaled width and height */ + #define MVE_SET_OPT_INDEX_FLUSHLESS_REFBANK (58) /* configure AFBC ref bank for individual buffer + * allocation. Forced internally for flushless + * resolution change codecs */ + #define MVE_SET_OPT_INDEX_ENC_SAO_LUMA_EN (60) + #define MVE_SET_OPT_INDEX_ENC_SAO_CHROMA_EN (61) + + #define MVE_SET_OPT_INDEX_ENC_CROP_RARAM_LEFT (62) + #define MVE_SET_OPT_INDEX_ENC_CROP_RARAM_RIGHT (63) + #define MVE_SET_OPT_INDEX_ENC_CROP_RARAM_TOP (64) + #define MVE_SET_OPT_INDEX_ENC_CROP_RARAM_BOTTOM (65) + /* LONG_TERM_REFERENCE */ + #define MVE_SET_OPT_INDEX_ENC_LTR_MODE (66) + #define MVE_SET_OPT_INDEX_ENC_LTR_PERIOD (67) + #define MVE_SET_OPT_INDEX_DEC_DOWNSCALE_POS_MODE (69) + #define MVE_SET_OPT_INDEX_MINI_FRAME_MAX (70) /* max cnt of mini frames*/ + /* Encode Data Statistics */ + #define MVE_SET_OPT_INDEX_ENC_STATS_MODE (71) + #define MVE_SET_OPT_INDEX_ENC_MULTI_SPS_PPS (73) + #define MVE_SET_OPT_INDEX_ENC_INIT_QPI (74) + #define MVE_SET_OPT_INDEX_ENC_INIT_QPP (75) + #define MVE_SET_OPT_INDEX_ENC_INIT_QPB (76) + + #define MVE_SET_OPT_INDEX_ENC_IPENALTY_ANGULAR (77) + #define MVE_SET_OPT_INDEX_ENC_IPENALTY_PLANAR (78) + #define MVE_SET_OPT_INDEX_ENC_IPENALTY_DC (79) + + #define MVE_SET_OPT_INDEX_ENC_RC_CLIP_TOP 
(80) + #define MVE_SET_OPT_INDEX_ENC_RC_CLIP_BOTTOM (81) + #define MVE_SET_OPT_INDEX_ENC_QPMAP_CLIP_TOP (82) + #define MVE_SET_OPT_INDEX_ENC_QPMAP_CLIP_BOTTOM (83) + + #define MVE_SET_OPT_INDEX_ENC_REF_RING_BUFFER (84) + #define MVE_SET_OPT_INDEX_ENC_JPEG_RC (85) //MVE_ENC_RC_JPEG + #define MVE_SET_OPT_INDEX_ENC_RC_I_BIT_RATIO (86) /* RC_I_BIT_RATIO */ + #define MVE_SET_OPT_INDEX_ENC_RC_I_BIT_MODE (87) /* RC_I_BIT_RATIO */ + /* SVCT3 level-1 peroid */ + #define MVE_SET_OPT_INDEX_ENC_SVCT3_LEVEL1_PEROID (88) + #define MVE_SET_OPT_iNDEX_ENC_VISUAL_ENABLE (89)//enable_visual FW CODE IS iNDEX + /* GDR */ + #define MVE_SET_OPT_INDEX_ENC_GDR_NUMBER (90) + #define MVE_SET_OPT_INDEX_ENC_GDR_PERIOD (91) + #define MVE_SET_OPT_INDEX_SCD_ENABLE (93) + #define MVE_SET_OPT_INDEX_SCD_PERCENT (94) + #define MVE_SET_OPT_INDEX_SCD_THRESHOLD (95) + /* for aq, add new para */ + #define MVE_SET_OPT_INDEX_ENC_AQ_SSIM_EN (96) + #define MVE_SET_OPT_INDEX_ENC_AQ_NEG_RATIO (97) + #define MVE_SET_OPT_INDEX_ENC_AQ_POS_RATIO (98) + #define MVE_SET_OPT_INDEX_ENC_AQ_QPDELTA_LMT (99) + #define MVE_SET_OPT_INDEX_ENC_AQ_INIT_FRM_AVG_SVAR (100) + #define MVE_SET_OPT_INDEX_SCD_ADAPTIVE_I (101) + #define MVE_SET_OPT_INDEX_DEC_YUV2RGB_PARAMS (103) + #define MVE_SET_OPT_INDEX_ENC_FORCED_UV_VAL (104) + #define MVE_SET_OPT_INDEX_DEC_DSL_INTERP_MODE (105) + + #define MVE_SET_OPT_INDEX_ENC_SRC_CROPPING (106) + #define MVE_SET_OPT_INDEX_DEC_DST_CROPPING (107) //ENABLE_DST_CROP + #define MVE_SET_OPT_INDEX_DEC_AV1_FSF (1004) //FAST_SHOW_FRAME + + #define MVE_SET_OPT_INDEX_ENC_INTER_IPENALTY_ANGULAR (2011) + #define MVE_SET_OPT_INDEX_ENC_INTER_IPENALTY_PLANAR (2012) + #define MVE_SET_OPT_INDEX_ENC_INTER_IPENALTY_DC (2013) + + union + { + uint32_t arg; /* Most options only need a uint32_t as argument */ + /* For option MVE_SET_OPT_INDEX_NALU_FORMAT, arg should + * be one of these: */ + #define MVE_OPT_NALU_FORMAT_START_CODES (1) + #define MVE_OPT_NALU_FORMAT_ONE_NALU_PER_BUFFER (2) + #define 
MVE_OPT_NALU_FORMAT_ONE_BYTE_LENGTH_FIELD (4) + #define MVE_OPT_NALU_FORMAT_TWO_BYTE_LENGTH_FIELD (8) + #define MVE_OPT_NALU_FORMAT_FOUR_BYTE_LENGTH_FIELD (16) + /* For option MVE_SET_OPT_INDEX_GOP_TYPE, arg should + * be one of these: */ + #define MVE_OPT_GOP_TYPE_BIDIRECTIONAL (1) + #define MVE_OPT_GOP_TYPE_LOW_DELAY (2) + #define MVE_OPT_GOP_TYPE_PYRAMID (3) + #define MVE_OPT_GOP_TYPE_SVCT3 (4) + #define MVE_OPT_GOP_TYPE_GDR (5) + /* For option MVE_SET_OPT_INDEX_ENC_VP9_PROB_UPDATE, + * arg should be one of these: */ + #define MVE_OPT_VP9_PROB_UPDATE_DISABLED (0) + #define MVE_OPT_VP9_PROB_UPDATE_IMPLICIT (1) + #define MVE_OPT_VP9_PROB_UPDATE_EXPLICIT (2) + /* For option MVE_SET_OPT_INDEX_DISABLE_FEATURES, arg + * should be a bitmask with features to disable: */ + #define MVE_OPT_DISABLE_FEATURE_AFBC_COMP (0x00000001) /* VDMA AFBC Compression */ + #define MVE_OPT_DISABLE_FEATURE_REF_CACHE (0x00000002) /* REF caching */ + #define MVE_OPT_DISABLE_FEATURE_DEBLOCK (0x00000004) /* Deblocking */ + #define MVE_OPT_DISABLE_FEATURE_SAO (0x00000008) /* SAO */ + #define MVE_OPT_DISABLE_FEATURE_PIC_OUTPUT (0x00000020) /* Picture Output Removal */ + #define MVE_OPT_DISABLE_FEATURE_PIPE (0x00000040) /* Pipe (i.e. parser-only) */ + #define MVE_OPT_DISABLE_FEATURE_SLEEP (0x00000080) /* Clock gating + * (SOC_SYSCTRL.SLEEP bit) */ + #define MVE_OPT_DISABLE_FEATURE_AFBC_LEGACY_REF (0x00000100) /* Enables tiled AFBC format in + * reference buffers. 
Ignored + * for decode AFBC output */ + #define MVE_OPT_DISABLE_FEATURE_REF_PICS (0x00000400) /* Forces use of static 16x16 + * reference pics */ + #define MVE_OPT_DISABLE_FEATURE_REFSZ_LIMIT (0x00001000) /* Disable REFSZ bw limit */ + #define MVE_OPT_DISABLE_FEATURE_SUPPORT_NONIBC_TILE (0x00010000) /* AV1 decode, forces use tiles for AFBC */ + /* For options MVE_SET_OPT_INDEX_ENC_MB_TYPE_ENABLE + * and MVE_SET_OPT_INDEX_ENC_MB_TYPE_DISABLE, arg + * should be a bitmask of MVE_MBTYPEs: */ + #define MVE_MBTYPE_4x4 (0x00000001) /* 4x4 inter */ + #define MVE_MBTYPE_4x8 (0x00000002) /* 4x8 inter */ + #define MVE_MBTYPE_8x4 (0x00000004) /* 8x4 inter */ + #define MVE_MBTYPE_8x8 (0x00000008) /* 8x8 inter */ + #define MVE_MBTYPE_8x16 (0x00000010) /* 8x16 inter */ + #define MVE_MBTYPE_16x8 (0x00000020) /* 16x8 inter */ + #define MVE_MBTYPE_16x16 (0x00000040) /* 16x16 inter */ + #define MVE_MBTYPE_PSKIP (0x00000080) /* P Skip inter */ + #define MVE_MBTYPE_I4x4 (0x00000100) /* 4x4 intra */ + #define MVE_MBTYPE_I8x8 (0x00000200) /* 8x8 intra */ + #define MVE_MBTYPE_I16x16 (0x00000400) /* 16x16 intra */ + #define MVE_MBTYPE_I32x32 (0x00000800) /* 32x32 intra */ + #define MVE_MBTYPE_16x32 (0x00001000) /* 16x32 inter */ + #define MVE_MBTYPE_32x16 (0x00002000) /* 32x16 inter */ + #define MVE_MBTYPE_32x32 (0x00004000) /* 32x32 inter */ + /* For option MVE_SET_OPT_INDEX_ENC_RGB_TO_YUV_MODE, + * arg should be one of these: */ + #define MVE_OPT_RGB_TO_YUV_BT601_STUDIO (0) + #define MVE_OPT_RGB_TO_YUV_BT601_FULL (1) + #define MVE_OPT_RGB_TO_YUV_BT709_STUDIO (2) + #define MVE_OPT_RGB_TO_YUV_BT709_FULL (3) + #define MVE_OPT_RGB_TO_YUV_BT2020_STUDIO (4) + #define MVE_OPT_RGB_TO_YUV_BT2020_FULL (5) + /* For option MVE_SET_OPT_INDEX_ENC_EXPOSE_REF_FRAMES, + * arg should be one of: */ + #define MVE_OPT_REF_OUTPUT_NONE (0) /* No REF output */ + #define MVE_OPT_REF_OUTPUT_USED_FOR_REF (1) /* Output reference frames */ + #define MVE_OPT_REF_OUTPUT_ALL (2) /* Output/reconstruct all frames */ + 
struct + { + uint16_t profile; + /* AVC/H.264 profiles */ + #define MVE_OPT_PROFILE_H264_BASELINE (1) + #define MVE_OPT_PROFILE_H264_MAIN (2) + #define MVE_OPT_PROFILE_H264_HIGH (3) + /* HEVC/H.265 profiles */ + #define MVE_OPT_PROFILE_H265_MAIN (1) + #define MVE_OPT_PROFILE_H265_MAIN_STILL (2) + #define MVE_OPT_PROFILE_H265_MAIN_INTRA (3) + #define MVE_OPT_PROFILE_H265_MAIN_10 (4) + /* VC-1 profiles */ + #define MVE_OPT_PROFILE_VC1_SIMPLE (1) + #define MVE_OPT_PROFILE_VC1_MAIN (2) + #define MVE_OPT_PROFILE_VC1_ADVANCED (3) + /* VP8 profiles */ + #define MVE_OPT_PROFILE_VP8_MAIN (1) + uint16_t level; + /* AVC/H.264 levels */ + #define MVE_OPT_LEVEL_H264_1 (1) + #define MVE_OPT_LEVEL_H264_1b (2) + #define MVE_OPT_LEVEL_H264_11 (3) + #define MVE_OPT_LEVEL_H264_12 (4) + #define MVE_OPT_LEVEL_H264_13 (5) + #define MVE_OPT_LEVEL_H264_2 (6) + #define MVE_OPT_LEVEL_H264_21 (7) + #define MVE_OPT_LEVEL_H264_22 (8) + #define MVE_OPT_LEVEL_H264_3 (9) + #define MVE_OPT_LEVEL_H264_31 (10) + #define MVE_OPT_LEVEL_H264_32 (11) + #define MVE_OPT_LEVEL_H264_4 (12) + #define MVE_OPT_LEVEL_H264_41 (13) + #define MVE_OPT_LEVEL_H264_42 (14) + #define MVE_OPT_LEVEL_H264_5 (15) + #define MVE_OPT_LEVEL_H264_51 (16) + #define MVE_OPT_LEVEL_H264_52 (17) + #define MVE_OPT_LEVEL_H264_6 (18) + #define MVE_OPT_LEVEL_H264_61 (19) + #define MVE_OPT_LEVEL_H264_62 (20) + #define MVE_OPT_LEVEL_H264_USER_SUPPLIED_BASE (32) + /* The value (MVE_OPT_LEVEL_H264_USER_SUPPLIED_BASE + level_idc) encodes a user + * supplied level_idc value in the range 0 to 255 inclusive. 
If the host supplies a level_idc + * value by this method then the encoder will encode this level_idc value in the bitstream + * without checking the validity of the level_idc value + */ + #define MVE_OPT_LEVEL_H264_USER_SUPPLIED_MAX (MVE_OPT_LEVEL_H264_USER_SUPPLIED_BASE + 255) + /* HEVC/H.265 levels */ + #define MVE_OPT_LEVEL_H265_MAIN_TIER_1 (1) + #define MVE_OPT_LEVEL_H265_HIGH_TIER_1 (2) + #define MVE_OPT_LEVEL_H265_MAIN_TIER_2 (3) + #define MVE_OPT_LEVEL_H265_HIGH_TIER_2 (4) + #define MVE_OPT_LEVEL_H265_MAIN_TIER_21 (5) + #define MVE_OPT_LEVEL_H265_HIGH_TIER_21 (6) + #define MVE_OPT_LEVEL_H265_MAIN_TIER_3 (7) + #define MVE_OPT_LEVEL_H265_HIGH_TIER_3 (8) + #define MVE_OPT_LEVEL_H265_MAIN_TIER_31 (9) + #define MVE_OPT_LEVEL_H265_HIGH_TIER_31 (10) + #define MVE_OPT_LEVEL_H265_MAIN_TIER_4 (11) + #define MVE_OPT_LEVEL_H265_HIGH_TIER_4 (12) + #define MVE_OPT_LEVEL_H265_MAIN_TIER_41 (13) + #define MVE_OPT_LEVEL_H265_HIGH_TIER_41 (14) + #define MVE_OPT_LEVEL_H265_MAIN_TIER_5 (15) + #define MVE_OPT_LEVEL_H265_HIGH_TIER_5 (16) + #define MVE_OPT_LEVEL_H265_MAIN_TIER_51 (17) + #define MVE_OPT_LEVEL_H265_HIGH_TIER_51 (18) + #define MVE_OPT_LEVEL_H265_MAIN_TIER_52 (19) + #define MVE_OPT_LEVEL_H265_HIGH_TIER_52 (20) + #define MVE_OPT_LEVEL_H265_MAIN_TIER_6 (21) + #define MVE_OPT_LEVEL_H265_HIGH_TIER_6 (22) + #define MVE_OPT_LEVEL_H265_MAIN_TIER_61 (23) + #define MVE_OPT_LEVEL_H265_HIGH_TIER_61 (24) + #define MVE_OPT_LEVEL_H265_MAIN_TIER_62 (25) + #define MVE_OPT_LEVEL_H265_HIGH_TIER_62 (26) + } profile_level; + struct + { + int32_t mv_search_range_x; + int32_t mv_search_range_y; + } motion_vector_search_range; + struct + { + uint32_t type; + #define MVE_OPT_HUFFMAN_TABLE_DC_LUMA (1) + #define MVE_OPT_HUFFMAN_TABLE_AC_LUMA (2) + #define MVE_OPT_HUFFMAN_TABLE_DC_CHROMA (3) + #define MVE_OPT_HUFFMAN_TABLE_AC_CHROMA (4) + uint8_t number_of_huffman_of_code_length[ 16 ]; + uint8_t table[ 162 ]; /* 12 are used for DC, 162 for AC */ + } huffman_table; + struct + { + uint32_t type; 
+ #define MVE_OPT_QUANT_TABLE_LUMA (1) + #define MVE_OPT_QUANT_TABLE_CHROMA (2) + uint8_t matrix[ 64 ]; + } quant_table; + struct + { + uint32_t qscale; + uint32_t qscale_luma; + uint32_t qscale_chroma; + uint32_t fps; + } jpeg_rate_control; //MVE_ENC_RC_JPEG + struct + { + /* For HEVC, tile_cols must be zero. For VP9, tile_rows + * and tile_cols must be powers of 2. */ + uint16_t tile_rows; + uint16_t tile_cols; + } tiles; + struct + { + uint16_t luma_bitdepth; + uint16_t chroma_bitdepth; + } bitdepth; + struct + { + /* Scale factors, and their square roots, for the lambda + * coefficients used by the encoder, in unsigned Q8 fixed-point + * format. Default (no scaling) is 1.0 (so 0x0100 in hex). + */ + uint16_t lambda_scale_i_q8; + uint16_t lambda_scale_sqrt_i_q8; + uint16_t lambda_scale_p_q8; + uint16_t lambda_scale_sqrt_p_q8; + uint16_t lambda_scale_b_ref_q8; + uint16_t lambda_scale_sqrt_b_ref_q8; + uint16_t lambda_scale_b_nonref_q8; + uint16_t lambda_scale_sqrt_b_nonref_q8; + } lambda_scale; + /* ARBITRARY_DOWNSCALE */ + struct + { + uint16_t width; + uint16_t height; + } downscaled_frame; + struct + { + uint32_t mode; + } dsl_pos; + struct + { + int16_t coef[3][3]; //coef[Y|U|V][R|G|B] + uint16_t offset[3]; + } yuv2rgb_params; + struct + { + uint8_t rgb2yuv_mode; + int16_t coef[3 * 3]; //coef[Y|U|V][R|G|B] + uint8_t luma_range[2]; + uint8_t chroma_range[2]; + uint8_t rgb_range[2]; + } rgb2yuv_params; + struct + { + uint16_t value; + } gray_uv_value; + struct + { + uint16_t mode; + } interp_mode; + struct + { + uint8_t crop_en; + /* left start x of luma in original image */ + uint16_t x; //included + /* top start y of luma in original image */ + uint16_t y; //included + /* cropped width of luma in original image */ + uint16_t width; + /* cropped height of luma in original image */ + uint16_t height; + } enc_src_crop; + + struct + { + uint8_t crop_en; + /* left start x of luma in original image */ + uint16_t x; //included + /* top start y of luma in original 
image */ + uint16_t y; //included + /* cropped width of luma in original image */ + uint16_t width; + /* cropped height of luma in original image */ + uint16_t height; + } dec_dst_crop; //ENABLE_DST_CROP + } data; +}; + +struct mve_request_release_ref_frame +{ + /* Decode: For a frame buffer that MVE has returned + * marked as _REF_FRAME, the host can send this message + * to ask the MVE to release the buffer as soon as it is + * no longer used as reference anymore. (Otherwise, in + * normal operation, the host would re-enqueue the buffer + * to the MVE when it has been displayed and can be over- + * written with a new frame.) + * + * Note: When a frame is no longer used as reference depends + * on the stream being decoded, and there is no way to + * guarantee a short response time, the response may not + * come until the end of the stream. + * + * Encode: Return this reference buffer to the firmware + * so it can be reused. This is only useful when the + * MVE_SET_OPT_INDEX_ENC_EXPOSE_REF_FRAMES is used and reference + * frames are reported by events (and must be returned). + */ + uint32_t buffer_address; +}; + + + + + + + + + + + + + + +/********************************************************************* + * + * RESPONSEs are messages from the firmware to the host + * + * Some of the MVE_RESPONSE_CODE_ codes are followed by one of the + * structs below. + * + *********************************************************************/ + +/* Sent when firmware has booted. + */ +struct mve_response_switched_in +{ + uint32_t core; +}; + +/* Sent when last core in a session has switched out. + */ +struct mve_response_switched_out +{ + uint32_t core; + uint32_t reason; + uint32_t sub_reason; +}; + +/* Response confirming state transition after either GO or STOP + * command from host. 
+ */ +struct mve_response_state_change +{ + uint32_t new_state; + #define MVE_STATE_STOPPED (0) + #define MVE_STATE_RUNNING (2) +}; + +/* Message sent when the all cores in the session have dequeued a + * job from the firmware job queue. + */ +struct mve_response_job_dequeued +{ + uint32_t valid_job; +}; + +/* Fatal error message from firmware, if sent then no further + * operation is possible. + */ +struct mve_response_error +{ + uint32_t error_code; + #define MVE_ERROR_ABORT (1) + #define MVE_ERROR_OUT_OF_MEMORY (2) + #define MVE_ERROR_ASSERT (3) + #define MVE_ERROR_UNSUPPORTED (4) + #define MVE_ERROR_INVALID_BUFFER (6) + #define MVE_ERROR_INVALID_STATE (8) + #define MVE_ERROR_WATCHDOG (9) + + #define MVE_MAX_ERROR_MESSAGE_SIZE (128) + char message[ MVE_MAX_ERROR_MESSAGE_SIZE ]; +}; + +/* When a set-option succeeds, a confirmation message is + * sent, including the index-code for that particular option. + */ +struct mve_response_set_option_confirm +{ + uint32_t index; /* Same as 'index' in struct mve_request_set_option */ +}; + +/* If a set-option request fails, this message is returned. + * This is not a fatal error. The set-option had no effect, + * and the session is still alive. + * For example, trying to set an option with a too large + * or small parameter would result in this message. + * The included text string is meant for development and + * debugging purposes only. + * (When a set-option succeeds the set-option-confirm + * message code is sent instead.) + */ +struct mve_response_set_option_fail +{ + uint32_t index; /* Same as 'index' in struct mve_request_set_option */ + char message[ MVE_MAX_ERROR_MESSAGE_SIZE ]; +}; + +/* Decode only: This message is sent from MVE to the host so that it can + * allocate large enough output buffers. Output buffers that are to small + * will be returned to the host marked as 'rejected'. 
+ */ +struct mve_response_frame_alloc_parameters +{ + /* Please note that the below information is a hint + * for what buffers to allocate, it does not say + * what actual resolution an output picture has. + */ + + /* To use if allocating PLANAR YUV output buffers: */ + uint16_t planar_alloc_frame_width; + uint16_t planar_alloc_frame_height; + + /* To use if allocating AFBC output buffers + * (if interlace, each field needs this size): + */ + uint32_t afbc_alloc_bytes; + + /* For situations where downscaled AFBC is supported, + * this number of bytes is needed for the downscaled frame. + */ + uint32_t afbc_alloc_bytes_downscaled; + + /* When the host allocates an AFBC frame buffer, it should normally set + * the the afbc_width_in_superblocks to be at least this recommended value. + * Buffers with smaller values are likely to be returned rejected by the MVE. + * See also comments above for afbc_alloc_bytes and + * afbc_alloc_bytes_downscaled, they describe the situations where the + * different values are used. + */ + uint16_t afbc_width_in_superblocks; + uint16_t afbc_width_in_superblocks_downscaled; + + /* For PLANAR YUV output, every plane's address need to be adjusted to get + * optimal AXI bursts when the pixel data is written, the values below may + * be used to calculate address offsets. + */ + uint16_t cropx; + uint16_t cropy; + + uint32_t mbinfo_alloc_bytes; /* Only for debugging */ + + + /* downscaled frame width/height for decode */ + /* ARBITRARY_DOWNSCALE */ + uint16_t dsl_frame_width; + uint16_t dsl_frame_height; + uint16_t dsl_pos_mode; + uint8_t ctu_size; /* EXPORT_SEQ_INFO */ + /* ENABLE_DST_CROP*/ + uint16_t dst_crop_x; + uint16_t dst_crop_y; + uint16_t dst_crop_width; + uint16_t dst_crop_height; +}; + +/* Decode only: This message is sent from MVE to the host so that it can + * allocate suitable output buffers. The needed size of the buffer is sent + * in a separate message (above). 
+ * When MVE sends the message below, it enters a waiting-state and will not + * make any progress until the host sends an output-flush command, upon + * which MVE will return all output buffers, followed by a message saying + * that the output has been flushed. Only then should the host start + * enqueueing new output buffers. + */ +struct mve_response_sequence_parameters +{ + /* Other stream parameters affecting buffer allocation, + * any change in these values will trigger a flush. + */ + uint8_t interlace; /* 0 or 1 */ + uint8_t chroma_format; + #define MVE_CHROMA_FORMAT_MONO (0x0) + #define MVE_CHROMA_FORMAT_420 (0x1) + #define MVE_CHROMA_FORMAT_422 (0x2) + #define MVE_CHROMA_FORMAT_440 (0x3) + #define MVE_CHROMA_FORMAT_ARGB (0x4) + #define MVE_CHROMA_FORMAT_RGB (0x5) + #define MVE_CHROMA_FORMAT_444 (0x6) + #define MVE_CHROMA_FORMAT_OSD_ARGB (0x7) + + uint8_t bitdepth_luma; /* 8, 9 or 10 */ + uint8_t bitdepth_chroma; /* 8, 9 or 10 */ + uint8_t num_buffers_planar; /* number of planar buffers needed */ + uint8_t num_buffers_afbc; /* number of AFBC buffers needed, for + * AFBC output more buffers are needed + * (for planar output, the firmware + * will allocate extra memory via RPC) + */ + uint8_t range_mapping_enabled; /* VC-1 AP specific feature, if enabled + * then AFBC buffers may need special + * filtering before they can be + * displayed correctly. If the host is + * not able to do that, then planar output + * should be used, for which MVE + * automatically performs the filtering. + */ + uint8_t reserved0; +}; + +struct mve_response_ref_frame_unused +{ + /* Decode only: If requested by the host with the message + * MVE_REQUEST_CODE_RELEASE_REF_FRAME, the MVE will respond + * with this message when (if ever) the buffer is no longer + * used. + */ + uint32_t unused_buffer_address; +}; + + +/* This message is only for debugging and performance profiling. + * Is sent by the firmware if the corresponding options is enabled. 
+ */ +struct mve_event_processed +{ + uint8_t pic_format; + uint8_t qp; + uint8_t pad0; + uint8_t pad1; + uint32_t parse_start_time; /* Timestamp, absolute time */ + uint32_t parse_end_time; /* Timestamp, absolute time */ + uint32_t parse_idle_time; /* Definition of idle here is waiting for in/out buffers or available RAM */ + + uint32_t pipe_start_time; /* Timestamp */ + uint32_t pipe_end_time; /* Timestamp, end-start = process time. Idle time while in a frame is + * not measured. */ + uint32_t pipe_idle_time; /* Always 0 in decode, */ + + uint32_t parser_coreid; /* Core used to parse this frame */ + uint32_t pipe_coreid; /* Core used to pipe this frame */ + + uint32_t bitstream_bits; /* Number of bitstream bits used for this frame. */ + + uint32_t intermediate_buffer_size; /* Size of intermediate (mbinfo/residuals) buffer after this frame was + * parsed. */ + uint32_t total_memory_allocated; /* after the frame was parsed. Including reference frames. */ + + uint32_t bus_read_bytes; /* bus read bytes */ + uint32_t bus_write_bytes; /* bus written bytes */ + + uint32_t afbc_bytes; /* afbc data transferred */ + + uint32_t slice0_end_time; /* Timestamp, absolute time */ + uint32_t stream_start_time; /* Timestamp, absolute stream start time */ + uint32_t stream_open_time; /* Timestamp, absolute stream open time */ +}; + +/* This message is sent by the firmware if the option + * MVE_SET_OPT_INDEX_ENC_EXPOSE_REF_FRAMES is enabled + */ +struct mve_event_ref_frame +{ + uint32_t ref_addr; /* MVE virtual address of AFBC reference frame */ + uint32_t ref_width; /* Width of display area in luma pixels */ + uint32_t ref_height; /* Height of display area in luma pixels */ + uint32_t ref_mb_width; /* Width in macroblocks */ + uint32_t ref_mb_height; /* Height in macroblocks */ + uint32_t ref_left_crop; /* Left crop in luma pixels */ + uint32_t ref_top_crop; /* Top crop in luma pixels */ + uint32_t ref_frame_size; /* Total AFBC frame size in bytes */ + uint32_t ref_display_order; 
/* Display picture order count */ + uint16_t bit_width; /* Bit width of the YUV either 8 or 10 */ + uint16_t tiled_headers; /* AFBC format is tiled */ + uint64_t user_data_tag; /* User data tag of corresponding input buffer */ +}; + +/* This message is only for debugging, is sent by the firmware if event tracing + * is enabled. + */ +struct mve_event_trace_buffers +{ + uint16_t reserved; + uint8_t num_cores; + uint8_t rasc_mask; + #define MVE_MAX_TRACE_BUFFERS 40 + /* this array will contain one buffer per rasc in rasc_mask per num_core */ + struct + { + uint32_t rasc_addr; /* rasc address of the buffer */ + uint32_t size; /* size of the buffer in bytes */ + } buffers[MVE_MAX_TRACE_BUFFERS]; +}; + +/* 'Events' are informative messages, the host is not required to react in + * any particular way. + */ +struct mve_response_event +{ + uint32_t event_code; + #define MVE_EVENT_ERROR_STREAM_CORRUPT (1) /* message, text string */ + #define MVE_EVENT_ERROR_STREAM_NOT_SUPPORTED (2) /* message, text string */ + #define MVE_EVENT_PROCESSED (3) /* struct mve_event_processed */ + #define MVE_EVENT_REF_FRAME (4) /* struct mve_event_ref_frame */ + #define MVE_EVENT_TRACE_BUFFERS (5) /* struct mve_event_trace_buffers */ + union + { + struct mve_event_processed event_processed; + struct mve_event_ref_frame event_ref_frame; + struct mve_event_trace_buffers event_trace_buffers; + char message[ MVE_MAX_ERROR_MESSAGE_SIZE ]; + } event_data; +}__attribute__((packed)); + + + + + + + + + + + + + + + + +/********************************************************************* + * + * BUFFERs are sent both ways, from host to firmware and back again + * + * Each MVE_BUFFER_CODE_ code is followed by one of the structs + * below. + * + *********************************************************************/ + +/* Flags in mve_buffer_frame::frame_flags: + * Set by whom? 
Meaning: + * DECODE: ENCODE: + * MVE_BUFFER_FRAME_FLAG_INTERLACE host - Buffer is interlaced (both top and + * bottom fields are allocated) + * MVE_BUFFER_FRAME_FLAG_BOT_FIRST fw - Bottom field should be displayed + * first (only if interlaced) + * MVE_BUFFER_FRAME_FLAG_TOP_PRESENT fw host Top field present (or full frame if + * not interlaced) + * MVE_BUFFER_FRAME_FLAG_BOT_PRESENT fw - Bottom present (only if interlaced) + * + * MVE_BUFFER_FRAME_FLAG_ROTATION_* host host Decode: MVE will rotate the output frame + * according to this setting. + * Encode: MVE will rotate the input frame + * according to this setting before + * encoding them. + * MVE_BUFFER_FRAME_FLAG_SCALING_MASK host - Output pictures should be downscaled + * + * MVE_BUFFER_FRAME_FLAG_MIRROR_* - host Input frame should be mirrored before encoding + * + * MVE_BUFFER_FRAME_FLAG_REJECTED fw - Buffer was too small, host should re-allocate + * + * MVE_BUFFER_FRAME_FLAG_CORRUPT fw - Frame contains visual corruption + * + * MVE_BUFFER_FRAME_FLAG_DECODE_ONLY fw - Frame should not be displayed + * + * MVE_BUFFER_FRAME_FLAG_REF_FRAME fw - Frame is used by MVE as reference, host must + * not change, just re-enqueue when displayed + * MVE_BUFFER_FRAME_FLAG_EOS fw host This is the last frame in the stream. + */ + +/* mve_buffer_frame_planar stores uncompressed YUV pictures. 
+ * ________________________________________ + * | ^ | | ^ + * |<-:--visible_frame_width---->| | : + * | : | | : + * | : | | : + * | visible_frame_height | | max_frame_height + * | : | | : + * | : | | : + * |__v__________________________| | : + * | | : + * |<-------------max_frame_width---------->| : + * |________________________________________| v + * + */ +struct mve_buffer_frame_planar +{ + /* Y,Cb,Cr top field */ + uint32_t plane_top[ 3 ]; + + /* Y,Cb,Cr bottom field (interlace only) */ + uint32_t plane_bot[ 3 ]; + + /* Stride between rows, in bytes */ + int32_t stride[ 3 ]; + + /* Size of largest frame allowed to put in this buffer */ + uint16_t max_frame_width; + uint16_t max_frame_height; + +}; + +/* mve_buffer_frame_afbc stores AFBC compressed content that is also used + * as the reference frame. Out of loop processing (crop, rotation, + * range reduction) must be supported by the user of this buffer and + * the parameters are signaled within the buffer descriptor below. + * ________________________________________ + * | ^ | + * | cropy | + * | v_____________________________ | + * |<-cropx->| ^ || + * | |<-:--visible_frame_width---->|| + * | | : || + * | | : || + * | | visible_frame_height || + * | | : || + * | | : || + * | |__v__________________________|| + * |________________________________________| + * + * <----- superblock_width ---------------> + * * afbc_width_in_superblocks + * + * Note that the sizes and cropping values need not be multiples of 16. + * + * For interlaced streams, the values refer to a full frame, + * while the output is actually separated into fields. Thus for fields, + * cropy and visible_frame_height should be divided by two. + * + * For dual-downscaled AFBC output (not supported for interlace), + * then the cropx, cropy, visible_frame_width and visible_frame_height + * should be divided by two for the downscaled plane. 
+ */ +struct mve_buffer_frame_afbc +{ + uint32_t plane[ 2 ]; /* Addresses for up to two AFBC planes: + * Top and bottom fields for interlace, + * or standard and optional downscaled output. */ + uint32_t alloc_bytes[ 2 ]; /* Size of allocation for each plane */ + uint16_t cropx; /* Luma x crop */ + uint16_t cropy; /* Luma y crop */ + uint16_t afbc_width_in_superblocks[ 2 ]; /* Width of AFBC frame buffer, in units + * of superblock width (32 or 16). + * If dual-downscaled output is chosen, + * this width can be different for the + * two planes. + * For first plane: + * (cropx + frame_width) + * <= superblock_width * afbc_width... + */ + uint32_t afbc_params; /* AFBC parameters */ + #define MVE_BUFFER_FRAME_AFBC_TILED_BODY (0x00000001) /* Output body blocks should be tiled */ + #define MVE_BUFFER_FRAME_AFBC_TILED_HEADER (0x00000002) /* Output headers should be tiled */ + #define MVE_BUFFER_FRAME_AFBC_32X8_SUPERBLOCK (0x00000004) /* Super block is 32x8, default is 16x16, + * (only supported as input for encode) */ + #define MVE_BUFFER_FRAME_AFBC_DN_FORCE_8BIT (0x00000008) /* For downscaled AFBC plane: It shall + * be 8-bit, even if full-scale is 10-bit */ + #define MVE_BUFFER_FRAME_AFBC_DN_FORCE_420 (0x00000010) /* For downscaled AFBC plane: It shall + * be 4:2:0, even if full-scale is 4:2:2 */ + #define MVE_BUFFER_FRAME_AFBC_STRIDE_SET_BY_MVE (0x00000020) /* Decode only: By default, the host should + set the afbc_width_in_superblocks. If the + value is zero, or if this bit is set, then + the MVE sets an appropriate value. 
*/ + #define MVE_BUFFER_FRAME_AFBC_BLOCK_SPLIT (0x00000040) /* For Superblock layout, block_split mode should be enabled*/ + #define MVE_BUFFER_FRAME_AFBC_AV1_DECODER (0x00000080) /* Indicate av1dec or other codec*/ + #define MVE_BUFFER_FRAME_AFBC_AV1_TILE_HEADER (0x00000100) /* If av1dec, the tileheader is not decided by MVE_BUFFER_FRAME_AFBC_TILED_HEADER */ + #define MVE_BUFFER_FRAME_AFBC_AV1_MONO_CHROMA (0x00000200) /* If av1dec, mono-chrome afbc */ + +}; + +/* output from encoder, encoder statistics. + * buffer parameter to a buffer of this encoder statistics + * this struct indicates each size of statistics. + */ + +struct mve_buffer_param_enc_stats +{ + uint32_t mms_buffer_size; + uint32_t bitcost_buffer_size; + uint32_t qp_buffer_size; + uint32_t flags; + //ENC_STATS_FLAGS + #define MVE_BUFFER_ENC_STATS_FLAG_MMS (1<<0) + #define MVE_BUFFER_ENC_STATS_FLAG_BITCOST (1<<1) + #define MVE_BUFFER_ENC_STATS_FLAG_QP (1<<2) + #define MVE_BUFFER_ENC_STATS_FLAG_DROP (1<<3) + uint16_t stats_mb_width; + uint16_t stats_mb_height; +}; + +/* + * The FRAME buffer stores the common information for PLANAR and AFBC buffers, + * and a union of PLANAR and AFBC specific information. + */ +struct mve_buffer_frame +{ + /* For identification of the buffer, this is not changed by + * the firmware. */ + uint64_t host_handle; + + /* For matching input buffer with output buffers, the firmware + * copies these values between frame buffers and bitstream buffers. 
*/ + uint64_t user_data_tag; + #define OSD1_TAG 0x40000000 /* use bit30 stand for osd_1 */ + #define OSD0_TAG 0x20000000 /* use bit30 stand for osd_1 */ + + /* Frame buffer flags, see commentary above */ + uint32_t frame_flags; + #define MVE_BUFFER_FRAME_FLAG_INTERLACE (0x00000001) + #define MVE_BUFFER_FRAME_FLAG_BOT_FIRST (0x00000002) + #define MVE_BUFFER_FRAME_FLAG_TOP_PRESENT (0x00000004) + #define MVE_BUFFER_FRAME_FLAG_BOT_PRESENT (0x00000008) + #define MVE_BUFFER_FRAME_FLAG_ROTATION_90 (0x00000010) + #define MVE_BUFFER_FRAME_FLAG_ROTATION_180 (0x00000020) + #define MVE_BUFFER_FRAME_FLAG_ROTATION_270 (0x00000030) + #define MVE_BUFFER_FRAME_FLAG_SCALING_MASK (0x000000C0) + #define MVE_BUFFER_FRAME_FLAG_MIRROR_HORI (0x00000100) + #define MVE_BUFFER_FRAME_FLAG_MIRROR_VERT (0x00000200) + #define MVE_BUFFER_FRAME_FLAG_FORCE_IDR (0x00000400) + #define MVE_BUFFER_FRAME_FLAG_RESET_GOP (0x00000800) + #define MVE_BUFFER_FRAME_FLAG_REJECTED (0x00001000) + #define MVE_BUFFER_FRAME_FLAG_CORRUPT (0x00002000) + #define MVE_BUFFER_FRAME_FLAG_DECODE_ONLY (0x00004000) + #define MVE_BUFFER_FRAME_FLAG_REF_FRAME (0x00008000) + #define MVE_BUFFER_FRAME_FLAG_EOS (0x00010000) + #define MVE_BUFFER_FRAME_FLAG_RESET_LTR_PEROID (0x00020000) + #define MVE_BUFFER_FRAME_FLAG_RESET_RC (0x00040000) + /*ARBITRARY_DOWNSCALE*/ + #define MVE_BUFFER_FRAME_FLAG_SCALING_MASKX (0xFF000000) //8bit + #define MVE_BUFFER_FRAME_FLAG_SCALING_MASKY (0x00FE0000) //7bit + + /* Height (in luma samples) of visible part of frame, + * may be smaller than allocated frame size. */ + uint16_t visible_frame_height; + + /* Width (in luma samples) of visible part of frame, + * may be smaller than allocated frame size. 
*/ + uint16_t visible_frame_width; + + /* Color format of buffer */ + uint16_t format; + /* format bitfield: */ + #define MVE_FORMAT_BF_C (0) /* 3 bits, chroma subsampling */ + #define MVE_FORMAT_BF_B (4) /* 4 bits, max bitdepth minus 8 */ + #define MVE_FORMAT_BF_N (8) /* 2 bits, number of planes */ + #define MVE_FORMAT_BF_V (12) /* 2 bits, format variant */ + #define MVE_FORMAT_BF_A (15) /* 1 bit, AFBC bit */ + /* formats: */ + #define MVE_FORMAT_YUV420_AFBC_8 ( (MVE_CHROMA_FORMAT_420 << MVE_FORMAT_BF_C) | \ + ( ( 8 - 8) << MVE_FORMAT_BF_B) | \ + ( 1 << MVE_FORMAT_BF_A) ) + + #define MVE_FORMAT_YUV420_AFBC_10 ( (MVE_CHROMA_FORMAT_420 << MVE_FORMAT_BF_C) | \ + ( (10 - 8) << MVE_FORMAT_BF_B) | \ + ( 1 << MVE_FORMAT_BF_A) ) + + #define MVE_FORMAT_YUV422_AFBC_8 ( (MVE_CHROMA_FORMAT_422 << MVE_FORMAT_BF_C) | \ + ( ( 8 - 8) << MVE_FORMAT_BF_B) | \ + ( 1 << MVE_FORMAT_BF_A) ) + + #define MVE_FORMAT_YUV422_AFBC_10 ( (MVE_CHROMA_FORMAT_422 << MVE_FORMAT_BF_C) | \ + ( (10 - 8) << MVE_FORMAT_BF_B) | \ + ( 1 << MVE_FORMAT_BF_A) ) + + #define MVE_FORMAT_YUV420_I420 ( (MVE_CHROMA_FORMAT_420 << MVE_FORMAT_BF_C) | \ + ( ( 8 - 8) << MVE_FORMAT_BF_B) | \ + ( 3 << MVE_FORMAT_BF_N) | \ + ( 0 << MVE_FORMAT_BF_V) ) + + #define MVE_FORMAT_YUV420_I420_10 ( (MVE_CHROMA_FORMAT_420 << MVE_FORMAT_BF_C) | \ + ( ( 10 - 8) << MVE_FORMAT_BF_B) | \ + ( 3 << MVE_FORMAT_BF_N) | \ + ( 0 << MVE_FORMAT_BF_V) ) + + #define MVE_FORMAT_YUV420_NV12 ( (MVE_CHROMA_FORMAT_420 << MVE_FORMAT_BF_C) | \ + ( ( 8 - 8) << MVE_FORMAT_BF_B) | \ + ( 2 << MVE_FORMAT_BF_N) | \ + ( 0 << MVE_FORMAT_BF_V) ) + + #define MVE_FORMAT_YUV420_NV21 ( (MVE_CHROMA_FORMAT_420 << MVE_FORMAT_BF_C) | \ + ( ( 8 - 8) << MVE_FORMAT_BF_B) | \ + ( 2 << MVE_FORMAT_BF_N) | \ + ( 1 << MVE_FORMAT_BF_V) ) + + #define MVE_FORMAT_YUV420_P010 ( (MVE_CHROMA_FORMAT_420 << MVE_FORMAT_BF_C) | \ + ( (16 - 8) << MVE_FORMAT_BF_B) | \ + ( 2 << MVE_FORMAT_BF_N) | \ + ( 0 << MVE_FORMAT_BF_V) ) + + #define MVE_FORMAT_YUV420_2P_10 ( (MVE_CHROMA_FORMAT_420 << 
MVE_FORMAT_BF_C) | \ + ( (10 - 8) << MVE_FORMAT_BF_B) | \ + ( 2 << MVE_FORMAT_BF_N) | \ + ( 0 << MVE_FORMAT_BF_V) ) + + #define MVE_FORMAT_YUV420_Y0L2 ( (MVE_CHROMA_FORMAT_420 << MVE_FORMAT_BF_C) | \ + ( (10 - 8) << MVE_FORMAT_BF_B) | \ + ( 1 << MVE_FORMAT_BF_N) | \ + ( 0 << MVE_FORMAT_BF_V) ) + + #define MVE_FORMAT_YUV420_AQB1 ( (MVE_CHROMA_FORMAT_420 << MVE_FORMAT_BF_C) | \ + ( (10 - 8) << MVE_FORMAT_BF_B) | \ + ( 1 << MVE_FORMAT_BF_N) | \ + ( 1 << MVE_FORMAT_BF_V) ) + + #define MVE_FORMAT_YUV422_YUY2 ( (MVE_CHROMA_FORMAT_422 << MVE_FORMAT_BF_C) | \ + ( ( 8 - 8) << MVE_FORMAT_BF_B) | \ + ( 1 << MVE_FORMAT_BF_N) | \ + ( 0 << MVE_FORMAT_BF_V) ) + + #define MVE_FORMAT_YUV422_UYVY ( (MVE_CHROMA_FORMAT_422 << MVE_FORMAT_BF_C) | \ + ( ( 8 - 8) << MVE_FORMAT_BF_B) | \ + ( 1 << MVE_FORMAT_BF_N) | \ + ( 1 << MVE_FORMAT_BF_V) ) + + #define MVE_FORMAT_YUV422_Y210 ( (MVE_CHROMA_FORMAT_422 << MVE_FORMAT_BF_C) | \ + ( (16 - 8) << MVE_FORMAT_BF_B) | \ + ( 1 << MVE_FORMAT_BF_N) | \ + ( 0 << MVE_FORMAT_BF_V) ) + + #define MVE_FORMAT_YUV422_1P_10 ( (MVE_CHROMA_FORMAT_422 << MVE_FORMAT_BF_C) | \ + ( (10 - 8) << MVE_FORMAT_BF_B) | \ + ( 1 << MVE_FORMAT_BF_N) | \ + ( 0 << MVE_FORMAT_BF_V) ) + + #define MVE_FORMAT_RGBA_8888 ( (MVE_CHROMA_FORMAT_ARGB << MVE_FORMAT_BF_C) | \ + ( ( 8 - 8) << MVE_FORMAT_BF_B) | \ + ( 1 << MVE_FORMAT_BF_N) | \ + ( 0 << MVE_FORMAT_BF_V) ) + + #define MVE_FORMAT_BGRA_8888 ( (MVE_CHROMA_FORMAT_ARGB << MVE_FORMAT_BF_C) | \ + ( ( 8 - 8) << MVE_FORMAT_BF_B) | \ + ( 1 << MVE_FORMAT_BF_N) | \ + ( 1 << MVE_FORMAT_BF_V) ) + + #define MVE_FORMAT_ARGB_8888 ( (MVE_CHROMA_FORMAT_ARGB << MVE_FORMAT_BF_C) | \ + ( ( 8 - 8) << MVE_FORMAT_BF_B) | \ + ( 1 << MVE_FORMAT_BF_N) | \ + ( 2 << MVE_FORMAT_BF_V) ) + + #define MVE_FORMAT_ABGR_8888 ( (MVE_CHROMA_FORMAT_ARGB << MVE_FORMAT_BF_C) | \ + ( ( 8 - 8) << MVE_FORMAT_BF_B) | \ + ( 1 << MVE_FORMAT_BF_N) | \ + ( 3 << MVE_FORMAT_BF_V) ) + #define MVE_FORMAT_Y ( (MVE_CHROMA_FORMAT_MONO << MVE_FORMAT_BF_C) | \ + ( ( 8 - 8) << 
MVE_FORMAT_BF_B) | \ + ( 1 << MVE_FORMAT_BF_N) | \ + ( 0 << MVE_FORMAT_BF_V) ) + #define MVE_FORMAT_Y_10 ( (MVE_CHROMA_FORMAT_MONO << MVE_FORMAT_BF_C) | \ + ( ( 10 - 8) << MVE_FORMAT_BF_B) | \ + ( 1 << MVE_FORMAT_BF_N) | \ + ( 0 << MVE_FORMAT_BF_V) ) + + #define MVE_FORMAT_RGB_888 ( (MVE_CHROMA_FORMAT_RGB << MVE_FORMAT_BF_C) | \ + ( ( 8 - 8) << MVE_FORMAT_BF_B) | \ + ( 1 << MVE_FORMAT_BF_N) | \ + ( 0 << MVE_FORMAT_BF_V) ) + + #define MVE_FORMAT_BGR_888 ( (MVE_CHROMA_FORMAT_RGB << MVE_FORMAT_BF_C) | \ + ( ( 8 - 8) << MVE_FORMAT_BF_B) | \ + ( 1 << MVE_FORMAT_BF_N) | \ + ( 1 << MVE_FORMAT_BF_V) ) + + #define MVE_FORMAT_RGB_3P ( (MVE_CHROMA_FORMAT_RGB << MVE_FORMAT_BF_C) | \ + ( ( 8 - 8) << MVE_FORMAT_BF_B) | \ + ( 3 << MVE_FORMAT_BF_N) | \ + ( 0 << MVE_FORMAT_BF_V) ) + #define MVE_FORMAT_YUV444 ( (MVE_CHROMA_FORMAT_444 << MVE_FORMAT_BF_C) | \ + ( ( 8 - 8) << MVE_FORMAT_BF_B) | \ + ( 3 << MVE_FORMAT_BF_N) | \ + ( 0 << MVE_FORMAT_BF_V) ) + #define MVE_FORMAT_YUV444_10 ( (MVE_CHROMA_FORMAT_444 << MVE_FORMAT_BF_C) | \ + ( ( 10 - 8) << MVE_FORMAT_BF_B) | \ + ( 3 << MVE_FORMAT_BF_N) | \ + ( 0 << MVE_FORMAT_BF_V) ) + #define MVE_FORMAT_ARGB_1555 ( (MVE_CHROMA_FORMAT_OSD_ARGB << MVE_FORMAT_BF_C) | \ + ( ( 8 - 8) << MVE_FORMAT_BF_B) | \ + ( 1 << MVE_FORMAT_BF_N) | \ + ( 0 << MVE_FORMAT_BF_V) ) + #define MVE_FORMAT_ARGB_4444 ( (MVE_CHROMA_FORMAT_OSD_ARGB << MVE_FORMAT_BF_C) | \ + ( ( 8 - 8) << MVE_FORMAT_BF_B) | \ + ( 1 << MVE_FORMAT_BF_N) | \ + ( 1 << MVE_FORMAT_BF_V) ) + #define MVE_FORMAT_RGB_565 ( (MVE_CHROMA_FORMAT_OSD_ARGB << MVE_FORMAT_BF_C) | \ + ( ( 8 - 8) << MVE_FORMAT_BF_B) | \ + ( 1 << MVE_FORMAT_BF_N) | \ + ( 2 << MVE_FORMAT_BF_V) ) + + #define MVE_FORMAT_MBINFO (0x0001) /* only used for debugging */ + + #define MVE_FORMAT_UNUSED (0x0000) + + uint16_t reserved0; /* force 'data' to be 4-byte aligned */ + + union + { + struct mve_buffer_frame_planar planar; + struct mve_buffer_frame_afbc afbc; + } data; + + //uint32_t reserved1; /* force size to be multiple of 8 
bytes */ + uint16_t mini_frame_y_start; + uint16_t mini_frame_y_end; +}; + +/* The bitstream buffer stores a number of bitstream bytes */ +struct mve_buffer_bitstream +{ + /* For identification of the buffer, this is not changed by + * the firmware. */ + uint64_t host_handle; + + /* For matching input buffer with output buffers, the firmware + * copies these values between frame buffers and bitstream buffers. */ + uint64_t user_data_tag; + + /* BufferFlags */ + uint32_t bitstream_flags; + #define MVE_BUFFER_BITSTREAM_FLAG_EOS (0x00000001) + #define MVE_BUFFER_BITSTREAM_FLAG_ENDOFFRAME (0x00000010) + #define MVE_BUFFER_BITSTREAM_FLAG_SYNCFRAME (0x00000020) + #define MVE_BUFFER_BITSTREAM_FLAG_CODECCONFIG (0x00000080) + #define MVE_BUFFER_BITSTREAM_FLAG_ENDOFSUBFRAME (0x00000400) + #define MVE_BUFFER_BITSTREAM_FLAG_ENC_STATS (0x00010000) + #define MVE_BUFFER_BITSTREAM_FLAG_BSEOF (0x00100000) + + /* Length of allocated buffer */ + uint32_t bitstream_alloc_bytes; + + /* Byte offset from start to first byte */ + uint32_t bitstream_offset; + + /* Number of bytes in the buffer */ + uint32_t bitstream_filled_len; + + /* Pointer to buffer start */ + uint32_t bitstream_buf_addr; + + /* frame_type. 
0:I, 1:P, 2:B, 3:b */ + uint8_t frame_type; + #define MVE_FRAME_TYPE_I 0 + #define MVE_FRAME_TYPE_P 1 + #define MVE_FRAME_TYPE_B 2 // B frame + #define MVE_FRAME_TYPE_LOWER_B 3 // b frame + #define MVE_FRAME_TYPE_P_KEY 4 + #define MVE_FRAME_TYPE_NO_REF_P 5 // only svct3 + #define MVE_FRAME_TYPE_GDR 6 // GDR + + uint8_t src_transform; + #define MVE_SRC_TRANSFORM_NONE (0) // none src transform + #define MVE_SRC_TRANSFORM_R90 (1) // rotate 90 degrees + #define MVE_SRC_TRANSFORM_R180 (2) // rotate 180 degrees + #define MVE_SRC_TRANSFORM_R270 (3) // rotate 270 degrees + #define MVE_SRC_TRANSFORM_VFLIP (4) // vertical flip (no rotation) + #define MVE_SRC_TRANSFORM_R90_VFLIP (5) // rotate 90 degrees and vertical flip + #define MVE_SRC_TRANSFORM_R180_VFLIP (6) // rotate 180 degrees and vertical flip + #define MVE_SRC_TRANSFORM_R270_VFLIP (7) // rotate 270 degrees and vertical flip + + /* Pad to force 8-byte alignment */ + //uint32_t reserved; + uint16_t bitstream_remaining_kb; // remaining kbytes of bitstream not returned to host. +}; + +/* + * Define a region in 16x16 units + * + * The region is macroblock positions (x,y) in the range + * mbx_left <= x < mbx_right + * mby_top <= y < mby_bottom + */ +struct mve_buffer_param_region +{ + uint16_t mbx_left; /* macroblock x left edge (inclusive) */ + uint16_t mbx_right; /* macroblock x right edge (exclusive) */ + uint16_t mby_top; /* macroblock y top edge (inclusive) */ + uint16_t mby_bottom; /* macroblock y bottom edge (exclusive) */ + int16_t qp_delta; /* QP delta value for this region, this + * delta applies to QP values in the ranges: + * H264: 0-51 + * HEVC: 0-51 + * VP9: 0-255 */ + uint8_t prio; /* the priority of this region */ + uint8_t force_intra; /* force intra to this ROI region, refer to macro: FEATURE_SUPPORT_ROI_MISC */ +}; + +/* input for encoder, + * the mve_buffer_param_regions buffer stores the information for FRAME buffers, + * and the information for regions of interest. 
+ */ +struct mve_buffer_param_regions +{ + uint8_t n_regions; /* Number of regions */ + uint8_t reserved[ 3 ]; + #define MVE_MAX_FRAME_REGIONS 16 + struct mve_buffer_param_region region[ MVE_MAX_FRAME_REGIONS ]; +}; + +/* the block parameter record specifies the various properties of a quad */ +struct mve_block_param_record +{ + uint32_t qp_delta; + /* Bitset of four 4-bit QP delta values for a quad. + * For H.264 and HEVC these are qp delta values in the range -8 to +7. + * For Vp9 these are segment map values in the range 0 to 7. + */ + #define MVE_BLOCK_PARAM_RECORD_QP_DELTA_TOP_LEFT_16X16 (0) + #define MVE_BLOCK_PARAM_RECORD_QP_DELTA_TOP_LEFT_16X16_SZ (6) + #define MVE_BLOCK_PARAM_RECORD_QP_DELTA_TOP_RIGHT_16X16 (6) + #define MVE_BLOCK_PARAM_RECORD_QP_DELTA_TOP_RIGHT_16X16_SZ (6) + #define MVE_BLOCK_PARAM_RECORD_QP_DELTA_BOT_LEFT_16X16 (12) + #define MVE_BLOCK_PARAM_RECORD_QP_DELTA_BOT_LEFT_16X16_SZ (6) + #define MVE_BLOCK_PARAM_RECORD_QP_DELTA_BOT_RIGHT_16X16 (18) + #define MVE_BLOCK_PARAM_RECORD_QP_DELTA_BOT_RIGHT_16X16_SZ (6) + #define MVE_BLOCK_PARAM_RECORD_QP_FORCE_FIELD (24) + #define MVE_BLOCK_PARAM_RECORD_QP_FORCE_FIELD_SZ (5) + #define MVE_BLOCK_PARAM_RECORD_QUAD_FORCE_INTRA (29) + #define MVE_BLOCK_PARAM_RECORD_QUAD_FORCE_INTRA_SZ (1) + #define MVE_BLOCK_PARAM_RECORD_QP_ABSOLUTE (30) + #define MVE_BLOCK_PARAM_RECORD_QP_ABSOLUTE_SZ (1) + #define MVE_BLOCK_PARAM_RECORD_QP_QUAD_SKIP (31) + #define MVE_BLOCK_PARAM_RECORD_QP_QUAD_SKIP_SZ (1) + + #define MVE_BLOCK_PARAM_RECORD_FORCE_NONE (0x00) + #define MVE_BLOCK_PARAM_RECORD_FORCE_QP (0x01) + uint32_t min_qp; + #define MVE_BLOCK_PARAM_RECORD_MIN_QP_TOP_LEFT_16X16 (0) + #define MVE_BLOCK_PARAM_RECORD_MIN_QP_TOP_LEFT_16X16_SZ (6) + #define MVE_BLOCK_PARAM_RECORD_MIN_QP_TOP_RIGHT_16X16 (6) + #define MVE_BLOCK_PARAM_RECORD_MIN_QP_TOP_RIGHT_16X16_SZ (6) + #define MVE_BLOCK_PARAM_RECORD_MIN_QP_BOT_LEFT_16X16 (12) + #define MVE_BLOCK_PARAM_RECORD_MIN_QP_BOT_LEFT_16X16_SZ (6) + #define 
MVE_BLOCK_PARAM_RECORD_MIN_QP_BOT_RIGHT_16X16 (18) + #define MVE_BLOCK_PARAM_RECORD_MIN_QP_BOT_RIGHT_16X16_SZ (6) +}; + +/* block configuration uncompressed rows header. this configures the size of the + * uncompressed body. */ +struct mve_buffer_general_rows_uncomp_hdr +{ + uint8_t n_cols_minus1; /* number of quad cols in picture minus 1 */ + uint8_t n_rows_minus1; /* number of quad rows in picture minus 1 */ + uint8_t reserved[2]; +}; + +/* block configuration uncompressed rows body. this structure contains an array + * of block parameter records whose length is (n_cols_minus1 + 1) * (n_rows_minus1 + 1) + * elements. therefore the allocation of this structure needs to be dynamic and + * a pointer to the allocated memory should then be assigned to the general + * purpose buffer data pointer + */ +struct mve_buffer_general_rows_uncomp_body +{ + /* the size of this array is variable and not necessarily equal to 1. + * therefore the sizeof operator should not be used + */ + struct mve_block_param_record bpr[1]; +}; + +/* input for encoder, block level configurations. + * the row based block configurations can be defined in different formats. they + * are stored in the blk_cfgs union and identified by the blk_cfg_type member. + * these definitions consist of a header and body pair. the header part contains + * configuration information for the body. the body part describes the actual + * layout of the data buffer pointed to by the mve_buffer_general_hdr buffer_ptr. + */ +struct mve_buffer_general_block_configs +{ + uint8_t blk_cfg_type; + #define MVE_BLOCK_CONFIGS_TYPE_NONE (0x00) + #define MVE_BLOCK_CONFIGS_TYPE_ROW_UNCOMP (0xff) + uint8_t reserved[3]; + union + { + struct mve_buffer_general_rows_uncomp_hdr rows_uncomp; + } blk_cfgs; +}; + +/* for the mve_buffer_general_encoder_stats buffer type the body data + * is an array of records of the following format, one record for each + * 32x32 pixel block of the picture. 
Values that are marked "per CTU" + * are only valid in the first 32x32 block of the 64x64 CTU for HEVC or VP9. + * For H.264 the "per CTU" values are the sum of four 16x16 macroblocks. + * Each record is 12 bytes and must be 32-bit aligned. + */ +struct mve_block_stats_record_full +{ + uint8_t intra_count; // number of 8x8 blocks in the CTU that are intra + uint8_t reserved0; // not used + uint16_t bit_estimate; // bit estimate for the CTU + uint16_t luma_mean; // luminance mean + uint16_t luma_cplx; // luma complexity measure (0=flat) + int16_t rmv_x; // rough x motion vector in pixels + int16_t rmv_y; // rough y motion vector in pixels +}; + +/* input for encoder, block level statistics + * this buffer is added to the encoder input frame queue, before the + * input frame for which statistics are to be gathered. The buffer is + * filled duing frame analysis and returned in the input buffer return + * queue before the input buffer is returned. + */ +struct mve_buffer_general_encoder_stats +{ + uint8_t encoder_stats_type; + #define MVE_ENCODER_STATS_TYPE_FULL (0x01) + uint8_t frame_type; // See MVE_FRAME_TYPE_* + #define MVE_FRAME_TYPE_I 0 + #define MVE_FRAME_TYPE_P 1 + #define MVE_FRAME_TYPE_B 2 + uint8_t used_as_reference; // 0=No, 1=Yes + uint8_t qp; // base quantizer used for the frame + // HEVC, H.264: 0-51. VP9: 0-63 + uint32_t picture_count; // display order picture count + uint16_t num_cols; // number of columns (each 32 pixels wide) + uint16_t num_rows; // number of rows (each 32 pixels high) + uint32_t ref_pic_count[2]; // display order picture count of references + // unused values are set to zero +}; + +/* input for encoder */ +struct mve_buffer_param_qp +{ + /* QP (quantization parameter) for encode. + * + * When used to set fixed QP for encode, with rate control + * disabled, then the valid ranges are: + * H264: 0-51 + * HEVC: 0-51 + * VP8: 0-63 + * VP9: 0-63 + * Note: The QP must be set separately for I, P and B frames. 
+ * + * But when this message is used with the regions-feature, + * then the valid ranges are the internal bitstream ranges: + * H264: 0-51 + * HEVC: 0-51 + * VP8: 0-127 + * VP9: 0-255 + */ + int32_t qp; + int32_t epr_iframe_enable; +}; + +struct mve_reset_gop_dynamic +{ + uint32_t reset_gop_pframes; +}; + +struct mve_reset_ltr_peroid_dynamic +{ + uint32_t reset_ltr_peroid_pframes; +}; + +/* output from decoder */ +struct mve_buffer_param_display_size +{ + uint16_t display_width; + uint16_t display_height; +}; + +/* output from decoder, colour information needed for hdr */ +struct mve_buffer_param_colour_description +{ + uint32_t flags; + #define MVE_BUFFER_PARAM_COLOUR_FLAG_MASTERING_DISPLAY_DATA_VALID (1) + #define MVE_BUFFER_PARAM_COLOUR_FLAG_CONTENT_LIGHT_DATA_VALID (2) + + uint8_t range; /* Unspecified=0, Limited=1, Full=2 */ + #define MVE_BUFFER_PARAM_COLOUR_RANGE_UNSPECIFIED (0) + #define MVE_BUFFER_PARAM_COLOUR_RANGE_LIMITED (1) + #define MVE_BUFFER_PARAM_COLOUR_RANGE_FULL (2) + + uint8_t colour_primaries; /* see hevc spec. E.3.1 */ + uint8_t transfer_characteristics; /* see hevc spec. E.3.1 */ + uint8_t matrix_coeff; /* see hevc spec. E.3.1 */ + + uint16_t mastering_display_primaries_x[3]; /* see hevc spec. D.3.27 */ + uint16_t mastering_display_primaries_y[3]; /* see hevc spec. D.3.27 */ + uint16_t mastering_white_point_x; /* see hevc spec. D.3.27 */ + uint16_t mastering_white_point_y; /* see hevc spec. D.3.27 */ + uint32_t max_display_mastering_luminance; /* see hevc spec. D.3.27 */ + uint32_t min_display_mastering_luminance; /* see hevc spec. D.3.27 */ + + uint32_t max_content_light_level; /* see hevc spec. D.3.35 */ + uint32_t avg_content_light_level; /* see hevc spec. 
D.3.35 */ + + uint8_t video_format_present_flag; + uint8_t video_format; + uint8_t aspect_ratio_info_present_flag; + uint8_t aspect_ratio_idc; + uint8_t timing_flag_info_present_flag; + uint16_t sar_width; + uint16_t sar_height; + uint32_t num_units_in_tick; + uint32_t time_scale; + + uint8_t pad[7]; // pad for 8-byte alignment +}; + +struct mve_buffer_param_sei_user_data_unregistered +{ + uint8_t flags; + #define MVE_BUFFER_PARAM_USER_DATA_UNREGISTERED_VALID (1) + uint8_t uuid[16]; + char user_data[256 - 35]; + uint8_t user_data_len; + + uint8_t reserved[5]; +}; + +/* output from decoder see hevc spec. D.3.3 */ +struct mve_buffer_param_frame_field_info +{ + uint8_t pic_struct; + uint8_t source_scan_type; + uint8_t duplicate_flag; + uint8_t reserved; +}; + +/* output from decoder, VC-1 specific feature only relevant + * if using AFBC output + */ +struct mve_buffer_param_range_map +{ + uint8_t luma_map_enabled; + uint8_t luma_map_value; + uint8_t chroma_map_enabled; + uint8_t chroma_map_value; +}; + +/* input for encoder */ +struct mve_buffer_param_rate_control +{ + uint32_t rate_control_mode; + #define MVE_OPT_RATE_CONTROL_MODE_OFF (0) + #define MVE_OPT_RATE_CONTROL_MODE_STANDARD (1) + #define MVE_OPT_RATE_CONTROL_MODE_VARIABLE (2) + #define MVE_OPT_RATE_CONTROL_MODE_CONSTANT (3) + #define MVE_OPT_RATE_CONTROL_MODE_C_VARIABLE (4) + uint32_t target_bitrate; /* in bits per second */ + uint32_t maximum_bitrate; /* in bits per second */ +}; + +/* input for encoder */ +struct mve_buffer_param_rate_control_qp_range +{ + int32_t qp_min; + int32_t qp_max; +}; + +/* input for encoder, see hevc spec. 
D.3.16 */ +struct mve_buffer_param_frame_packing +{ + uint32_t flags; + #define MVE_BUFFER_PARAM_FRAME_PACKING_FLAG_QUINCUNX_SAMPLING (1) + #define MVE_BUFFER_PARAM_FRAME_PACKING_FLAG_SPATIAL_FLIPPING (2) + #define MVE_BUFFER_PARAM_FRAME_PACKING_FLAG_FRAME0_FLIPPED (4) + #define MVE_BUFFER_PARAM_FRAME_PACKING_FLAG_FIELD_VIEWS (8) + #define MVE_BUFFER_PARAM_FRAME_PACKING_FLAG_CURRENT_FRAME_IS_FRAME0 (16) + + uint8_t frame_packing_arrangement_type; + uint8_t content_interpretation_type; + + uint8_t frame0_grid_position_x; + uint8_t frame0_grid_position_y; + uint8_t frame1_grid_position_x; + uint8_t frame1_grid_position_y; + + uint8_t reserved[ 2 ]; +}; + +struct mve_buffer_param_rectangle +{ + uint16_t x_left; /* pixel x left edge (inclusive) */ + uint16_t x_right; /* pixel x right edge (exclusive) */ + uint16_t y_top; /* pixel y top edge (inclusive) */ + uint16_t y_bottom; /* pixel y bottom edge (exclusive) */ +}; + +/* input for encoder, + * indicate which parts of the source picture has changed. + * The encoder can (optionally) use this information to + * reduce memory bandwidth. + * + * n_rectangles=0 indicates the source picture is unchanged. + * + * This parameter only applies to the picture that immediately + * follows (and not to subsequent ones). + */ +struct mve_buffer_param_change_rectangles +{ + uint8_t n_rectangles; /* Number of rectangles */ + uint8_t reserved[3]; + #define MVE_MAX_FRAME_CHANGE_RECTANGLES 2 + struct mve_buffer_param_rectangle rectangles[MVE_MAX_FRAME_CHANGE_RECTANGLES]; +}; + +/* input for VP9 encoder, + * specify the qp deltas for each segment map index. + * These are intended to be used with block configs only. 
+ */ +struct mve_buffer_param_vp9_segmap +{ + #define VP9SPEC_MAX_SEGMENTS 8 + int8_t qp_delta[VP9SPEC_MAX_SEGMENTS]; /* Qp delta to use for each segment map region */ + int8_t num_segments; /* Number of active segments (to set coding probability) */ +}; + +typedef struct mve_buffer_param_osd_cfg { + uint8_t osd_inside_enable; + uint8_t osd_inside_alpha_enable; + uint8_t osd_inside_convert_color_enable; + uint8_t osd_inside_alpha_value; /* as alpha range [0~16], use u8 */ + uint8_t osd_inside_convert_color_threshold;/* threshold range [0~255], if input is 10bit, th * 4 */ + uint8_t osd_inside_rgb2yuv_mode;/* 0-601L, 1-601F, 2-709_L, 3-709_F */ + uint16_t osd_inside_start_x; /* pixel x left edge (inclusive) */ + uint16_t osd_inside_start_y; /* pixel y top edge (inclusive) */ + uint16_t reserved[3]; +} t_mve_buffer_param_osd_cfg; + +struct mve_buffer_param_osd_rectangles { + #define MVE_MAX_FRAME_OSD_REGION 2 + t_mve_buffer_param_osd_cfg osd_single_cfg[MVE_MAX_FRAME_OSD_REGION]; +}; + +/* Parameters that are sent in the same communication channels + * as the buffers. A parameter applies to all subsequent buffers. + * Some types are only valid for decode, and some only for encode. 
+ */ +struct mve_buffer_param +{ + uint32_t type; /* Extra data: */ + #define MVE_BUFFER_PARAM_TYPE_QP (2) /* qp */ + #define MVE_BUFFER_PARAM_TYPE_REGIONS (3) /* regions */ + #define MVE_BUFFER_PARAM_TYPE_DISPLAY_SIZE (5) /* display_size */ + #define MVE_BUFFER_PARAM_TYPE_RANGE_MAP (6) /* range_map */ + #define MVE_BUFFER_PARAM_TYPE_FRAME_RATE (9) /* arg, in frames per second, as a + * fixed point Q16 value, for example + * 0x001e0000 == 30.0 fps */ + #define MVE_BUFFER_PARAM_TYPE_RATE_CONTROL (10) /* rate_control */ + #define MVE_BUFFER_PARAM_TYPE_QP_I (12) /* qp for I frames, when no rate control */ + #define MVE_BUFFER_PARAM_TYPE_QP_P (13) /* qp for P frames, when no rate control */ + #define MVE_BUFFER_PARAM_TYPE_QP_B (14) /* qp for B frames, when no rate control */ + #define MVE_BUFFER_PARAM_TYPE_COLOUR_DESCRIPTION (15) /* colour_description */ + #define MVE_BUFFER_PARAM_TYPE_FRAME_PACKING (16) /* frame_packing */ + #define MVE_BUFFER_PARAM_TYPE_FRAME_FIELD_INFO (17) /* frame_field_info */ + #define MVE_BUFFER_PARAM_TYPE_GOP_RESET (18) /* no extra data */ + #define MVE_BUFFER_PARAM_TYPE_DPB_HELD_FRAMES (19) /* arg, number of output buffers that are + * complete and held by firmware in the + * DPB for reordering purposes. 
+ * Valid after the next frame is output */ + #define MVE_BUFFER_PARAM_TYPE_CHANGE_RECTANGLES (20) /* change rectangles */ + #define MVE_BUFFER_PARAM_TYPE_RATE_CONTROL_QP_RANGE (21) /* rate_control_qp_range */ + #define MVE_BUFFER_PARAM_TYPE_RATE_CONTROL_HRD_BUF_SIZE (23) /* arg */ + #define MVE_BUFFER_PARAM_TYPE_VP9_SEGMAP (24) /* VP9 segment map settings */ + #define MVE_BUFFER_PARAM_TYPE_RATE_CONTROL_QP_RANGE_I (25) /* special range for I frames, + * rate_control_qp_range */ + #define MVE_BUFFER_PARAM_TYPE_SEI_USER_DATA_UNREGISTERED (26) /* sei user_data_unregistered */ + #define MVE_BUFFER_PARAM_TYPE_GOP_RESET_DYNAMIC (27) + #define MVE_BUFFER_PARAM_TYPE_GOP_RESET_LTR_PEROID_DYNAMIC (28) /* reset ltr peroid dynamiclly */ + #define MVE_BUFFER_PARAM_TYPE_ENC_STATS (29) /* encode stats */ + #define MVE_BUFFER_PARAM_TYPE_OSD_RECTANGLES (30) /* osd rectangles */ + + union + { + uint32_t arg; /* some parameters only need a uint32_t as argument */ + struct mve_buffer_param_qp qp; + struct mve_reset_gop_dynamic reset_gop_dynamic; + struct mve_reset_ltr_peroid_dynamic reset_ltr_peroid_dynamic; + struct mve_buffer_param_regions regions; + struct mve_buffer_param_display_size display_size; + struct mve_buffer_param_range_map range_map; + struct mve_buffer_param_rate_control rate_control; + struct mve_buffer_param_rate_control_qp_range rate_control_qp_range; + struct mve_buffer_param_colour_description colour_description; + struct mve_buffer_param_frame_packing frame_packing; + struct mve_buffer_param_frame_field_info frame_field_info; + struct mve_buffer_param_change_rectangles change_rectangles; + struct mve_buffer_param_vp9_segmap vp9_segmap; + struct mve_buffer_param_sei_user_data_unregistered user_data_unregistered; + struct mve_buffer_param_enc_stats enc_stats; + struct mve_buffer_param_osd_rectangles osd_rectangles_buff; + } data; +}; + + +/* The general purpose buffer header stores the common fields of an + * mve_buffer_general. 
it contains the pointer to the data buffer that contains + * the general purpose data + */ +struct mve_buffer_general_hdr +{ + /* For identification of the buffer, this is not changed by the firmware. */ + uint64_t host_handle; + + /* this depends upon the type of the general purpose buffer */ + uint64_t user_data_tag; + + /* pointer to the buffer containing the general purpose data. the format + * of this data is defined by the configuration in the mve_buffer_general */ + uint32_t buffer_ptr; + + /* size of the buffer pointed to by buffer_ptr */ + uint32_t buffer_size; + + /* selects the type of semantics to use for the general purpose buffer. it + * tags (or discriminates) the union config member in mve_buffer_general + */ + uint16_t type; /* Extra data: */ + #define MVE_BUFFER_GENERAL_TYPE_BLOCK_CONFIGS (1) /* block_configs */ + #define MVE_BUFFER_GENERAL_TYPE_ENC_STATS (3) /* encoder_stats */ + + /* size of the mve_buffer_general config member */ + uint16_t config_size; + + /* pad to force 8-byte alignment */ + uint32_t reserved; +}; + +/* The general purpose buffer consists of a header and a configuration. The + * header contains a pointer to a buffer whose format is described by the + * configuration. The type of configuration is indicated by the type value in + * the header. N.B. In use, the size of the config part of this structure is + * defined in the header and is not necessarily equal to that returned by the + * sizeof() operator. This allows a more size efficient communication between + * the host and firmware. 
+ */ +struct mve_buffer_general +{ + struct mve_buffer_general_hdr header; + + /* used to describe the configuration of the general purpose buffer data + * pointed to be buffer_ptr + */ + union + { + struct mve_buffer_general_block_configs block_configs; + } config; +}; + +#ifdef __cplusplus +} +#endif + +#endif /* __FW_INCLUDE__MVE_PROTOCOL_DEF_H__ */ diff --git a/drivers/media/platform/cix/cix_vpu/external/fw_v3/mve_protocol_def.h b/drivers/media/platform/cix/cix_vpu/external/fw_v3/mve_protocol_def.h new file mode 100755 index 000000000000..97f9af186922 --- /dev/null +++ b/drivers/media/platform/cix/cix_vpu/external/fw_v3/mve_protocol_def.h @@ -0,0 +1,2019 @@ +/* + * The confidential and proprietary information contained in this file may + * only be used by a person authorised under and to the extent permitted + * by a subsisting licensing agreement from Arm Technology (China) Co., Ltd. + * + * (C) COPYRIGHT 2021-2021 Arm Technology (China) Co., Ltd. + * ALL RIGHTS RESERVED + * + * This entire notice must be reproduced on all copies of this file + * and copies of this file may only be made by a person if such person is + * permitted to do so under the terms of a subsisting license agreement + * from Arm Technology (China) Co., Ltd. + * + * SPDX-License-Identifier: GPL-2.0-only + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + * + */ + +/* + * Copyright: + * ---------------------------------------------------------------------------- + * This confidential and proprietary software may be used only as authorized + * by a licensing agreement from Arm Technology (China) Co., Ltd. + * (C) COPYRIGHT 2021-2021 Arm Technology (China) Co., Ltd. + * The entire notice above must be reproduced on all authorized copies and + * copies may only be made to the extent permitted by a licensing agreement + * from Arm Technology (China) Co., Ltd. + * ---------------------------------------------------------------------------- + */ +#ifndef __FW_INCLUDE__MVE_PROTOCOL_DEF_H__ +#define __FW_INCLUDE__MVE_PROTOCOL_DEF_H__ + +#ifdef __cplusplus +extern "C" { +#endif + +#ifdef __KERNEL__ +#include <linux/types.h> /* NOTE(review): include target was lost in extraction; reconstructed — confirm against original */ +#else +#include <stdint.h> /* NOTE(review): include target was lost in extraction; reconstructed — confirm against original */ +#endif + +/***************************************************************************** + * + * Communication protocol between the host/driver and the MVE firmware, + * the 'host interface'. + * + * MVE == LINLON Video Engine + * + * Protocol version 3.3 + * + * Note: Message structs may be expanded in the future; the host should + * use the 'size' of the message to determine how many bytes to + * read from the message queue, rather than a sizeof(struct). + * + ****************************************************************************/ + + + + + + + + + + + + + + + + + +/***************************************************************************** + * + * Virtual memory regions + * + * ..._ADDR_BEGIN gives the starting virtual address of the region, + * and ..._ADDR_END the (non-inclusive) ending address, such that + * the size of the region is obtained with the subtraction + * (..._ADDR_END - ..._ADDR_BEGIN). 
+ * + ****************************************************************************/ + +/* Memory region for first firmware instance */ +#define MVE_MEM_REGION_FW_INSTANCE0_ADDR_BEGIN (0x00000000u) +#define MVE_MEM_REGION_FW_INSTANCE0_ADDR_END (0x000FFFFFu + 1) + +/* Memory regions for other firmware instances */ +#define MVE_MEM_REGION_FW_INSTANCE1_ADDR_BEGIN (0x01000000u) +#define MVE_MEM_REGION_FW_INSTANCE1_ADDR_END \ + (MVE_MEM_REGION_FW_INSTANCE1_ADDR_BEGIN + MVE_MEM_REGION_FW_INSTANCE0_ADDR_END) + +#define MVE_MEM_REGION_FW_INSTANCE2_ADDR_BEGIN (0x02000000u) +#define MVE_MEM_REGION_FW_INSTANCE2_ADDR_END \ + (MVE_MEM_REGION_FW_INSTANCE2_ADDR_BEGIN + MVE_MEM_REGION_FW_INSTANCE0_ADDR_END) + +#define MVE_MEM_REGION_FW_INSTANCE3_ADDR_BEGIN (0x03000000u) +#define MVE_MEM_REGION_FW_INSTANCE3_ADDR_END \ + (MVE_MEM_REGION_FW_INSTANCE3_ADDR_BEGIN + MVE_MEM_REGION_FW_INSTANCE0_ADDR_END) + +#define MVE_MEM_REGION_FW_INSTANCE4_ADDR_BEGIN (0x04000000u) +#define MVE_MEM_REGION_FW_INSTANCE4_ADDR_END \ + (MVE_MEM_REGION_FW_INSTANCE4_ADDR_BEGIN + MVE_MEM_REGION_FW_INSTANCE0_ADDR_END) + +#define MVE_MEM_REGION_FW_INSTANCE5_ADDR_BEGIN (0x05000000u) +#define MVE_MEM_REGION_FW_INSTANCE5_ADDR_END \ + (MVE_MEM_REGION_FW_INSTANCE5_ADDR_BEGIN + MVE_MEM_REGION_FW_INSTANCE0_ADDR_END) + +#define MVE_MEM_REGION_FW_INSTANCE6_ADDR_BEGIN (0x06000000u) +#define MVE_MEM_REGION_FW_INSTANCE6_ADDR_END \ + (MVE_MEM_REGION_FW_INSTANCE6_ADDR_BEGIN + MVE_MEM_REGION_FW_INSTANCE0_ADDR_END) + +#define MVE_MEM_REGION_FW_INSTANCE7_ADDR_BEGIN (0x07000000u) +#define MVE_MEM_REGION_FW_INSTANCE7_ADDR_END \ + (MVE_MEM_REGION_FW_INSTANCE7_ADDR_BEGIN + MVE_MEM_REGION_FW_INSTANCE0_ADDR_END) + +/* + * Areas for communication between host and MVE are placed in the interval + * 0x10079000 - 0x1007FFFF, see special defines further down. 
+ */ + +/* PROTECTED virtual memory region */ +#define MVE_MEM_REGION_PROTECTED_ADDR_BEGIN (0x20000000u) +#define MVE_MEM_REGION_PROTECTED_ADDR_END (0x6FFFFFFFu + 1) + +/* FRAMEBUF virtual memory region */ +#define MVE_MEM_REGION_FRAMEBUF_ADDR_BEGIN (0x70000000u) +#define MVE_MEM_REGION_FRAMEBUF_ADDR_END (0xEFFFFFFFu + 1) + +/* 0xF0000000 - 0xFFFFFFFF is used internally in MVE */ + + + + + + + + + + + + + + + + +/***************************************************************************** + * + * Communication queues between HOST/DRIVER and MVE + * + * Address for queue for messages in to MVE, + * one struct mve_comm_area_host located here + * + ****************************************************************************/ + +#define MVE_COMM_MSG_INQ_ADDR (0x10079000u) + +/* Address for queue for messages out from MVE, + * one struct mve_comm_area_mve located here + */ +#define MVE_COMM_MSG_OUTQ_ADDR (0x1007A000u) + +/* Address for queue for input buffers in to MVE, + * one struct mve_comm_area_host located here + */ +#define MVE_COMM_BUF_INQ_ADDR (0x1007B000u) + +/* Address for queue for input buffers returned from MVE, + * one struct mve_comm_area_mve located here + */ +#define MVE_COMM_BUF_INRQ_ADDR (0x1007C000u) + +/* Address for queue for output buffers in to MVE, + * one struct mve_comm_area_host located here + */ +#define MVE_COMM_BUF_OUTQ_ADDR (0x1007D000u) + +/* Address for queue for output buffers returned from MVE, + * one struct mve_comm_area_mve located here + */ +#define MVE_COMM_BUF_OUTRQ_ADDR (0x1007E000u) + +/* One struct mve_rpc_communication_area located here */ +#define MVE_COMM_RPC_ADDR (0x1007F000u) + +/* Address for ram_print buffer in FW */ +#define MVE_FW_PRINT_RAM_ADDR (0x10100000u) +#define MVE_FW_PRINT_RAM_SIZE (0x80000u) + +/* One page of memory (4 kB) is used for each queue, + * so maximum 1024 words, but need room for some counters as well, + * see structs mve_comm_area_mve and mve_comm_area_host below. 
+ */ +#define MVE_COMM_QUEUE_SIZE_IN_WORDS 1020 + +/* This is the part of the message area that is written by host. */ +struct mve_comm_area_host +{ + volatile uint16_t out_rpos; + volatile uint16_t in_wpos; + volatile uint32_t reserved[ 3 ]; + /* + * Queue of messages to MVE, each block of data prefixed with + * a mve_msg_header + */ + volatile uint32_t in_data[ MVE_COMM_QUEUE_SIZE_IN_WORDS ]; +}; + +/* This is the part of the message area that is written by MVE. */ +struct mve_comm_area_mve +{ + volatile uint16_t out_wpos; + volatile uint16_t in_rpos; + volatile uint32_t reserved[ 3 ]; + /* + * Queue of messages to host, each block of data prefixed with + * a mve_msg_header + */ + volatile uint32_t out_data[ MVE_COMM_QUEUE_SIZE_IN_WORDS ]; +}; + +#define MVE_RPC_AREA_SIZE_IN_WORDS 256 +#define MVE_RPC_DATA_SIZE_IN_WORDS (MVE_RPC_AREA_SIZE_IN_WORDS - 3) +union mve_rpc_params +{ + volatile uint32_t data[ MVE_RPC_DATA_SIZE_IN_WORDS ]; + struct + { + char string[ MVE_RPC_DATA_SIZE_IN_WORDS * 4 ]; + } debug_print; + struct + { + uint32_t size; + uint32_t max_size; + uint8_t region; /* Memory region selection */ + #define MVE_MEM_REGION_PROTECTED (0) + #define MVE_MEM_REGION_OUTBUF (1) + #define MVE_MEM_REGION_FRAMEBUF (MVE_MEM_REGION_OUTBUF) + + /* The newly allocated memory must be placed + * on (at least) a 2^(log2_alignment) boundary + */ + uint8_t log2_alignment; + } mem_alloc; + struct + { + uint32_t ve_pointer; + uint32_t new_size; + } mem_resize; + struct + { + uint32_t ve_pointer; + } mem_free; +}; + +struct mve_rpc_communication_area +{ + volatile uint32_t state; + #define MVE_RPC_STATE_FREE (0) + #define MVE_RPC_STATE_PARAM (1) + #define MVE_RPC_STATE_RETURN (2) + volatile uint32_t call_id; + #define MVE_RPC_FUNCTION_DEBUG_PRINTF (1) + #define MVE_RPC_FUNCTION_MEM_ALLOC (2) + #define MVE_RPC_FUNCTION_MEM_RESIZE (3) + #define MVE_RPC_FUNCTION_MEM_FREE (4) + volatile uint32_t size; + union mve_rpc_params params; +}; + +struct mve_fw_ram_print_head_aera +{ + 
volatile uint32_t rd_cnt; + volatile uint32_t reserved0[15]; + + volatile uint32_t flag; + volatile uint32_t index; + volatile uint32_t wr_cnt; + volatile uint32_t reserved1[13]; +}; + + + + + + + + + + + + + + +/********************************************************************* + * + * Message codes + * + *********************************************************************/ + +/* Messages consist of one struct mve_msg_header, possibly followed + * by extra data. + */ +struct mve_msg_header +{ + uint16_t code; + /* REQUESTs are messages from the + * host/driver to the firmware: Code: Extra data in message: */ + #define MVE_REQUEST_CODE_GO (1001) /* no extra data */ + #define MVE_REQUEST_CODE_STOP (1002) /* no extra data */ + #define MVE_REQUEST_CODE_INPUT_FLUSH (1003) /* no extra data */ + #define MVE_REQUEST_CODE_OUTPUT_FLUSH (1004) /* no extra data */ + #define MVE_REQUEST_CODE_SWITCH (1005) /* no extra data */ + #define MVE_REQUEST_CODE_PING (1006) /* no extra data */ + #define MVE_REQUEST_CODE_DUMP (1008) /* no extra data */ + #define MVE_REQUEST_CODE_JOB (1009) /* struct mve_request_job */ + #define MVE_REQUEST_CODE_SET_OPTION (1010) /* struct mve_request_set_option (variable size) */ + #define MVE_REQUEST_CODE_RELEASE_REF_FRAME (1011) /* struct mve_request_release_ref_frame */ + #define MVE_REQUEST_CODE_IDLE_ACK (1012) /* no extra data */ + #define MVE_REQUEST_CODE_DEBUG (1013) /* level: 0 for disable, refer to fw_log_level */ + /* RESPONSEs are messages from + * the firmware to the host: */ + #define MVE_RESPONSE_CODE_SWITCHED_IN (2001) /* struct mve_response_switched_in */ + #define MVE_RESPONSE_CODE_SWITCHED_OUT (2002) /* struct mve_response_switched_out */ + #define MVE_RESPONSE_CODE_SET_OPTION_CONFIRM (2003) /* no extra data */ + #define MVE_RESPONSE_CODE_JOB_DEQUEUED (2004) /* struct mve_response_job_dequeued */ + #define MVE_RESPONSE_CODE_INPUT (2005) /* no extra data, but buffer placed in buffer queue */ + #define MVE_RESPONSE_CODE_OUTPUT (2006) 
/* no extra data, but buffer placed in buffer queue */ + #define MVE_RESPONSE_CODE_INPUT_FLUSHED (2007) /* no extra data */ + #define MVE_RESPONSE_CODE_OUTPUT_FLUSHED (2008) /* no extra data */ + #define MVE_RESPONSE_CODE_PONG (2009) /* no extra data */ + #define MVE_RESPONSE_CODE_ERROR (2010) /* struct mve_response_error */ + #define MVE_RESPONSE_CODE_STATE_CHANGE (2011) /* struct mve_response_state_change */ + #define MVE_RESPONSE_CODE_DUMP (2012) /* no extra data */ + #define MVE_RESPONSE_CODE_IDLE (2013) /* no extra data */ + #define MVE_RESPONSE_CODE_FRAME_ALLOC_PARAM (2014) /* struct mve_response_frame_alloc_parameters */ + #define MVE_RESPONSE_CODE_SEQUENCE_PARAMETERS (2015) /* struct mve_response_sequence_parameters */ + #define MVE_RESPONSE_CODE_EVENT (2016) /* struct mve_response_event (variable size) */ + #define MVE_RESPONSE_CODE_SET_OPTION_FAIL (2017) /* struct mve_response_set_option_failed */ + #define MVE_RESPONSE_CODE_REF_FRAME_UNUSED (2018) /* struct mve_response_ref_frame_unused */ + #define MVE_RESPONSE_CODE_DEBUG (2019) /* no extra data */ + #define MVE_RESPONSE_CODE_TRACE (2050) /* msg_header(code, size), counter, sum */ + /* BUFFERs are sent from host to firmware, + * and then return at some time: */ + #define MVE_BUFFER_CODE_FRAME (3001) /* struct mve_buffer_frame */ + #define MVE_BUFFER_CODE_BITSTREAM (3002) /* struct mve_buffer_bitstream */ + #define MVE_BUFFER_CODE_PARAM (3003) /* struct mve_buffer_param */ + #define MVE_BUFFER_CODE_GENERAL (3004) /* struct mve_buffer_general */ + + uint16_t size; /* size in bytes of trailing data, 0 if none */ +}; + + + + + + + + + + + + + + + +enum fw_log_level{ + FW_LOG_ERROR = 1, + FW_LOG_WARNING, + FW_LOG_INFO, + FW_LOG_DEBUG, + FW_LOG_VERBOSE, + FW_LOG_MAX +}; + + +/********************************************************************* + * + * REQUESTs are messages from the host to the firmware + * + * Some of the MVE_REQUEST_CODE_ codes are followed by one of the + * structs below. 
+ * + *********************************************************************/ + +struct mve_request_job +{ + uint16_t cores; /* >= 1, number of cores to use, must match request to HW scheduler */ + uint16_t frames; /* number of frames to process, zero means infinite */ + uint32_t flags; /* can be zero */ + #define MVE_JOB_FLAG_DISABLE_BNDMGR (0x01) +}; + +struct mve_response_trace +{ + struct mve_msg_header msg_hdr; + uint32_t sum; + uint32_t count; +}; + +struct mve_request_set_option +{ + uint32_t index; + #define MVE_SET_OPT_INDEX_NALU_FORMAT (1) /* see arg, MVE_OPT_NALU_FORMAT_ */ + #define MVE_SET_OPT_INDEX_STREAM_ESCAPING (2) /* arg=1 to enable (default), arg=0 to disable */ + #define MVE_SET_OPT_INDEX_PROFILE_LEVEL (3) /* data.profile_level */ + #define MVE_SET_OPT_INDEX_HOST_PROTOCOL_PRINTS (4) /* arg=1 to enable, arg=0 to disable (default) */ + #define MVE_SET_OPT_INDEX_PROFILING (5) /* arg=1 to enable, arg=0 to disable (default) */ + #define MVE_SET_OPT_INDEX_DISABLE_FEATURES (6) /* see arg, MVE_OPT_DISABLE_FEATURE_ */ + #define MVE_SET_OPT_INDEX_IGNORE_STREAM_HEADERS (7) /* decode, arg=1 to enable, + * arg=0 to disable (default) */ + #define MVE_SET_OPT_INDEX_FRAME_REORDERING (8) /* decode, arg=1 to enable (default), + * arg=0 to disable */ + #define MVE_SET_OPT_INDEX_INTBUF_SIZE (9) /* decode, arg = suggested limit of intermediate + * buffer allocation */ + #define MVE_SET_OPT_INDEX_ENC_P_FRAMES (16) /* encode, arg = nPFrames */ + #define MVE_SET_OPT_INDEX_ENC_B_FRAMES (17) /* encode, arg = number of B frames */ + #define MVE_SET_OPT_INDEX_GOP_TYPE (18) /* encode, see arg */ + #define MVE_SET_OPT_INDEX_INTRA_MB_REFRESH (19) /* encode, arg */ + #define MVE_SET_OPT_INDEX_ENC_CONSTR_IPRED (20) /* encode, arg = 0 or 1 */ + #define MVE_SET_OPT_INDEX_ENC_ENTROPY_SYNC (21) /* encode, arg = 0 or 1 */ + #define MVE_SET_OPT_INDEX_ENC_TEMPORAL_MVP (22) /* encode, arg = 0 or 1 */ + #define MVE_SET_OPT_INDEX_TILES (23) /* encode, data.tiles */ + #define 
MVE_SET_OPT_INDEX_ENC_MIN_LUMA_CB_SIZE (24) /* HEVC encode, arg = 8 or 16,
+ * for sizes 8x8 or 16x16 */
+ #define MVE_SET_OPT_INDEX_ENC_MB_TYPE_ENABLE (25) /* encode, see arg */
+ #define MVE_SET_OPT_INDEX_ENC_MB_TYPE_DISABLE (26) /* encode, see arg */
+ #define MVE_SET_OPT_INDEX_ENC_H264_CABAC (27) /* encode, arg = 0 or 1, enabled by default */
+ #define MVE_SET_OPT_INDEX_ENC_SLICE_SPACING (28) /* encode, arg = suggested number of
+ * CTUs/macroblocks in a slice */
+ #define MVE_SET_OPT_INDEX_ENC_VP9_PROB_UPDATE (30) /* VP9 encode, see arg */
+ #define MVE_SET_OPT_INDEX_RESYNC_INTERVAL (31) /* JPEG encode, arg = nRestartInterval
+ * = nResynchMarkerSpacing */
+ #define MVE_SET_OPT_INDEX_HUFFMAN_TABLE (32) /* JPEG encode, data.huffman_table */
+ #define MVE_SET_OPT_INDEX_QUANT_TABLE (33) /* JPEG encode, data.quant_table */
+ #define MVE_SET_OPT_INDEX_ENC_EXPOSE_REF_FRAMES (34) /* encode debug, arg = 0 or 1,
+ * disabled by default */
+ #define MVE_SET_OPT_INDEX_MBINFO_OUTPUT (35) /* encode, arg=1 to enable,
+ * arg=0 to disable (default) */
+ #define MVE_SET_OPT_INDEX_MV_SEARCH_RANGE (36) /* encode, data.motion_vector_search_range */
+ #define MVE_SET_OPT_INDEX_ENC_STREAM_BITDEPTH (38) /* encode, data.bitdepth, to set other bitdepth
+ * of encoded stream than of input frames */
+ #define MVE_SET_OPT_INDEX_ENC_STREAM_CHROMA_FORMAT (39) /* encode, arg, to set other chroma format of
+ * encoded stream than of input frames */
+ #define MVE_SET_OPT_INDEX_ENC_RGB_TO_YUV_MODE (40) /* encode, arg, select which way RGB is converted
+ * to YUV before encoding */
+ #define MVE_SET_OPT_INDEX_ENC_BANDWIDTH_LIMIT (41) /* encode, arg, the maximum bandwidth limit defined
+ * by host */
+ #define MVE_SET_OPT_INDEX_WATCHDOG_TIMEOUT (42) /* arg=timeout, arg=0 to disable */
+ #define MVE_SET_OPT_INDEX_ENC_CABAC_INIT_IDC (43) /* encode, arg; 0,1,2 for H264; 0,1 for HEVC */
+ #define MVE_SET_OPT_INDEX_ENC_ADPTIVE_QUANTISATION (44) /* encode (h264 and hevc) */
+ #define 
MVE_SET_OPT_INDEX_QP_DELTA_I_P (45) + #define MVE_SET_OPT_INDEX_QP_DELTA_I_B_REF (46) + #define MVE_SET_OPT_INDEX_QP_DELTA_I_B_NONREF (47) + #define MVE_SET_OPT_INDEX_CB_QP_OFFSET (48) + #define MVE_SET_OPT_INDEX_CR_QP_OFFSET (49) + #define MVE_SET_OPT_INDEX_LAMBDA_SCALE (50) /* encode, data.lambda_scale */ + #define MVE_SET_OPT_INDEX_ENC_MAX_NUM_CORES (51) /* maximum number of cores */ + #define MVE_SET_OPT_INDEX_ENC_FIXED_QP (56) + /* ARBITRARY_DOWNSCALE */ + #define MVE_SET_OPT_INDEX_DEC_DOWNSCALE (57) /* decode, set downscaled width and height */ + /* SAO_EVAL */ + #define MVE_SET_OPT_INDEX_ENC_SAO_PENALTY (58) + #define MVE_SET_OPT_INDEX_ENC_SAO_EN (59) /* do not use now */ + #define MVE_SET_OPT_INDEX_ENC_SAO_LUMA_EN (60) + #define MVE_SET_OPT_INDEX_ENC_SAO_CHROMA_EN (61) + /* MVE_ENCODE_CROP_FEATURE */ + #define MVE_SET_OPT_INDEX_ENC_CROP_RARAM_LEFT (62) + #define MVE_SET_OPT_INDEX_ENC_CROP_RARAM_RIGHT (63) + #define MVE_SET_OPT_INDEX_ENC_CROP_RARAM_TOP (64) + #define MVE_SET_OPT_INDEX_ENC_CROP_RARAM_BOTTOM (65) + /* LONG_TERM_REFERENCE */ + #define MVE_SET_OPT_INDEX_ENC_LTR_MODE (66) + #define MVE_SET_OPT_INDEX_ENC_LTR_PERIOD (67) + #define MVE_SET_OPT_INDEX_DEC_DOWNSCALE_POS_MODE (69) + #define MVE_SET_OPT_INDEX_MINI_FRAME_MAX (70) /* max cnt of mini frames*/ + /* Encode Data Statistics */ + #define MVE_SET_OPT_INDEX_ENC_STATS_MODE (71) + #define MVE_SET_OPT_INDEX_ENC_MULTI_SPS_PPS (73) + #define MVE_SET_OPT_INDEX_ENC_INIT_QPI (74) + #define MVE_SET_OPT_INDEX_ENC_INIT_QPP (75) + #define MVE_SET_OPT_INDEX_ENC_INIT_QPB (76) + + #define MVE_SET_OPT_INDEX_ENC_IPENALTY_ANGULAR (77) + #define MVE_SET_OPT_INDEX_ENC_IPENALTY_PLANAR (78) + #define MVE_SET_OPT_INDEX_ENC_IPENALTY_DC (79) + + #define MVE_SET_OPT_INDEX_ENC_RC_CLIP_TOP (80) + #define MVE_SET_OPT_INDEX_ENC_RC_CLIP_BOTTOM (81) + #define MVE_SET_OPT_INDEX_ENC_QPMAP_CLIP_TOP (82) + #define MVE_SET_OPT_INDEX_ENC_QPMAP_CLIP_BOTTOM (83) + + #define MVE_SET_OPT_INDEX_ENC_REF_RING_BUFFER (84) + #define 
MVE_SET_OPT_INDEX_ENC_JPEG_RC (85) //MVE_ENC_RC_JPEG
+ #define MVE_SET_OPT_INDEX_ENC_RC_I_BIT_RATIO (86) /* RC_I_BIT_RATIO */
+ #define MVE_SET_OPT_INDEX_ENC_RC_I_BIT_MODE (87) /* RC_I_BIT_RATIO */
+ #define MVE_SET_OPT_iNDEX_ENC_VISUAL_ENABLE (89)//enable_visual FW CODE IS iNDEX
+ /* SVCT3 level-1 period */
+ #define MVE_SET_OPT_INDEX_ENC_SVCT3_LEVEL1_PEROID (88)
+ /* GDR */
+ #define MVE_SET_OPT_INDEX_ENC_GDR_NUMBER (90)
+ #define MVE_SET_OPT_INDEX_ENC_GDR_PERIOD (91)
+
+ #define MVE_SET_OPT_INDEX_SCD_ENABLE (93)
+ #define MVE_SET_OPT_INDEX_SCD_PERCENT (94)
+ #define MVE_SET_OPT_INDEX_SCD_THRESHOLD (95)
+ /* for aq, add new para */
+ #define MVE_SET_OPT_INDEX_ENC_AQ_SSIM_EN (96)
+ #define MVE_SET_OPT_INDEX_ENC_AQ_NEG_RATIO (97)
+ #define MVE_SET_OPT_INDEX_ENC_AQ_POS_RATIO (98)
+ #define MVE_SET_OPT_INDEX_ENC_AQ_QPDELTA_LMT (99)
+ #define MVE_SET_OPT_INDEX_ENC_AQ_INIT_FRM_AVG_SVAR (100)
+ #define MVE_SET_OPT_INDEX_SCD_ADAPTIVE_I (101)
+ #define MVE_SET_OPT_INDEX_DEC_YUV2RGB_PARAMS (103)
+ #define MVE_SET_OPT_INDEX_ENC_FORCED_UV_VAL (104)
+ #define MVE_SET_OPT_INDEX_DEC_DSL_INTERP_MODE (105)
+
+ #define MVE_SET_OPT_INDEX_ENC_SRC_CROPPING (106)
+ #define MVE_SET_OPT_INDEX_DEC_DST_CROPPING (107) //ENABLE_DST_CROP
+ #define MVE_SET_OPT_INDEX_DEC_AV1_FSF (1004) //FAST_SHOW_FRAME
+
+ #define MVE_SET_OPT_INDEX_ENC_INTER_IPENALTY_ANGULAR (2011)
+ #define MVE_SET_OPT_INDEX_ENC_INTER_IPENALTY_PLANAR (2012)
+ #define MVE_SET_OPT_INDEX_ENC_INTER_IPENALTY_DC (2013)
+ union
+ {
+ uint32_t arg; /* Most options only need a uint32_t as argument */
+ /* For option MVE_SET_OPT_INDEX_NALU_FORMAT, arg should
+ * be one of these: */
+ #define MVE_OPT_NALU_FORMAT_START_CODES (1)
+ #define MVE_OPT_NALU_FORMAT_ONE_NALU_PER_BUFFER (2)
+ #define MVE_OPT_NALU_FORMAT_ONE_BYTE_LENGTH_FIELD (4)
+ #define MVE_OPT_NALU_FORMAT_TWO_BYTE_LENGTH_FIELD (8)
+ #define MVE_OPT_NALU_FORMAT_FOUR_BYTE_LENGTH_FIELD (16)
+ #define MVE_OPT_NALU_FORMAT_MBINFO (32) /* only used for debugging */
+ /* For option 
MVE_SET_OPT_INDEX_GOP_TYPE, arg should + * be one of these: */ + #define MVE_OPT_GOP_TYPE_BIDIRECTIONAL (1) + #define MVE_OPT_GOP_TYPE_LOW_DELAY (2) + #define MVE_OPT_GOP_TYPE_PYRAMID (3) + #define MVE_OPT_GOP_TYPE_SVCT3 (4) + #define MVE_OPT_GOP_TYPE_GDR (5) + /* For option MVE_SET_OPT_INDEX_ENC_VP9_PROB_UPDATE, + * arg should be one of these: */ + #define MVE_OPT_VP9_PROB_UPDATE_DISABLED (0) + #define MVE_OPT_VP9_PROB_UPDATE_IMPLICIT (1) + #define MVE_OPT_VP9_PROB_UPDATE_EXPLICIT (2) + /* For option MVE_SET_OPT_INDEX_DISABLE_FEATURES, arg + * should be a bitmask with features to disable: */ + #define MVE_OPT_DISABLE_FEATURE_AFBC_COMP (0x00000001) /* VDMA AFBC Compression */ + #define MVE_OPT_DISABLE_FEATURE_REF_CACHE (0x00000002) /* REF caching */ + #define MVE_OPT_DISABLE_FEATURE_DEBLOCK (0x00000004) /* Deblocking */ + #define MVE_OPT_DISABLE_FEATURE_SAO (0x00000008) /* SAO */ + #define MVE_OPT_DISABLE_FEATURE_PIC_OUTPUT (0x00000020) /* Picture Output Removal */ + #define MVE_OPT_DISABLE_FEATURE_PIPE (0x00000040) /* Pipe (i.e. parser-only) */ + #define MVE_OPT_DISABLE_FEATURE_SLEEP (0x00000080) /* Clock gating + * (SOC_SYSCTRL.SLEEP bit) */ + #define MVE_OPT_DISABLE_FEATURE_AFBC_LEGACY_REF (0x00000100) /* Enables tiled AFBC format in + * reference buffers. 
Ignored + * for decode AFBC output */ + #define MVE_OPT_DISABLE_FEATURE_REF_PICS (0x00000400) /* Forces use of static 16x16 + * reference pics */ + #define MVE_OPT_DISABLE_FEATURE_REFSZ_LIMIT (0x00001000) /* Disable REFSZ bw limit */ + #define MVE_OPT_DISABLE_FEATURE_SUPPORT_NONIBC_TILE (0x00010000) /* AV1 decode, forces use tiles for AFBC */ + /* For options MVE_SET_OPT_INDEX_ENC_MB_TYPE_ENABLE + * and MVE_SET_OPT_INDEX_ENC_MB_TYPE_DISABLE, arg + * should be a bitmask of MVE_MBTYPEs: */ + #define MVE_MBTYPE_4x4 (0x00000001) /* 4x4 inter */ + #define MVE_MBTYPE_4x8 (0x00000002) /* 4x8 inter */ + #define MVE_MBTYPE_8x4 (0x00000004) /* 8x4 inter */ + #define MVE_MBTYPE_8x8 (0x00000008) /* 8x8 inter */ + #define MVE_MBTYPE_8x16 (0x00000010) /* 8x16 inter */ + #define MVE_MBTYPE_16x8 (0x00000020) /* 16x8 inter */ + #define MVE_MBTYPE_16x16 (0x00000040) /* 16x16 inter */ + #define MVE_MBTYPE_PSKIP (0x00000080) /* P Skip inter */ + #define MVE_MBTYPE_I4x4 (0x00000100) /* 4x4 intra */ + #define MVE_MBTYPE_I8x8 (0x00000200) /* 8x8 intra */ + #define MVE_MBTYPE_I16x16 (0x00000400) /* 16x16 intra */ + #define MVE_MBTYPE_I32x32 (0x00000800) /* 32x32 intra */ + #define MVE_MBTYPE_16x32 (0x00001000) /* 16x32 inter */ + #define MVE_MBTYPE_32x16 (0x00002000) /* 32x16 inter */ + #define MVE_MBTYPE_32x32 (0x00004000) /* 32x32 inter */ + /* For option MVE_SET_OPT_INDEX_ENC_RGB_TO_YUV_MODE, + * arg should be one of these: */ + #define MVE_OPT_RGB_TO_YUV_BT601_STUDIO (0) + #define MVE_OPT_RGB_TO_YUV_BT601_FULL (1) + #define MVE_OPT_RGB_TO_YUV_BT709_STUDIO (2) + #define MVE_OPT_RGB_TO_YUV_BT709_FULL (3) + #define MVE_OPT_RGB_TO_YUV_BT2020_STUDIO (4) + #define MVE_OPT_RGB_TO_YUV_BT2020_FULL (5) + struct + { + uint16_t profile; + /* AVC/H.264 profiles */ + #define MVE_OPT_PROFILE_H264_BASELINE (1) + #define MVE_OPT_PROFILE_H264_MAIN (2) + #define MVE_OPT_PROFILE_H264_HIGH (3) + #define MVE_OPT_PROFILE_H264_HIGH_10 (4) + /* HEVC/H.265 profiles */ + #define MVE_OPT_PROFILE_H265_MAIN (1) + 
#define MVE_OPT_PROFILE_H265_MAIN_STILL (2) + #define MVE_OPT_PROFILE_H265_MAIN_INTRA (3) + #define MVE_OPT_PROFILE_H265_MAIN_10 (4) + /* VC-1 profiles */ + #define MVE_OPT_PROFILE_VC1_SIMPLE (1) + #define MVE_OPT_PROFILE_VC1_MAIN (2) + #define MVE_OPT_PROFILE_VC1_ADVANCED (3) + /* VP8 profiles */ + #define MVE_OPT_PROFILE_VP8_MAIN (1) + uint16_t level; + /* AVC/H.264 levels */ + #define MVE_OPT_LEVEL_H264_1 (1) + #define MVE_OPT_LEVEL_H264_1b (2) + #define MVE_OPT_LEVEL_H264_11 (3) + #define MVE_OPT_LEVEL_H264_12 (4) + #define MVE_OPT_LEVEL_H264_13 (5) + #define MVE_OPT_LEVEL_H264_2 (6) + #define MVE_OPT_LEVEL_H264_21 (7) + #define MVE_OPT_LEVEL_H264_22 (8) + #define MVE_OPT_LEVEL_H264_3 (9) + #define MVE_OPT_LEVEL_H264_31 (10) + #define MVE_OPT_LEVEL_H264_32 (11) + #define MVE_OPT_LEVEL_H264_4 (12) + #define MVE_OPT_LEVEL_H264_41 (13) + #define MVE_OPT_LEVEL_H264_42 (14) + #define MVE_OPT_LEVEL_H264_5 (15) + #define MVE_OPT_LEVEL_H264_51 (16) + #define MVE_OPT_LEVEL_H264_52 (17) + #define MVE_OPT_LEVEL_H264_6 (18) + #define MVE_OPT_LEVEL_H264_61 (19) + #define MVE_OPT_LEVEL_H264_62 (20) + #define MVE_OPT_LEVEL_H264_USER_SUPPLIED_BASE (32) + /* The value (MVE_OPT_LEVEL_H264_USER_SUPPLIED_BASE + level_idc) encodes a user + * supplied level_idc value in the range 0 to 255 inclusive. 
If the host supplies a level_idc + * value by this method then the encoder will encode this level_idc value in the bitstream + * without checking the validity of the level_idc value + */ + #define MVE_OPT_LEVEL_H264_USER_SUPPLIED_MAX (MVE_OPT_LEVEL_H264_USER_SUPPLIED_BASE + 255) + /* HEVC/H.265 levels */ + #define MVE_OPT_LEVEL_H265_MAIN_TIER_1 (1) + #define MVE_OPT_LEVEL_H265_HIGH_TIER_1 (2) + #define MVE_OPT_LEVEL_H265_MAIN_TIER_2 (3) + #define MVE_OPT_LEVEL_H265_HIGH_TIER_2 (4) + #define MVE_OPT_LEVEL_H265_MAIN_TIER_21 (5) + #define MVE_OPT_LEVEL_H265_HIGH_TIER_21 (6) + #define MVE_OPT_LEVEL_H265_MAIN_TIER_3 (7) + #define MVE_OPT_LEVEL_H265_HIGH_TIER_3 (8) + #define MVE_OPT_LEVEL_H265_MAIN_TIER_31 (9) + #define MVE_OPT_LEVEL_H265_HIGH_TIER_31 (10) + #define MVE_OPT_LEVEL_H265_MAIN_TIER_4 (11) + #define MVE_OPT_LEVEL_H265_HIGH_TIER_4 (12) + #define MVE_OPT_LEVEL_H265_MAIN_TIER_41 (13) + #define MVE_OPT_LEVEL_H265_HIGH_TIER_41 (14) + #define MVE_OPT_LEVEL_H265_MAIN_TIER_5 (15) + #define MVE_OPT_LEVEL_H265_HIGH_TIER_5 (16) + #define MVE_OPT_LEVEL_H265_MAIN_TIER_51 (17) + #define MVE_OPT_LEVEL_H265_HIGH_TIER_51 (18) + #define MVE_OPT_LEVEL_H265_MAIN_TIER_52 (19) + #define MVE_OPT_LEVEL_H265_HIGH_TIER_52 (20) + #define MVE_OPT_LEVEL_H265_MAIN_TIER_6 (21) + #define MVE_OPT_LEVEL_H265_HIGH_TIER_6 (22) + #define MVE_OPT_LEVEL_H265_MAIN_TIER_61 (23) + #define MVE_OPT_LEVEL_H265_HIGH_TIER_61 (24) + #define MVE_OPT_LEVEL_H265_MAIN_TIER_62 (25) + #define MVE_OPT_LEVEL_H265_HIGH_TIER_62 (26) + } profile_level; + struct + { + int32_t mv_search_range_x; + int32_t mv_search_range_y; + } motion_vector_search_range; + struct + { + uint32_t type; + #define MVE_OPT_HUFFMAN_TABLE_DC_LUMA (1) + #define MVE_OPT_HUFFMAN_TABLE_AC_LUMA (2) + #define MVE_OPT_HUFFMAN_TABLE_DC_CHROMA (3) + #define MVE_OPT_HUFFMAN_TABLE_AC_CHROMA (4) + uint8_t number_of_huffman_of_code_length[ 16 ]; + uint8_t table[ 162 ]; /* 12 are used for DC, 162 for AC */ + } huffman_table; + struct + { + uint32_t type; 
+ #define MVE_OPT_QUANT_TABLE_LUMA (1) + #define MVE_OPT_QUANT_TABLE_CHROMA (2) + uint8_t matrix[ 64 ]; + } quant_table; + struct + { + uint32_t qscale; + uint32_t qscale_luma; + uint32_t qscale_chroma; + uint32_t fps; + } jpeg_rate_control; //MVE_ENC_RC_JPEG + struct + { + /* For HEVC, tile_cols must be zero. For VP9, tile_rows + * and tile_cols must be powers of 2. */ + uint16_t tile_rows; + uint16_t tile_cols; + } tiles; + struct + { + uint16_t luma_bitdepth; + uint16_t chroma_bitdepth; + } bitdepth; + struct + { + /* Scale factors, and their square roots, for the lambda + * coefficients used by the encoder, in unsigned Q8 fixed-point + * format. Default (no scaling) is 1.0 (so 0x0100 in hex). + */ + uint16_t lambda_scale_i_q8; + uint16_t lambda_scale_sqrt_i_q8; + uint16_t lambda_scale_p_q8; + uint16_t lambda_scale_sqrt_p_q8; + uint16_t lambda_scale_b_ref_q8; + uint16_t lambda_scale_sqrt_b_ref_q8; + uint16_t lambda_scale_b_nonref_q8; + uint16_t lambda_scale_sqrt_b_nonref_q8; + } lambda_scale; + /* ARBITRARY_DOWNSCALE */ + struct + { + uint16_t width; + uint16_t height; + } downscaled_frame; + struct + { + uint32_t mode; + } dsl_pos; + struct + { + int16_t coef[3][3]; //coef[Y|U|V][R|G|B] + uint16_t offset[3]; + } yuv2rgb_params; + struct + { + uint8_t rgb2yuv_mode; + int16_t coef[3 * 3]; //coef[Y|U|V][R|G|B] + uint8_t luma_range[2]; + uint8_t chroma_range[2]; + uint8_t rgb_range[2]; + } rgb2yuv_params; + struct + { + uint16_t value; + } gray_uv_value; + struct + { + uint16_t mode; + } interp_mode; + struct + { + uint8_t crop_en; + /* left start x of luma in original image */ + uint16_t x; //included + /* top start y of luma in original image */ + uint16_t y; //included + /* cropped width of luma in original image */ + uint16_t width; + /* cropped height of luma in original image */ + uint16_t height; + } enc_src_crop; + + struct + { + uint8_t crop_en; + /* left start x of luma in original image */ + uint16_t x; //included + /* top start y of luma in original 
image */
+ uint16_t y; //included
+ /* cropped width of luma in original image */
+ uint16_t width;
+ /* cropped height of luma in original image */
+ uint16_t height;
+ } dec_dst_crop; //ENABLE_DST_CROP
+ } data;
+};
+
+struct mve_request_release_ref_frame
+{
+ /* Decode only: For a frame buffer that MVE has returned
+ * marked as _REF_FRAME, the host can send this message
+ * to ask the MVE to release the buffer as soon as it is
+ * no longer used as reference anymore. (Otherwise, in
+ * normal operation, the host would re-enqueue the buffer
+ * to the MVE when it has been displayed and can be over-
+ * written with a new frame.)
+ *
+ * Note: When a frame is no longer used as reference depends
+ * on the stream being decoded, and there is no way to
+ * guarantee a short response time, the response may not
+ * come until the end of the stream.
+ */
+ uint32_t buffer_address;
+};
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+/*********************************************************************
+ *
+ * RESPONSEs are messages from the firmware to the host
+ *
+ * Some of the MVE_RESPONSE_CODE_ codes are followed by one of the
+ * structs below.
+ *
+ *********************************************************************/
+
+/* Sent when firmware has booted.
+ */
+struct mve_response_switched_in
+{
+ uint32_t core;
+};
+
+/* Sent when last core in a session has switched out.
+ */
+struct mve_response_switched_out
+{
+ uint32_t core;
+ uint32_t reason;
+ uint32_t sub_reason;
+};
+
+/* Response confirming state transition after either GO or STOP
+ * command from host.
+ */
+struct mve_response_state_change
+{
+ uint32_t new_state;
+ #define MVE_STATE_STOPPED (0)
+ #define MVE_STATE_RUNNING (2)
+};
+
+/* Message sent when all cores in the session have dequeued a
+ * job from the firmware job queue.
+ */
+struct mve_response_job_dequeued
+{
+ uint32_t valid_job;
+};
+
+/* Fatal error message from firmware, if sent then no further
+ * operation is possible. 
+ */
+struct mve_response_error
+{
+ uint32_t error_code;
+ #define MVE_ERROR_ABORT (1)
+ #define MVE_ERROR_OUT_OF_MEMORY (2)
+ #define MVE_ERROR_ASSERT (3)
+ #define MVE_ERROR_UNSUPPORTED (4)
+ #define MVE_ERROR_INVALID_BUFFER (6)
+ #define MVE_ERROR_INVALID_STATE (8)
+ #define MVE_ERROR_WATCHDOG (9)
+
+ #define MVE_MAX_ERROR_MESSAGE_SIZE (128)
+ char message[ MVE_MAX_ERROR_MESSAGE_SIZE ];
+};
+
+/* When a set-option succeeds, a confirmation message is
+ * sent, including the index-code for that particular option.
+ */
+struct mve_response_set_option_confirm
+{
+ uint32_t index; /* Same as 'index' in struct mve_request_set_option */
+};
+
+/* If a set-option request fails, this message is returned.
+ * This is not a fatal error. The set-option had no effect,
+ * and the session is still alive.
+ * For example, trying to set an option with a too large
+ * or small parameter would result in this message.
+ * The included text string is meant for development and
+ * debugging purposes only.
+ * (When a set-option succeeds the set-option-confirm
+ * message code is sent instead.)
+ */
+struct mve_response_set_option_fail
+{
+ uint32_t index; /* Same as 'index' in struct mve_request_set_option */
+ char message[ MVE_MAX_ERROR_MESSAGE_SIZE ];
+};
+
+/* Decode only: This message is sent from MVE to the host so that it can
+ * allocate large enough output buffers. Output buffers that are too small
+ * will be returned to the host marked as 'rejected'.
+ */
+struct mve_response_frame_alloc_parameters
+{
+ /* Please note that the below information is a hint
+ * for what buffers to allocate, it does not say
+ * what actual resolution an output picture has. 
+ */ + + /* To use if allocating PLANAR YUV output buffers: */ + uint16_t planar_alloc_frame_width; + uint16_t planar_alloc_frame_height; + + /* To use if allocating AFBC output buffers + * (if interlace, each field needs this size): + */ + uint32_t afbc_alloc_bytes; + + /* For situations where downscaled AFBC is supported, + * this number of bytes is needed for the downscaled frame. + */ + uint32_t afbc_alloc_bytes_downscaled; + + /* When the host allocates an AFBC frame buffer, it should normally set + * the the afbc_width_in_superblocks to be at least this recommended value. + * Buffers with smaller values are likely to be returned rejected by the MVE. + * See also comments above for afbc_alloc_bytes and + * afbc_alloc_bytes_downscaled, they describe the situations where the + * different values are used. + */ + uint16_t afbc_width_in_superblocks; + uint16_t afbc_width_in_superblocks_downscaled; + + /* For PLANAR YUV output, every plane's address need to be adjusted to get + * optimal AXI bursts when the pixel data is written, the values below may + * be used to calculate address offsets. + */ + uint16_t cropx; + uint16_t cropy; + + uint32_t mbinfo_alloc_bytes; /* Only for debugging */ + + + /* downscaled frame width/height for decode */ + /* ARBITRARY_DOWNSCALE */ + uint16_t dsl_frame_width; + uint16_t dsl_frame_height; + uint16_t dsl_pos_mode; + uint8_t ctu_size; /* EXPORT_SEQ_INFO */ + /* ENABLE_DST_CROP*/ + uint16_t dst_crop_x; + uint16_t dst_crop_y; + uint16_t dst_crop_width; + uint16_t dst_crop_height; +}; + +/* Decode only: This message is sent from MVE to the host so that it can + * allocate suitable output buffers. The needed size of the buffer is sent + * in a separate message (above). + * When MVE sends the message below, it enters a waiting-state and will not + * make any progress until the host sends an output-flush command, upon + * which MVE will return all output buffers, followed by a message saying + * that the output has been flushed. 
Only then should the host start + * enqueueing new output buffers. + */ +struct mve_response_sequence_parameters +{ + /* Other stream parameters affecting buffer allocation, + * any change in these values will trigger a flush. + */ + uint8_t interlace; /* 0 or 1 */ + uint8_t chroma_format; + #define MVE_CHROMA_FORMAT_MONO (0x0) + #define MVE_CHROMA_FORMAT_420 (0x1) + #define MVE_CHROMA_FORMAT_422 (0x2) + #define MVE_CHROMA_FORMAT_440 (0x3) + #define MVE_CHROMA_FORMAT_ARGB (0x4) + #define MVE_CHROMA_FORMAT_RGB (0x5) + #define MVE_CHROMA_FORMAT_444 (0x6) + #define MVE_CHROMA_FORMAT_OSD_ARGB (0x7) + + uint8_t bitdepth_luma; /* 8, 9 or 10 */ + uint8_t bitdepth_chroma; /* 8, 9 or 10 */ + uint8_t num_buffers_planar; /* number of planar buffers needed */ + uint8_t num_buffers_afbc; /* number of AFBC buffers needed, for + * AFBC output more buffers are needed + * (for planar output, the firmware + * will allocate extra memory via RPC) + */ + uint8_t range_mapping_enabled; /* VC-1 AP specific feature, if enabled + * then AFBC buffers may need special + * filtering before they can be + * displayed correctly. If the host is + * not able to do that, then planar output + * should be used, for which MVE + * automatically performs the filtering. + */ + uint8_t reserved0; +}; + +struct mve_response_ref_frame_unused +{ + /* Decode only: If requested by the host with the message + * MVE_REQUEST_CODE_RELEASE_REF_FRAME, the MVE will respond + * with this message when (if ever) the buffer is no longer + * used. + */ + uint32_t unused_buffer_address; +}; + + +/* This message is only for debugging and performance profiling. + * Is sent by the firmware if the corresponding options is enabled. 
+ */ +struct mve_event_processed +{ + uint8_t pic_format; + uint8_t qp; + uint8_t pad0; + uint8_t pad1; + uint32_t parse_start_time; /* Timestamp, absolute time */ + uint32_t parse_end_time; /* Timestamp, absolute time */ + uint32_t parse_idle_time; /* Definition of idle here is waiting for in/out buffers or available RAM */ + + uint32_t pipe_start_time; /* Timestamp */ + uint32_t pipe_end_time; /* Timestamp, end-start = process time. Idle time while in a frame is + * not measured. */ + uint32_t pipe_idle_time; /* Always 0 in decode, */ + + uint32_t parser_coreid; /* Core used to parse this frame */ + uint32_t pipe_coreid; /* Core used to pipe this frame */ + + uint32_t bitstream_bits; /* Number of bitstream bits used for this frame. */ + + uint32_t intermediate_buffer_size; /* Size of intermediate (mbinfo/residuals) buffer after this frame was + * parsed. */ + uint32_t total_memory_allocated; /* after the frame was parsed. Including reference frames. */ + + uint32_t bus_read_bytes; /* bus read bytes */ + uint32_t bus_write_bytes; /* bus written bytes */ + + uint32_t afbc_bytes; /* afbc data transferred */ + + uint32_t slice0_end_time; /* Timestamp, absolute time */ + uint32_t stream_start_time; /* Timestamp, absolute stream start time */ + uint32_t stream_open_time; /* Timestamp, absolute stream open time */ +}; + +/* This message is only for debugging, is sent by the + * firmware if the corresponding option is enabled. 
+ */ +struct mve_event_ref_frame +{ + uint32_t ref_addr; /* MVE virtual address of AFBC reference frame */ + uint32_t ref_width; /* Width of display area in luma pixels */ + uint32_t ref_height; /* Height of display area in luma pixels */ + uint32_t ref_mb_width; /* Width in macroblocks */ + uint32_t ref_mb_height; /* Height in macroblocks */ + uint32_t ref_left_crop; /* Left crop in luma pixels */ + uint32_t ref_top_crop; /* Top crop in luma pixels */ + uint32_t ref_frame_size; /* Total AFBC frame size in bytes */ + uint32_t ref_display_order; + uint16_t bit_width; /* bit width of the YUV either 8 or 10 */ + uint16_t tiled_headers; /* AFBC format is tiled */ +}; + +/* This message is only for debugging, is sent by the firmware if event tracing + * is enabled. + */ +struct mve_event_trace_buffers +{ + uint16_t reserved; + uint8_t num_cores; + uint8_t rasc_mask; + #define MVE_MAX_TRACE_BUFFERS 40 + /* this array will contain one buffer per rasc in rasc_mask per num_core */ + struct + { + uint32_t rasc_addr; /* rasc address of the buffer */ + uint32_t size; /* size of the buffer in bytes */ + } buffers[MVE_MAX_TRACE_BUFFERS]; +}; + +/* 'Events' are informative messages, the host is not required to react in + * any particular way. 
+ */ +struct mve_response_event +{ + uint32_t event_code; + #define MVE_EVENT_ERROR_STREAM_CORRUPT (1) /* message, text string */ + #define MVE_EVENT_ERROR_STREAM_NOT_SUPPORTED (2) /* message, text string */ + #define MVE_EVENT_PROCESSED (3) /* struct mve_event_processed */ + #define MVE_EVENT_REF_FRAME (4) /* struct mve_event_ref_frame */ + #define MVE_EVENT_TRACE_BUFFERS (5) /* struct mve_event_trace_buffers */ + union + { + struct mve_event_processed event_processed; + struct mve_event_ref_frame event_ref_frame; + struct mve_event_trace_buffers event_trace_buffers; + char message[ MVE_MAX_ERROR_MESSAGE_SIZE ]; + } event_data; +}__attribute__((packed)); + + + + + + + + + + + + + + + + +/********************************************************************* + * + * BUFFERs are sent both ways, from host to firmware and back again + * + * Each MVE_BUFFER_CODE_ code is followed by one of the structs + * below. + * + *********************************************************************/ + +/* Flags in mve_buffer_frame::frame_flags: + * Set by whom? Meaning: + * DECODE: ENCODE: + * MVE_BUFFER_FRAME_FLAG_INTERLACE host - Buffer is interlaced (both top and + * bottom fields are allocated) + * MVE_BUFFER_FRAME_FLAG_BOT_FIRST fw - Bottom field should be displayed + * first (only if interlaced) + * MVE_BUFFER_FRAME_FLAG_TOP_PRESENT fw host Top field present (or full frame if + * not interlaced) + * MVE_BUFFER_FRAME_FLAG_BOT_PRESENT fw - Bottom present (only if interlaced) + * + * MVE_BUFFER_FRAME_FLAG_ROTATION_* host host Decode: MVE will rotate the output frame + * according to this setting. + * Encode: MVE will rotate the input frame + * according to this setting before + * encoding them. 
+ * MVE_BUFFER_FRAME_FLAG_SCALING_MASK  host    -       Output pictures should be downscaled
+ *
+ * MVE_BUFFER_FRAME_FLAG_MIRROR_*      -       host    Input frame should be mirrored before encoding
+ *
+ * MVE_BUFFER_FRAME_FLAG_REJECTED      fw      -       Buffer was too small, host should re-allocate
+ *
+ * MVE_BUFFER_FRAME_FLAG_CORRUPT       fw      -       Frame contains visual corruption
+ *
+ * MVE_BUFFER_FRAME_FLAG_DECODE_ONLY   fw      -       Frame should not be displayed
+ *
+ * MVE_BUFFER_FRAME_FLAG_REF_FRAME     fw      -       Frame is used by MVE as reference, host must
+ *                                                     not change, just re-enqueue when displayed
+ * MVE_BUFFER_FRAME_FLAG_EOS           fw      host    This is the last frame in the stream.
+ */
+
+/* mve_buffer_frame_planar stores uncompressed YUV pictures.
+ *  ________________________________________
+ * |                             |          | ^
+ * |<-:--visible_frame_width---->|          | :
+ * |  :                          |          | :
+ * |  :                          |          | :
+ * |  visible_frame_height       |          | max_frame_height
+ * |  :                          |          | :
+ * |  :                          |          | :
+ * |__v__________________________|          | :
+ * |                                        | :
+ * |<-------------max_frame_width---------->| :
+ * |________________________________________| v
+ *
+ */
+struct mve_buffer_frame_planar
+{
+    /* Y,Cb,Cr top field */
+    uint32_t plane_top[ 3 ];
+
+    /* Y,Cb,Cr bottom field (interlace only) */
+    uint32_t plane_bot[ 3 ];
+
+    /* Stride between rows, in bytes */
+    int32_t stride[ 3 ];
+
+    /* Size of largest frame allowed to put in this buffer */
+    uint16_t max_frame_width;
+    uint16_t max_frame_height;
+
+};
+
+/* mve_buffer_frame_afbc stores AFBC compressed content that is also used
+ * as the reference frame. Out of loop processing (crop, rotation,
+ * range reduction) must be supported by the user of this buffer and
+ * the parameters are signaled within the buffer descriptor below.
+ *  ________________________________________
+ * | ^                                      |
+ * | cropy                                  |
+ * | v_____________________________         |
+ * |<-cropx->|                  ^  |        |
+ * |         |<-:--visible_frame_width---->||
+ * |         |  :                          ||
+ * |         |  :                          ||
+ * |         |  visible_frame_height       ||
+ * |         |  :                          ||
+ * |         |  :                          ||
+ * |         |__v__________________________||
+ * |________________________________________|
+ *
+ *  <----- superblock_width --------------->
+ *        * afbc_width_in_superblocks
+ *
+ * Note that the sizes and cropping values need not be multiples of 16.
+ *
+ * For interlaced streams, the values refer to a full frame,
+ * while the output is actually separated into fields. Thus for fields,
+ * cropy and visible_frame_height should be divided by two.
+ *
+ * For dual-downscaled AFBC output (not supported for interlace),
+ * then the cropx, cropy, visible_frame_width and visible_frame_height
+ * should be divided by two for the downscaled plane.
+ */
+struct mve_buffer_frame_afbc
+{
+    uint32_t plane[ 2 ];       /* Addresses for up to two AFBC planes:
+                                * Top and bottom fields for interlace,
+                                * or standard and optional downscaled output. */
+    uint32_t alloc_bytes[ 2 ]; /* Size of allocation for each plane */
+    uint16_t cropx;            /* Luma x crop */
+    uint16_t cropy;            /* Luma y crop */
+    uint16_t afbc_width_in_superblocks[ 2 ]; /* Width of AFBC frame buffer, in units
+                                              * of superblock width (32 or 16).
+                                              * If dual-downscaled output is chosen,
+                                              * this width can be different for the
+                                              * two planes.
+                                              * For first plane:
+                                              *   (cropx + frame_width)
+                                              *     <= superblock_width * afbc_width...
+                                              */
+    uint32_t afbc_params; /* AFBC parameters */
+    #define MVE_BUFFER_FRAME_AFBC_TILED_BODY        (0x00000001) /* Output body blocks should be tiled */
+    #define MVE_BUFFER_FRAME_AFBC_TILED_HEADER      (0x00000002) /* Output headers should be tiled */
+    #define MVE_BUFFER_FRAME_AFBC_32X8_SUPERBLOCK   (0x00000004) /* Super block is 32x8, default is 16x16,
+                                                                  * (only supported as input for encode) */
+    #define MVE_BUFFER_FRAME_AFBC_DN_FORCE_8BIT     (0x00000008) /* For downscaled AFBC plane: It shall
+                                                                  * be 8-bit, even if full-scale is 10-bit */
+    #define MVE_BUFFER_FRAME_AFBC_DN_FORCE_420      (0x00000010) /* For downscaled AFBC plane: It shall
+                                                                  * be 4:2:0, even if full-scale is 4:2:2 */
+    #define MVE_BUFFER_FRAME_AFBC_STRIDE_SET_BY_MVE (0x00000020) /* Decode only: By default, the host should
+                                                                    set the afbc_width_in_superblocks. If the
+                                                                    value is zero, or if this bit is set, then
+                                                                    the MVE sets an appropriate value. */
+    #define MVE_BUFFER_FRAME_AFBC_BLOCK_SPLIT       (0x00000040) /* For Superblock layout, block_split mode should be enabled */
+    #define MVE_BUFFER_FRAME_AFBC_AV1_DECODER       (0x00000080) /* Indicate av1dec or other codec */
+    #define MVE_BUFFER_FRAME_AFBC_AV1_TILE_HEADER   (0x00000100) /* If av1dec, the tileheader is not decided by MVE_BUFFER_FRAME_AFBC_TILED_HEADER */
+    #define MVE_BUFFER_FRAME_AFBC_AV1_MONO_CHROMA   (0x00000200) /* If av1dec, mono-chrome afbc */
+
+};
+
+/* output from encoder, encoder statistics.
+ * buffer parameter to a buffer of this encoder statistics
+ * this struct indicates each size of statistics.
+ */
+
+struct mve_buffer_param_enc_stats
+{
+    uint32_t mms_buffer_size;
+    uint32_t bitcost_buffer_size;
+    uint32_t qp_buffer_size;
+    uint32_t flags;
+    /* ENC_STATS_FLAGS: which statistics sections are present/requested */
+    #define MVE_BUFFER_ENC_STATS_FLAG_MMS     (1<<0)
+    #define MVE_BUFFER_ENC_STATS_FLAG_BITCOST (1<<1)
+    #define MVE_BUFFER_ENC_STATS_FLAG_QP      (1<<2)
+    #define MVE_BUFFER_ENC_STATS_FLAG_DROP    (1<<3)
+    uint16_t stats_mb_width;
+    uint16_t stats_mb_height;
+};
+
+/*
+ * The FRAME buffer stores the common information for PLANAR and AFBC buffers,
+ * and a union of PLANAR and AFBC specific information.
+ */
+struct mve_buffer_frame
+{
+    /* For identification of the buffer, this is not changed by
+     * the firmware. */
+    uint64_t host_handle;
+    #define OSD1_TAG 0x40000000 /* bit 30 marks osd_1 */
+    #define OSD0_TAG 0x20000000 /* bit 29 marks osd_0 (original comment said
+                                 * "bit30 ... osd_1" — copy-paste error) */
+
+    /* For matching input buffer with output buffers, the firmware
+     * copies these values between frame buffers and bitstream buffers. */
+    uint64_t user_data_tag;
+
+    /* Frame buffer flags, see commentary above */
+    uint32_t frame_flags;
+    #define MVE_BUFFER_FRAME_FLAG_INTERLACE    (0x00000001)
+    #define MVE_BUFFER_FRAME_FLAG_BOT_FIRST    (0x00000002)
+    #define MVE_BUFFER_FRAME_FLAG_TOP_PRESENT  (0x00000004)
+    #define MVE_BUFFER_FRAME_FLAG_BOT_PRESENT  (0x00000008)
+    #define MVE_BUFFER_FRAME_FLAG_ROTATION_90  (0x00000010)
+    #define MVE_BUFFER_FRAME_FLAG_ROTATION_180 (0x00000020)
+    #define MVE_BUFFER_FRAME_FLAG_ROTATION_270 (0x00000030)
+    #define MVE_BUFFER_FRAME_FLAG_SCALING_MASK (0x000000C0)
+    #define MVE_BUFFER_FRAME_FLAG_MIRROR_HORI  (0x00000100)
+    #define MVE_BUFFER_FRAME_FLAG_MIRROR_VERT  (0x00000200)
+    #define MVE_BUFFER_FRAME_FLAG_FORCE_IDR    (0x00000400)
+    #define MVE_BUFFER_FRAME_FLAG_RESET_GOP    (0x00000800)
+    #define MVE_BUFFER_FRAME_FLAG_REJECTED     (0x00001000)
+    #define MVE_BUFFER_FRAME_FLAG_CORRUPT      (0x00002000)
+    #define MVE_BUFFER_FRAME_FLAG_DECODE_ONLY  (0x00004000)
+    #define MVE_BUFFER_FRAME_FLAG_REF_FRAME    (0x00008000)
+    #define MVE_BUFFER_FRAME_FLAG_EOS          (0x00010000)
+    #define MVE_BUFFER_FRAME_FLAG_RESET_LTR_PEROID (0x00020000)
+    #define MVE_BUFFER_FRAME_FLAG_RESET_RC     (0x00040000)
+    /* ARBITRARY_DOWNSCALE */
+    #define MVE_BUFFER_FRAME_FLAG_SCALING_MASKX (0xFF000000) /* 8 bits */
+    #define MVE_BUFFER_FRAME_FLAG_SCALING_MASKY (0x00FE0000) /* 7 bits */
+
+    /* Height (in luma samples) of visible part of frame,
+     * may be smaller than allocated frame size. */
+    uint16_t visible_frame_height;
+
+    /* Width (in luma samples) of visible part of frame,
+     * may be smaller than allocated frame size. */
+    uint16_t visible_frame_width;
+
+    /* Color format of buffer */
+    uint16_t format;
+    /* format bitfield: */
+    #define MVE_FORMAT_BF_C (0)  /* 3 bits, chroma subsampling */
+    #define MVE_FORMAT_BF_B (4)  /* 4 bits, max bitdepth minus 8 */
+    #define MVE_FORMAT_BF_N (8)  /* 2 bits, number of planes */
+    #define MVE_FORMAT_BF_V (12) /* 2 bits, format variant */
+    #define MVE_FORMAT_BF_A (15) /* 1 bit, AFBC bit */
+    /* formats: */
+    #define MVE_FORMAT_YUV420_AFBC_8  ( (MVE_CHROMA_FORMAT_420 << MVE_FORMAT_BF_C) | \
+                                        ( ( 8 - 8)             << MVE_FORMAT_BF_B) | \
+                                        (   1                  << MVE_FORMAT_BF_A) )
+
+    #define MVE_FORMAT_YUV420_AFBC_10 ( (MVE_CHROMA_FORMAT_420 << MVE_FORMAT_BF_C) | \
+                                        ( (10 - 8)             << MVE_FORMAT_BF_B) | \
+                                        (   1                  << MVE_FORMAT_BF_A) )
+
+    #define MVE_FORMAT_YUV422_AFBC_8  ( (MVE_CHROMA_FORMAT_422 << MVE_FORMAT_BF_C) | \
+                                        ( ( 8 - 8)             << MVE_FORMAT_BF_B) | \
+                                        (   1                  << MVE_FORMAT_BF_A) )
+
+    #define MVE_FORMAT_YUV422_AFBC_10 ( (MVE_CHROMA_FORMAT_422 << MVE_FORMAT_BF_C) | \
+                                        ( (10 - 8)             << MVE_FORMAT_BF_B) | \
+                                        (   1                  << MVE_FORMAT_BF_A) )
+
+    #define MVE_FORMAT_YUV420_I420    ( (MVE_CHROMA_FORMAT_420 << MVE_FORMAT_BF_C) | \
+                                        ( ( 8 - 8)             << MVE_FORMAT_BF_B) | \
+                                        (   3                  << MVE_FORMAT_BF_N) | \
+                                        (   0                  << MVE_FORMAT_BF_V) )
+
+    #define MVE_FORMAT_YUV420_I420_10 ( (MVE_CHROMA_FORMAT_420 << MVE_FORMAT_BF_C) | \
+                                        ( ( 10 - 8)            << MVE_FORMAT_BF_B) | \
+                                        (   3                  << MVE_FORMAT_BF_N) | \
+                                        (   0                  << MVE_FORMAT_BF_V) )
+
+    #define MVE_FORMAT_YUV420_NV12    ( (MVE_CHROMA_FORMAT_420 << MVE_FORMAT_BF_C) | \
+                                        ( ( 8 - 8)             << MVE_FORMAT_BF_B) | \
+                                        (   2                  << MVE_FORMAT_BF_N) | \
+                                        (   0                  << MVE_FORMAT_BF_V) )
+
+    #define MVE_FORMAT_YUV420_NV21    ( (MVE_CHROMA_FORMAT_420 << MVE_FORMAT_BF_C) | \
+                                        ( ( 8 - 8)             << MVE_FORMAT_BF_B) | \
+                                        (   2                  << MVE_FORMAT_BF_N) | \
+                                        (   1                  << MVE_FORMAT_BF_V) )
+
+    #define MVE_FORMAT_YUV420_P010    ( (MVE_CHROMA_FORMAT_420 << MVE_FORMAT_BF_C) | \
+                                        ( (16 - 8)             << MVE_FORMAT_BF_B) | \
+                                        (   2                  << MVE_FORMAT_BF_N) | \
+                                        (   0                  << MVE_FORMAT_BF_V) )
+
+    #define MVE_FORMAT_YUV420_2P_10   ( (MVE_CHROMA_FORMAT_420 << MVE_FORMAT_BF_C) | \
+                                        ( (10 - 8)             << MVE_FORMAT_BF_B) | \
+                                        (   2                  << MVE_FORMAT_BF_N) | \
+                                        (   0                  << MVE_FORMAT_BF_V) )
+
+    #define MVE_FORMAT_YUV420_Y0L2    ( (MVE_CHROMA_FORMAT_420 << MVE_FORMAT_BF_C) | \
+                                        ( (10 - 8)             << MVE_FORMAT_BF_B) | \
+                                        (   1                  << MVE_FORMAT_BF_N) | \
+                                        (   0                  << MVE_FORMAT_BF_V) )
+
+    #define MVE_FORMAT_YUV420_AQB1    ( (MVE_CHROMA_FORMAT_420 << MVE_FORMAT_BF_C) | \
+                                        ( (10 - 8)             << MVE_FORMAT_BF_B) | \
+                                        (   1                  << MVE_FORMAT_BF_N) | \
+                                        (   1                  << MVE_FORMAT_BF_V) )
+
+    #define MVE_FORMAT_YUV422_YUY2    ( (MVE_CHROMA_FORMAT_422 << MVE_FORMAT_BF_C) | \
+                                        ( ( 8 - 8)             << MVE_FORMAT_BF_B) | \
+                                        (   1                  << MVE_FORMAT_BF_N) | \
+                                        (   0                  << MVE_FORMAT_BF_V) )
+
+    #define MVE_FORMAT_YUV422_UYVY    ( (MVE_CHROMA_FORMAT_422 << MVE_FORMAT_BF_C) | \
+                                        ( ( 8 - 8)             << MVE_FORMAT_BF_B) | \
+                                        (   1                  << MVE_FORMAT_BF_N) | \
+                                        (   1                  << MVE_FORMAT_BF_V) )
+
+    #define MVE_FORMAT_YUV422_Y210    ( (MVE_CHROMA_FORMAT_422 << MVE_FORMAT_BF_C) | \
+                                        ( (16 - 8)             << MVE_FORMAT_BF_B) | \
+                                        (   1                  << MVE_FORMAT_BF_N) | \
+                                        (   0                  << MVE_FORMAT_BF_V) )
+
+    #define MVE_FORMAT_YUV422_1P_10   ( (MVE_CHROMA_FORMAT_422 << MVE_FORMAT_BF_C) | \
+                                        ( (10 - 8)             << MVE_FORMAT_BF_B) | \
+                                        (   1                  << MVE_FORMAT_BF_N) | \
+                                        (   0                  << MVE_FORMAT_BF_V) )
+
+    #define MVE_FORMAT_RGBA_8888      ( (MVE_CHROMA_FORMAT_ARGB << MVE_FORMAT_BF_C) | \
+                                        ( ( 8 - 8)              << MVE_FORMAT_BF_B) | \
+                                        (   1                   << MVE_FORMAT_BF_N) | \
+                                        (   0                   << MVE_FORMAT_BF_V) )
+
+    #define MVE_FORMAT_BGRA_8888      ( (MVE_CHROMA_FORMAT_ARGB << MVE_FORMAT_BF_C) | \
+                                        ( ( 8 - 8)              << MVE_FORMAT_BF_B) | \
+                                        (   1                   << MVE_FORMAT_BF_N) | \
+                                        (   1                   << MVE_FORMAT_BF_V) )
+
+    #define MVE_FORMAT_ARGB_8888      ( (MVE_CHROMA_FORMAT_ARGB << MVE_FORMAT_BF_C) | \
+                                        ( ( 8 - 8)              << MVE_FORMAT_BF_B) | \
+                                        (   1                   << MVE_FORMAT_BF_N) | \
+                                        (   2                   << MVE_FORMAT_BF_V) )
+
+    #define MVE_FORMAT_ABGR_8888      ( (MVE_CHROMA_FORMAT_ARGB << MVE_FORMAT_BF_C) | \
+                                        ( ( 8 - 8)              << MVE_FORMAT_BF_B) | \
+                                        (   1                   << MVE_FORMAT_BF_N) | \
+                                        (   3                   << MVE_FORMAT_BF_V) )
+    #define MVE_FORMAT_Y              ( (MVE_CHROMA_FORMAT_MONO << MVE_FORMAT_BF_C) | \
+                                        ( ( 8 - 8)              << MVE_FORMAT_BF_B) | \
+                                        (   1                   << MVE_FORMAT_BF_N) | \
+                                        (   0                   << MVE_FORMAT_BF_V) )
+    #define MVE_FORMAT_Y_10           ( (MVE_CHROMA_FORMAT_MONO << MVE_FORMAT_BF_C) | \
+                                        ( ( 10 - 8)             << MVE_FORMAT_BF_B) | \
+                                        (   1                   << MVE_FORMAT_BF_N) | \
+                                        (   0                   << MVE_FORMAT_BF_V) )
+
+    #define MVE_FORMAT_RGB_888        ( (MVE_CHROMA_FORMAT_RGB  << MVE_FORMAT_BF_C) | \
+                                        ( ( 8 - 8)              << MVE_FORMAT_BF_B) | \
+                                        (   1                   << MVE_FORMAT_BF_N) | \
+                                        (   0                   << MVE_FORMAT_BF_V) )
+
+    #define MVE_FORMAT_BGR_888        ( (MVE_CHROMA_FORMAT_RGB  << MVE_FORMAT_BF_C) | \
+                                        ( ( 8 - 8)              << MVE_FORMAT_BF_B) | \
+                                        (   1                   << MVE_FORMAT_BF_N) | \
+                                        (   1                   << MVE_FORMAT_BF_V) )
+
+    #define MVE_FORMAT_RGB_3P         ( (MVE_CHROMA_FORMAT_RGB  << MVE_FORMAT_BF_C) | \
+                                        ( ( 8 - 8)              << MVE_FORMAT_BF_B) | \
+                                        (   3                   << MVE_FORMAT_BF_N) | \
+                                        (   0                   << MVE_FORMAT_BF_V) )
+    #define MVE_FORMAT_YUV444         ( (MVE_CHROMA_FORMAT_444  << MVE_FORMAT_BF_C) | \
+                                        ( ( 8 - 8)              << MVE_FORMAT_BF_B) | \
+                                        (   3                   << MVE_FORMAT_BF_N) | \
+                                        (   0                   << MVE_FORMAT_BF_V) )
+    #define MVE_FORMAT_YUV444_10      ( (MVE_CHROMA_FORMAT_444  << MVE_FORMAT_BF_C) | \
+                                        ( ( 10 - 8)             << MVE_FORMAT_BF_B) | \
+                                        (   3                   << MVE_FORMAT_BF_N) | \
+                                        (   0                   << MVE_FORMAT_BF_V) )
+    #define MVE_FORMAT_ARGB_1555      ( (MVE_CHROMA_FORMAT_OSD_ARGB << MVE_FORMAT_BF_C) | \
+                                        ( ( 8 - 8)                  << MVE_FORMAT_BF_B) | \
+                                        (   1                       << MVE_FORMAT_BF_N) | \
+                                        (   0                       << MVE_FORMAT_BF_V) )
+    #define MVE_FORMAT_ARGB_4444      ( (MVE_CHROMA_FORMAT_OSD_ARGB << MVE_FORMAT_BF_C) | \
+                                        ( ( 8 - 8)                  << MVE_FORMAT_BF_B) | \
+                                        (   1                       << MVE_FORMAT_BF_N) | \
+                                        (   1                       << MVE_FORMAT_BF_V) )
+    #define MVE_FORMAT_RGB_565        ( (MVE_CHROMA_FORMAT_OSD_ARGB << MVE_FORMAT_BF_C) | \
+                                        ( ( 8 - 8)                  << MVE_FORMAT_BF_B) | \
+                                        (   1                       << MVE_FORMAT_BF_N) | \
+                                        (   2                       << MVE_FORMAT_BF_V) )
+
+    #define MVE_FORMAT_MBINFO         (0x0001) /* only used for debugging */
+
+    #define MVE_FORMAT_UNUSED         (0x0000)
+
+    uint16_t reserved0; /* force 'data' to be 4-byte aligned */
+
+    union
+    {
+        struct mve_buffer_frame_planar planar;
+        struct mve_buffer_frame_afbc   afbc;
+    } data;
+
+    //uint32_t reserved1; /* force size to be multiple of 8 bytes */
+    uint16_t mini_frame_y_start;
+    uint16_t mini_frame_y_end;
+};
+
+/* The bitstream buffer stores a number of bitstream bytes */
+struct mve_buffer_bitstream
+{
+    /* For identification of the buffer, this is not changed by
+     * the firmware. */
+    uint64_t host_handle;
+
+    /* For matching input buffer with output buffers, the firmware
+     * copies these values between frame buffers and bitstream buffers. */
+    uint64_t user_data_tag;
+
+    /* BufferFlags */
+    uint32_t bitstream_flags;
+    #define MVE_BUFFER_BITSTREAM_FLAG_EOS           (0x00000001)
+    #define MVE_BUFFER_BITSTREAM_FLAG_ENDOFFRAME    (0x00000010)
+    #define MVE_BUFFER_BITSTREAM_FLAG_SYNCFRAME     (0x00000020)
+    #define MVE_BUFFER_BITSTREAM_FLAG_CODECCONFIG   (0x00000080)
+    #define MVE_BUFFER_BITSTREAM_FLAG_ENDOFSUBFRAME (0x00000400)
+    #define MVE_BUFFER_BITSTREAM_FLAG_ENC_STATS     (0x00010000)
+    #define MVE_BUFFER_BITSTREAM_FLAG_BSEOF         (0x00100000)
+
+    /* Length of allocated buffer */
+    uint32_t bitstream_alloc_bytes;
+
+    /* Byte offset from start to first byte */
+    uint32_t bitstream_offset;
+
+    /* Number of bytes in the buffer */
+    uint32_t bitstream_filled_len;
+
+    /* Pointer to buffer start */
+    uint32_t bitstream_buf_addr;
+
+    /* frame_type. 0:I, 1:p, 2:B, 3:b */
+    uint8_t frame_type;
+    #define MVE_FRAME_TYPE_I        0
+    #define MVE_FRAME_TYPE_P        1
+    #define MVE_FRAME_TYPE_B        2 // B frame
+    #define MVE_FRAME_TYPE_LOWER_B  3 // b frame
+    #define MVE_FRAME_TYPE_P_KEY    4
+    #define MVE_FRAME_TYPE_NO_REF_P 5 // only svct3
+    #define MVE_FRAME_TYPE_GDR      6 // GDR
+
+    uint8_t src_transform;
+    #define MVE_SRC_TRANSFORM_NONE       (0) // no src transform
+    #define MVE_SRC_TRANSFORM_R90        (1) // rotate 90 degrees
+    #define MVE_SRC_TRANSFORM_R180       (2) // rotate 180 degrees
+    #define MVE_SRC_TRANSFORM_R270       (3) // rotate 270 degrees
+    #define MVE_SRC_TRANSFORM_VFLIP      (4) // vertical flip (no rotation)
+    #define MVE_SRC_TRANSFORM_R90_VFLIP  (5) // rotate 90 degrees and vertical flip
+    #define MVE_SRC_TRANSFORM_R180_VFLIP (6) // rotate 180 degrees and vertical flip
+    #define MVE_SRC_TRANSFORM_R270_VFLIP (7) // rotate 270 degrees and vertical flip
+
+    /* Pad to force 8-byte alignment */
+    //uint32_t reserved;
+    uint16_t bitstream_remaining_kb; // remaining kbytes of bitstream not returned to host.
+};
+
+/*
+ * Define a region in 16x16 units
+ *
+ * The region is macroblock positions (x,y) in the range
+ *   mbx_left <= x < mbx_right
+ *   mby_top  <= y < mby_bottom
+ */
+struct mve_buffer_param_region
+{
+    uint16_t mbx_left;   /* macroblock x left edge (inclusive) */
+    uint16_t mbx_right;  /* macroblock x right edge (exclusive) */
+    uint16_t mby_top;    /* macroblock y top edge (inclusive) */
+    uint16_t mby_bottom; /* macroblock y bottom edge (exclusive) */
+    int16_t qp_delta;    /* QP delta value for this region, this
+                          * delta applies to QP values in the ranges:
+                          * H264: 0-51
+                          * HEVC: 0-51
+                          * VP9:  0-255 */
+    uint8_t prio;        /* the priority of this region */
+    uint8_t force_intra; /* force intra to this ROI region, refer to macro: FEATURE_SUPPORT_ROI_MISC */;
+};
+
+/* input for encoder,
+ * the mve_buffer_param_regions buffer stores the information for FRAME buffers,
+ * and the information for regions of interest.
+ */
+struct mve_buffer_param_regions
+{
+    uint8_t n_regions; /* Number of regions */
+    uint8_t reserved[ 3 ];
+    #define MVE_MAX_FRAME_REGIONS 16
+    struct mve_buffer_param_region region[ MVE_MAX_FRAME_REGIONS ];
+};
+
+/* the block parameter record specifies the various properties of a quad */
+struct mve_block_param_record
+{
+    uint32_t qp_delta; /* Bitset of four 4-bit QP delta values for a quad */
+    /* bit offsets and widths of the packed fields below */
+    #define MVE_BLOCK_PARAM_RECORD_QP_DELTA_TOP_LEFT_16X16     (0)
+    #define MVE_BLOCK_PARAM_RECORD_QP_DELTA_TOP_LEFT_16X16_SZ  (6)
+    #define MVE_BLOCK_PARAM_RECORD_QP_DELTA_TOP_RIGHT_16X16    (6)
+    #define MVE_BLOCK_PARAM_RECORD_QP_DELTA_TOP_RIGHT_16X16_SZ (6)
+    #define MVE_BLOCK_PARAM_RECORD_QP_DELTA_BOT_LEFT_16X16     (12)
+    #define MVE_BLOCK_PARAM_RECORD_QP_DELTA_BOT_LEFT_16X16_SZ  (6)
+    #define MVE_BLOCK_PARAM_RECORD_QP_DELTA_BOT_RIGHT_16X16    (18)
+    #define MVE_BLOCK_PARAM_RECORD_QP_DELTA_BOT_RIGHT_16X16_SZ (6)
+    #define MVE_BLOCK_PARAM_RECORD_QP_FORCE_FIELD              (24)
+    #define MVE_BLOCK_PARAM_RECORD_QP_FORCE_FIELD_SZ           (5)
+    #define MVE_BLOCK_PARAM_RECORD_QUAD_FORCE_INTRA            (29)
+    #define MVE_BLOCK_PARAM_RECORD_QUAD_FORCE_INTRA_SZ         (1)
+    #define MVE_BLOCK_PARAM_RECORD_QP_ABSOLUTE                 (30)
+    #define MVE_BLOCK_PARAM_RECORD_QP_ABSOLUTE_SZ              (1)
+    #define MVE_BLOCK_PARAM_RECORD_QP_QUAD_SKIP                (31)
+    #define MVE_BLOCK_PARAM_RECORD_QP_QUAD_SKIP_SZ             (1)
+
+    #define MVE_BLOCK_PARAM_RECORD_FORCE_NONE (0x00)
+    #define MVE_BLOCK_PARAM_RECORD_FORCE_QP   (0x01)
+    uint32_t min_qp;
+    #define MVE_BLOCK_PARAM_RECORD_MIN_QP_TOP_LEFT_16X16     (0)
+    #define MVE_BLOCK_PARAM_RECORD_MIN_QP_TOP_LEFT_16X16_SZ  (6)
+    #define MVE_BLOCK_PARAM_RECORD_MIN_QP_TOP_RIGHT_16X16    (6)
+    #define MVE_BLOCK_PARAM_RECORD_MIN_QP_TOP_RIGHT_16X16_SZ (6)
+    #define MVE_BLOCK_PARAM_RECORD_MIN_QP_BOT_LEFT_16X16     (12)
+    #define MVE_BLOCK_PARAM_RECORD_MIN_QP_BOT_LEFT_16X16_SZ  (6)
+    #define MVE_BLOCK_PARAM_RECORD_MIN_QP_BOT_RIGHT_16X16    (18)
+    #define MVE_BLOCK_PARAM_RECORD_MIN_QP_BOT_RIGHT_16X16_SZ (6)
+};
+
+/* block configuration uncompressed rows
header. this configures the size of the
+ * uncompressed body. */
+struct mve_buffer_general_rows_uncomp_hdr
+{
+    uint8_t n_cols_minus1; /* number of quad cols in picture minus 1 */
+    uint8_t n_rows_minus1; /* number of quad rows in picture minus 1 */
+    uint8_t reserved[2];
+};
+
+/* block configuration uncompressed rows body. this structure contains an array
+ * of block parameter records whose length is (n_cols_minus1 + 1) * (n_rows_minus1 + 1)
+ * elements. therefore the allocation of this structure needs to be dynamic and
+ * a pointer to the allocated memory should then be assigned to the general
+ * purpose buffer data pointer
+ */
+struct mve_buffer_general_rows_uncomp_body
+{
+    /* the size of this array is variable and not necessarily equal to 1.
+     * therefore the sizeof operator should not be used
+     */
+    struct mve_block_param_record bpr[1];
+};
+
+/* input for encoder, block level configurations.
+ * the row based block configurations can be defined in different formats. they
+ * are stored in the blk_cfgs union and identified by the blk_cfg_type member.
+ * these definitions consist of a header and body pair. the header part contains
+ * configuration information for the body. the body part describes the actual
+ * layout of the data buffer pointed to by the mve_buffer_general_hdr buffer_ptr.
+ */
+struct mve_buffer_general_block_configs
+{
+    uint8_t blk_cfg_type;
+    #define MVE_BLOCK_CONFIGS_TYPE_NONE       (0x00)
+    #define MVE_BLOCK_CONFIGS_TYPE_ROW_UNCOMP (0xff)
+    uint8_t reserved[3];
+    union
+    {
+        struct mve_buffer_general_rows_uncomp_hdr rows_uncomp;
+    } blk_cfgs;
+};
+
+
+/* input for encoder */
+struct mve_buffer_param_qp
+{
+    /* QP (quantization parameter) for encode.
+     *
+     * When used to set fixed QP for encode, with rate control
+     * disabled, then the valid ranges are:
+     *   H264: 0-51
+     *   HEVC: 0-51
+     *   VP8:  0-63
+     *   VP9:  0-63
+     * Note: The QP must be set separately for I, P and B frames.
+     *
+     * But when this message is used with the regions-feature,
+     * then the valid ranges are the internal bitstream ranges:
+     *   H264: 0-51
+     *   HEVC: 0-51
+     *   VP8:  0-127
+     *   VP9:  0-255
+     */
+    int32_t qp;
+    int32_t epr_iframe_enable;
+};
+
+struct mve_reset_gop_dynamic
+{
+    uint32_t reset_gop_pframes;
+};
+
+struct mve_reset_ltr_peroid_dynamic
+{
+    uint32_t reset_ltr_peroid_pframes;
+};
+
+/* output from decoder */
+struct mve_buffer_param_display_size
+{
+    uint16_t display_width;
+    uint16_t display_height;
+};
+
+/* output from decoder, colour information needed for hdr */
+struct mve_buffer_param_colour_description
+{
+    uint32_t flags;
+    #define MVE_BUFFER_PARAM_COLOUR_FLAG_MASTERING_DISPLAY_DATA_VALID (1)
+    #define MVE_BUFFER_PARAM_COLOUR_FLAG_CONTENT_LIGHT_DATA_VALID     (2)
+
+    uint8_t range; /* Unspecified=0, Limited=1, Full=2 */
+    #define MVE_BUFFER_PARAM_COLOUR_RANGE_UNSPECIFIED (0)
+    #define MVE_BUFFER_PARAM_COLOUR_RANGE_LIMITED     (1)
+    #define MVE_BUFFER_PARAM_COLOUR_RANGE_FULL        (2)
+
+    uint8_t colour_primaries;         /* see hevc spec. E.3.1 */
+    uint8_t transfer_characteristics; /* see hevc spec. E.3.1 */
+    uint8_t matrix_coeff;             /* see hevc spec. E.3.1 */
+
+    uint16_t mastering_display_primaries_x[3];  /* see hevc spec. D.3.27 */
+    uint16_t mastering_display_primaries_y[3];  /* see hevc spec. D.3.27 */
+    uint16_t mastering_white_point_x;           /* see hevc spec. D.3.27 */
+    uint16_t mastering_white_point_y;           /* see hevc spec. D.3.27 */
+    uint32_t max_display_mastering_luminance;   /* see hevc spec. D.3.27 */
+    uint32_t min_display_mastering_luminance;   /* see hevc spec. D.3.27 */
+
+    uint32_t max_content_light_level;           /* see hevc spec. D.3.35 */
+    uint32_t avg_content_light_level;           /* see hevc spec. D.3.35 */
+
+    uint8_t video_format_present_flag;
+    uint8_t video_format;
+    uint8_t aspect_ratio_info_present_flag;
+    uint8_t aspect_ratio_idc;
+    uint8_t timing_flag_info_present_flag;
+    uint16_t sar_width;
+    uint16_t sar_height;
+    uint32_t num_units_in_tick;
+    uint32_t time_scale;
+
+    uint8_t pad[7]; /* pad for 8-byte alignment */
+};
+
+struct mve_buffer_param_sei_user_data_unregistered
+{
+    uint8_t flags;
+    #define MVE_BUFFER_PARAM_USER_DATA_UNREGISTERED_VALID (1)
+    uint8_t uuid[16];
+    char user_data[256 - 35];
+    uint8_t user_data_len;
+
+    uint8_t reserved[5];
+};
+
+/* output from decoder see hevc spec. D.3.3 */
+struct mve_buffer_param_frame_field_info
+{
+    uint8_t pic_struct;
+    uint8_t source_scan_type;
+    uint8_t duplicate_flag;
+    uint8_t reserved;
+};
+
+/* output from decoder, VC-1 specific feature only relevant
+ * if using AFBC output
+ */
+struct mve_buffer_param_range_map
+{
+    uint8_t luma_map_enabled;
+    uint8_t luma_map_value;
+    uint8_t chroma_map_enabled;
+    uint8_t chroma_map_value;
+};
+
+/* input for encoder */
+struct mve_buffer_param_rate_control
+{
+    uint32_t rate_control_mode;
+    #define MVE_OPT_RATE_CONTROL_MODE_OFF        (0)
+    #define MVE_OPT_RATE_CONTROL_MODE_STANDARD   (1)
+    #define MVE_OPT_RATE_CONTROL_MODE_VARIABLE   (2)
+    #define MVE_OPT_RATE_CONTROL_MODE_CONSTANT   (3)
+    #define MVE_OPT_RATE_CONTROL_MODE_C_VARIABLE (4)
+    uint32_t target_bitrate;  /* in bits per second */
+    uint32_t maximum_bitrate; /* in bits per second */
+};
+
+/* input for encoder */
+struct mve_buffer_param_rate_control_qp_range
+{
+    int32_t qp_min;
+    int32_t qp_max;
+};
+
+/* input for encoder, see hevc spec.
D.3.16 */
+struct mve_buffer_param_frame_packing
+{
+    uint32_t flags;
+    #define MVE_BUFFER_PARAM_FRAME_PACKING_FLAG_QUINCUNX_SAMPLING        (1)
+    #define MVE_BUFFER_PARAM_FRAME_PACKING_FLAG_SPATIAL_FLIPPING         (2)
+    #define MVE_BUFFER_PARAM_FRAME_PACKING_FLAG_FRAME0_FLIPPED           (4)
+    #define MVE_BUFFER_PARAM_FRAME_PACKING_FLAG_FIELD_VIEWS              (8)
+    #define MVE_BUFFER_PARAM_FRAME_PACKING_FLAG_CURRENT_FRAME_IS_FRAME0  (16)
+
+    uint8_t frame_packing_arrangement_type;
+    uint8_t content_interpretation_type;
+
+    uint8_t frame0_grid_position_x;
+    uint8_t frame0_grid_position_y;
+    uint8_t frame1_grid_position_x;
+    uint8_t frame1_grid_position_y;
+
+    uint8_t reserved[ 2 ];
+};
+
+struct mve_buffer_param_rectangle
+{
+    uint16_t x_left;   /* pixel x left edge   (inclusive) */
+    uint16_t x_right;  /* pixel x right edge  (exclusive) */
+    uint16_t y_top;    /* pixel y top edge    (inclusive) */
+    uint16_t y_bottom; /* pixel y bottom edge (exclusive) */
+};
+
+/* input for encoder,
+ * indicate which parts of the source picture has changed.
+ * The encoder can (optionally) use this information to
+ * reduce memory bandwidth.
+ *
+ * n_rectangles=0 indicates the source picture is unchanged.
+ *
+ * This parameter only applies to the picture that immediately
+ * follows (and not to subsequent ones).
+ */
+struct mve_buffer_param_change_rectangles
+{
+    uint8_t n_rectangles; /* Number of rectangles */
+    uint8_t reserved[3];
+    #define MVE_MAX_FRAME_CHANGE_RECTANGLES 2
+    struct mve_buffer_param_rectangle rectangles[MVE_MAX_FRAME_CHANGE_RECTANGLES];
+};
+
+typedef struct mve_buffer_param_osd_cfg {
+    uint8_t osd_inside_enable;
+    uint8_t osd_inside_alpha_enable;
+    uint8_t osd_inside_convert_color_enable;
+    uint8_t osd_inside_alpha_value;             /* as alpha range [0~16], use u8 */
+    uint8_t osd_inside_convert_color_threshold; /* threshold range [0~255], if input is 10bit, th * 4 */
+    uint8_t osd_inside_rgb2yuv_mode;            /* 0-601L, 1-601F, 2-709_L, 3-709_F */
+    uint16_t osd_inside_start_x;                /* pixel x left edge (inclusive) */
+    uint16_t osd_inside_start_y;                /* pixel y top edge (inclusive) */
+    uint16_t reserved[3];
+} t_mve_buffer_param_osd_cfg;
+
+struct mve_buffer_param_osd_rectangles {
+    #define MVE_MAX_FRAME_OSD_REGION 2
+    t_mve_buffer_param_osd_cfg osd_single_cfg[MVE_MAX_FRAME_OSD_REGION];
+};
+
+
+/* Parameters that are sent in the same communication channels
+ * as the buffers. A parameter applies to all subsequent buffers.
+ * Some types are only valid for decode, and some only for encode.
+ */
+struct mve_buffer_param
+{
+    uint32_t type; /* Extra data: */
+    #define MVE_BUFFER_PARAM_TYPE_QP                         (2)  /* qp */
+    #define MVE_BUFFER_PARAM_TYPE_REGIONS                    (3)  /* regions */
+    #define MVE_BUFFER_PARAM_TYPE_DISPLAY_SIZE               (5)  /* display_size */
+    #define MVE_BUFFER_PARAM_TYPE_RANGE_MAP                  (6)  /* range_map */
+    #define MVE_BUFFER_PARAM_TYPE_FRAME_RATE                 (9)  /* arg, in frames per second, as a
+                                                                   * fixed point Q16 value, for example
+                                                                   * 0x001e0000 == 30.0 fps */
+    #define MVE_BUFFER_PARAM_TYPE_RATE_CONTROL               (10) /* rate_control */
+    #define MVE_BUFFER_PARAM_TYPE_QP_I                       (12) /* qp for I frames, when no rate control */
+    #define MVE_BUFFER_PARAM_TYPE_QP_P                       (13) /* qp for P frames, when no rate control */
+    #define MVE_BUFFER_PARAM_TYPE_QP_B                       (14) /* qp for B frames, when no rate control */
+    #define MVE_BUFFER_PARAM_TYPE_COLOUR_DESCRIPTION         (15) /* colour_description */
+    #define MVE_BUFFER_PARAM_TYPE_FRAME_PACKING              (16) /* frame_packing */
+    #define MVE_BUFFER_PARAM_TYPE_FRAME_FIELD_INFO           (17) /* frame_field_info */
+    #define MVE_BUFFER_PARAM_TYPE_GOP_RESET                  (18) /* no extra data */
+    #define MVE_BUFFER_PARAM_TYPE_DPB_HELD_FRAMES            (19) /* arg, number of output buffers that are
+                                                                   * complete and held by firmware in the
+                                                                   * DPB for reordering purposes.
+                                                                   * Valid after the next frame is output */
+    #define MVE_BUFFER_PARAM_TYPE_CHANGE_RECTANGLES          (20) /* change rectangles */
+    #define MVE_BUFFER_PARAM_TYPE_RATE_CONTROL_QP_RANGE      (21) /* rate_control_qp_range */
+    #define MVE_BUFFER_PARAM_TYPE_RATE_CONTROL_HRD_BUF_SIZE  (23) /* arg */
+    #define MVE_BUFFER_PARAM_TYPE_RATE_CONTROL_QP_RANGE_I    (25) /* special range for I frames,
+                                                                   * rate_control_qp_range */
+    #define MVE_BUFFER_PARAM_TYPE_SEI_USER_DATA_UNREGISTERED (26) /* sei user_data_unregistered */
+    #define MVE_BUFFER_PARAM_TYPE_GOP_RESET_DYNAMIC          (27)
+    #define MVE_BUFFER_PARAM_TYPE_GOP_RESET_LTR_PEROID_DYNAMIC (28) /* reset LTR period dynamically */
+    #define MVE_BUFFER_PARAM_TYPE_ENC_STATS                  (29) /* encode stats */
+    #define MVE_BUFFER_PARAM_TYPE_OSD_RECTANGLES             (30) /* osd rectangles */
+
+    union
+    {
+        uint32_t arg; /* some parameters only need a uint32_t as argument */
+        struct mve_buffer_param_qp qp;
+        struct mve_reset_gop_dynamic reset_gop_dynamic;
+        struct mve_reset_ltr_peroid_dynamic reset_ltr_peroid_dynamic;
+        struct mve_buffer_param_regions regions;
+        struct mve_buffer_param_display_size display_size;
+        struct mve_buffer_param_range_map range_map;
+        struct mve_buffer_param_rate_control rate_control;
+        struct mve_buffer_param_rate_control_qp_range rate_control_qp_range;
+        struct mve_buffer_param_colour_description colour_description;
+        struct mve_buffer_param_frame_packing frame_packing;
+        struct mve_buffer_param_frame_field_info frame_field_info;
+        struct mve_buffer_param_change_rectangles change_rectangles;
+        struct mve_buffer_param_sei_user_data_unregistered user_data_unregistered;
+        struct mve_buffer_param_enc_stats enc_stats;
+        struct mve_buffer_param_osd_rectangles osd_rectangles_buff;
+    } data;
+};
+
+/* output from decoder, assertive display statistics.
+ * buffer_ptr points to a buffer of luma quad average values for the picture
+ * that can be used as a thumbnail. the type of content used to generate the
+ * assertive display statistics is indicated by MVE_AD_STATS_PIC_FMT_INTERLACED.
+ * for progressive content; the arrangement is in raster format with dimensions
+ * thumbnail_width by thumbnail_height. the overall frame average luma and
+ * chroma values are returned in frame_average.
+ * for interlaced content; the arrangement is in raster format, top field
+ * followed by bottom field with each field having dimensions thumbnail_width by
+ * thumbnail_height. the field averages for luma and chroma values are combined
+ * and returned in an overall value for the frame (frame_average).
+ */
+struct mve_buffer_general_ad_stats
+{
+
+    uint32_t frame_averages;
+    /* bitfields */
+    #define MVE_AD_STATS_PIC_AVGS_Y     (0)
+    #define MVE_AD_STATS_PIC_AVGS_Y_SZ  (12)
+    #define MVE_AD_STATS_PIC_AVGS_CB    (12)
+    #define MVE_AD_STATS_PIC_AVGS_CB_SZ (10)
+    #define MVE_AD_STATS_PIC_AVGS_CR    (22)
+    #define MVE_AD_STATS_PIC_AVGS_CR_SZ (10)
+    uint16_t thumbnail_width;
+    uint16_t thumbnail_height;
+    uint8_t ad_stats_flags;
+    #define MVE_AD_STATS_PIC_FMT_PROGRESSIVE (0)
+    #define MVE_AD_STATS_PIC_FMT_INTERLACED  (1)
+    uint8_t reserved[3];
+};
+
+/* The general purpose buffer header stores the common fields of an
+ * mve_buffer_general. it contains the pointer to the data buffer that contains
+ * the general purpose data
+ */
+struct mve_buffer_general_hdr
+{
+    /* For identification of the buffer, this is not changed by the firmware. */
+    uint64_t host_handle;
+
+    /* this depends upon the type of the general purpose buffer */
+    uint64_t user_data_tag;
+
+    /* pointer to the buffer containing the general purpose data. the format
+     * of this data is defined by the configuration in the mve_buffer_general */
+    uint32_t buffer_ptr;
+
+    /* size of the buffer pointed to by buffer_ptr */
+    uint32_t buffer_size;
+
+    /* selects the type of semantics to use for the general purpose buffer. it
+     * tags (or discriminates) the union config member in mve_buffer_general
+     */
+    uint16_t type; /* Extra data: */
+    #define MVE_BUFFER_GENERAL_TYPE_INVALID       (0) /* invalid */
+    #define MVE_BUFFER_GENERAL_TYPE_BLOCK_CONFIGS (1) /* block_configs */
+    #define MVE_BUFFER_GENERAL_TYPE_AD_STATS      (2) /* assertive display statistics */
+    #define MVE_BUFFER_GENERAL_TYPE_ENC_STATS     (3) /* enc stats */
+    /* size of the mve_buffer_general config member */
+    uint16_t config_size;
+
+    /* pad to force 8-byte alignment */
+    uint32_t reserved;
+};
+
+/* The general purpose buffer consists of a header and a configuration. The
+ * header contains a pointer to a buffer whose format is described by the
+ * configuration. The type of configuration is indicated by the type value in
+ * the header. N.B. In use, the size of the config part of this structure is
+ * defined in the header and is not necessarily equal to that returned by the
+ * sizeof() operator. This allows a more size efficient communication between
+ * the host and firmware.
+ */
+struct mve_buffer_general
+{
+    struct mve_buffer_general_hdr header;
+
+    /* used to describe the configuration of the general purpose buffer data
+     * pointed to be buffer_ptr
+     */
+    union
+    {
+        struct mve_buffer_general_block_configs block_configs;
+    } config;
+};
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __FW_INCLUDE__MVE_PROTOCOL_DEF_H__ */
diff --git a/drivers/media/platform/cix/cix_vpu/if/mvx_bitops.h b/drivers/media/platform/cix/cix_vpu/if/mvx_bitops.h
new file mode 100755
index 000000000000..20ea99e03f06
--- /dev/null
+++ b/drivers/media/platform/cix/cix_vpu/if/mvx_bitops.h
@@ -0,0 +1,91 @@
+/*
+ * The confidential and proprietary information contained in this file may
+ * only be used by a person authorised under and to the extent permitted
+ * by a subsisting licensing agreement from Arm Technology (China) Co., Ltd.
+ *
+ * (C) COPYRIGHT 2021-2021 Arm Technology (China) Co., Ltd.
+ * ALL RIGHTS RESERVED + * + * This entire notice must be reproduced on all copies of this file + * and copies of this file may only be made by a person if such person is + * permitted to do so under the terms of a subsisting license agreement + * from Arm Technology (China) Co., Ltd. + * + * SPDX-License-Identifier: GPL-2.0-only + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + * + */ + +#ifndef _MVX_BITOPS_H_ +#define _MVX_BITOPS_H_ + +/**************************************************************************** + * Includes + ****************************************************************************/ + +#include +#include + +/**************************************************************************** + * Static functions + ****************************************************************************/ + +/** + * mvx_set_bit() - Set a bit in the bitmask. + * @bit: Bit to be set. + * @addr: Pointer to bitmask. + * + * Works similar to set_bit but uses no locks, is not atomic and protects + * agains overflow. + */ +static inline void mvx_set_bit(unsigned int bit, + uint64_t *addr) +{ + BUG_ON(bit >= (sizeof(*addr) * 8)); + *addr |= 1ull << bit; +} + +/** + * mvx_clear_bit() - Clear a bit in the bitmask. + * @bit: Bit to be cleared. + * @addr: Pointer to bitmask. 
+ * + * Works similar to clear_bit but uses no locks, is not atomic and protects + * agains overflow. + */ +static inline void mvx_clear_bit(unsigned int bit, + uint64_t *addr) +{ + BUG_ON(bit >= (sizeof(*addr) * 8)); + *addr &= ~(1ull << bit); +} + +/** + * mvx_test_bit() - Test a bit in the bitmask. + * @bit: Bit to be tested. + * @addr: Pointer to bitmask. + * + * Works similar to test_bit but uses no locks, is not atomic and protects + * agains overflow. + */ +static inline bool mvx_test_bit(unsigned int bit, + uint64_t *addr) +{ + BUG_ON(bit >= (sizeof(*addr) * 8)); + return 0 != (*addr & (1ull << bit)); +} + +#endif /* _MVX_BITOPS_H_ */ diff --git a/drivers/media/platform/cix/cix_vpu/if/mvx_buffer.c b/drivers/media/platform/cix/cix_vpu/if/mvx_buffer.c new file mode 100755 index 000000000000..2b5147c3761e --- /dev/null +++ b/drivers/media/platform/cix/cix_vpu/if/mvx_buffer.c @@ -0,0 +1,649 @@ +/* + * The confidential and proprietary information contained in this file may + * only be used by a person authorised under and to the extent permitted + * by a subsisting licensing agreement from Arm Technology (China) Co., Ltd. + * + * (C) COPYRIGHT 2021-2021 Arm Technology (China) Co., Ltd. + * ALL RIGHTS RESERVED + * + * This entire notice must be reproduced on all copies of this file + * and copies of this file may only be made by a person if such person is + * permitted to do so under the terms of a subsisting license agreement + * from Arm Technology (China) Co., Ltd. + * + * SPDX-License-Identifier: GPL-2.0-only + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + * + */ + +/**************************************************************************** + * Includes + ****************************************************************************/ + +#include +#include "mvx_buffer.h" +#include "mvx_seq.h" +#include "mvx_log_group.h" + +/**************************************************************************** + * Defines + ****************************************************************************/ + +/** + * Each 2x2 pixel square is subsampled. How many samples that are taken depends + * on the color format, but typically the luma channel (Y) gets 4 samples and + * the luma channels (UV) get 2 or 4 samples. + */ +#define SUBSAMPLE_PIXELS 2 + +/**************************************************************************** + * Static functions + ****************************************************************************/ + +/** + * get_stride() - Get 3 plane stride for 2x2 pixels square. + * @format: MVX frame format. + * @stride: [plane 0, plane 1, plane 2][x, y] stride. + * + * Calculate the stride in bytes for each plane for a subsampled (2x2) pixels + * square. + * + * Return: 0 on success, else error code. 
+ */ +static int get_stride(enum mvx_format format, + uint8_t *nplanes, + unsigned int stride[MVX_BUFFER_NPLANES][2]) +{ + switch (format) { + case MVX_FORMAT_YUV420_I420: + *nplanes = 3; + stride[0][0] = 2; + stride[0][1] = 2; + stride[1][0] = 1; + stride[1][1] = 1; + stride[2][0] = 1; + stride[2][1] = 1; + break; + case MVX_FORMAT_YUV420_NV12: + case MVX_FORMAT_YUV420_NV21: + *nplanes = 2; + stride[0][0] = 2; + stride[0][1] = 2; + stride[1][0] = 2; + stride[1][1] = 1; + stride[2][0] = 0; + stride[2][1] = 0; + break; + case MVX_FORMAT_YUV420_P010: + case MVX_FORMAT_YUV420_2P_10: + *nplanes = 2; + stride[0][0] = 4; + stride[0][1] = 2; + stride[1][0] = 4; + stride[1][1] = 1; + stride[2][0] = 0; + stride[2][1] = 0; + break; + case MVX_FORMAT_YUV420_Y0L2: + case MVX_FORMAT_YUV420_AQB1: + *nplanes = 1; + stride[0][0] = 8; + stride[0][1] = 1; + stride[1][0] = 0; + stride[1][1] = 0; + stride[2][0] = 0; + stride[2][1] = 0; + break; + case MVX_FORMAT_YUV422_YUY2: + case MVX_FORMAT_YUV422_UYVY: + *nplanes = 1; + stride[0][0] = 4; + stride[0][1] = 2; + stride[1][0] = 0; + stride[1][1] = 0; + stride[2][0] = 0; + stride[2][1] = 0; + break; + case MVX_FORMAT_YUV422_Y210: + case MVX_FORMAT_YUV422_1P_10: + case MVX_FORMAT_RGBA_8888: + case MVX_FORMAT_BGRA_8888: + case MVX_FORMAT_ARGB_8888: + case MVX_FORMAT_ABGR_8888: + *nplanes = 1; + stride[0][0] = 8; + stride[0][1] = 2; + stride[1][0] = 0; + stride[1][1] = 0; + stride[2][0] = 0; + stride[2][1] = 0; + break; + case MVX_FORMAT_RGB_888: + case MVX_FORMAT_BGR_888: + *nplanes = 1; + stride[0][0] = 6; + stride[0][1] = 2; + stride[1][0] = 0; + stride[1][1] = 0; + stride[2][0] = 0; + stride[2][1] = 0; + break; + case MVX_FORMAT_RGB_888_3P: + case MVX_FORMAT_YUV444: + *nplanes = 3; + stride[0][0] = 2; + stride[0][1] = 2; + stride[1][0] = 2; + stride[1][1] = 2; + stride[2][0] = 2; + stride[2][1] = 2; + break; + case MVX_FORMAT_Y: + *nplanes = 1; + stride[0][0] = 2; + stride[0][1] = 2; + stride[1][0] = 0; + stride[1][1] = 0; + 
stride[2][0] = 0; + stride[2][1] = 0; + break; + case MVX_FORMAT_Y_10: + *nplanes = 1; + stride[0][0] = 4; + stride[0][1] = 2; + stride[1][0] = 0; + stride[1][1] = 0; + stride[2][0] = 0; + stride[2][1] = 0; + break; + case MVX_FORMAT_YUV444_10: + *nplanes = 3; + stride[0][0] = 4; + stride[0][1] = 2; + stride[1][0] = 4; + stride[1][1] = 2; + stride[2][0] = 4; + stride[2][1] = 2; + break; + case MVX_FORMAT_YUV420_I420_10: + *nplanes = 3; + stride[0][0] = 4; + stride[0][1] = 2; + stride[1][0] = 2; + stride[1][1] = 1; + stride[2][0] = 2; + stride[2][1] = 1; + break; + default: + return -EINVAL; + } + + return 0; +} + +static int map_plane(struct mvx_buffer *buf, + mvx_mmu_va begin, + mvx_mmu_va end, + mvx_mmu_va *next_va, + unsigned int plane) +{ + mvx_mmu_va start_va = *next_va; + mvx_mmu_va stop_va = end; + mvx_mmu_va va = 0; + int ret; + uint32_t size; + struct mvx_buffer_plane *p = &buf->planes[plane]; + size = MVE_PAGE_SIZE * p->pages->capacity; + va = (start_va + MVE_PAGE_SIZE - 1) & ~(MVE_PAGE_SIZE - 1); + do { + mvx_mmu_va tried_size = MVE_PAGE_SIZE; + if (end - va < size) { + /* The remaining VA space to the end of region is not enough, + * so rewind to the beginning of region. Set 'stop va' to + * the searching start point. */ + va = begin; + stop_va = min(start_va, end - size); + } + ret = mvx_mmu_map_pages(buf->mmu, va, p->pages, MVX_ATTR_SHARED_RW, + MVX_ACCESS_READ_WRITE, &tried_size); + if (ret == 0) { + MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_INFO, + "Memory map buffer. buf=%px, plane=%u, va=0x%x, size=%zu.", + buf, plane, p->pages->va, + mvx_buffer_size(buf, plane)); + *next_va = (va + size) >= end ? 
begin : (va + size); + return 0; + } + + if (ret != -EAGAIN) + return ret; + + tried_size = (tried_size + MVE_PAGE_SIZE - 1) & ~(MVE_PAGE_SIZE - 1); + va += tried_size; + } while (va < stop_va); + + return -ENOMEM; +} + +/**************************************************************************** + * External functions + ****************************************************************************/ + +void mvx_buffer_show(struct mvx_buffer *buf, + struct seq_file *s) +{ + int i; + int ind = 0; + + mvx_seq_printf(s, "mvx_buffer", ind, "%px\n", buf); + + ind++; + mvx_seq_printf(s, "format", ind, "0x%x\n", buf->format); + mvx_seq_printf(s, "dir", ind, "%u\n", buf->dir); + mvx_seq_printf(s, "flags", ind, "0x%0x\n", buf->flags); + mvx_seq_printf(s, "width", ind, "%u\n", buf->width); + mvx_seq_printf(s, "height", ind, "%u\n", buf->height); + mvx_seq_printf(s, "nplanes", ind, "%u\n", buf->nplanes); + mvx_seq_printf(s, "planes", ind, "\n"); + ind++; + for (i = 0; i < buf->nplanes; ++i) { + char tag[10]; + struct mvx_buffer_plane *plane = &buf->planes[i]; + + scnprintf(tag, sizeof(tag), "#%d", i); + mvx_seq_printf(s, tag, ind, + "va: 0x%08x, size: %10zu, stride: %5u, filled: %10u\n", + mvx_buffer_va(buf, i), + mvx_buffer_size(buf, i), + plane->stride, + plane->filled); + } + + ind--; +} + +int mvx_buffer_construct(struct mvx_buffer *buf, + struct device *dev, + struct mvx_mmu *mmu, + enum mvx_direction dir, + unsigned int nplanes, + struct sg_table **sgt) +{ + int i; + + if (nplanes > MVX_BUFFER_NPLANES) { + MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_WARNING, + "Failed to construct buffer. Too many planes. 
nplanes=%u.", + nplanes); + return -EINVAL; + } + + memset(buf, 0, sizeof(*buf)); + + buf->dev = dev; + buf->mmu = mmu; + buf->dir = dir; + buf->nplanes = nplanes; + + for (i = 0; i < buf->nplanes; ++i) { + struct mvx_buffer_plane *plane = &buf->planes[i]; + + if (sgt[i] == NULL) + break; + + plane->pages = mvx_mmu_alloc_pages_sg(dev, sgt[i], 0); + if (IS_ERR(plane->pages)) + goto free_pages; + } + + return 0; + +free_pages: + while (i--) + mvx_mmu_free_pages(buf->planes[i].pages); + + return -ENOMEM; +} + +void mvx_buffer_destruct(struct mvx_buffer *buf) +{ + int i; + + mvx_buffer_unmap(buf); + + for (i = 0; i < buf->nplanes; i++) + if (buf->planes[i].pages != NULL) { + mvx_mmu_free_pages(buf->planes[i].pages); + buf->planes[i].pages = NULL; + } +} + +static int mvx_buffer_map_contiguous_planes(struct mvx_buffer *buf, + mvx_mmu_va begin, + mvx_mmu_va end, + mvx_mmu_va *next_va, + unsigned int *size) +{ + uint32_t cur_va; + int i; + int ret; + for (i = 0; i < buf->nplanes; i++) { + struct mvx_buffer_plane *plane = &buf->planes[i]; + + if (i == 0) { + ret = map_plane(buf, begin, end, next_va, i); + if (ret != 0) { + mvx_buffer_unmap(buf); + break; + } + cur_va = plane->pages->va + size[i]; + } else { + plane->pages->va = cur_va; + cur_va += size[i]; + } + } + + return ret; +} + +static int mvx_buffer_map_discrete_planes(struct mvx_buffer *buf, + mvx_mmu_va begin, + mvx_mmu_va end, + mvx_mmu_va *next_va) +{ + int i; + int ret; + + for (i = 0; i < buf->nplanes; i++) { + ret = map_plane(buf, begin, end, next_va, i); + if (ret != 0) { + mvx_buffer_unmap(buf); + break; + } + } + + return ret; +} + +int mvx_buffer_map(struct mvx_buffer *buf, + mvx_mmu_va begin, + mvx_mmu_va end, + mvx_mmu_va *next_va, + unsigned int *size) +{ + int i; + + for (i = 0; i < buf->nplanes; i++) + if (buf->planes[i].pages == NULL) + return -EINVAL; + + if (buf->is_contiguous) + return mvx_buffer_map_contiguous_planes(buf, begin, end, next_va, size); + else + return 
mvx_buffer_map_discrete_planes(buf, begin, end, next_va); +} + +void mvx_buffer_unmap(struct mvx_buffer *buf) +{ + int i; + + for (i = 0; i < buf->nplanes; i++) { + struct mvx_buffer_plane *plane = &buf->planes[i]; + + if ((plane->pages != NULL) && (plane->pages->va != 0)) { + mvx_mmu_unmap_pages(plane->pages); + } + } +} + +bool mvx_buffer_is_mapped(struct mvx_buffer *buf) +{ + return (buf->planes[0].pages != NULL) && + (buf->planes[0].pages->va != 0); +} + +int mvx_buffer_synch(struct mvx_buffer *buf, + enum dma_data_direction dir) +{ + int i; + int ret; + + for (i = 0; i < buf->nplanes; i++) { + struct mvx_buffer_plane *plane = &buf->planes[i]; + + if (plane->pages != NULL) { + ret = mvx_mmu_synch_pages(plane->pages, dir); + if (ret != 0) + return ret; + } + } + + return 0; +} + +void mvx_buffer_clear(struct mvx_buffer *buf) +{ + unsigned int i; + + buf->flags = 0; + + for (i = 0; i < buf->nplanes; i++) + buf->planes[i].filled = 0; +} + +int mvx_buffer_filled_set(struct mvx_buffer *buf, + unsigned int plane, + unsigned int filled, + unsigned int offset) +{ + struct mvx_buffer_plane *p = &buf->planes[plane]; + size_t size = mvx_buffer_size(buf, plane); + + if (plane > buf->nplanes) + return -EINVAL; + + if (size < (filled + offset)) { + MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_WARNING, + "Buffer plane too small. 
plane=%d, size=%zu, filled=%u, offset=%u.", + plane, size, filled, offset); + buf->flags |= MVX_BUFFER_FRAME_NEED_REALLOC; + return -ENOMEM; + } + + p->filled = filled; + p->offset = offset; + + return 0; +} + +size_t mvx_buffer_size(struct mvx_buffer *buf, + unsigned int plane) +{ + struct mvx_buffer_plane *p = &buf->planes[plane]; + + if (plane >= buf->nplanes || p->pages == NULL) + return 0; + + return mvx_mmu_size_pages(p->pages); +} + +mvx_mmu_va mvx_buffer_va(struct mvx_buffer *buf, + unsigned int plane) +{ + struct mvx_buffer_plane *p = &buf->planes[plane]; + + if (plane >= buf->nplanes || p->pages == NULL) + return 0; + + return p->pages->va + p->pages->offset; +} + +int mvx_buffer_frame_dim(enum mvx_format format, + unsigned int width, + unsigned int height, + uint8_t *nplanes, + unsigned int *stride, + unsigned int *size, + unsigned int *setting_stride) +{ + unsigned int s[MVX_BUFFER_NPLANES][2]; + unsigned int __nplanes = *nplanes; + int i; + int ret; + + ret = get_stride(format, nplanes, s); + if (ret != 0) + return ret; + + for (i = 0; i < *nplanes; i++) { + const unsigned int stride_align = 1; + unsigned int tmp = DIV_ROUND_UP(width * s[i][0], + SUBSAMPLE_PIXELS); + /* Use optimal stride if no special stride was requested. */ + if (i >= __nplanes || stride[i] == 0){ + if (setting_stride[i] == stride[i]) { + stride[i] = max(stride[i], round_up(tmp, stride_align)); + } else { + stride[i] = round_up(tmp, stride_align); + } + } else { + /* Else make sure to round up to minimum stride. 
*/ + stride[i] = max(stride[i], tmp); + } + size[i] = DIV_ROUND_UP(height * s[i][1], + SUBSAMPLE_PIXELS ) * stride[i]; + } + /* a patch for nv12/nv21/p010 odd height/width output*/ + if (*nplanes == 2 && (width % 2 != 0 || height % 2 != 0)) { + unsigned int tmp = DIV_ROUND_UP(width, SUBSAMPLE_PIXELS) * s[1][0]; + stride[1] = max(stride[1], tmp); + size[1] = DIV_ROUND_UP(height * s[1][1], + SUBSAMPLE_PIXELS ) * stride[1]; + } + return 0; +} + +void mvx_buffer_max_resolution(struct mvx_buffer *buf, + unsigned int *max_width, + unsigned int *max_height) +{ + unsigned int s[MVX_BUFFER_NPLANES][2]; + uint8_t nplanes; + int ret; + + ret = get_stride(buf->format, &nplanes, s); + if (ret != 0) { + *max_width = buf->width; + *max_height = buf->height; + return; + } + *max_width = DIV_ROUND_UP(buf->planes[0].stride * SUBSAMPLE_PIXELS, s[0][0]); + *max_height = buf->planes[0].length / buf->planes[0].stride; +} + +int mvx_buffer_frame_set(struct mvx_buffer *buf, + enum mvx_format format, + unsigned int width, + unsigned int height, + unsigned int *stride, + unsigned int *size, + bool interlaced) +{ + int i; + + buf->format = format; + buf->width = width; + buf->height = height; + + for (i = 0; i < buf->nplanes; i++) { + struct mvx_buffer_plane *plane = &buf->planes[i]; + + plane->stride = stride[i]; + plane->offset = 0; + + if (buf->dir == MVX_DIR_OUTPUT) { + int ret; + + ret = mvx_buffer_filled_set(buf, i, size[i], 0); + if (ret != 0) + return ret; + } + + /* Verify that plane has correct length. */ + if (plane->filled > 0 && plane->filled != size[i]) { + MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_INFO, + "Buffer filled length does not match plane size. plane=%i, filled=%zu, size=%u.", + i, plane->filled, size[i]); + //return -ENOMEM; + } + + /* Verify that there is no buffer overflow. */ + if ((plane->filled + plane->offset) > mvx_buffer_size(buf, i)) { + MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_WARNING, + "Buffer plane size is too small. 
plane=%i, size=%zu, size=%u.", + i, size[i], mvx_buffer_size(buf, i)); + return -ENOMEM; + } + } + + if (interlaced != false){ + buf->flags |= MVX_BUFFER_INTERLACE; + } else { + buf->flags &= ~MVX_BUFFER_INTERLACE; + } + return 0; +} + +int mvx_buffer_afbc_set(struct mvx_buffer *buf, + enum mvx_format format, + unsigned int width, + unsigned int height, + unsigned int afbc_width, + unsigned int size, + bool interlaced, + unsigned int plane) +{ + int ret; + + buf->format = format; + buf->width = width; + buf->height = height; + buf->planes[plane].offset = 0; + buf->planes[plane].afbc_width = afbc_width; + + if (buf->dir == MVX_DIR_INPUT) { + buf->crop_left = 0; + buf->crop_top = 0; + } + + if (buf->dir == MVX_DIR_OUTPUT) { + ret = mvx_buffer_filled_set(buf, plane, size, 0); + if (ret != 0) + return ret; + } + + if (size > mvx_buffer_size(buf, plane)) { + MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_WARNING, + "AFBC buffer too small. buf_size=%zu, size=%u.", + size, mvx_buffer_size(buf, plane)); + return -ENOMEM; + } + + if (interlaced != false) + buf->flags |= MVX_BUFFER_INTERLACE; + + return 0; +} diff --git a/drivers/media/platform/cix/cix_vpu/if/mvx_buffer.h b/drivers/media/platform/cix/cix_vpu/if/mvx_buffer.h new file mode 100755 index 000000000000..c14e6b21cab7 --- /dev/null +++ b/drivers/media/platform/cix/cix_vpu/if/mvx_buffer.h @@ -0,0 +1,439 @@ +/* + * The confidential and proprietary information contained in this file may + * only be used by a person authorised under and to the extent permitted + * by a subsisting licensing agreement from Arm Technology (China) Co., Ltd. + * + * (C) COPYRIGHT 2021-2021 Arm Technology (China) Co., Ltd. + * ALL RIGHTS RESERVED + * + * This entire notice must be reproduced on all copies of this file + * and copies of this file may only be made by a person if such person is + * permitted to do so under the terms of a subsisting license agreement + * from Arm Technology (China) Co., Ltd. 
+ * + * SPDX-License-Identifier: GPL-2.0-only + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + * + */ + +#ifndef _MVX_BUFFER_H_ +#define _MVX_BUFFER_H_ + +/**************************************************************************** + * Includes + ****************************************************************************/ + +#include +#include +#include "mvx_if.h" +#include "mvx_mmu.h" +/**************************************************************************** + * Defines + ****************************************************************************/ + +#define MVX_BUFFER_NPLANES 3 +#define MVX_ROI_QP_NUMS 10 + +/**************************************************************************** + * Types + ****************************************************************************/ + +struct device; + +/** + * struct mvx_buffer_plane - Plane information. + * @pages: MMU pages object. + * @stride: Horizontal stride in bytes. + * @filled: Number of bytes written to this plane. For a frame buffer this + * value should always match the size of the plane. + * @offset: Offset in bytes from begin of buffer to first bitstream data. + * @afbc_width: AFBC width in superblocks. 
+ */ +struct mvx_buffer_plane { + struct mvx_mmu_pages *pages; + unsigned int stride; + unsigned int filled; + unsigned int offset; + unsigned int afbc_width; + unsigned int length; +}; + +struct mvx_buffer_general_encoder_stats +{ + uint32_t mms_buffer_size; + uint32_t bitcost_buffer_size; + uint32_t qp_buffer_size; + uint32_t flags; + //ENC_STATS_FLAGS + #define MVX_BUFFER_ENC_STATS_FLAG_MMS (1<<0) + #define MVX_BUFFER_ENC_STATS_FLAG_BITCOST (1<<1) + #define MVX_BUFFER_ENC_STATS_FLAG_QP (1<<2) + #define MVX_BUFFER_ENC_STATS_FLAG_DROP (1<<3) + uint32_t reserved; +}; + +struct mvx_buffer_general_rows_uncomp_hdr +{ + uint8_t n_cols_minus1; /* number of quad cols in picture minus 1 */ + uint8_t n_rows_minus1; /* number of quad rows in picture minus 1 */ + uint8_t reserved[2]; +}; + +struct mvx_buffer_general_block_configs +{ + uint8_t blk_cfg_type; + #define MVX_BLOCK_CONFIGS_TYPE_NONE (0x00) + #define MVX_BLOCK_CONFIGS_TYPE_ROW_UNCOMP (0xff) + uint8_t reserved[3]; + union + { + struct mvx_buffer_general_rows_uncomp_hdr rows_uncomp; + } blk_cfgs; +}; + +struct mvx_buffer_general_hdr +{ + /* For identification of the buffer, this is not changed by the firmware. */ + uint64_t host_handle; + + /* this depends upon the type of the general purpose buffer */ + uint64_t user_data_tag; + + /* pointer to the buffer containing the general purpose data. the format + * of this data is defined by the configuration in the mve_buffer_general */ + uint32_t buffer_ptr; + + /* size of the buffer pointed to by buffer_ptr */ + uint32_t buffer_size; + + /* selects the type of semantics to use for the general purpose buffer. 
it + * tags (or discriminates) the union config member in mve_buffer_general + */ + uint16_t type; /* Extra data: */ + #define MVX_BUFFER_GENERAL_TYPE_BLOCK_CONFIGS (1) /* block_configs */ + #define MVX_BUFFER_GENERAL_TYPE_ENCODER_STATS (3) /* encoder_stats */ + + /* size of the mve_buffer_general config member */ + uint16_t config_size; + + /* pad to force 8-byte alignment */ + uint32_t reserved; +}; + +struct mvx_buffer_general +{ + struct mvx_buffer_general_hdr header; + + /* used to describe the configuration of the general purpose buffer data + * pointed to be buffer_ptr + */ + union + { + struct mvx_buffer_general_block_configs block_configs; + struct mvx_buffer_general_encoder_stats encoder_stats; + } config; +}; + +struct mvx_enc_stats +{ + uint32_t mms_buffer_size; + uint32_t bitcost_buffer_size; + uint32_t qp_buffer_size; + uint32_t flags; + //ENC_STATS_FLAGS + #define MVX_BUFFER_ENC_STATS_FLAG_MMS (1<<0) + #define MVX_BUFFER_ENC_STATS_FLAG_BITCOST (1<<1) + #define MVX_BUFFER_ENC_STATS_FLAG_QP (1<<2) + #define MVX_BUFFER_ENC_STATS_FLAG_DROP (1<<3) + uint32_t pic_index; +}; + +/** + * struct mvx_buffer - Buffer descriptor. + * @dev: Pointer to device. + * @mmu: Pointer to MMU. + * @head: List head used to add buffer to various queues. + * @format: Bitstream or frame format. + * @dir: Direction the buffer was allocated for. + * @user_data: User data copied from input- to output buffer. + * @flags: Buffer flags. + * @width: Frame width in pixels. + * @height: Frame height in pixels. + * @crop_left: Left crop in pixels. + * @crop_top: Top crop in pixels. + * @nplanes: Number of planes. + * @planes: Array or planes. 
+ */ +struct mvx_buffer { + struct device *dev; + struct mvx_mmu *mmu; + struct list_head head; + enum mvx_format format; + enum mvx_direction dir; + uint64_t user_data; + unsigned int flags; + unsigned int in_flags; + unsigned int width; + unsigned int height; + unsigned int crop_left; + unsigned int crop_top; + unsigned int nplanes; + uint32_t offset; + struct mvx_buffer_plane planes[MVX_BUFFER_NPLANES]; + struct mvx_buffer_general general; + uint8_t frame_type; + uint8_t src_transform; + uint16_t bitstream_remaining_kb; + bool is_contiguous; +}; + +#define MVX_BUFFER_EOS 0x00000001 +#define MVX_BUFFER_EOF 0x00000002 +#define MVX_BUFFER_CORRUPT 0x00000004 +#define MVX_BUFFER_REJECTED 0x00000008 +#define MVX_BUFFER_DECODE_ONLY 0x00000010 +#define MVX_BUFFER_CODEC_CONFIG 0x00000020 +#define MVX_BUFFER_AFBC_TILED_HEADERS 0x00000040 +#define MVX_BUFFER_AFBC_TILED_BODY 0x00000080 +#define MVX_BUFFER_AFBC_32X8_SUPERBLOCK 0x00000100 +#define MVX_BUFFER_INTERLACE 0x00000200 +#define MVX_BUFFER_END_OF_SUB_FRAME 0x00000400 +#define MVX_BUFFER_FRAME_PRESENT 0x00000800 +#define MVX_BUFFER_SYNCFRAME 0x10000000 + +#define MVX_BUFFER_FRAME_FLAG_ROTATION_90 0x00001000 /* Frame is rotated 90 degrees */ +#define MVX_BUFFER_FRAME_FLAG_ROTATION_180 0x00002000 /* Frame is rotated 180 degrees */ +#define MVX_BUFFER_FRAME_FLAG_ROTATION_270 0x00003000 /* Frame is rotated 270 degrees */ +#define MVX_BUFFER_FRAME_FLAG_ROTATION_MASK 0x00003000 + +#define MVX_BUFFER_FRAME_FLAG_MIRROR_HORI 0x00010000 +#define MVX_BUFFER_FRAME_FLAG_MIRROR_VERT 0x00020000 +#define MVX_BUFFER_FRAME_FLAG_MIRROR_MASK 0x00030000 + +#define MVX_BUFFER_FRAME_FLAG_SCALING_2 0x00004000 /* Frame is scaled by half */ +#define MVX_BUFFER_FRAME_FLAG_SCALING_4 0x00008000 /* Frame is scaled by quarter */ +#define MVX_BUFFER_FRAME_FLAG_SCALING_MASK 0x0000C000 + +#define MVX_BUFFER_FRAME_FLAG_GENERAL 0x00040000 /* Frame is a general buffer */ +#define MVX_BUFFER_FRAME_FLAG_ROI 0x00080000 /* This buffer has a roi region */ 
+#define MVX_BUFFER_FRAME_FLAG_CHR 0x01000000 + +#define MVX_BUFFER_FRAME_NEED_REALLOC 0x00100000 /* This buffer needs realloc */ +#define MVX_BUFFER_FRAME_FLAG_GOP_REST 0x00200000 /* This buffer needs gop reset */ +#define MVX_BUFFER_FRAME_FLAG_LTR_REST 0x00400000 +#define MVX_BUFFER_FRAME_FLAG_FORCE_IDR (0x00800000) +#define MVX_BUFFER_ENC_STATS 0x02000000 + +#define MVX_BUFFER_FRAME_FLAG_OSD_1 0x04000000 +#define MVX_BUFFER_FRAME_FLAG_OSD_2 0x08000000 +#define MVX_BUFFER_FRAME_FLAG_OSD_MASK 0x0C000000 + +#define MVX_BUFFER_FRAME_FLAG_RESET_RC 0x40000000 + +/**************************************************************************** + * External functions + ****************************************************************************/ + +/** + * mvx_buffer_construct() - Construct the buffer object. + * @buf: Pointer to buffer. + * @dev: Pointer to device. + * @mmu: Pointer to MMU. + * @dir: Which direction the buffer was allocated for. + * @nplanes: Number of planes. + * @sgt: Array with SG tables. Each table contains a list of memory + * pages for corresponding plane. + * + * Return: 0 on success, else error code. + */ +int mvx_buffer_construct(struct mvx_buffer *buf, + struct device *dev, + struct mvx_mmu *mmu, + enum mvx_direction dir, + unsigned int nplanes, + struct sg_table **sgt); + +/** + * mvx_buffer_construct() - Destruct the buffer object. + * @buf: Pointer to buffer. + */ +void mvx_buffer_destruct(struct mvx_buffer *buf); + +/** + * mvx_buffer_map() - Map the buffer to the MVE virtual address space. + * @buf: Pointer to buffer. + * @begin: MVE virtual begin address. + * @end: MVE virtual end address. + * @size: size of each mvx_session plane. + * + * Try to MMU map the buffer anywhere between the begin and end addresses. + * + * Return: 0 on success, else error code. 
+ */ +int mvx_buffer_map(struct mvx_buffer *buf, + mvx_mmu_va begin, + mvx_mmu_va end, + mvx_mmu_va *next_va, + unsigned int *size); + +/** + * mvx_buffer_unmap() - Unmap the buffer from the MVE virtual address space. + * @buf: Pointer to buffer. + */ +void mvx_buffer_unmap(struct mvx_buffer *buf); + +/** + * mvx_buffer_is_mapped() - Return if buffer has been mapped. + * @buf: Pointer to buffer. + * + * Return: True if mapped, else false. + */ +bool mvx_buffer_is_mapped(struct mvx_buffer *buf); + +/** + * mvx_buffer_synch() - Synch the data caches. + * @buf: Pointer to buffer. + * @dir: Data direction. + * + * Return: 0 on success, else error code. + */ +int mvx_buffer_synch(struct mvx_buffer *buf, + enum dma_data_direction dir); + +/** + * mvx_buffer_clear() - Clear and empty the buffer. + * @buf: Pointer to buffer. + */ +void mvx_buffer_clear(struct mvx_buffer *buf); + +/** + * mvx_buffer_filled_set() - Set filled bytes for each plane. + * @buf: Pointer to buffer. + * @plane: Plane index. + * @filled: Number of bytes filled. + * @offset: Number of bytes offset. + * + * Return: 0 on success, else error code. + */ +int mvx_buffer_filled_set(struct mvx_buffer *buf, + unsigned int plane, + unsigned int filled, + unsigned int offset); + +/** + * mvx_buffer_size() - Get size in bytes for a plane. + * @buf: Pointer to buffer. + * @plane: Which plane to get size for. + * + * Return: Size of plane. + */ +size_t mvx_buffer_size(struct mvx_buffer *buf, + unsigned int plane); + +/** + * mvx_buffer_va() - Get VA for a plane. + * @buf: Pointer to buffer. + * @plane: Plane index. + * + * Return: VA address of plane, 0 if unmapped. + */ +mvx_mmu_va mvx_buffer_va(struct mvx_buffer *buf, + unsigned int plane); + +/** + * mvx_buffer_frame_dim() - Get frame buffer dimensions. + * @format: Bitstream or frame format. + * @width: Width in pixels. + * @height: Height in pixels. + * @nplanes: Number of planes for this format. + * @stride: Horizontal stride in bytes. 
+ * @size: Size in bytes for each plane. + * + * If *nplanes is larger than 0 then the stride is used as input to tell this + * function which stride that is desired, but it might be modified if the + * stride is too short or not optimal for the MVE hardware. + * + * Return: 0 on success, else error code. + */ +int mvx_buffer_frame_dim(enum mvx_format format, + unsigned int width, + unsigned int height, + uint8_t *nplanes, + unsigned int *stride, + unsigned int *size, + unsigned int *setting_stride); + +/** + * mvx_buffer_max_resolution() - Get frame buffer max resolution. + * @buf: Pointer to buffer. + * @max_width: Resolution max width in pixels. + * @max_height: Resolution max height in pixels. + */ +void mvx_buffer_max_resolution(struct mvx_buffer *buf, + unsigned int *max_width, + unsigned int *max_height); + +/** + * mvx_buffer_frame_set() - Set frame dimensions. + * @buf: Pointer to buffer. + * @format: Bitstream or frame format. + * @width: Width in pixels. + * @height: Height in pixels. + * @stride: Horizontal stride in bytes. + * @size: Size in bytes for each plane. + * @interlaced: Defines if the buffer is interlaced. + * + * Return: 0 on success, else error code. + */ +int mvx_buffer_frame_set(struct mvx_buffer *buf, + enum mvx_format format, + unsigned int width, + unsigned int height, + unsigned int *stride, + unsigned int *size, + bool interlaced); + +/** + * mvx_buffer_afbc_set() - Set AFBC dimensions. + * @buf: Pointer to buffer. + * @format: Bitstream or frame format. + * @width: Width in pixels. + * @height: Height in pixels. + * @afbc_width: AFBC width in superblocks. + * @size: Size in bytes for AFBC plane[i]. + * @interlaced: Defines if the buffer is interlaced. + * @plane: Plane index. + * + * Return: 0 on success, else error code. 
+ */ +int mvx_buffer_afbc_set(struct mvx_buffer *buf, + enum mvx_format format, + unsigned int width, + unsigned int height, + unsigned int afbc_width, + unsigned int size, + bool interlaced, + unsigned int plane); + +/** + * mvx_buffer_show() - Print debug information into seq-file. + * @buf: Pointer to buffer. + * @s: Seq-file to print to. + */ +void mvx_buffer_show(struct mvx_buffer *buf, + struct seq_file *s); + +#endif /* _MVX_BUFFER_H_ */ diff --git a/drivers/media/platform/cix/cix_vpu/if/mvx_firmware.c b/drivers/media/platform/cix/cix_vpu/if/mvx_firmware.c new file mode 100755 index 000000000000..8fd392eca9e8 --- /dev/null +++ b/drivers/media/platform/cix/cix_vpu/if/mvx_firmware.c @@ -0,0 +1,625 @@ +/* + * The confidential and proprietary information contained in this file may + * only be used by a person authorised under and to the extent permitted + * by a subsisting licensing agreement from Arm Technology (China) Co., Ltd. + * + * (C) COPYRIGHT 2021-2021 Arm Technology (China) Co., Ltd. + * ALL RIGHTS RESERVED + * + * This entire notice must be reproduced on all copies of this file + * and copies of this file may only be made by a person if such person is + * permitted to do so under the terms of a subsisting license agreement + * from Arm Technology (China) Co., Ltd. + * + * SPDX-License-Identifier: GPL-2.0-only + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 */

/****************************************************************************
 * Includes
 ****************************************************************************/

/*
 * NOTE(review): the <...> targets of the 14 system includes below were lost
 * when this patch was extracted (angle-bracketed names stripped). Restore
 * them from the original patch before applying; likely candidates are the
 * kernel headers for firmware loading, debugfs, seq_file, mutexes and
 * DMA/memory management used throughout this file — TODO confirm.
 */
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include "mvx_if.h"
#include "mvx_log_group.h"
#include "mvx_firmware_cache.h"
#include "mvx_firmware_priv.h"
#include "mvx_mmu.h"
#include "mvx_secure.h"
#include "mvx_seq.h"
#include "mvx_bitops.h"

/****************************************************************************
 * Defines
 ****************************************************************************/

/* MVE virtual address where the firmware text segment is mapped for a core. */
#define FW_TEXT_BASE_ADDR 0x1000u

/****************************************************************************
 * Private functions
 ****************************************************************************/

/**
 * test_bit_32() - 32 bit version Linux test_bit.
 * @bit:	Bit index to test.
 * @addr:	Bitmap stored as an array of 32 bit words.
 *
 * Test if bit is set in bitmap array. Word index is bit / 32, bit position
 * within the word is bit % 32.
 *
 * Return: True if the bit is set, else false.
 */
static bool test_bit_32(int bit,
			const uint32_t *addr)
{
	return 0 != (addr[bit >> 5] & (1 << (bit & 0x1f)));
}

/**
 * get_major_version() - Get firmware major version.
 * @fw_bin:	Firmware binary to query.
 *
 * Secure firmware carries its protocol version in the secure descriptor,
 * non-secure firmware in the plain firmware header.
 *
 * Return: Major version.
 */
static unsigned int get_major_version(const struct mvx_fw_bin *fw_bin)
{
	if (fw_bin->securevideo != false)
		return fw_bin->secure.securefw->protocol.major;
	else
		return fw_bin->nonsecure.header->protocol_major;
}

/**
 * get_minor_version() - Get firmware minor version.
 * @fw_bin:	Firmware binary to query.
 *
 * Return: Minor version.
 */
static unsigned int get_minor_version(const struct mvx_fw_bin *fw_bin)
{
	if (fw_bin->securevideo != false)
		return fw_bin->secure.securefw->protocol.minor;
	else
		return fw_bin->nonsecure.header->protocol_minor;
}

/**
 * fw_unmap() - Remove MMU mappings and release allocated memory.
 * @fw:	Firmware object whose mappings and pages are torn down.
 *
 * For non-secure sessions the per-core 4 MB firmware regions are unmapped
 * and the text/bss/bss_shared page allocations are freed (each guarded with
 * IS_ERR_OR_NULL so this is safe on partially constructed objects). The
 * protocol (message queue) mapping is removed in both secure and non-secure
 * cases.
 */
static void fw_unmap(struct mvx_fw *fw)
{
	unsigned int i;
	uint32_t begin;
	uint32_t end;
	uint64_t mask;
	int ret;

	mask = fw->core_mask;

	if (fw->fw_bin->securevideo == false) {
		/* Unmap a region of 4 MB for each core. */
		for (i = 0; i < fw->ncores; i++) {
			if (mvx_test_bit(i, &mask)) {
				ret = fw->ops.get_region(MVX_FW_REGION_CORE_0 + i,
							 &begin, &end);
				if (ret == 0)
					mvx_mmu_unmap_va(fw->mmu, begin,
							 4 * 1024 * 1024);
			}
		}

		if (!IS_ERR_OR_NULL(fw->text))
			mvx_mmu_free_pages(fw->text);

		if (!IS_ERR_OR_NULL(fw->bss))
			mvx_mmu_free_pages(fw->bss);

		if (!IS_ERR_OR_NULL(fw->bss_shared))
			mvx_mmu_free_pages(fw->bss_shared);
	}

	fw->ops.unmap_protocol(fw);
}

/**
 * fw_map_core() - Map pages for the text and BSS segments for one core.
 * @fw:	Firmware object (owns the MMU and the allocated page lists).
 * @core:	Index of the core to map.
 *
 * This function assumes that the fw instance has been correctly allocated
 * and instantiated and will therefore not make any NULL pointer checks. It
 * assumes that all pointers - for example to the mmu or firmware binary - have
 * been correctly set up.
 *
 * Return: 0 on success, else error code.
 */
static int fw_map_core(struct mvx_fw *fw,
		       unsigned int core)
{
	int ret;
	const struct mvx_fw_header *header = fw->fw_bin->nonsecure.header;
	mvx_mmu_va fw_base;
	mvx_mmu_va end;
	mvx_mmu_va va;
	unsigned int i;
	/* Each core gets its own slice of the private bss page pool. */
	unsigned int bss_cnt = core * fw->fw_bin->nonsecure.bss_cnt;
	unsigned int bss_scnt = 0;

	/*
	 * Get the base address where the pages for this cores should be
	 * mapped.
	 */
	ret = fw->ops.get_region(MVX_FW_REGION_CORE_0 + core, &fw_base, &end);
	if (ret != 0)
		return ret;

	/* Map text segment (executable, starting at FW_TEXT_BASE_ADDR). */
	ret = mvx_mmu_map_pages(fw->mmu,
				fw_base + FW_TEXT_BASE_ADDR,
				fw->text,
				MVX_ATTR_PRIVATE,
				MVX_ACCESS_EXECUTABLE,
				NULL);
	if (ret != 0)
		return ret;

	/*
	 * Map bss shared and private pages. Pages inside the master
	 * read/write window come from the shared pool; pages flagged in the
	 * bss bitmap come from this core's private pool.
	 *
	 * NOTE(review): when a page falls in neither category, 'ret' keeps
	 * its previous value (0 at this point), so the error check below is
	 * a no-op for skipped pages — benign, but worth confirming this is
	 * intentional.
	 */
	va = header->bss_start_address;
	for (i = 0; i < header->bss_bitmap_size; i++) {
		if (va >= header->master_rw_start_address &&
		    va < (header->master_rw_start_address +
			  header->master_rw_size))
			ret = mvx_mmu_map_pa(
				fw->mmu,
				fw_base + va,
				fw->bss_shared->pages[bss_scnt++],
				MVE_PAGE_SIZE,
				MVX_ATTR_PRIVATE,
				MVX_ACCESS_READ_WRITE);
		else if (test_bit_32(i, header->bss_bitmap))
			ret = mvx_mmu_map_pa(fw->mmu,
					     fw_base + va,
					     fw->bss->pages[bss_cnt++],
					     MVE_PAGE_SIZE,
					     MVX_ATTR_PRIVATE,
					     MVX_ACCESS_READ_WRITE);

		if (ret != 0)
			return ret;

		va += MVE_PAGE_SIZE;
	}

	return 0;
}

/**
 * fw_map() - Map up MMU tables.
 * @fw:	Firmware object to map.
 *
 * Secure path: attach the pre-built secure L2 page tables for each enabled
 * core. Non-secure path: allocate text/bss/bss-shared pages, map them for
 * each enabled core and copy the firmware text into place. Finally the
 * message queue (protocol) region is mapped for both paths.
 *
 * Return: 0 on success, else error code.
 */
static int fw_map(struct mvx_fw *fw)
{
	int ret;
	unsigned int i;
	uint64_t mask = fw->core_mask;
	mvx_mmu_va fw_base;
	mvx_mmu_va end;

	if (fw->fw_bin->securevideo != false) {
		/* Map MMU tables for each core. */
		for (i = 0; i < fw->ncores; i++) {
			if (mvx_test_bit(i, &mask)) {
				phys_addr_t l2 = fw->fw_bin->secure.securefw->l2pages +
						 i * MVE_PAGE_SIZE;

				ret = fw->ops.get_region(MVX_FW_REGION_CORE_0 + i,
							 &fw_base, &end);
				if (ret != 0)
					return ret;

				ret = mvx_mmu_map_l2(fw->mmu, fw_base, l2);
				if (ret != 0)
					goto unmap_fw;
			}
		}
	} else {
		const struct mvx_fw_bin *fw_bin = fw->fw_bin;
		const struct mvx_fw_header *header = fw_bin->nonsecure.header;

		/*
		 * Allocate memory for text segment.
		 *
		 * NOTE(review): a failure here returns directly while the
		 * later failures go through unmap_fw (which also calls
		 * ops.unmap_protocol before map_protocol ever ran) —
		 * inconsistent error paths; confirm unmap_protocol tolerates
		 * being called without a prior map_protocol.
		 */
		fw->text = mvx_mmu_alloc_pages(fw->dev, fw_bin->nonsecure.text_cnt, 0,
					       GFP_KERNEL);
		if (IS_ERR(fw->text))
			return PTR_ERR(fw->text);

		/* Allocate memory for BSS segment (one pool slice per core). */
		fw->bss = mvx_mmu_alloc_pages(fw->dev, fw_bin->nonsecure.bss_cnt * fw->ncores, 0,
					      GFP_KERNEL | __GFP_ZERO);
		if (IS_ERR(fw->bss)) {
			ret = PTR_ERR(fw->bss);
			goto unmap_fw;
		}

		/* Allocate memory for BSS shared segment. */
		fw->bss_shared = mvx_mmu_alloc_pages(fw->dev, fw_bin->nonsecure.sbss_cnt, 0,
						     GFP_KERNEL | __GFP_ZERO);
		if (IS_ERR(fw->bss_shared)) {
			ret = PTR_ERR(fw->bss_shared);
			goto unmap_fw;
		}

		/* Map MMU tables for each core. */
		for (i = 0; i < fw->ncores; i++) {
			if (mvx_test_bit(i, &mask)) {
				ret = fw_map_core(fw, i);
				if (ret != 0)
					goto unmap_fw;
			}
		}

		/*
		 * Copy firmware binary, using the region of the lowest
		 * enabled core (__ffs of the mask). All cores share the same
		 * text pages, so one copy suffices.
		 */
		ret = fw->ops.get_region(MVX_FW_REGION_CORE_0 + __ffs(mask), &fw_base, &end);
		if (ret != 0)
			goto unmap_fw;
		ret = mvx_mmu_write(fw->mmu, fw_base + FW_TEXT_BASE_ADDR,
				    fw_bin->nonsecure.fw->data,
				    header->text_length);
		if (ret != 0)
			goto unmap_fw;
	}

	/* Map MMU tables for the message queues. */
	ret = fw->ops.map_protocol(fw);
	if (ret != 0)
		goto unmap_fw;

	return 0;

unmap_fw:
	fw_unmap(fw);

	return ret;
}

/**
 * Callbacks and handlers for FW stats.
 */
static int fw_stat_show(struct seq_file *s,
			void *v)
{
	/* Debugfs 'stat' dump: pointers, queue handles, ops and fw_bin info. */
	struct mvx_fw *fw = (struct mvx_fw *)s->private;
	const struct mvx_fw_bin *fw_bin = fw->fw_bin;

	mvx_seq_printf(s, "mvx_fw", 0, "%px\n", fw);
	seq_puts(s, "\n");

	mvx_seq_printf(s, "mmu", 0, "%px\n", fw->mmu);

	/* Page pools only exist for non-secure sessions. */
	if (fw_bin->securevideo == false) {
		mvx_seq_printf(s, "text", 0, "%px\n", fw->text);
		mvx_seq_printf(s, "bss", 0, "%px\n", fw->bss);
		mvx_seq_printf(s, "bss_shared", 0, "%px\n", fw->bss_shared);
	}

	seq_puts(s, "\n");

	mvx_seq_printf(s, "msg_host", 0, "%px\n", fw->msg_host);
	mvx_seq_printf(s, "msg_mve", 0, "%px\n", fw->msg_mve);
	mvx_seq_printf(s, "buf_in_host", 0, "%px\n", fw->buf_in_host);
	mvx_seq_printf(s, "buf_in_mve", 0, "%px\n", fw->buf_in_mve);
	mvx_seq_printf(s, "buf_out_host", 0, "%px\n", fw->buf_out_host);
	mvx_seq_printf(s, "buf_out_mve", 0, "%px\n", fw->buf_out_mve);
	seq_puts(s, "\n");

	fw->ops.print_stat(fw, 0, s);
	seq_puts(s, "\n");

	mvx_seq_printf(s, "rpc", 0, "%px\n", fw->rpc);
	mvx_seq_printf(s, "ncores", 0, "%u\n", fw->ncores);
	mvx_seq_printf(s, "msg_pending", 0, "%u\n", fw->msg_pending);
	seq_puts(s, "\n");

	mvx_seq_printf(s, "ops.map_protocol", 0, "%ps\n",
		       fw->ops.map_protocol);
	mvx_seq_printf(s, "ops.unmap_protocol", 0, "%ps\n",
		       fw->ops.unmap_protocol);
	mvx_seq_printf(s, "ops.get_region", 0, "%ps\n",
		       fw->ops.get_region);
	mvx_seq_printf(s, "ops.get_message", 0, "%ps\n",
		       fw->ops.get_message);
	mvx_seq_printf(s, "ops.put_message", 0, "%ps\n",
		       fw->ops.put_message);
	mvx_seq_printf(s, "ops.handle_rpc", 0, "%ps\n",
		       fw->ops.handle_rpc);
	seq_puts(s, "\n");

	mvx_seq_printf(s, "fw_bin", 0, "%px\n", fw_bin);
	mvx_seq_printf(s, "fw_bin.cache", 0, "%px\n", fw_bin->cache);
	mvx_seq_printf(s, "fw_bin.filename", 0, "%s\n", fw_bin->filename);
	mvx_seq_printf(s, "fw_bin.format", 0, "%u\n", fw_bin->format);
	mvx_seq_printf(s, "fw_bin.dir", 0, "%s\n",
		       (fw_bin->dir == MVX_DIR_INPUT) ? "in" :
		       (fw_bin->dir == MVX_DIR_OUTPUT) ? "out" :
		       "invalid");

	if (fw_bin->securevideo == false) {
		mvx_seq_printf(s, "fw_bin.text_cnt", 0, "%u\n",
			       fw_bin->nonsecure.text_cnt);
		mvx_seq_printf(s, "fw_bin.bss_cnt", 0, "%u\n",
			       fw_bin->nonsecure.bss_cnt);
		mvx_seq_printf(s, "fw_bin.sbss_cnt", 0, "%u\n",
			       fw_bin->nonsecure.sbss_cnt);
	}

	return 0;
}

/* debugfs open handler for the 'stat' file. */
static int fw_stat_open(struct inode *inode,
			struct file *file)
{
	return single_open(file, fw_stat_show, inode->i_private);
}

static const struct file_operations fw_stat_fops = {
	.open = fw_stat_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release
};

/*
 * seq_file iterator over the rpc_mem hash table, serialized by
 * rpcmem_mutex (taken in start, dropped in stop).
 *
 * NOTE(review): the error code from mutex_lock_interruptible() is
 * discarded and replaced by ERR_PTR(-EINVAL) — presumably deliberate
 * for the seq_file contract; confirm.
 */
static void *rpcmem_seq_start(struct seq_file *s,
			      loff_t *pos)
{
	struct mvx_fw *fw = s->private;
	int ret;

	ret = mutex_lock_interruptible(&fw->rpcmem_mutex);
	if (ret != 0)
		return ERR_PTR(-EINVAL);

	return mvx_seq_hash_start(fw->dev, fw->rpc_mem, HASH_SIZE(
					  fw->rpc_mem), *pos);
}

static void *rpcmem_seq_next(struct seq_file *s,
			     void *v,
			     loff_t *pos)
{
	struct mvx_fw *fw = s->private;

	return mvx_seq_hash_next(v, fw->rpc_mem, HASH_SIZE(fw->rpc_mem), pos);
}

static void rpcmem_seq_stop(struct seq_file *s,
			    void *v)
{
	struct mvx_fw *fw = s->private;

	mutex_unlock(&fw->rpcmem_mutex);
	mvx_seq_hash_stop(v);
}

/* Print one rpc_mem hash entry: VA, capacity and page count. */
static int rpcmem_seq_show(struct seq_file *s,
			   void *v)
{
	struct mvx_seq_hash_it *it = v;
	struct mvx_mmu_pages *pages = hlist_entry(it->node,
						  struct mvx_mmu_pages, node);

	if (pages == NULL)
		return 0;

	seq_printf(s, "va = %08x, cap = %08zu, count = %08zu\n",
		   pages->va, pages->capacity, pages->count);

	return 0;
}

static const struct seq_operations rpcmem_seq_ops = {
	.start = rpcmem_seq_start,
	.next = rpcmem_seq_next,
	.stop = rpcmem_seq_stop,
	.show = rpcmem_seq_show
};

/* debugfs open handler for the 'rpc_mem' file. */
static int rpcmem_open(struct inode *inode,
		       struct file *file)
{
	int ret;
	struct seq_file *s;
	struct mvx_fw *fw = inode->i_private;

	ret = seq_open(file, &rpcmem_seq_ops);
	if (ret != 0)
		return ret;

	s = file->private_data;
	s->private = fw;

	return 0;
}

static const struct file_operations rpcmem_fops = {
	.open = rpcmem_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release
};

/**
 * fw_debugfs_init() - Create debugfs entries for mvx_fw.
 * @fw:	Firmware object; fw->dentry receives the created directory.
 * @parent:	Parent debugfs directory.
 *
 * On any failure the whole 'fw' directory is removed again.
 *
 * Return: 0 on success, else error code.
 */
static int fw_debugfs_init(struct mvx_fw *fw,
			   struct dentry *parent)
{
	int ret;
	struct dentry *dentry;

	fw->dentry = debugfs_create_dir("fw", parent);
	if (IS_ERR_OR_NULL(fw->dentry))
		return -ENOMEM;

	dentry = debugfs_create_file("stat", 0400, fw->dentry, fw,
				     &fw_stat_fops);
	if (IS_ERR_OR_NULL(dentry)) {
		ret = -ENOMEM;
		goto remove_dentry;
	}

	if (fw->fw_bin->securevideo == false) {
		ret = mvx_mmu_pages_debugfs_init(fw->text, "text", fw->dentry);
		if (ret != 0)
			goto remove_dentry;

		ret = mvx_mmu_pages_debugfs_init(fw->bss, "bss", fw->dentry);
		if (ret != 0)
			goto remove_dentry;

		ret = mvx_mmu_pages_debugfs_init(fw->bss_shared, "bss_shared",
						 fw->dentry);
		if (ret != 0)
			goto remove_dentry;

		dentry = debugfs_create_file("rpc_mem", 0400, fw->dentry, fw,
					     &rpcmem_fops);
		if (IS_ERR_OR_NULL(dentry)) {
			ret = -ENOMEM;
			goto remove_dentry;
		}
	}

	return 0;

remove_dentry:
	debugfs_remove_recursive(fw->dentry);
	return ret;
}

/****************************************************************************
 * Exported functions
 ****************************************************************************/

/*
 * Construct a firmware object matching the binary's protocol version,
 * map its MMU tables and register debugfs entries.
 */
int mvx_fw_factory(struct mvx_fw *fw,
		   struct mvx_fw_bin *fw_bin,
		   struct mvx_mmu *mmu,
		   struct mvx_session *session,
		   struct mvx_client_ops *client_ops,
		   struct mvx_client_session *csession,
		   unsigned int core_mask,
		   struct dentry *parent)
{
	unsigned int major;
	unsigned int minor;
	unsigned int ncores;
	int ret;

	/* Verify that firmware loading was successful.
 */
	if ((fw_bin->securevideo == false &&
	     IS_ERR_OR_NULL(fw_bin->nonsecure.fw)) ||
	    (fw_bin->securevideo != false &&
	     IS_ERR_OR_NULL(fw_bin->secure.securefw))) {
		MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_WARNING,
			      "Firmware binary was loaded with error.");
		return -EINVAL;
	}

	/* The secure firmware must provide at least as many cores as requested. */
	ncores = hweight32(core_mask);
	if (fw_bin->securevideo != false &&
	    ncores > fw_bin->secure.securefw->ncores) {
		MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_WARNING,
			      "Too few secure cores setup. max_ncores=%u, ncores=%u.",
			      fw_bin->secure.securefw->ncores, ncores);
		return -EINVAL;
	}

	major = get_major_version(fw_bin);
	minor = get_minor_version(fw_bin);

	/* Call constructor for derived class based on protocol version. */
	switch (major) {
	case 2:
		ret = mvx_fw_construct_v2(fw, fw_bin, mmu, session, client_ops,
					  csession, core_mask, major, minor);
		if (ret != 0)
			return ret;

		break;
	case 3:
		ret = mvx_fw_construct_v3(fw, fw_bin, mmu, session, client_ops,
					  csession, core_mask, major, minor);
		if (ret != 0)
			return ret;

		break;
	default:
		MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_WARNING,
			      "Unsupported firmware interface revision. major=%u, minor=%u.",
			      major, minor);
		return -EINVAL;
	}

	/* Map up the MMU tables. */
	ret = fw_map(fw);
	if (ret != 0)
		return ret;

	if (IS_ENABLED(CONFIG_DEBUG_FS))
		ret = fw_debugfs_init(fw, parent);

	return ret;
}

/*
 * Base-class constructor: zero the object and store the session wiring.
 * Derived constructors (v2/v3) call this before installing their ops.
 */
int mvx_fw_construct(struct mvx_fw *fw,
		     struct mvx_fw_bin *fw_bin,
		     struct mvx_mmu *mmu,
		     struct mvx_session *session,
		     struct mvx_client_ops *client_ops,
		     struct mvx_client_session *csession,
		     unsigned int core_mask)
{
	memset(fw, 0, sizeof(*fw));

	fw->dev = fw_bin->dev;
	fw->mmu = mmu;
	fw->session = session;
	fw->client_ops = client_ops;
	fw->csession = csession;
	/*
	 * Other cores always read firmware instance 0 text_data (virtual
	 * address 0x0000_0000 - 0x0000_00018 as written in the original
	 * comment; presumably 0x0000_0018 — TODO confirm) at the beginning.
	 * Therefore core 0 (firmware instance 0) must be mapped even though
	 * it may be disallowed by the caller's mask.
	 */
	core_mask |= 1;
	fw->ncores = fls(core_mask); /* used in fw_map(); fls() so every enabled core gets mapped */
	fw->core_mask = core_mask;
	fw->fw_bin = fw_bin;
	mutex_init(&fw->mutex);
	mutex_init(&fw->rpcmem_mutex);
	mutex_init(&fw->mem_mutex);

	return 0;
}

void mvx_fw_destruct(struct mvx_fw *fw)
{
	if (IS_ENABLED(CONFIG_DEBUG_FS))
		debugfs_remove_recursive(fw->dentry);

	/* Release and unmap allocated pages. */
	fw_unmap(fw);
}
diff --git a/drivers/media/platform/cix/cix_vpu/if/mvx_firmware.h b/drivers/media/platform/cix/cix_vpu/if/mvx_firmware.h
new file mode 100755
index 000000000000..1bec45de3c5e
--- /dev/null
+++ b/drivers/media/platform/cix/cix_vpu/if/mvx_firmware.h
@@ -0,0 +1,1163 @@
/*
 * The confidential and proprietary information contained in this file may
 * only be used by a person authorised under and to the extent permitted
 * by a subsisting licensing agreement from Arm Technology (China) Co., Ltd.
 *
 * (C) COPYRIGHT 2021-2021 Arm Technology (China) Co., Ltd.
 * ALL RIGHTS RESERVED
 *
 * This entire notice must be reproduced on all copies of this file
 * and copies of this file may only be made by a person if such person is
 * permitted to do so under the terms of a subsisting license agreement
 * from Arm Technology (China) Co., Ltd.
 *
 * SPDX-License-Identifier: GPL-2.0-only
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + * + */ + +#ifndef _MVX_FIRMWARE_H_ +#define _MVX_FIRMWARE_H_ + +/**************************************************************************** + * Includes + ****************************************************************************/ + +#include +#include +#include +#include +#include +#include "mvx_if.h" +#include "mvx_buffer.h" + +/**************************************************************************** + * Defines + ****************************************************************************/ + +#define MVX_FW_HTABLE_BITS 3 +#define MVX_FW_QUANT_LEN 64 + +/**************************************************************************** + * Firmware communication types + ****************************************************************************/ + +/** + * enum mvx_fw_state - Firmware state. + */ +enum mvx_fw_state { + MVX_FW_STATE_STOPPED, + MVX_FW_STATE_RUNNING +}; + +/** + * struct mvx_fw_job - Job request. + * @cores: Number for cores to use. + * @frames: Number of frames to process before job is switched out. + */ +struct mvx_fw_job { + unsigned int cores; + unsigned int frames; +}; + +/** + * struct mvx_fw_qp_range - QP range. + */ +struct mvx_fw_qp_range { + int min; + int max; +}; + +/** + * struct mvx_fw_profile_level - Profile and level. + */ +struct mvx_fw_profile_level { + unsigned int profile; + unsigned int level; + unsigned int tier; +}; + +/** + * struct mvx_fw_tile - Tile size. + */ +struct mvx_fw_tile { + unsigned int rows; + unsigned int cols; +}; + +/** + * struct mvx_fw_mv - Motion vector search range. + */ +struct mvx_fw_mv { + unsigned int x; + unsigned int y; +}; + +/** + * struct mvx_fw_bitdepth - Bit depth. 
+ */ +struct mvx_fw_bitdepth { + unsigned int chroma; + unsigned int luma; +}; + +struct mvx_buffer_param_region +{ + uint16_t mbx_left; /**< X coordinate of the left most macroblock */ + uint16_t mbx_right; /**< X coordinate of the right most macroblock */ + uint16_t mby_top; /**< Y coordinate of the top most macroblock */ + uint16_t mby_bottom; /**< Y coordinate of the bottom most macroblock */ + int16_t qp_delta; /**< QP delta value. This region will be encoded + * with qp = qp_default + qp_delta. */ + uint8_t prio; /* the priority of this region */ + uint8_t force_intra; +}; + +struct mvx_roi_config +{ + unsigned int pic_index; + unsigned char qp_present; + unsigned char qp; + unsigned char roi_present; + unsigned char num_roi; + #define MVX_MAX_FRAME_REGIONS 16 + struct mvx_buffer_param_region roi[MVX_MAX_FRAME_REGIONS]; +}; + +struct mvx_buffer_param_rectangle +{ + uint16_t x_left; /* pixel x left edge (inclusive) */ + uint16_t x_right; /* pixel x right edge (exclusive) */ + uint16_t y_top; /* pixel y top edge (inclusive) */ + uint16_t y_bottom; /* pixel y bottom edge (exclusive) */ +}; + +struct mvx_chr_cfg +{ + unsigned int pic_index; + unsigned int num_chr; + #define MVX_MAX_FRAME_CHANGE_RECTANGLES 2 + struct mvx_buffer_param_rectangle rectangle[MVX_MAX_FRAME_CHANGE_RECTANGLES]; +}; + +struct mvx_huff_table +{ + uint32_t type; + #define MVX_OPT_HUFFMAN_TABLE_DC_LUMA (1) + #define MVX_OPT_HUFFMAN_TABLE_AC_LUMA (2) + #define MVX_OPT_HUFFMAN_TABLE_DC_CHROMA (4) + #define MVX_OPT_HUFFMAN_TABLE_AC_CHROMA (8) + uint8_t dc_luma_code_lenght[16]; + uint8_t ac_luma_code_lenght[16]; + uint8_t dc_chroma_code_lenght[16]; + uint8_t ac_chroma_code_lenght[16]; + uint8_t dc_luma_table[162]; + uint8_t ac_luma_table[162]; + uint8_t dc_chroma_table[162]; + uint8_t ac_chroma_table[162]; +}; + +struct mvx_buffer_param_rate_control +{ + uint32_t rate_control_mode; + #define MVX_OPT_RATE_CONTROL_MODE_OFF (0) + #define MVX_OPT_RATE_CONTROL_MODE_STANDARD (1) + #define 
MVX_OPT_RATE_CONTROL_MODE_VARIABLE (2) + #define MVX_OPT_RATE_CONTROL_MODE_CONSTANT (3) + #define MVX_OPT_RATE_CONTROL_MODE_C_VARIABLE (4) + uint32_t target_bitrate; /* in bits per second */ + uint32_t maximum_bitrate; /* in bits per second */ +}; + +struct mvx_buffer_option_jpeg_rate_control +{ + uint32_t qscale; + uint32_t qscale_luma; + uint32_t qscale_chroma; + uint32_t fps; +}; + +struct mvx_dsl_frame{ + uint32_t width; + uint32_t height; +}; + +struct mvx_dsl_ratio{ + uint32_t hor; + uint32_t ver; +}; + +struct mvx_long_term_ref{ + uint32_t mode; + uint32_t period; +}; + +struct mvx_color_conv_coef +{ + int16_t coef[3][3]; + uint16_t offset[3]; +}; + +struct mvx_rgb2yuv_color_conv_coef +{ + int16_t coef[3 * 3]; + uint8_t luma_range[2]; + uint8_t chroma_range[2]; + uint8_t rgb_range[2]; +}; + +struct mvx_crop_cfg +{ + uint8_t crop_en; + /* left start x of luma in original image */ + uint16_t x; //included + /* top start y of luma in original image */ + uint16_t y; //included + /* cropped width of luma in original image */ + uint16_t width; + /* cropped height of luma in original image */ + uint16_t height; +}; + +struct mvx_seamless_target{ + uint32_t seamless_mode; + uint32_t target_width; + uint32_t target_height; + uint32_t target_stride[MVX_BUFFER_NPLANES]; + uint32_t target_size[MVX_BUFFER_NPLANES]; +}; + +struct mvx_param_osd_cfg{ + uint8_t osd_inside_enable; + uint8_t osd_inside_alpha_enable; + uint8_t osd_inside_convert_color_enable; + uint8_t osd_inside_alpha_value; /* as alpha range [0~16], use u8 */ + uint8_t osd_inside_convert_color_threshold;/* threshold range [0~255], if input is 10bit, th * 4 */ + uint8_t osd_inside_rgb2yuv_mode;/* 0-601L, 1-601F, 2-709_L, 3-709_F */ + uint16_t osd_inside_start_x; /* pixel x left edge (inclusive) */ + uint16_t osd_inside_start_y; /* pixel y top edge (inclusive) */ + uint16_t reserved[3]; +}; + +struct mvx_osd_config{ + unsigned int pic_index; + unsigned int num_osd; + #define MVX_MAX_FRAME_OSD_REGION 2 + struct 
mvx_param_osd_cfg osd_single_cfg[MVX_MAX_FRAME_OSD_REGION];/* include single osd region config and index */ +}; + +struct mvx_osd_info{ + uint16_t width_osd[MVX_MAX_FRAME_OSD_REGION]; + uint16_t height_osd[MVX_MAX_FRAME_OSD_REGION]; + enum mvx_format inputFormat_osd[MVX_MAX_FRAME_OSD_REGION]; +}; + +/** + * struct mvx_fw_error - Firmware error message. + * @error_code: What kind of error that was reported. + * @message: Error message string. + */ +struct mvx_fw_error { + enum { + MVX_FW_ERROR_ABORT, + MVX_FW_ERROR_OUT_OF_MEMORY, + MVX_FW_ERROR_ASSERT, + MVX_FW_ERROR_UNSUPPORTED, + MVX_FW_ERROR_INVALID_BUFFER, + MVX_FW_ERROR_INVALID_STATE, + MVX_FW_ERROR_WATCHDOG, + MVX_FW_ERROR_INVALID_PARAM + } error_code; + char message[128]; +}; + +/** + * struct mvx_fw_flush - Flush firmware buffers. + * @dir: Which port to flush. + */ +struct mvx_fw_flush { + enum mvx_direction dir; +}; + +/** + * struct mvx_fw_alloc_param - Allocation parameters. + * @width: Width in pixels. + * @height: Height in pixels. + * @afbc_alloc_bytes: AFBC buffer size. + * @afbc_width: AFBC width in superblocks. + * + * Dimensions of a decoded frame buffer. + */ +struct mvx_fw_alloc_param { + unsigned int width; + unsigned int height; + unsigned int afbc_alloc_bytes; + unsigned int afbc_width; + unsigned int afbc_alloc_bytes_downscaled; + unsigned int afbc_width_in_superblocks_downscaled; + unsigned int cropx; + unsigned int cropy; +}; + +/** + * struct mvx_fw_seq_param - Sequence parameters. + * @planar.buffers_min: Minimum number of planar buffers required. + * @afbc.buffers_min: Minimum number of AFBC buffers required. 
 */
struct mvx_fw_seq_param {
	struct {
		unsigned int buffers_min;
	} planar;
	struct {
		unsigned int buffers_min;
	} afbc;

	unsigned int bitdepth_luma;
	unsigned int bitdepth_chroma;
	unsigned int chroma_format;
};

/* Video quantization range signalled by the stream. */
enum mvx_fw_range {
	MVX_FW_RANGE_UNSPECIFIED,
	MVX_FW_RANGE_FULL,
	MVX_FW_RANGE_LIMITED
};

/* Colour primaries signalled by the stream. */
enum mvx_fw_primaries {
	MVX_FW_PRIMARIES_UNSPECIFIED,
	MVX_FW_PRIMARIES_BT709,		/* Rec.ITU-R BT.709 */
	MVX_FW_PRIMARIES_BT470M,	/* Rec.ITU-R BT.470 System M */
	MVX_FW_PRIMARIES_BT601_625,	/* Rec.ITU-R BT.601 625 */
	MVX_FW_PRIMARIES_BT601_525,	/* Rec.ITU-R BT.601 525 */
	MVX_FW_PRIMARIES_GENERIC_FILM,	/* Generic Film */
	MVX_FW_PRIMARIES_BT2020		/* Rec.ITU-R BT.2020 */
};

/* Transfer characteristics signalled by the stream. */
enum mvx_fw_transfer {
	MVX_FW_TRANSFER_UNSPECIFIED,
	MVX_FW_TRANSFER_LINEAR,		/* Linear transfer characteristics */
	MVX_FW_TRANSFER_SRGB,		/* sRGB or equivalent */
	MVX_FW_TRANSFER_SMPTE170M,	/* SMPTE 170M */
	MVX_FW_TRANSFER_GAMMA22,	/* Assumed display gamma 2.2 */
	MVX_FW_TRANSFER_GAMMA28,	/* Assumed display gamma 2.8 */
	MVX_FW_TRANSFER_ST2084,		/* SMPTE ST 2084 */
	MVX_FW_TRANSFER_HLG,		/* ARIB STD-B67 hybrid-log-gamma */
	MVX_FW_TRANSFER_SMPTE240M,	/* SMPTE 240M */
	MVX_FW_TRANSFER_XVYCC,		/* IEC 61966-2-4 */
	MVX_FW_TRANSFER_BT1361,		/* Rec.ITU-R BT.1361 extended gamut */
	MVX_FW_TRANSFER_ST428		/* SMPTE ST 428-1 */
};

/* Matrix coefficients signalled by the stream. */
enum mvx_fw_matrix {
	MVX_FW_MATRIX_UNSPECIFIED,
	MVX_FW_MATRIX_BT709,		/* Rec.ITU-R BT.709 */
	MVX_FW_MATRIX_BT470M,		/* KR=0.30, KB=0.11 */
	MVX_FW_MATRIX_BT601,		/* Rec.ITU-R BT.601 625 */
	MVX_FW_MATRIX_SMPTE240M,	/* SMPTE 240M or equivalent */
	MVX_FW_MATRIX_BT2020,		/* Rec.ITU-R BT.2020 non-const lum */
	MVX_FW_MATRIX_BT2020Constant	/* Rec.ITU-R BT.2020 const lum */
};

/* One chromaticity coordinate pair. */
struct mvx_fw_primary {
	unsigned int x;
	unsigned int y;
};

/**
 * struct mvx_fw_color_desc - HDR color description.
+ */ +struct mvx_fw_color_desc { + unsigned int flags; + uint8_t range; /* Unspecified=0, Limited=1, Full=2 */ + uint8_t colour_primaries; /* see hevc spec. E.3.1 */ + uint8_t transfer_characteristics; /* see hevc spec. E.3.1 */ + uint8_t matrix_coeff; /* see hevc spec. E.3.1 */ + struct { + struct mvx_fw_primary r; + struct mvx_fw_primary g; + struct mvx_fw_primary b; + struct mvx_fw_primary w; + unsigned int luminance_min; + unsigned int luminance_max; + } display; + struct { + unsigned int luminance_max; + unsigned int luminance_average; + } content; + + uint8_t video_format; + uint8_t aspect_ratio_idc; + uint16_t sar_width; + uint16_t sar_height; + uint32_t num_units_in_tick; + uint32_t time_scale; +}; + +struct mvx_fw_display_size { + uint16_t display_width; + uint16_t display_height; +}; + +struct mvx_sei_userdata{ + uint8_t flags; + #define MVX_BUFFER_PARAM_USER_DATA_UNREGISTERED_VALID (1) + uint8_t uuid[16]; + char user_data[256 - 35]; + uint8_t user_data_len; +}; + +struct mvx_buffer_param_qp{ + int32_t qp; + int32_t epr_iframe_enable; +}; + +struct mvx_lambda_scale{ + uint16_t lambda_scale_i_q8; + uint16_t lambda_scale_sqrt_i_q8; + uint16_t lambda_scale_p_q8; + uint16_t lambda_scale_sqrt_p_q8; + uint16_t lambda_scale_b_ref_q8; + uint16_t lambda_scale_sqrt_b_ref_q8; + uint16_t lambda_scale_b_nonref_q8; + uint16_t lambda_scale_sqrt_b_nonref_q8; +}; +/** + * struct mvx_fw_set_option - Set firmware options. + */ +struct mvx_fw_set_option { + enum { + /** + * Frame rate. + * Extra data: frame_date. + */ + MVX_FW_SET_FRAME_RATE, + + /** + * Bitrate. + * Extra data: target_bitrate. + * + * When target_bitrate is other than zero, rate control + * in HW is enabled, otherwise rate control is disabled. + */ + MVX_FW_SET_TARGET_BITRATE, + + /** + * QP range. + * Extra data: qp_range. + * + * QP range when rate controller is enabled. + */ + MVX_FW_SET_QP_RANGE, + + /** + * NALU format. + * Extra data: nalu_format. 
+ */ + MVX_FW_SET_NALU_FORMAT, + + /** + * Defines if stream escaping is enabled. + * Extra data: stream_escaping. + */ + MVX_FW_SET_STREAM_ESCAPING, + + /** + * Defines profile and level for encoder. + * Extra data: profile_level. + */ + MVX_FW_SET_PROFILE_LEVEL, + + /** + * Ignore stream headers. + * Extra data: ignore_stream_headers. + */ + MVX_FW_SET_IGNORE_STREAM_HEADERS, + + /** + * Enable frame reordering for decoder. + * Extra data: frame_reordering. + */ + MVX_FW_SET_FRAME_REORDERING, + + /** + * Suggested internal buffer size. + * Extra data: intbuf_size. + */ + MVX_FW_SET_INTBUF_SIZE, + + /** + * Number of P frames for encoder. + * Extra data: pb_frames. + */ + MVX_FW_SET_P_FRAMES, + + /** + * Number of B frames for encoder. + * Extra data: pb_frames. + */ + MVX_FW_SET_B_FRAMES, + + /** + * GOP type for encoder. + * Extra data: gop_type. + */ + MVX_FW_SET_GOP_TYPE, + + /** + * Intra MB refresh. + * Extra data: intra_mb_refresh. + */ + MVX_FW_SET_INTRA_MB_REFRESH, + + /** + * Constrained intra prediction. + * Extra data: constr_ipred. + */ + MVX_FW_SET_CONSTR_IPRED, + + /** + * Enable entropy synchronization. + * Extra data: entropy_sync. + */ + MVX_FW_SET_ENTROPY_SYNC, + + /** + * Enable temporal motion vector prediction. + * Extra data: temporal_mvp. + */ + MVX_FW_SET_TEMPORAL_MVP, + + /** + * Tiles size. + * Extra data: tile. + */ + MVX_FW_SET_TILES, + + /** + * Minimum luma coding block size. + * Extra data: min_luma_cb_size. + */ + MVX_FW_SET_MIN_LUMA_CB_SIZE, + + /** + * Entropy mode. + * Extra data: entropy_mode. + */ + MVX_FW_SET_ENTROPY_MODE, + + /** + * Suggested number of CTUs in a slice. + * Extra data: slice_spacing_mb. + */ + MVX_FW_SET_SLICE_SPACING_MB, + + /** + * Probability update method. + * Extra data: vp9_prob_update. + */ + MVX_FW_SET_VP9_PROB_UPDATE, + + /** + * Search range for motion vectors. + * Extra data: mv. + */ + MVX_FW_SET_MV_SEARCH_RANGE, + + /** + * Bitdepth. + * Extra data: bitdepth. 
+ */ + MVX_FW_SET_BITDEPTH, + + /** + * Chroma format. + * Extra data: chroma_format. + */ + MVX_FW_SET_CHROMA_FORMAT, + + /** + * RGB to YUV conversion mode. + * Extra data: rgb_to_yuv_mode. + */ + MVX_FW_SET_RGB_TO_YUV_MODE, + + /** + * Maximum bandwidth limit. + * Extra data: band_limit. + */ + MVX_FW_SET_BAND_LIMIT, + + /** + * CABAC initialization table. + * Extra data: cabac_init_idc. + */ + MVX_FW_SET_CABAC_INIT_IDC, + + /** + * QP for I frames when rate control is disabled. + * Extra data: qp + */ + MVX_FW_SET_QP_I, + + /** + * QP for P frames when rate control is disabled. + * Extra data: qp + */ + MVX_FW_SET_QP_P, + + /** + * QP for B frames when rate control is disabled. + * Extra data: qp + */ + MVX_FW_SET_QP_B, + + /** + * JPEG resync interval. + * Extra data: resync_interval + */ + MVX_FW_SET_RESYNC_INTERVAL, + + /** + * JPEG quantization table. + * Extra data: quant_tbl. + */ + MVX_FW_SET_QUANT_TABLE, + + /** + * Set watchdog timeout. 0 to disable. + */ + MVX_FW_SET_WATCHDOG_TIMEOUT, + + /** + * QP for encode frame. + * Extra data: qp + */ + MVX_FW_SET_QP_REGION, + + /** + * ROI for encode frame. + * Extra data: ROI + */ + MVX_FW_SET_ROI_REGIONS, + + /** + * Rate Control for encode frame. + * Extra data: rate control + */ + MVX_FW_SET_RATE_CONTROL, + /** + * Crop left for encode frame. + * Extra data: crop left + */ + MVX_FW_SET_CROP_LEFT, + /** + * Crop right for encode frame. + * Extra data: crop right + */ + MVX_FW_SET_CROP_RIGHT, + /** + * Crop top for encode frame. + * Extra data: crop top + */ + MVX_FW_SET_CROP_TOP, + /** + * Crop bottom for encode frame. 
+ * Extra data: crop bottom + */ + MVX_FW_SET_CROP_BOTTOM, + + MVX_FW_SET_COLOUR_DESC, + + MVX_FW_SET_SEI_USERDATA, + + MVX_FW_SET_HRD_BUF_SIZE, + + MVX_FW_SET_DSL_FRAME, + + MVX_FW_SET_LONG_TERM_REF, + + MVX_FW_SET_DSL_MODE, + + MVX_FW_SET_MINI_FRAME_CNT, + + MVX_FW_SET_STATS_MODE, + + MVX_FW_SET_CHR_CFG, + + MVX_FW_SET_INIT_QP_I, + MVX_FW_SET_INIT_QP_P, + MVX_FW_SET_SAO_LUMA, + MVX_FW_SET_SAO_CHROMA, + MVX_FW_SET_QP_DELTA_I_P, + MVX_FW_SET_QP_REF_RB_EN, + MVX_FW_SET_RC_CLIP_TOP, + MVX_FW_SET_RC_CLIP_BOT, + MVX_FW_SET_QP_MAP_CLIP_TOP, + MVX_FW_SET_QP_MAP_CLIP_BOT, + MVX_FW_SET_QP_RANGE_I, + MVX_FW_SET_PROFILING, + MVX_FW_SET_HUFF_TABLE, + MVX_FW_SET_RC_BIT_I_MODE, + MVX_FW_SET_RC_BIT_I_RATIO, + MVX_FW_SET_INTER_MED_BUF_SIZE, + MVX_FW_SET_SVCT3_LEVEL1_PERIOD, + MVX_FW_SET_GOP_PFRAMES, + MVX_FW_SET_RATE_CONTROL_JPEG, + MVX_FW_SET_LTR_PERIOD, + MVX_FW_SET_FIXED_QP, + MVX_FW_SET_GDR_NUMBER, + MVX_FW_SET_GDR_PERIOD, + MVX_FW_SET_MULIT_SPS_PPS, + MVX_FW_SET_VISUAL_ENABLE, + MVX_FW_SET_ADPTIVE_QUANTISATION, + MVX_FW_SET_EPR_QP, + MVX_FW_SET_DISABLE_FEATURES, + MVX_FW_SET_SCD_ENABLE, + MVX_FW_SET_SCD_PERCENT, + MVX_FW_SET_SCD_THRESHOLD, + MVX_FW_SET_AQ_SSIM_EN, + MVX_FW_SET_AQ_NEG_RATIO, + MVX_FW_SET_AQ_POS_RATIO, + MVX_FW_SET_AQ_QPDELTA_LMT, + MVX_FW_SET_AQ_INIT_FRM_AVG_SVAR, + MVX_FW_SET_DEC_YUV2RGB_PARAMS, + MVX_FW_SET_ENC_FORCED_UV_VAL, + MVX_FW_SET_DSL_INTERP_MODE, + MVX_FW_SET_ENC_SRC_CROPPING, + MVX_FW_SET_DEC_DST_CROPPING, + MVX_FW_SET_VISUAL_ENABLE_ADAPTIVE_INTRA_BLOCK, + MVX_FW_SET_OSD_CONFIG, + MVX_FW_SET_FSF_MODE, + MVX_FW_SET_ENC_LAMBDA_SCALE, + MVX_FW_SET_ENC_INTRA_IPENALTY_ANGULAR, + MVX_FW_SET_ENC_INTRA_IPENALTY_PLANAR, + MVX_FW_SET_ENC_INTRA_IPENALTY_DC, + MVX_FW_SET_ENC_INTER_IPENALTY_ANGULAR, + MVX_FW_SET_ENC_INTER_IPENALTY_PLANAR, + MVX_FW_SET_ENC_INTER_IPENALTY_DC, + } code; + + /** + * Extra data for an option. 
+ */ + union { + unsigned int frame_rate; + unsigned int target_bitrate; + struct mvx_fw_qp_range qp_range; + enum mvx_nalu_format nalu_format; + bool stream_escaping; + struct mvx_fw_profile_level profile_level; + bool ignore_stream_headers; + bool frame_reordering; + unsigned int intbuf_size; + unsigned int pb_frames; + enum mvx_gop_type gop_type; + unsigned int intra_mb_refresh; + bool constr_ipred; + bool entropy_sync; + bool temporal_mvp; + struct mvx_fw_tile tile; + unsigned int min_luma_cb_size; + enum mvx_entropy_mode entropy_mode; + unsigned int slice_spacing_mb; + enum mvx_vp9_prob_update vp9_prob_update; + struct mvx_fw_mv mv; + struct mvx_fw_bitdepth bitdepth; + unsigned int chroma_format; + enum mvx_rgb_to_yuv_mode rgb_to_yuv_mode; + unsigned int band_limit; + unsigned int cabac_init_idc; + int qp; + int resync_interval; + struct { + uint8_t *chroma; + uint8_t *luma; + } quant_tbl; + int watchdog_timeout; + struct mvx_roi_config roi_config; + struct mvx_buffer_param_rate_control rate_control; + unsigned int crop_left; + unsigned int crop_right; + unsigned int crop_top; + unsigned int crop_bottom; + struct mvx_fw_color_desc colour_desc; + struct mvx_sei_userdata userdata; + unsigned int nHRDBufsize; + struct mvx_dsl_frame dsl_frame; + struct mvx_long_term_ref ltr; + int dsl_pos_mode; + uint32_t mini_frame_cnt; + struct mvx_enc_stats enc_stats; + struct mvx_chr_cfg chr_cfg; + uint32_t init_qpi; + uint32_t init_qpp; + uint32_t sao_luma; + uint32_t sao_chroma; + uint32_t qp_delta_i_p; + uint32_t ref_rb_en; + uint32_t qpmap_qp_clip_top; + uint32_t qpmap_qp_clip_bot; + uint32_t rc_qp_clip_top; + uint32_t rc_qp_clip_bot; + uint32_t profiling; + struct mvx_huff_table huff_table; + uint32_t rc_bit_i_mode; + uint32_t rc_bit_i_ratio; + uint32_t inter_med_buf_size; + uint32_t svct3_level1_period; + uint32_t reset_gop_pframes; + struct mvx_buffer_option_jpeg_rate_control jpeg_rc; + uint32_t reset_ltr_period; + uint32_t fixedqp; + uint32_t gdr_number; + uint32_t 
gdr_period; + uint32_t mulit_sps_pps; + uint32_t enable_visual; + uint32_t adapt_qnt; + struct mvx_buffer_param_qp epr_qp; + uint32_t disabled_features; + uint32_t scd_enable; + uint32_t scd_percent; + uint32_t scd_threshold; + uint32_t aq_ssim_en; + uint32_t aq_neg_ratio; + uint32_t aq_pos_ratio; + uint32_t aq_qpdelta_lmt; + uint32_t aq_init_frm_avg_svar; + struct mvx_color_conv_coef yuv2rbg_csc_coef; + struct mvx_rgb2yuv_color_conv_coef rgb2yuv_params; + uint32_t forced_uv_value; + uint32_t dsl_interp_mode; + struct mvx_crop_cfg crop; + uint32_t adaptive_intra_block; + struct mvx_osd_config osd_config; + uint32_t fsf_mode; + uint32_t intra_ipenalty_angular; + uint32_t intra_ipenalty_planar; + uint32_t intra_ipenalty_dc; + uint32_t inter_ipenalty_angular; + uint32_t inter_ipenalty_planar; + uint32_t inter_ipenalty_dc; + struct mvx_lambda_scale lambda_scale; + }; +}; +#define MVX_FW_COLOR_DESC_DISPLAY_VALID 0x1 +#define MVX_FW_COLOR_DESC_CONTENT_VALID 0x2 + +/** + * enum mvx_fw_code - Codes for messages sent between driver and firmware. + */ +enum mvx_fw_code { + MVX_FW_CODE_ALLOC_PARAM, /* Driver <- Firmware. */ + MVX_FW_CODE_BUFFER, /* Driver <-> Firmware. */ + MVX_FW_CODE_ERROR, /* Driver <- Firmware. */ + MVX_FW_CODE_IDLE, /* Driver <- Firmware. */ + MVX_FW_CODE_FLUSH, /* Driver <-> Firmware. */ + MVX_FW_CODE_JOB, /* Driver -> Firmware. */ + MVX_FW_CODE_PING, /* Driver -> Firmware. */ + MVX_FW_CODE_PONG, /* Driver <- Firmware. */ + MVX_FW_CODE_SEQ_PARAM, /* Driver <- Firmware. */ + MVX_FW_CODE_SET_OPTION, /* Driver <-> Firmware. */ + MVX_FW_CODE_STATE_CHANGE, /* Driver <-> Firmware. */ + MVX_FW_CODE_SWITCH_IN, /* Driver <- Firmware. */ + MVX_FW_CODE_SWITCH_OUT, /* Driver <-> Firmware. */ + MVX_FW_CODE_IDLE_ACK, /* Driver -> Firmware. */ + MVX_FW_CODE_EOS, /* Driver <-> Firmware. */ + MVX_FW_CODE_COLOR_DESC, /* Driver <- Firmware. */ + MVX_FW_CODE_DUMP, /* Driver <-> Firmware. */ + MVX_FW_CODE_DEBUG, /* Driver <-> Firmware. 
*/ + MVX_FW_CODE_BUFFER_GENERAL, /* Driver <-> Firmware. */ + MVX_FW_CODE_DISPLAY_SIZE, /* Driver <- Firmware. */ + MVX_FW_CODE_UNKNOWN, /* Driver <- Firmware. */ + MVX_FW_CODE_MAX +}; + +/** + * struct mvx_fw_msg - Union of all message types. + */ +struct mvx_fw_msg { + enum mvx_fw_code code; + union { + enum mvx_fw_state state; + struct mvx_fw_job job; + struct mvx_fw_error error; + struct mvx_fw_set_option set_option; + struct mvx_fw_flush flush; + struct mvx_fw_alloc_param alloc_param; + struct mvx_fw_seq_param seq_param; + struct mvx_fw_color_desc color_desc; + struct mvx_buffer *buf; + struct mvx_fw_display_size disp_size; + uint32_t arg; + bool eos_is_frame; + }; +}; + +/**************************************************************************** + * Types + ****************************************************************************/ + +struct device; +struct mvx_fw_bin; +struct mvx_mmu; +struct mvx_mmu_pages; +struct mvx_session; + +/** + * enum mvx_fw_region - Firmware memory regions. + */ +enum mvx_fw_region { + MVX_FW_REGION_CORE_0, + MVX_FW_REGION_CORE_1, + MVX_FW_REGION_CORE_2, + MVX_FW_REGION_CORE_3, + MVX_FW_REGION_CORE_4, + MVX_FW_REGION_CORE_5, + MVX_FW_REGION_CORE_6, + MVX_FW_REGION_CORE_7, + MVX_FW_REGION_PROTECTED, + MVX_FW_REGION_FRAMEBUF, + MVX_FW_REGION_MSG_HOST, + MVX_FW_REGION_MSG_MVE, + MVX_FW_REGION_BUF_IN_HOST, + MVX_FW_REGION_BUF_IN_MVE, + MVX_FW_REGION_BUF_OUT_HOST, + MVX_FW_REGION_BUF_OUT_MVE, + MVX_FW_REGION_RPC, + MVX_FW_REGION_PRINT_RAM +}; + +/** + * struct mvx_fw - Firmware class. + * @dev: Pointer to device. + * @fw_bin: Pointer to firmware binary. + * @mmu: Pointer to MMU object. + * @session: Pointer to session. + * @client_ops: Client operations. + * @csession: Client session this firmware instance is connected to. + * @text: Pages allocated for the text segment. + * @bss: Pages allocated for the bss segment. + * @bss_shared: Pages allocated for the shared bss segment. + * @dentry: Debugfs entry for the "fw" directory. 
+ * @msg_host: Host message queue. + * @msg_mve: MVE message queue. + * @buf_in_host: Input buffer queue. Host enqueue filled buffers. + * @buf_in_mve: Input buffer queue. MVE return empty buffers. + * @buf_out_host: Output buffer queue. Host enqueue empty buffers. + * @buf_out_mve: Out buffer queue. MVE return filled buffers. + * @rpc: RPC communication area. + * @ncores: Number of cores the firmware has been mapped for. + * @core_mask: Core mask of the firmware has been mapped for. + * @rpc_mem: Keeps track of RPC allocated memory. Maps MVE virtual + * address to 'struct mvx_mmu_pages' object. + * @msg_pending: A subset of the messages that we are waiting for a + * response to. + * @ops: Public firmware interface. + * @ops_priv: Private firmware interface. + * + * There is one firmware instance per active session. The function pointers + * below are not reentrant and should be protected by the session mutex. + */ +struct mvx_fw { + struct device *dev; + const struct mvx_fw_bin *fw_bin; + struct mvx_mmu *mmu; + struct mvx_session *session; + struct mvx_client_ops *client_ops; + struct mvx_client_session *csession; + struct sg_table *print_ram_sgt; + struct mvx_mmu_pages *print_ram_pages; + struct mvx_mmu_pages *text; + struct mvx_mmu_pages *bss; + struct mvx_mmu_pages *bss_shared; + struct dentry *dentry; + void *msg_host; + void *msg_mve; + void *buf_in_host; + void *buf_in_mve; + void *buf_out_host; + void *buf_out_mve; + void *rpc; + void *fw_print_ram; + unsigned int ncores; + unsigned int core_mask; + DECLARE_HASHTABLE(rpc_mem, MVX_FW_HTABLE_BITS); + struct mutex mutex; + struct mutex rpcmem_mutex; + struct mutex mem_mutex; + unsigned int msg_pending; + uint32_t next_va_region_protected; + uint32_t next_va_region_outbuf; + uint32_t msg_mve_sum; + uint32_t host_msg_sum; + uint32_t host_input_buf_sum; + uint32_t host_output_buf_sum; + uint32_t switched_in; + uint32_t job_dequeued; + + struct { + /** + * map_protocol() - MMU map firmware. 
+ * @fw: Pointer to firmware object. + */ + int (*map_protocol)(struct mvx_fw *fw); + + /** + * unmap_protocol() - MMU unmap firmware. + * @fw: Pointer to firmware object. + */ + void (*unmap_protocol)(struct mvx_fw *fw); + + /** + * get_region() - Get begin and end address for memory region. + * @region: Which memory region to get addresses for. + * @begin: MVE virtual begin address. + * @end: MVE virtual end address. + * + * Return: 0 on success, else error code. + */ + int (*get_region)(enum mvx_fw_region region, + uint32_t *begin, + uint32_t *end); + + /** + * get_message() - Read message from firmware message queue. + * @fw: Pointer to firmware object. + * @msg: Firmware message. + * + * Return: 1 if message was received, 0 if no message was + * received, else error code. + */ + int (*get_message)(struct mvx_fw *fw, + struct mvx_fw_msg *msg); + + /** + * put_message() - Write message to firmware message queue. + * @fw: Pointer to firmware object. + * @msg: Firmware message. + * + * Return: 0 on success, else error code. + */ + int (*put_message)(struct mvx_fw *fw, + struct mvx_fw_msg *msg); + + /** + * handle_rpc() - Handle RPC message. + * @fw: Pointer to firmware object. + * + * Return: 1 RPC message handled, 0 no RPC message, + * else error code. + */ + int (*handle_rpc)(struct mvx_fw *fw); + + /** + * handle_fw_ram_print() - Print firmware log from share ram. + * @fw: Pointer to firmware object. + * + * Return: 1 FW ram log printed, 0 no FW ram log printed, + * else error code. + */ + int (*handle_fw_ram_print)(struct mvx_fw *fw); + + /** + * print_stat() - Print debug stats to seq-file. + * @fw: Pointer to firmware object. + * @ind: Indentation level. + * s: Pointer to seq-file. + * + * Return: 0 on success, else error code. + */ + int (*print_stat)(struct mvx_fw *fw, + int ind, + struct seq_file *s); + + /** + * print_debug() - Print debug information. + * @fw: Pointer to firmware object. 
+ */ + void (*print_debug)(struct mvx_fw *fw); + } ops; + + struct { + /** + * send_idle_ack() - Send IDLE ACK message. + * @fw: Pointer to firmware object. + * + * IDLE ACK message will be sent to the firmware if it is + * supported by a host protocol, otherwise the call will be + * ignored. + * + * Return: 0 on success, else error code. + */ + int (*send_idle_ack)(struct mvx_fw *fw); + + /** + * to_mve_profile() - Convert MVX profile to MVE value. + * @mvx_profile: MVX profile. + * @mve_profile: MVE profile. + * + * Return: 0 on success, else error code. + */ + int (*to_mve_profile)(unsigned int mvx_profile, + uint16_t *mve_profile); + + /** + * to_mve_level() - Convert MVX level to MVE value. + * @mvx_level: MVX level. + * @mvx_tier: MVX tier. + * @mve_level: MVE level. + * + * Return: 0 on success, else error code. + */ + int (*to_mve_level)(unsigned int mvx_level, + unsigned int mvx_tier, + uint16_t *mve_level); + } ops_priv; +}; + +/**************************************************************************** + * Exported functions + ****************************************************************************/ + +/** + * mvx_fw_factory() - Construct a firmware object. + * @fw: Pointer to fw. + * @fw_bin: Pointer to firmware binary. + * @mmu: Pointer to MMU instance. + * @session: Pointer to session. + * @client_ops: Pointer to client operations. + * @csession: Client session this firmware instance is registered to. + * @core_mask: Core mask to configure. + * @parent: Debugfs entry for parent debugfs directory entry. + * + * Return: 0 on success, else error code. + */ +int mvx_fw_factory(struct mvx_fw *fw, + struct mvx_fw_bin *fw_bin, + struct mvx_mmu *mmu, + struct mvx_session *session, + struct mvx_client_ops *client_ops, + struct mvx_client_session *csession, + unsigned int core_mask, + struct dentry *parent); + +/** + * mvx_fw_destruct() - Destroy firmware interface instance. + * @fw: Pointer to fw. 
+ */ +void mvx_fw_destruct(struct mvx_fw *fw); + +#endif /* _MVX_FIRMWARE_H_ */ diff --git a/drivers/media/platform/cix/cix_vpu/if/mvx_firmware_cache.c b/drivers/media/platform/cix/cix_vpu/if/mvx_firmware_cache.c new file mode 100755 index 000000000000..f9a15dd8e679 --- /dev/null +++ b/drivers/media/platform/cix/cix_vpu/if/mvx_firmware_cache.c @@ -0,0 +1,783 @@ +/* + * The confidential and proprietary information contained in this file may + * only be used by a person authorised under and to the extent permitted + * by a subsisting licensing agreement from Arm Technology (China) Co., Ltd. + * + * (C) COPYRIGHT 2021-2021 Arm Technology (China) Co., Ltd. + * ALL RIGHTS RESERVED + * + * This entire notice must be reproduced on all copies of this file + * and copies of this file may only be made by a person if such person is + * permitted to do so under the terms of a subsisting license agreement + * from Arm Technology (China) Co., Ltd. + * + * SPDX-License-Identifier: GPL-2.0-only + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ * + */ + +/**************************************************************************** + * Includes + ****************************************************************************/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "mvx_log_group.h" +#include "mvx_firmware_cache.h" +#include "mvx_log_ram.h" +#include "mvx_mmu.h" +#include "mvx_secure.h" +#include "mvx_seq.h" + +/**************************************************************************** + * Defines + ****************************************************************************/ + +#define CACHE_CLEANUP_INTERVAL_MS 5000 + +#define MVX_SECURE_NUMCORES 4 + +/**************************************************************************** + * Private functions + ****************************************************************************/ + +/* + * Backwards compliance with older kernels. + */ +#if (KERNEL_VERSION(4, 11, 0) > LINUX_VERSION_CODE) +static unsigned int kref_read(const struct kref *kref) +{ + return atomic_read(&kref->refcount); +} + +#endif + +/** + * test_bit_32() - 32 bit version Linux test_bit. + * + * Test if bit is set in bitmap array. + */ +static bool test_bit_32(int bit, + uint32_t *addr) +{ + return 0 != (addr[bit >> 5] & (1 << (bit & 0x1f))); +} + +/** + * hw_id_to_name() - Convert HW id to string + */ +static const char *hw_id_to_string(enum mvx_hw_id id) +{ + switch (id) { + case MVE_v500: + return "v500"; + case MVE_v550: + return "v550"; + case MVE_v61: + return "v61"; + case MVE_v52_v76: + return "v52_v76"; + default: + return "unknown"; + } +} + +/** + * get_fw_name() - Return the file name for the requested format and direction. + * + * This function will neither check if there is hardware support nor if the + * firmware binary is available on the file system. 
+ */ +static int get_fw_name(char *filename, + size_t size, + enum mvx_format format, + enum mvx_direction dir, + struct mvx_hw_ver *hw_ver) +{ + const char *codec = NULL; + const char *enc_dec = (dir == MVX_DIR_INPUT) ? "dec" : "enc"; + size_t n; + + switch (format) { + case MVX_FORMAT_H263: + codec = "mpeg4"; + break; + case MVX_FORMAT_H264: + codec = "h264"; + break; + case MVX_FORMAT_HEVC: + codec = "hevc"; + break; + case MVX_FORMAT_JPEG: + codec = "jpeg"; + break; + case MVX_FORMAT_MPEG2: + codec = "mpeg2"; + break; + case MVX_FORMAT_MPEG4: + codec = "mpeg4"; + break; + case MVX_FORMAT_RV: + codec = "rv"; + break; + case MVX_FORMAT_VC1: + codec = "vc1"; + break; + case MVX_FORMAT_VP8: + codec = "vp8"; + break; + case MVX_FORMAT_VP9: + codec = "vp9"; + break; + case MVX_FORMAT_AV1: + codec = "av1"; + break; + case MVX_FORMAT_AVS2: + codec = "avs2"; + break; + case MVX_FORMAT_AVS: + codec = "avs"; + break; + default: + return -ENOENT; + } + + n = snprintf(filename, size, "%s%s.fwb", codec, enc_dec); + if (n >= size) + return -ENOENT; + + return 0; +} + +static struct mvx_fw_bin *kobj_to_fw_bin(struct kobject *kobj) +{ + return container_of(kobj, struct mvx_fw_bin, kobj); +} + +/** + * fw_bin_destroy() - Destroy instance of firmware binary. + */ +static void fw_bin_destroy(struct kobject *kobj) +{ + struct mvx_fw_bin *fw_bin = kobj_to_fw_bin(kobj); + + MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_INFO, + "Releasing firmware binary. bin=0x%px.", fw_bin); + + if (fw_bin->securevideo == false && + IS_ERR_OR_NULL(fw_bin->nonsecure.fw) == false) + release_firmware(fw_bin->nonsecure.fw); + + if (fw_bin->securevideo != false && + IS_ERR_OR_NULL(fw_bin->secure.securefw) == false) + mvx_secure_release_firmware(fw_bin->secure.securefw); + + list_del(&fw_bin->cache_head); + devm_kfree(fw_bin->dev, fw_bin); +} + +/** + * fw_bin_validate() - Verify that the loaded firmware is a valid binary. 
+ */ +static int fw_bin_validate(const struct firmware *fw, + struct device *dev) +{ + struct mvx_fw_header *header = (struct mvx_fw_header *)fw->data; + + if (fw->size < sizeof(*header)) { + MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_WARNING, + "Firmware binary size smaller than firmware header. size=%zu.", + fw->size); + return -EFAULT; + } + + if (header->text_length > fw->size) { + MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_WARNING, + "Firmware text length larger than firmware binary size. text_length=%u, size=%zu.", + header->text_length, + fw->size); + return -EFAULT; + } + + return 0; +} + +/** + * fw_bin_callback() - Call firmware ready callback. + */ +static void fw_bin_callback(struct mvx_fw_bin *fw_bin) +{ + struct mvx_fw_event *event; + struct mvx_fw_event *tmp; + int ret; + + /* + * Continue even if lock fails, or else any waiting session will + * be blocked forever. + */ + ret = mutex_lock_interruptible(&fw_bin->mutex); + + /* + * Inform all clients that the firmware has been loaded. This must be + * done even if the firmware load fails, or else the clients will hang + * waiting for a firmware load that will never happen. + */ + list_for_each_entry_safe(event, tmp, &fw_bin->event_list, head) { + list_del(&event->head); + event->fw_bin_ready(fw_bin, event->arg, false); + } + + if (ret == 0) + mutex_unlock(&fw_bin->mutex); +} + +/** + * secure_request_firmware_done() - Firmware load callback routine. + */ +static void secure_request_firmware_done(struct mvx_secure_firmware *securefw, + void *arg) +{ + struct mvx_fw_bin *fw_bin = arg; + + if (securefw == NULL) { + MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_WARNING, + "Failed to load secure firmware binary. filename=%s.", + fw_bin->filename); + securefw = ERR_PTR(-EINVAL); + goto fw_bin_callback; + } + +fw_bin_callback: + fw_bin->secure.securefw = securefw; + + fw_bin_callback(fw_bin); +} + +/** + * request_firmware_done() - Callback routine after firmware has been loaded. 
+ */ +static void request_firmware_done(const struct firmware *fw, + void *arg) +{ + struct mvx_fw_bin *fw_bin = arg; + struct mvx_fw_header *header; + mvx_mmu_va va; + int ret; + uint32_t i; + + BUG_ON(!arg); + + if (fw == NULL) { + MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_WARNING, + "Failed to load firmware binary. filename=%s.", + fw_bin->filename); + fw = ERR_PTR(-EINVAL); + goto fw_ready_callback; + } + + ret = fw_bin_validate(fw, fw_bin->dev); + if (ret != 0) { + release_firmware(fw); + fw = ERR_PTR(ret); + goto fw_ready_callback; + } + + header = (struct mvx_fw_header *)fw->data; + fw_bin->nonsecure.header = header; + + /* Calculate number of pages needed for the text segment. */ + fw_bin->nonsecure.text_cnt = + (header->text_length + MVE_PAGE_SIZE - 1) >> MVE_PAGE_SHIFT; + + /* Calculate number of pages needed for the BSS segments. */ + va = header->bss_start_address; + for (i = 0; i < header->bss_bitmap_size; i++) { + if (va >= header->master_rw_start_address && + va < (header->master_rw_start_address + + header->master_rw_size)) + fw_bin->nonsecure.sbss_cnt++; + else if (test_bit_32(i, header->bss_bitmap)) + fw_bin->nonsecure.bss_cnt++; + + va += MVE_PAGE_SIZE; + } + + MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_INFO, + "Loaded firmware binary. bin=0x%px, major=%u, minor=%u, info=\"%s\", jump=0x%x, pages={text=%u, bss=%u, shared=%u}, text_length=%u, bss=0x%x.", + fw_bin, + header->protocol_major, + header->protocol_minor, + header->info_string, + header->rasc_jmp, + fw_bin->nonsecure.text_cnt, + fw_bin->nonsecure.bss_cnt, + fw_bin->nonsecure.sbss_cnt, + header->text_length, + header->bss_start_address); + +fw_ready_callback: + fw_bin->nonsecure.fw = fw; + + fw_bin_callback(fw_bin); +} + +/** + * hwvercmp() - Compare two hardware versions. + * + * Semantic of this function equivalent to strcmp(). 
+ */ +static int hwvercmp(struct mvx_hw_ver *v1, + struct mvx_hw_ver *v2) +{ + if (v1->id != v2->id) + return v1->id - v2->id; + + if (v1->revision != v2->revision) + return v1->revision - v2->revision; + + if (v1->patch != v2->patch) + return v1->patch - v2->patch; + + return 0; +} + +static ssize_t path_show(struct kobject *kobj, + struct kobj_attribute *attr, + char *buf) +{ + struct mvx_fw_bin *fw_bin = kobj_to_fw_bin(kobj); + + return scnprintf(buf, PAGE_SIZE, "%s\n", fw_bin->filename); +} + +static ssize_t hw_ver_show(struct kobject *kobj, + struct kobj_attribute *attr, + char *buf) +{ + struct mvx_fw_bin *fw_bin = kobj_to_fw_bin(kobj); + struct mvx_hw_ver *hw_ver = &fw_bin->hw_ver; + + return scnprintf(buf, PAGE_SIZE, "%s-%u-%u\n", + hw_id_to_string(hw_ver->id), + hw_ver->revision, + hw_ver->patch); +} + +static ssize_t count_show(struct kobject *kobj, + struct kobj_attribute *attr, + char *buf) +{ + return scnprintf(buf, PAGE_SIZE, "%d\n", + kref_read(&kobj->kref) - 1); +} + +static ssize_t dirty_show(struct kobject *kobj, + struct kobj_attribute *attr, + char *buf) +{ + struct mvx_fw_bin *fw_bin = kobj_to_fw_bin(kobj); + int dirty = 0; + + if (atomic_read(&fw_bin->flush_cnt) != + atomic_read(&fw_bin->cache->flush_cnt)) + dirty = 1; + + return scnprintf(buf, PAGE_SIZE, "%d\n", dirty); +} + +static struct kobj_attribute path_attr = __ATTR_RO(path); +static struct kobj_attribute count_attr = __ATTR_RO(count); +static struct kobj_attribute hw_ver = __ATTR_RO(hw_ver); +static struct kobj_attribute dirty_attr = __ATTR_RO(dirty); + +static struct attribute *mvx_fw_bin_attrs[] = { + &path_attr.attr, + &count_attr.attr, + &hw_ver.attr, + &dirty_attr.attr, + NULL +}; +ATTRIBUTE_GROUPS(mvx_fw_bin); + +static struct kobj_type fw_bin_ktype = { + .release = fw_bin_destroy, + .sysfs_ops = &kobj_sysfs_ops, + .default_groups = mvx_fw_bin_groups +}; + +/** + * fw_bin_create() - Create a new firmware binary instance. 
+ */ +static struct mvx_fw_bin *fw_bin_create(struct mvx_fw_cache *cache, + enum mvx_format format, + enum mvx_direction dir, + struct mvx_hw_ver *hw_ver, + bool securevideo) +{ + struct mvx_fw_bin *fw_bin; + int ret; + + /* Allocate object and initialize members. */ + fw_bin = devm_kzalloc(cache->dev, sizeof(*fw_bin), GFP_KERNEL); + if (fw_bin == NULL) + return ERR_PTR(-ENOMEM); + + fw_bin->dev = cache->dev; + fw_bin->cache = cache; + fw_bin->format = format; + fw_bin->dir = dir; + fw_bin->hw_ver = *hw_ver; + atomic_set(&fw_bin->flush_cnt, atomic_read(&cache->flush_cnt)); + mutex_init(&fw_bin->mutex); + INIT_LIST_HEAD(&fw_bin->cache_head); + INIT_LIST_HEAD(&fw_bin->event_list); + + fw_bin->securevideo = securevideo; + if (securevideo != false) + fw_bin->secure.secure = cache->secure; + + ret = kobject_init_and_add(&fw_bin->kobj, &fw_bin_ktype, &cache->kobj, + "%px", fw_bin); + if (ret != 0) + goto free_fw_bin; + + ret = get_fw_name(fw_bin->filename, sizeof(fw_bin->filename), format, + dir, &fw_bin->hw_ver); + if (ret != 0) { + MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_ERROR, + "No firmware available. format=%d, direction=%d.", + format, dir); + goto free_fw_bin; + } + + kobject_get(&fw_bin->kobj); + + if (securevideo != false) + ret = mvx_secure_request_firmware_nowait( + cache->secure, fw_bin->filename, MVX_SECURE_NUMCORES, + fw_bin, + secure_request_firmware_done); + else + ret = request_firmware_nowait(THIS_MODULE, true, + fw_bin->filename, + fw_bin->dev, GFP_KERNEL, fw_bin, + request_firmware_done); + + if (ret != 0) { + MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_ERROR, + "Failed to request firmware. filename=%s, securevideo=%d.", + fw_bin->filename, securevideo); + kobject_put(&fw_bin->kobj); + goto free_fw_bin; + } + + return fw_bin; + +free_fw_bin: + kobject_put(&fw_bin->kobj); + + return ERR_PTR(ret); +} + +/** + * fw_bin_get() - Get reference to firmware binary. 
+ * + * If firmware binary has already been loaded the reference count is increased, + * else the function tries to create a new descriptor and load the firmware + * into memory. + */ +static struct mvx_fw_bin *fw_bin_get(struct mvx_fw_cache *cache, + enum mvx_format format, + enum mvx_direction dir, + struct mvx_hw_ver *hw_ver, + bool securevideo) +{ + struct mvx_fw_bin *fw_bin = NULL; + struct mvx_fw_bin *tmp; + int ret; + + ret = mutex_lock_interruptible(&cache->mutex); + if (ret != 0) + return ERR_PTR(ret); + + /* Search if firmware binary has already been loaded. */ + list_for_each_entry(tmp, &cache->fw_bin_list, cache_head) { + if (tmp->format == format && tmp->dir == dir && + hwvercmp(&tmp->hw_ver, hw_ver) == 0 && + tmp->securevideo == securevideo && + atomic_read(&tmp->flush_cnt) == + atomic_read(&cache->flush_cnt)) { + fw_bin = tmp; + break; + } + } + + /* If firmware was not found, then try to request firmware. */ + if (fw_bin == NULL) { + fw_bin = fw_bin_create(cache, format, dir, hw_ver, securevideo); + if (!IS_ERR(fw_bin)) + list_add(&fw_bin->cache_head, &cache->fw_bin_list); + } else { + kobject_get(&fw_bin->kobj); + } + + mutex_unlock(&cache->mutex); + + return fw_bin; +} + +/**************************************************************************** + * Private functions + ****************************************************************************/ + +static struct mvx_fw_cache *kobj_to_fw_cache(struct kobject *kobj) +{ + return container_of(kobj, struct mvx_fw_cache, kobj); +} + +/** + * cache_flush_show() - FW cache flush status is always 0. + */ +static ssize_t cache_flush_show(struct kobject *kobj, + struct kobj_attribute *attr, + char *buf) +{ + return scnprintf(buf, PAGE_SIZE, "0\n"); +} + +/** + * cache_flush_store() - Trigger FW cache flush. 
+ */ +static ssize_t cache_flush_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buf, + size_t size) +{ + struct mvx_fw_cache *cache = kobj_to_fw_cache(kobj); + + atomic_inc(&cache->flush_cnt); + return size; +} + +/** + * Sysfs attribute which triggers FW cache flush. + */ +static struct kobj_attribute cache_flush = + __ATTR(flush, 0600, cache_flush_show, cache_flush_store); + +static struct attribute *mvx_fw_cache_attrs[] = { + &cache_flush.attr, + NULL +}; +ATTRIBUTE_GROUPS(mvx_fw_cache); + +static void cache_release(struct kobject *kobj) +{ + struct mvx_fw_cache *cache = kobj_to_fw_cache(kobj); + + kthread_stop(cache->cache_thread); + kobject_put(cache->kobj_parent); +} + +static struct kobj_type cache_ktype = { + .release = cache_release, + .sysfs_ops = &kobj_sysfs_ops, + .default_groups = mvx_fw_cache_groups, +}; + +static void cache_update(struct mvx_fw_cache *cache) +{ + struct mvx_fw_bin *fw_bin; + struct mvx_fw_bin *tmp; + int ret; + + ret = mutex_lock_interruptible(&cache->mutex); + if (ret != 0) + return; + + list_for_each_entry_safe(fw_bin, tmp, &cache->fw_bin_list, cache_head) { + int ref; + + ref = kref_read(&fw_bin->kobj.kref); + if (ref == 1) + kobject_put(&fw_bin->kobj); + } + + mutex_unlock(&cache->mutex); +} + +static int cache_thread(void *v) +{ + struct mvx_fw_cache *cache = (struct mvx_fw_cache *)v; + + while (!wait_event_interruptible_timeout(cache->wait_queue, + kthread_should_stop(), + msecs_to_jiffies(CACHE_CLEANUP_INTERVAL_MS))) { + cache_update(cache); + } + + return 0; +} + +/**************************************************************************** + * Exported functions + ****************************************************************************/ + +int mvx_fw_cache_construct(struct mvx_fw_cache *cache, + struct device *dev, + struct mvx_secure *secure, + struct kobject *kobj_parent) +{ + int ret; + + cache->dev = dev; + cache->secure = secure; + cache->kobj_parent = kobject_get(kobj_parent); + 
atomic_set(&cache->flush_cnt, 0); + mutex_init(&cache->mutex); + INIT_LIST_HEAD(&cache->fw_bin_list); + + ret = kobject_init_and_add(&cache->kobj, &cache_ktype, + kobj_parent, "fw_cache"); + if (ret != 0) + goto kobj_put; + + init_waitqueue_head(&cache->wait_queue); + cache->cache_thread = kthread_run(cache_thread, cache, "fw_cache"); + if (IS_ERR(cache->cache_thread)) + goto kobj_put; + + return 0; + +kobj_put: + kobject_put(&cache->kobj); + kobject_put(cache->kobj_parent); + return -EFAULT; +} + +void mvx_fw_cache_destruct(struct mvx_fw_cache *cache) +{ + cache_update(cache); + kobject_put(&cache->kobj); +} + +int mvx_fw_cache_get(struct mvx_fw_cache *cache, + enum mvx_format format, + enum mvx_direction dir, + struct mvx_fw_event *event, + struct mvx_hw_ver *hw_ver, + bool securevideo) +{ + int ret; + struct mvx_fw_bin *fw_bin; + + /* Allocate a new firmware binary or get handle to existing object. */ + fw_bin = fw_bin_get(cache, format, dir, hw_ver, securevideo); + if (IS_ERR(fw_bin)) + return PTR_ERR(fw_bin); + + ret = mutex_lock_interruptible(&fw_bin->mutex); + if (ret != 0) { + mvx_fw_cache_put(cache, fw_bin); + return ret; + } + + /* + * If the firmware binary has already been loaded, then the callback + * routine can be called right away. + * Else the callback and argument is enqueued to the firmware + * notification list. 
+ */ + if ((fw_bin->securevideo != false && + IS_ERR_OR_NULL(fw_bin->secure.securefw) == false)) { + mutex_unlock(&fw_bin->mutex); + event->fw_bin_ready(fw_bin, event->arg, true); + } else if (fw_bin->securevideo == false && + IS_ERR_OR_NULL(fw_bin->nonsecure.fw) == false) { + mutex_unlock(&fw_bin->mutex); + event->fw_bin_ready(fw_bin, event->arg, true); + } else { + list_add(&event->head, &fw_bin->event_list); + mutex_unlock(&fw_bin->mutex); + } + + return 0; +} + +void mvx_fw_cache_put(struct mvx_fw_cache *cache, + struct mvx_fw_bin *fw_bin) +{ + int ret; + + ret = mutex_lock_interruptible(&cache->mutex); + + kobject_put(&fw_bin->kobj); + + if (ret == 0) + mutex_unlock(&cache->mutex); +} + +void mvx_fw_cache_log(struct mvx_fw_bin *fw_bin, + struct mvx_client_session *csession) +{ + struct mvx_log_header header; + struct mvx_log_fw_binary fw_binary; + struct timespec64 timespec; + struct iovec vec[3]; + + if (fw_bin->securevideo != false) + return; + + ktime_get_real_ts64(×pec); + + header.magic = MVX_LOG_MAGIC; + header.length = sizeof(fw_binary) + sizeof(*fw_bin->nonsecure.header); + header.type = MVX_LOG_TYPE_FW_BINARY; + header.severity = MVX_LOG_INFO; + header.timestamp.sec = timespec.tv_sec; + header.timestamp.nsec = timespec.tv_nsec; + + fw_binary.session = (uintptr_t)csession; + + vec[0].iov_base = &header; + vec[0].iov_len = sizeof(header); + + vec[1].iov_base = &fw_binary; + vec[1].iov_len = sizeof(fw_binary); + + vec[2].iov_base = (void *)fw_bin->nonsecure.header; + vec[2].iov_len = sizeof(*fw_bin->nonsecure.header); + + MVX_LOG_DATA(&mvx_log_fwif_if, MVX_LOG_INFO, vec, 3); +} + +void mvx_fw_cache_get_formats(struct mvx_fw_cache *cache, + enum mvx_direction direction, + uint64_t *formats) +{ + /* Support all formats by default. */ + *formats = (1ull << MVX_FORMAT_MAX) - 1ull; + + /* TODO remove formats we can't find any firmware for. 
*/ +} diff --git a/drivers/media/platform/cix/cix_vpu/if/mvx_firmware_cache.h b/drivers/media/platform/cix/cix_vpu/if/mvx_firmware_cache.h new file mode 100755 index 000000000000..ac8b847e0001 --- /dev/null +++ b/drivers/media/platform/cix/cix_vpu/if/mvx_firmware_cache.h @@ -0,0 +1,247 @@ +/* + * The confidential and proprietary information contained in this file may + * only be used by a person authorised under and to the extent permitted + * by a subsisting licensing agreement from Arm Technology (China) Co., Ltd. + * + * (C) COPYRIGHT 2021-2021 Arm Technology (China) Co., Ltd. + * ALL RIGHTS RESERVED + * + * This entire notice must be reproduced on all copies of this file + * and copies of this file may only be made by a person if such person is + * permitted to do so under the terms of a subsisting license agreement + * from Arm Technology (China) Co., Ltd. + * + * SPDX-License-Identifier: GPL-2.0-only + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ *
+ */
+
+#ifndef _MVX_FIRMWARE_CACHE_H_
+#define _MVX_FIRMWARE_CACHE_H_
+
+/****************************************************************************
+ * Includes
+ ****************************************************************************/
+
+/* NOTE(review): the four include targets below were lost in extraction
+ * (angle-bracketed paths stripped). Presumably kernel headers providing
+ * struct mutex, struct kobject, list_head and wait_queue_head_t used by
+ * struct mvx_fw_cache — restore before building. */
+#include
+#include
+#include
+#include
+#include "mvx_if.h"
+
+/****************************************************************************
+ * Types
+ ****************************************************************************/
+
+struct device;
+struct firmware;
+struct mvx_client_session;
+struct mvx_secure;
+struct mvx_secure_firmware;
+
+/**
+ * struct mvx_fw_cache - Firmware cache.
+ *
+ * There is exactly one firmware context per device. It keeps track of the
+ * firmware binaries.
+ */
+struct mvx_fw_cache {
+	struct device *dev;
+	struct mvx_secure *secure;
+	struct mutex mutex;
+	struct list_head fw_bin_list;
+	struct kobject kobj;
+	struct kobject *kobj_parent;
+	atomic_t flush_cnt;
+	struct task_struct *cache_thread;
+	wait_queue_head_t wait_queue;
+};
+
+/**
+ * struct mvx_fw_header - Firmware binary header.
+ * @rasc_jmp:		Start address.
+ * @protocol_minor:	Host interface protocol minor version.
+ * @protocol_major:	Host interface protocol major version.
+ * @reserved:		Reserved for future use. Always 0.
+ * @info_string:	Human readable codec information.
+ * @part_number:	Part number.
+ * @svn_revision:	SVN revision.
+ * @version_string:	Firmware version.
+ * @text_length:	Length in bytes of the read-only part of the firmware.
+ * @bss_start_address:	Start address for BSS segment. This is always
+ *			page-aligned.
+ * @bss_bitmap_size:	The number of bits used in 'bss_bitmap'.
+ * @bss_bitmap:		Bitmap which pages that shall be allocated and MMU
+ *			mapped. If bit N is set, then a page shall be allocated
+ *			and MMU mapped to VA address
+ *			FW_BASE + bss_start_address + N * MVE_PAGE_SIZE.
+ * @master_rw_start_address: Defines a region of shared pages.
+ * @master_rw_size: Defines a region of shared pages. + */ +struct mvx_fw_header { + uint32_t rasc_jmp; + uint8_t protocol_minor; + uint8_t protocol_major; + uint8_t reserved[2]; + uint8_t info_string[56]; + uint8_t part_number[8]; + uint8_t svn_revision[8]; + uint8_t version_string[16]; + uint32_t text_length; + uint32_t bss_start_address; + uint32_t bss_bitmap_size; + uint32_t bss_bitmap[16]; + uint32_t master_rw_start_address; + uint32_t master_rw_size; +}; + +/** + * struct mvx_fw_bin - Structure describing a loaded firmware binary. + * + * Multiple sessions may share the same firmware binary. + */ +struct mvx_fw_bin { + struct device *dev; + struct mvx_fw_cache *cache; + struct mutex mutex; + struct kobject kobj; + struct list_head cache_head; + struct list_head event_list; + char filename[128]; + enum mvx_format format; + enum mvx_direction dir; + struct mvx_hw_ver hw_ver; + atomic_t flush_cnt; + bool securevideo; + struct { + const struct firmware *fw; + const struct mvx_fw_header *header; + unsigned int text_cnt; + unsigned int bss_cnt; + unsigned int sbss_cnt; + } nonsecure; + struct { + struct mvx_secure *secure; + struct mvx_secure_firmware *securefw; + } secure; +}; + +/** + * struct mvx_fw_event - Firmware load event notification. + * @head: Used by the firmware loader. Should not be used + * by the client. + * @fw_bin_ready: Callback routine invoked after the firmware binary has + * finished loading. Will be called both on success and + * failure. + * @arg: Argument passed to fw_bin_ready. Client may set this + * pointer to any value. + * + * Structure used to keep track of clients that have subscribed to event + * notification after the firmware binary has been loaded. 
+ */
+struct mvx_fw_event {
+	struct list_head head;
+	void (*fw_bin_ready)(struct mvx_fw_bin *fw_bin,
+			     void *arg,
+			     bool same_thread);
+	void *arg;
+};
+
+/****************************************************************************
+ * Exported functions
+ ****************************************************************************/
+
+/**
+ * mvx_fw_cache_construct() - Construct the firmware object.
+ * @cache:	Pointer to firmware cache.
+ * @dev:	Pointer to device.
+ * @secure:	Pointer to secure video.
+ * @kobj:	Pointer to parent kobj.
+ *
+ * When FW cache is constructed, corresponding sysfs entry will be created
+ * and attached as a child to kobj.
+ *
+ * Return: 0 on success, else error code.
+ */
+int mvx_fw_cache_construct(struct mvx_fw_cache *cache,
+			   struct device *dev,
+			   struct mvx_secure *secure,
+			   struct kobject *kobj);
+
+/**
+ * mvx_fw_cache_destruct() - Destroy the firmware object.
+ * @cache:	Pointer to firmware cache.
+ */
+void mvx_fw_cache_destruct(struct mvx_fw_cache *cache);
+
+/**
+ * mvx_fw_cache_get() - Get a reference to a firmware binary.
+ * @cache:	Pointer for firmware cache.
+ * @format:	Format used on the bitstream port.
+ * @dir:	Which port that is configured as bitstream port.
+ * @event:	Callback routine and argument that will be invoked after
+ *		the firmware binary has been loaded.
+ * @hw_ver:	MVE hardware version.
+ * @securevideo:Secure video enabled.
+ *
+ * Loading a firmware binary is an asynchronous operation. The client will be
+ * informed through a callback routine when the binary is ready.
+ *
+ * If the firmware binary is already in the cache, then the callback routine
+ * will be called directly from mvx_fw_cache_get(). The client must take care
+ * not to reacquire any mutexes already held.
+ *
+ * If the firmware binary was not found in the cache, then the callback routine
+ * will be called from a separate thread context. The client must make sure
+ * its data is protected by a mutex.
+ *
+ * Return: 0 on success, else error code.
+ */
+int mvx_fw_cache_get(struct mvx_fw_cache *cache,
+		     enum mvx_format format,
+		     enum mvx_direction dir,
+		     struct mvx_fw_event *event,
+		     struct mvx_hw_ver *hw_ver,
+		     bool securevideo);
+
+/**
+ * mvx_fw_cache_put() - Return firmware binary to cache and decrement the
+ *			reference count.
+ * @cache:	Pointer to firmware cache.
+ * @fw_bin:	Pointer to firmware binary.
+ */
+void mvx_fw_cache_put(struct mvx_fw_cache *cache,
+		      struct mvx_fw_bin *fw_bin);
+
+/**
+ * mvx_fw_cache_log() - Log firmware binary to ram log.
+ * @fw_bin:	Pointer to firmware binary.
+ * @csession:	Pointer to client session.
+ */
+void mvx_fw_cache_log(struct mvx_fw_bin *fw_bin,
+		      struct mvx_client_session *csession);
+
+/**
+ * mvx_fw_cache_get_formats() - Get supported formats.
+ * @cache:	Pointer to firmware cache.
+ * @direction:	Input or output port.
+ * @formats:	Pointer to bitmask listing supported formats.
+ */
+void mvx_fw_cache_get_formats(struct mvx_fw_cache *cache,
+			      enum mvx_direction direction,
+			      uint64_t *formats);
+
+#endif /* _MVX_FIRMWARE_CACHE_H_ */
diff --git a/drivers/media/platform/cix/cix_vpu/if/mvx_firmware_priv.h b/drivers/media/platform/cix/cix_vpu/if/mvx_firmware_priv.h
new file mode 100755
index 000000000000..5190e8210983
--- /dev/null
+++ b/drivers/media/platform/cix/cix_vpu/if/mvx_firmware_priv.h
@@ -0,0 +1,165 @@
+/*
+ * The confidential and proprietary information contained in this file may
+ * only be used by a person authorised under and to the extent permitted
+ * by a subsisting licensing agreement from Arm Technology (China) Co., Ltd.
+ *
+ * (C) COPYRIGHT 2021-2021 Arm Technology (China) Co., Ltd.
+ * ALL RIGHTS RESERVED
+ *
+ * This entire notice must be reproduced on all copies of this file
+ * and copies of this file may only be made by a person if such person is
+ * permitted to do so under the terms of a subsisting license agreement
+ * from Arm Technology (China) Co., Ltd.
+ * + * SPDX-License-Identifier: GPL-2.0-only + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + * + */ + +#ifndef _MVX_FIRMWARE_PRIV_H_ +#define _MVX_FIRMWARE_PRIV_H_ + +/**************************************************************************** + * Includes + ****************************************************************************/ + +#include "mvx_firmware.h" + +/**************************************************************************** + * Defines + ****************************************************************************/ + +#if !defined(MVE_REQUEST_CODE_IDLE_ACK) +#define MVE_REQUEST_CODE_IDLE_ACK (1012) +#endif + +/**************************************************************************** + * Firmware + ****************************************************************************/ + +struct mvx_client_ops; +struct mvx_client_session; +struct mvx_fw_bin; +struct mvx_mmu; +struct mvx_session; + +/** + * mvx_firmware_construct() - Firmware constructor. + * @fw: Pointer to firmware object. + * @fw_bin: Pointer to firmware binary. + * @mmu: Pointer to MMU. + * @session: Pointer to session. + * @client_ops: Pointer to client operations. + * @csession: Pointer to client session. + * @core_mask: Core mask. + * + * Return: 0 on success, else error code. 
+ */ +int mvx_fw_construct(struct mvx_fw *fw, + struct mvx_fw_bin *fw_bin, + struct mvx_mmu *mmu, + struct mvx_session *session, + struct mvx_client_ops *client_ops, + struct mvx_client_session *csession, + unsigned int core_mask); + +/**************************************************************************** + * Firmware v2 + ****************************************************************************/ + +/** + * mvx_fw_construct_v2() - Construct the object for the firmware v2 interface. + * @fw: Pointer to firmware object. + * @fw_bin: Pointer to firmware binary. + * @mmu: Pointer to MMU. + * @session: Pointer to session. + * @client_ops: Pointer to client operations. + * @csession: Pointer to client session. + * @core_mask: Core mask. + * @major: Major firmware version. + * @minor: Minor firmware version. + * + * Return: 0 on success, else error code. + */ +int mvx_fw_construct_v2(struct mvx_fw *fw, + struct mvx_fw_bin *fw_bin, + struct mvx_mmu *mmu, + struct mvx_session *session, + struct mvx_client_ops *client_ops, + struct mvx_client_session *csession, + unsigned int core_mask, + unsigned char major, + unsigned char minor); + +/** + * mvx_fw_send_idle_ack_v2() - Send idle ack. + * @fw: Pointer to firmware object. + * + * Return: 0 on success, else error code. + */ +int mvx_fw_send_idle_ack_v2(struct mvx_fw *fw); + +/** + * mvx_fw_to_mve_profile_v2() - Convert MVX to MVE profile. + * @mvx_profile: Input profile. + * @mve_profile: Output profile. + * + * Return: 0 on success, else error code. + */ +int mvx_fw_to_mve_profile_v2(unsigned int mvx_profile, + uint16_t *mve_profile); + +/** + * mvx_fw_to_mve_level_v2() - Convert MVX to MVE level. + * @mvx_level: Input level. + * @mvx_tier: Input tier. + * @mve_level: Output level. + * + * Return: 0 on success, else error code. 
+ */
+int mvx_fw_to_mve_level_v2(unsigned int mvx_level,
+			   unsigned int mvx_tier,
+			   uint16_t *mve_level);
+
+/****************************************************************************
+ * Firmware v3
+ ****************************************************************************/
+
+/**
+ * mvx_fw_construct_v3() - Construct the object for the firmware v3 interface.
+ * @fw:		Pointer to firmware object.
+ * @fw_bin:	Pointer to firmware binary.
+ * @mmu:	Pointer to MMU.
+ * @session:	Pointer to session.
+ * @client_ops:	Pointer to client operations.
+ * @csession:	Pointer to client session.
+ * @core_mask:	Core mask.
+ * @major:	Major firmware version.
+ * @minor:	Minor firmware version.
+ *
+ * Return: 0 on success, else error code.
+ */
+int mvx_fw_construct_v3(struct mvx_fw *fw,
+			struct mvx_fw_bin *fw_bin,
+			struct mvx_mmu *mmu,
+			struct mvx_session *session,
+			struct mvx_client_ops *client_ops,
+			struct mvx_client_session *csession,
+			unsigned int core_mask,
+			unsigned char major,
+			unsigned char minor);
+
+#endif
diff --git a/drivers/media/platform/cix/cix_vpu/if/mvx_firmware_v2.c b/drivers/media/platform/cix/cix_vpu/if/mvx_firmware_v2.c
new file mode 100755
index 000000000000..b89b251bbafa
--- /dev/null
+++ b/drivers/media/platform/cix/cix_vpu/if/mvx_firmware_v2.c
@@ -0,0 +1,3914 @@
+/*
+ * The confidential and proprietary information contained in this file may
+ * only be used by a person authorised under and to the extent permitted
+ * by a subsisting licensing agreement from Arm Technology (China) Co., Ltd.
+ *
+ * (C) COPYRIGHT 2021-2021 Arm Technology (China) Co., Ltd.
+ * ALL RIGHTS RESERVED
+ *
+ * This entire notice must be reproduced on all copies of this file
+ * and copies of this file may only be made by a person if such person is
+ * permitted to do so under the terms of a subsisting license agreement
+ * from Arm Technology (China) Co., Ltd.
+ * + * SPDX-License-Identifier: GPL-2.0-only + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + * + */ + +/**************************************************************************** + * Includes + ****************************************************************************/ + +#include +#include +#include "fw_v2/mve_protocol_def.h" +#include "mvx_firmware_cache.h" +#include "mvx_firmware_priv.h" +#include "mvx_log_group.h" +#include "mvx_log_ram.h" +#include "mvx_mmu.h" +#include "mvx_secure.h" +#include "mvx_seq.h" +#include "mvx_session.h" + +/**************************************************************************** + * Static functions + ****************************************************************************/ + +/** + * is_afbc() - Detect if format is AFBC. + * @format: Color format. + * + * Return: True if AFBC, else false. + */ +static bool is_afbc(unsigned int format) +{ + return (format & (1 << MVE_FORMAT_BF_A)) != 0; +} + +/** + * log_message() - Log a message. + * @session: Pointer to session. + * @channel: The type of the firmware interface message; + * message, input buffer, output buffer or RPC + * @direction: The type of the firmware interface message; + * host->firmware or firware->host. + * @msg_header: The header of the message. 
+ * @data: Pointer to the message data. + */ +static void log_message(struct mvx_session *session, + enum mvx_log_fwif_channel channel, + enum mvx_log_fwif_direction direction, + struct mve_msg_header *msg_header, + void *data) +{ + struct mvx_log_header header; + struct mvx_log_fwif fwif; + struct iovec vec[4]; + struct timespec64 timespec; + + ktime_get_real_ts64(×pec); + + header.magic = MVX_LOG_MAGIC; + header.length = sizeof(fwif) + sizeof(*msg_header) + msg_header->size; + header.type = MVX_LOG_TYPE_FWIF; + header.severity = MVX_LOG_INFO; + header.timestamp.sec = timespec.tv_sec; + header.timestamp.nsec = timespec.tv_nsec; + + fwif.version_major = 2; + fwif.version_minor = 0; + fwif.channel = channel; + fwif.direction = direction; + fwif.session = (uintptr_t)session; + + vec[0].iov_base = &header; + vec[0].iov_len = sizeof(header); + + vec[1].iov_base = &fwif; + vec[1].iov_len = sizeof(fwif); + + vec[2].iov_base = msg_header; + vec[2].iov_len = sizeof(*msg_header); + + vec[3].iov_base = data; + vec[3].iov_len = msg_header->size; + + MVX_LOG_DATA(&mvx_log_fwif_if, MVX_LOG_INFO, vec, 4); +} + +/** + * log_rpc() - Log a RPC message. + * @session: Pointer to session. + * @direction: The type of the firmware interface message; + * host->firmware or firware->host. + * @rpc: RPC message. + */ +static void log_rpc(struct mvx_session *session, + enum mvx_log_fwif_direction direction, + struct mve_rpc_communication_area *rpc) +{ + struct mvx_log_header header; + struct mvx_log_fwif fwif; + size_t rpc_size; + struct iovec vec[3]; + struct timespec64 timespec; + + rpc_size = offsetof(typeof(*rpc), params) + rpc->size; + + if (rpc_size > sizeof(*rpc)) + MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_WARNING, + "RPC message size is too large. 
size=%u.", + rpc->size); + + ktime_get_real_ts64(×pec); + + header.magic = MVX_LOG_MAGIC; + header.length = sizeof(fwif) + rpc_size; + header.type = MVX_LOG_TYPE_FWIF; + header.severity = MVX_LOG_INFO; + header.timestamp.sec = timespec.tv_sec; + header.timestamp.nsec = timespec.tv_nsec; + + fwif.version_major = 2; + fwif.version_minor = 0; + fwif.channel = MVX_LOG_FWIF_CHANNEL_RPC; + fwif.direction = direction; + fwif.session = (uintptr_t)session; + + vec[0].iov_base = &header; + vec[0].iov_len = sizeof(header); + + vec[1].iov_base = &fwif; + vec[1].iov_len = sizeof(fwif); + + vec[2].iov_base = rpc; + vec[2].iov_len = rpc_size; + + MVX_LOG_DATA(&mvx_log_fwif_if, MVX_LOG_INFO, vec, 3); +} + +/** + * log_time() - Log timing. + * @session: Pointer to session. + * @event: Pointer to processed event. + */ +static void log_time(struct mvx_session *session, + struct mve_event_processed *event) +{ + struct iovec vec; + struct mvx_time t; + + ktime_get_real_ts64(&t.timespec); + t.parse.start = event->parse_start_time >> 1; + t.parse.end = event->parse_end_time >> 1; + t.pipe.start = event->pipe_start_time >> 1; + t.pipe.end = event->pipe_end_time >> 1; + + vec.iov_base = &t; + vec.iov_len = sizeof(t); + + mvx_log_perf.drain->data(mvx_log_perf.drain, MVX_LOG_INFO, &vec, 1); +} + +static int get_stride90(enum mvx_format format, + uint8_t *nplanes, + unsigned int stride[MVX_BUFFER_NPLANES][2]) +{ + switch (format) { + case MVX_FORMAT_YUV420_I420: + *nplanes = 3; + stride[0][0] = 2; + stride[0][1] = 2; + stride[1][0] = 1; + stride[1][1] = 1; + stride[2][0] = 1; + stride[2][1] = 1; + break; + case MVX_FORMAT_YUV420_NV12: + case MVX_FORMAT_YUV420_NV21: + *nplanes = 2; + stride[0][0] = 2; + stride[0][1] = 2; + stride[1][0] = 2; + stride[1][1] = 1; + stride[2][0] = 0; + stride[2][1] = 0; + break; + case MVX_FORMAT_YUV420_P010: + case MVX_FORMAT_YUV420_2P_10: + *nplanes = 2; + stride[0][0] = 4; + stride[0][1] = 2; + stride[1][0] = 4; + stride[1][1] = 1; + stride[2][0] = 0; + 
stride[2][1] = 0; + break; + case MVX_FORMAT_YUV420_Y0L2: + case MVX_FORMAT_YUV420_AQB1: + *nplanes = 1; + stride[0][0] = 8; + stride[0][1] = 1; + stride[1][0] = 0; + stride[1][1] = 0; + stride[2][0] = 0; + stride[2][1] = 0; + break; + case MVX_FORMAT_YUV422_YUY2: + case MVX_FORMAT_YUV422_UYVY: + *nplanes = 1; + stride[0][0] = 4; + stride[0][1] = 2; + stride[1][0] = 0; + stride[1][1] = 0; + stride[2][0] = 0; + stride[2][1] = 0; + break; + case MVX_FORMAT_YUV422_Y210: + case MVX_FORMAT_YUV422_1P_10: + case MVX_FORMAT_RGBA_8888: + case MVX_FORMAT_BGRA_8888: + case MVX_FORMAT_ARGB_8888: + case MVX_FORMAT_ABGR_8888: + *nplanes = 1; + stride[0][0] = 8; + stride[0][1] = 2; + stride[1][0] = 0; + stride[1][1] = 0; + stride[2][0] = 0; + stride[2][1] = 0; + break; + case MVX_FORMAT_RGB_888: + case MVX_FORMAT_BGR_888: + *nplanes = 1; + stride[0][0] = 6; + stride[0][1] = 2; + stride[1][0] = 0; + stride[1][1] = 0; + stride[2][0] = 0; + stride[2][1] = 0; + break; + case MVX_FORMAT_RGB_888_3P: + case MVX_FORMAT_YUV444: + *nplanes = 3; + stride[0][0] = 2; + stride[0][1] = 2; + stride[1][0] = 2; + stride[1][1] = 2; + stride[2][0] = 2; + stride[2][1] = 2; + break; + case MVX_FORMAT_Y: + *nplanes = 1; + stride[0][0] = 2; + stride[0][1] = 2; + stride[1][0] = 0; + stride[1][1] = 0; + stride[2][0] = 0; + stride[2][1] = 0; + break; + case MVX_FORMAT_Y_10: + *nplanes = 1; + stride[0][0] = 4; + stride[0][1] = 2; + stride[1][0] = 0; + stride[1][1] = 0; + stride[2][0] = 0; + stride[2][1] = 0; + break; + case MVX_FORMAT_YUV444_10: + *nplanes = 3; + stride[0][0] = 4; + stride[0][1] = 2; + stride[1][0] = 4; + stride[1][1] = 2; + stride[2][0] = 4; + stride[2][1] = 2; + break; + case MVX_FORMAT_YUV420_I420_10: + *nplanes = 3; + stride[0][0] = 4; + stride[0][1] = 2; + stride[1][0] = 2; + stride[1][1] = 1; + stride[2][0] = 2; + stride[2][1] = 1; + break; + default: + return -EINVAL; + } + + return 0; +} + +/** + * read32n() - Read a number of bytes from 'src' to 'dst'. 
+ * @src: Pointer to circular buffer of source data. + * @offset: Current offset in the circular buffer. + * @dst: Pointer to destination buffer. + * @size: Size in bytes. + * + * Return: New offset in the circular buffer. + */ +static unsigned int read32n(volatile uint32_t *src, + unsigned int offset, + uint32_t *dst, + size_t size) +{ + for (; size >= sizeof(uint32_t); size -= sizeof(uint32_t)) { + *dst++ = src[offset]; + offset = (offset + 1) % MVE_COMM_QUEUE_SIZE_IN_WORDS; + } + + if (size != 0) { + memcpy(dst, (void *)&src[offset], size); + offset = (offset + 1) % MVE_COMM_QUEUE_SIZE_IN_WORDS; + } + + return offset; +} + +/** + * sum32n() - calculate sum of 'size' words from 'offset of 'data' buffer. + * @data: Pointer to circular buffer of source data. + * @offset: Current offset in the circular buffer. + * @size: Size in 4-bytes. + * + * Return: sum. + */ +static uint32_t sum32n(volatile uint32_t *data, + unsigned int offset, + ssize_t size) +{ + uint32_t sum = 0; + + for (; size > 0; size--) { + sum += data[offset]; + offset = (offset + 1) % MVE_COMM_QUEUE_SIZE_IN_WORDS; + } + + return sum; +} + +/** + * read_message() - Read message from firmware message queue. + * @fw: Pointer to firmware object. + * @host: Host communication area. + * @mve: MVE communication area. + * @code: Pointer to where the message code shall be placed. + * @data: Pointer to where message data shall be placed. + * @size: Input: the size of the data. Output: The size of the message. + * @channel: Firmware interface message type to log. + * + * Return: 1 if a message was read, 0 if no message was read, else error code. 
+ */ +static int read_message(struct mvx_fw *fw, + struct mve_comm_area_host *host, + struct mve_comm_area_mve *mve, + unsigned int *code, + void *data, + size_t *size, + enum mvx_log_fwif_channel channel) +{ + struct mve_msg_header header; + unsigned int rpos, prev_rpos; + ssize_t capacity; + int integrity_check; + int ret; + + ret = mutex_lock_interruptible(&fw->mutex); + if (ret != 0) { + MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_ERROR, "Get fw mutex failed"); + goto out; + } + + dma_sync_single_for_cpu(fw->dev, + virt_to_phys(mve), + MVE_PAGE_SIZE, DMA_FROM_DEVICE); + + rpos = host->out_rpos; + prev_rpos = rpos; + + /* Calculate how much data that is available in the buffer. */ + if (mve->out_wpos >= rpos) + capacity = mve->out_wpos - rpos; + else + capacity = mve->out_wpos + MVE_COMM_QUEUE_SIZE_IN_WORDS - rpos; + + if (capacity <= 0) { + ret = 0; + goto out; + } + + /* Read the header. */ + rpos = read32n(mve->out_data, rpos, (uint32_t *)&header, + sizeof(header)); + + /* Check message code range */ + if (header.code < MVE_RESPONSE_CODE_SWITCHED_IN || header.code > MVE_BUFFER_CODE_GENERAL) { + ret = 1; + goto out; + } + + + /* Do integrity check only when VPU firmware sends checksum which is in the reserved fields */ + integrity_check = mve->reserved[0] || mve->reserved[1] || mve->reserved[2]; + if (integrity_check) { + /* Message queue integrity check */ + if (channel == MVX_LOG_FWIF_CHANNEL_MESSAGE) { + uint32_t sum = sum32n(mve->out_data, prev_rpos, capacity); + sum += fw->msg_mve_sum; + if (sum != mve->reserved[2] && sum != mve->reserved[1] && sum != mve->reserved[0]) { + MVX_LOG_PRINT(&mvx_log_if, MVX_WAR_LOG_LEVEL, + "Sanity check failed: %u vs %u. 
rpos = %d, size = %ld, header = 0x%08x(0x%08x)", + sum, mve->reserved[2], prev_rpos, capacity, + mve->out_data[prev_rpos], *(uint32_t *)&header); + *code = 0; + *size = 0; + ret = 1; + goto out; + } + } + } else { + /* Detect duplicated SWITCHED_IN message which is not expected */ + if (header.code == MVE_RESPONSE_CODE_SWITCHED_IN && fw->switched_in) { + MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_WARNING, + "Duplicated SWITCHED_IN message"); + *code = 0; + *size = 0; + ret = 1; + goto out; + } + + if (header.code == MVE_RESPONSE_CODE_SWITCHED_IN) + fw->switched_in = 1; + else if (header.code == MVE_RESPONSE_CODE_SWITCHED_OUT) + fw->switched_in = 0; + } + + /* Make sure there is enough space for both header and message. */ + capacity -= DIV_ROUND_UP(sizeof(header) + header.size, + sizeof(uint32_t)); + if (capacity < 0) { + MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_WARNING, + "Firmware v2 msg larger than capacity. code=%u, size=%u, wpos=%u, rpos=%u.", + header.code, header.size, mve->out_wpos, + host->out_rpos); + *code = 0; + *size = 0; + ret = 1; + goto out; + } + + if (header.size > *size) { + MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_WARNING, + "Firmware v2 message does not fit in buffer. code=%u, msg_size=%u, size=%zu.", + header.code, header.size, *size); + ret = -ENOMEM; + goto out; + } + + /* Update message sum */ + if (integrity_check && channel == MVX_LOG_FWIF_CHANNEL_MESSAGE) + fw->msg_mve_sum += sum32n(mve->out_data, host->out_rpos, (header.size + 7) >> 2); + + /* Read message body. */ + rpos = read32n(mve->out_data, rpos, data, header.size); + host->out_rpos = rpos; + + /* + * Make sure the read pointer has been written before the cache is + * flushed. + */ + wmb(); + dma_sync_single_for_device(fw->dev, + virt_to_phys(&host->out_rpos), + sizeof(host->out_rpos), DMA_TO_DEVICE); + + *code = header.code; + *size = header.size; + + /* Log firmware message. 
*/ + MVX_LOG_EXECUTE(&mvx_log_fwif_if, MVX_LOG_INFO, + log_message(fw->session, channel, + MVX_LOG_FWIF_DIRECTION_FIRMWARE_TO_HOST, + &header, data)); + + ret = 1; + +out: + mutex_unlock(&fw->mutex); + return ret; +} + +/** + * write32n() - Write a number of bytes to 'dst' from 'src'. + * @dst: Pointer to circular buffer of destination data. + * @offset: Current offset in the circular buffer. + * @src: Pointer to source buffer. + * @size: Size in bytes. + * + * Return: New offset in the circular buffer. + */ +static unsigned int write32n(volatile uint32_t *dst, + unsigned int offset, + uint32_t *src, + size_t size) +{ + for (; size >= sizeof(uint32_t); size -= sizeof(uint32_t)) { + dst[offset] = *src++; + offset = (offset + 1) % MVE_COMM_QUEUE_SIZE_IN_WORDS; + } + + if (size != 0) { + memcpy((void *)&dst[offset], src, size); + offset = (offset + 1) % MVE_COMM_QUEUE_SIZE_IN_WORDS; + } + + return offset; +} + +/** + * write_message() - Write message to firmware message queue. + * @fw: Pointer to firmware object. + * @host: Host communication area. + * @mve: MVE communication area. + * @code: Message code. + * @data: Pointer to message data. May be NULL if size if 0. + * @size: Size in bytes of data. + * @channel: Firmware interface message type to log. + * + * Return: 0 on success, else error code. + */ +static int write_message(struct mvx_fw *fw, + struct mve_comm_area_host *host, + struct mve_comm_area_mve *mve, + unsigned int code, + void *data, + size_t size, + enum mvx_log_fwif_channel channel) +{ + struct mve_msg_header header = { .code = code, .size = size }; + ssize_t capacity; + unsigned int wpos; + int ret; + + ret = mutex_lock_interruptible(&fw->mutex); + if (ret != 0) { + MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_ERROR, "Get fw mutex failed"); + goto out; + } + + dma_sync_single_for_cpu(fw->dev, + virt_to_phys(&mve->in_rpos), + sizeof(mve->in_rpos), DMA_FROM_DEVICE); + + wpos = host->in_wpos; + + /* Calculate how much space that is available in the buffer. 
*/ + capacity = mve->in_rpos - wpos; + if (capacity <= 0) + capacity += MVE_COMM_QUEUE_SIZE_IN_WORDS; + + /* Make sure there is enough space for both header and message. */ + capacity -= DIV_ROUND_UP(sizeof(header) + size, sizeof(uint32_t)); + if (capacity < 0) { + MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_ERROR, + "No enough space for both header and message."); + ret = -ENOMEM; + goto out; + } + + /* Assume securevideo allways use the firmware version of message check. */ + if (fw->fw_bin->securevideo || strstr(fw->fw_bin->nonsecure.header->version_string, "-sum")) { + uint32_t sum; + uint32_t *last_total_sum; + + if (host == fw->msg_host) { + last_total_sum = &fw->host_msg_sum; + } else if (host == fw->buf_in_host) { + last_total_sum = &fw->host_input_buf_sum; + } else if (host == fw->buf_out_host) { + last_total_sum = &fw->host_output_buf_sum; + } else { + MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_ERROR, "Invalid mve_comm_area_host."); + ret = -EINVAL; + goto out; + } + + sum = sum32n((volatile uint32_t*)&header, 0, sizeof(header) / 4); + sum += sum32n((volatile uint32_t*)data, 0, (size + 3) / 4); + *last_total_sum += sum; + + /* Write header. */ + wpos = write32n(host->in_data, wpos, (uint32_t *)&header, + sizeof(header)); + /* Write checksum. */ + wpos = write32n(host->in_data, wpos, last_total_sum, sizeof(uint32_t)); + /* Write message. */ + wpos = write32n(host->in_data, wpos, data, size); + + /** + * reserved[] != 0 indicates enable checksum function. + * reserved[2] for the latest sum. + */ + host->reserved[0] = host->reserved[1]; + host->reserved[1] = host->reserved[2]; + host->reserved[2] = *last_total_sum; + } else { + /* Write header. */ + wpos = write32n(host->in_data, wpos, (uint32_t *)&header, + sizeof(header)); + /* Write message. */ + wpos = write32n(host->in_data, wpos, data, size); + } + + /* + * Make sure all message data has been written before the cache is + * flushed. 
+ */ + wmb(); + dma_sync_single_for_device(fw->dev, + virt_to_phys(host), + MVE_PAGE_SIZE, DMA_TO_DEVICE); + + host->in_wpos = wpos; + + /* + * Make sure the write pointer has been written before the cache is + * flushed. + */ + wmb(); + dma_sync_single_for_device(fw->dev, + virt_to_phys(&host->in_wpos), + sizeof(host->in_wpos), DMA_TO_DEVICE); + + /* Log firmware message. */ + MVX_LOG_EXECUTE(&mvx_log_fwif_if, MVX_LOG_INFO, + log_message(fw->session, channel, + MVX_LOG_FWIF_DIRECTION_HOST_TO_FIRMWARE, + &header, data)); + +out: + mutex_unlock(&fw->mutex); + return ret; +} + +static int get_region_v2(enum mvx_fw_region region, + uint32_t *begin, + uint32_t *end) +{ + switch (region) { + case MVX_FW_REGION_CORE_0: + *begin = MVE_MEM_REGION_FW_INSTANCE0_ADDR_BEGIN; + *end = MVE_MEM_REGION_FW_INSTANCE0_ADDR_END; + break; + case MVX_FW_REGION_CORE_1: + *begin = MVE_MEM_REGION_FW_INSTANCE1_ADDR_BEGIN; + *end = MVE_MEM_REGION_FW_INSTANCE1_ADDR_END; + break; + case MVX_FW_REGION_CORE_2: + *begin = MVE_MEM_REGION_FW_INSTANCE2_ADDR_BEGIN; + *end = MVE_MEM_REGION_FW_INSTANCE2_ADDR_END; + break; + case MVX_FW_REGION_CORE_3: + *begin = MVE_MEM_REGION_FW_INSTANCE3_ADDR_BEGIN; + *end = MVE_MEM_REGION_FW_INSTANCE3_ADDR_END; + break; + case MVX_FW_REGION_CORE_4: + *begin = MVE_MEM_REGION_FW_INSTANCE4_ADDR_BEGIN; + *end = MVE_MEM_REGION_FW_INSTANCE4_ADDR_END; + break; + case MVX_FW_REGION_CORE_5: + *begin = MVE_MEM_REGION_FW_INSTANCE5_ADDR_BEGIN; + *end = MVE_MEM_REGION_FW_INSTANCE5_ADDR_END; + break; + case MVX_FW_REGION_CORE_6: + *begin = MVE_MEM_REGION_FW_INSTANCE6_ADDR_BEGIN; + *end = MVE_MEM_REGION_FW_INSTANCE6_ADDR_END; + break; + case MVX_FW_REGION_CORE_7: + *begin = MVE_MEM_REGION_FW_INSTANCE7_ADDR_BEGIN; + *end = MVE_MEM_REGION_FW_INSTANCE7_ADDR_END; + break; + case MVX_FW_REGION_PROTECTED: + *begin = MVE_MEM_REGION_PROTECTED_ADDR_BEGIN; + *end = MVE_MEM_REGION_PROTECTED_ADDR_END; + break; + case MVX_FW_REGION_FRAMEBUF: + *begin = MVE_MEM_REGION_FRAMEBUF_ADDR_BEGIN; + *end 
= MVE_MEM_REGION_FRAMEBUF_ADDR_END; + break; + case MVX_FW_REGION_MSG_HOST: + *begin = MVE_COMM_MSG_INQ_ADDR; + *end = MVE_COMM_MSG_INQ_ADDR + MVE_PAGE_SIZE; + break; + case MVX_FW_REGION_MSG_MVE: + *begin = MVE_COMM_MSG_OUTQ_ADDR; + *end = MVE_COMM_MSG_OUTQ_ADDR + MVE_PAGE_SIZE; + break; + case MVX_FW_REGION_BUF_IN_HOST: + *begin = MVE_COMM_BUF_INQ_ADDR; + *end = MVE_COMM_BUF_INQ_ADDR + MVE_PAGE_SIZE; + break; + case MVX_FW_REGION_BUF_IN_MVE: + *begin = MVE_COMM_BUF_INRQ_ADDR; + *end = MVE_COMM_BUF_INRQ_ADDR + MVE_PAGE_SIZE; + break; + case MVX_FW_REGION_BUF_OUT_HOST: + *begin = MVE_COMM_BUF_OUTQ_ADDR; + *end = MVE_COMM_BUF_OUTQ_ADDR + MVE_PAGE_SIZE; + break; + case MVX_FW_REGION_BUF_OUT_MVE: + *begin = MVE_COMM_BUF_OUTRQ_ADDR; + *end = MVE_COMM_BUF_OUTRQ_ADDR + MVE_PAGE_SIZE; + break; + case MVX_FW_REGION_RPC: + *begin = MVE_COMM_RPC_ADDR; + *end = MVE_COMM_RPC_ADDR + MVE_PAGE_SIZE; + break; + case MVX_FW_REGION_PRINT_RAM: + *begin = MVE_FW_PRINT_RAM_ADDR; + *end = MVE_FW_PRINT_RAM_ADDR + MVE_FW_PRINT_RAM_SIZE; + break; + default: + return -EINVAL; + } + + return 0; +} + +static void convert_buffer_general(struct mvx_fw *fw, + enum mvx_direction dir, + struct mvx_fw_msg *msg, + struct mve_buffer_general *g) { + struct mvx_buffer *buf = (struct mvx_buffer *)g->header.host_handle; + + if (g->header.host_handle == MVX_FW_CODE_EOS) + return; + + WARN_ON(buf->dir != dir); + + msg->code = MVX_FW_CODE_BUFFER_GENERAL; + msg->buf = buf; +} + + +static void convert_buffer_frame(struct mvx_fw *fw, + enum mvx_direction dir, + struct mvx_fw_msg *msg, + struct mve_buffer_frame *f) +{ + struct mvx_buffer *buf = (struct mvx_buffer *)f->host_handle; + + if (f->host_handle == MVX_FW_CODE_EOS) + return; + + WARN_ON(buf->dir != dir); + + msg->code = MVX_FW_CODE_BUFFER; + msg->buf = buf; + + if (dir == MVX_DIR_OUTPUT) { + unsigned int i; + + buf->width = f->visible_frame_width; + buf->height = f->visible_frame_height; + if (buf->width == 0 || buf->height == 0 || + (f->frame_flags & 
(MVE_BUFFER_FRAME_FLAG_TOP_PRESENT | MVE_BUFFER_FRAME_FLAG_BOT_PRESENT)) == 0) + for (i = 0; i < buf->nplanes; i++) + (void)mvx_buffer_filled_set(buf, i, 0, 0); + + if (is_afbc(f->format) != false) { + struct mve_buffer_frame_afbc *afbc = &f->data.afbc; + + buf->crop_left = afbc->cropx; + buf->crop_top = afbc->cropy; + } + + if(f->frame_flags & MVE_BUFFER_FRAME_FLAG_ROTATION_90 || f->frame_flags & MVE_BUFFER_FRAME_FLAG_ROTATION_270) + { + struct mve_buffer_frame_planar *planar = &f->data.planar; + struct mvx_buffer_plane *plane; + for(i=0;inplanes;i++) + { + plane = &buf->planes[i]; + plane->stride = planar->stride[i]; + } + } + } + + buf->user_data = f->user_data_tag; + buf->flags = 0; + if (f->frame_flags & MVE_BUFFER_FRAME_FLAG_EOS) + buf->flags |= MVX_BUFFER_EOS; + + if (f->frame_flags & MVE_BUFFER_FRAME_FLAG_REJECTED) + buf->flags |= MVX_BUFFER_REJECTED; + + if (f->frame_flags & MVE_BUFFER_FRAME_FLAG_CORRUPT) + buf->flags |= MVX_BUFFER_CORRUPT; + + if (f->frame_flags & MVE_BUFFER_FRAME_FLAG_DECODE_ONLY) + buf->flags |= MVX_BUFFER_DECODE_ONLY; + + if (f->frame_flags & (MVE_BUFFER_FRAME_FLAG_TOP_PRESENT | MVE_BUFFER_FRAME_FLAG_BOT_PRESENT)) { + buf->flags |= MVX_BUFFER_FRAME_PRESENT; + } + + if (is_afbc(f->format) != false) { + struct mve_buffer_frame_afbc *afbc = &f->data.afbc; + + if(afbc->afbc_params & MVE_BUFFER_FRAME_AFBC_AV1_DECODER)//for av1 dec use MVE_BUFFER_FRAME_AFBC_AV1_TILE_HEADER to get tile + { + if(afbc->afbc_params & MVE_BUFFER_FRAME_AFBC_AV1_TILE_HEADER) + { + buf->flags |= MVX_BUFFER_AFBC_TILED_HEADERS; + buf->flags |= MVX_BUFFER_AFBC_TILED_BODY; + } + } + else + { + if (afbc->afbc_params & MVE_BUFFER_FRAME_AFBC_TILED_HEADER) + buf->flags |= MVX_BUFFER_AFBC_TILED_HEADERS; + if (afbc->afbc_params & MVE_BUFFER_FRAME_AFBC_TILED_BODY) + buf->flags |= MVX_BUFFER_AFBC_TILED_BODY; + } + + if (afbc->afbc_params & MVE_BUFFER_FRAME_AFBC_32X8_SUPERBLOCK) + buf->flags |= MVX_BUFFER_AFBC_32X8_SUPERBLOCK; + } +} + +static void 
convert_buffer_bitstream(struct mvx_fw *fw, + enum mvx_direction dir, + struct mvx_fw_msg *msg, + struct mve_buffer_bitstream *b) +{ + struct mvx_buffer *buf = (struct mvx_buffer *)b->host_handle; + + if (b->host_handle == MVX_FW_CODE_EOS) + return; + + WARN_ON(buf->dir != dir); + + msg->code = MVX_FW_CODE_BUFFER; + msg->buf = buf; + + if (dir == MVX_DIR_OUTPUT) + mvx_buffer_filled_set(buf, 0, b->bitstream_filled_len, + b->bitstream_offset); + + buf->user_data = b->user_data_tag; + buf->flags = 0; + + if (b->bitstream_flags & MVE_BUFFER_BITSTREAM_FLAG_EOS) + buf->flags |= MVX_BUFFER_EOS; + + if (b->bitstream_flags & MVE_BUFFER_BITSTREAM_FLAG_ENDOFFRAME || + b->bitstream_flags & MVE_BUFFER_BITSTREAM_FLAG_BSEOF) + buf->flags |= MVX_BUFFER_EOF; + + if (b->bitstream_flags & MVE_BUFFER_BITSTREAM_FLAG_CODECCONFIG) + buf->flags |= MVX_BUFFER_CODEC_CONFIG; + + if (b->bitstream_flags & MVE_BUFFER_BITSTREAM_FLAG_ENC_STATS) + buf->flags |= MVX_BUFFER_ENC_STATS; + + if (b->bitstream_flags & MVE_BUFFER_BITSTREAM_FLAG_ENDOFSUBFRAME) + buf->flags |= MVX_BUFFER_END_OF_SUB_FRAME; + + if (b->bitstream_flags & MVE_BUFFER_BITSTREAM_FLAG_SYNCFRAME) + buf->flags |= MVX_BUFFER_SYNCFRAME; + + buf->frame_type = b->frame_type; + buf->src_transform = b->src_transform; + buf->bitstream_remaining_kb = b->bitstream_remaining_kb; +} + +static int convert_buffer_param(struct mvx_fw *fw, + struct mvx_fw_msg *msg, + struct mve_buffer_param *p) +{ + switch (p->type) { + case MVE_BUFFER_PARAM_TYPE_DISPLAY_SIZE: { + struct mve_buffer_param_display_size *size = + &p->data.display_size; + struct mvx_fw_display_size *d = &msg->disp_size; + msg->code = MVX_FW_CODE_DISPLAY_SIZE; + d->display_height = size->display_height; + d->display_width = size->display_width; + break; + } + case MVE_BUFFER_PARAM_TYPE_COLOUR_DESCRIPTION: { + struct mve_buffer_param_colour_description *c = + &p->data.colour_description; + struct mvx_fw_color_desc *d = &msg->color_desc; + + d->flags = 0; + + d->colour_primaries = 
c->colour_primaries; + d->transfer_characteristics = c->transfer_characteristics; + d->matrix_coeff = c->matrix_coeff; + d->range = c->range; + MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_INFO, + "Colour description param. primaries=%u, transfer=%u, matrix=%u, range=%u", + d->colour_primaries, d->transfer_characteristics, d->matrix_coeff, d->range); + + if (c->flags & + MVE_BUFFER_PARAM_COLOUR_FLAG_MASTERING_DISPLAY_DATA_VALID) { + d->flags |= MVX_FW_COLOR_DESC_DISPLAY_VALID; + + d->display.r.x = c->mastering_display_primaries_x[0]; + d->display.r.y = c->mastering_display_primaries_y[0]; + d->display.g.x = c->mastering_display_primaries_x[1]; + d->display.g.y = c->mastering_display_primaries_y[1]; + d->display.b.x = c->mastering_display_primaries_x[2]; + d->display.b.y = c->mastering_display_primaries_y[2]; + d->display.w.x = c->mastering_white_point_x; + d->display.w.y = c->mastering_white_point_y; + + d->display.luminance_min = + c->min_display_mastering_luminance; + d->display.luminance_max = + c->max_display_mastering_luminance; + } + + if (c->flags & + MVE_BUFFER_PARAM_COLOUR_FLAG_CONTENT_LIGHT_DATA_VALID) { + d->flags |= MVX_FW_COLOR_DESC_CONTENT_VALID; + + d->content.luminance_max = c->max_content_light_level; + d->content.luminance_average = + c->avg_content_light_level; + } + + msg->code = MVX_FW_CODE_COLOR_DESC; + break; + } + default: + MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_INFO, + "Default buffer param. 
type=%d", p->type); + break; + } + + return 1; +} + +static int get_buffer(struct mvx_fw *fw, + struct mve_comm_area_host *host, + struct mve_comm_area_mve *mve, + enum mvx_direction dir, + struct mvx_fw_msg *msg, + enum mvx_log_fwif_channel channel) +{ + unsigned int code; + union { + struct mve_buffer_frame frame; + struct mve_buffer_bitstream bitstream; + struct mve_buffer_param param; + struct mve_buffer_general general; + } fw_msg; + size_t size = sizeof(fw_msg); + int ret; + + ret = read_message(fw, host, mve, &code, &fw_msg, &size, channel); + if (ret <= 0) + return ret; + + if (fw->session->error) + return 0; + + switch (code) { + case MVE_BUFFER_CODE_FRAME: + convert_buffer_frame(fw, dir, msg, &fw_msg.frame); + break; + case MVE_BUFFER_CODE_BITSTREAM: + convert_buffer_bitstream(fw, dir, msg, &fw_msg.bitstream); + break; + case MVE_BUFFER_CODE_PARAM: + convert_buffer_param(fw, msg, &fw_msg.param); + break; + case MVE_BUFFER_CODE_GENERAL: + convert_buffer_general(fw, dir, msg, &fw_msg.general); + break; + default: + MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_WARNING, + "Unknown fw buffer code. 
code=%u.", code); + break; + } + + return 1; +} + +static int get_message_v2(struct mvx_fw *fw, + struct mvx_fw_msg *msg) +{ + unsigned int code; + union { + struct mve_request_job job; + struct mve_response_state_change state_change; + struct mve_response_error error; + struct mve_response_frame_alloc_parameters alloc_param; + struct mve_response_sequence_parameters seq_param; + struct mve_response_set_option_fail set_option_fail; + struct mve_buffer_param buffer_param; + struct mve_response_event event; + } fw_msg; + size_t size = sizeof(fw_msg); + int ret; + struct mvx_session *session = fw->session; + + ret = read_message(fw, fw->msg_host, fw->msg_mve, &code, &fw_msg, + &size, MVX_LOG_FWIF_CHANNEL_MESSAGE); + if (ret <= 0) + return ret; + + msg->code = MVX_FW_CODE_MAX; + + switch (code) { + case MVE_RESPONSE_CODE_SWITCHED_IN: + msg->code = MVX_FW_CODE_SWITCH_IN; + break; + case MVE_RESPONSE_CODE_SWITCHED_OUT: + msg->code = MVX_FW_CODE_SWITCH_OUT; + break; + case MVE_RESPONSE_CODE_SET_OPTION_CONFIRM: + msg->code = MVX_FW_CODE_SET_OPTION; + fw->msg_pending--; + break; + case MVE_RESPONSE_CODE_SET_OPTION_FAIL: { + MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_WARNING, + "Firmware set option failed. 
index=%u, msg=%s.", + fw_msg.set_option_fail.index, + fw_msg.set_option_fail.message); + msg->code = MVX_FW_CODE_SET_OPTION; + fw->msg_pending--; + break; + } + case MVE_RESPONSE_CODE_JOB_DEQUEUED: + msg->code = MVX_FW_CODE_JOB; + break; + case MVE_RESPONSE_CODE_INPUT: + ret = get_buffer(fw, fw->buf_in_host, fw->buf_in_mve, + MVX_DIR_INPUT, msg, + MVX_LOG_FWIF_CHANNEL_INPUT_BUFFER); + break; + case MVE_RESPONSE_CODE_OUTPUT: + ret = get_buffer(fw, fw->buf_out_host, fw->buf_out_mve, + MVX_DIR_OUTPUT, msg, + MVX_LOG_FWIF_CHANNEL_OUTPUT_BUFFER); + break; + case MVE_BUFFER_CODE_PARAM: + ret = convert_buffer_param(fw, msg, &fw_msg.buffer_param); + break; + case MVE_RESPONSE_CODE_INPUT_FLUSHED: + msg->code = MVX_FW_CODE_FLUSH; + msg->flush.dir = MVX_DIR_INPUT; + fw->msg_pending--; + break; + case MVE_RESPONSE_CODE_OUTPUT_FLUSHED: + msg->code = MVX_FW_CODE_FLUSH; + msg->flush.dir = MVX_DIR_OUTPUT; + fw->msg_pending--; + break; + case MVE_RESPONSE_CODE_PONG: + msg->code = MVX_FW_CODE_PONG; + break; + case MVE_RESPONSE_CODE_ERROR: { + msg->code = MVX_FW_CODE_ERROR; + + switch (fw_msg.error.error_code) { + case MVE_ERROR_ABORT: + msg->error.error_code = MVX_FW_ERROR_ABORT; + break; + case MVE_ERROR_OUT_OF_MEMORY: + msg->error.error_code = MVX_FW_ERROR_OUT_OF_MEMORY; + break; + case MVE_ERROR_ASSERT: + msg->error.error_code = MVX_FW_ERROR_ASSERT; + break; + case MVE_ERROR_UNSUPPORTED: + msg->error.error_code = MVX_FW_ERROR_UNSUPPORTED; + break; + case MVE_ERROR_INVALID_BUFFER: + msg->error.error_code = MVX_FW_ERROR_INVALID_BUFFER; + break; + case MVE_ERROR_INVALID_STATE: + msg->error.error_code = MVX_FW_ERROR_INVALID_STATE; + break; + case MVE_ERROR_WATCHDOG: + msg->error.error_code = MVX_FW_ERROR_WATCHDOG; + break; + default: + MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_WARNING, + "Unsupported fw error code. 
code=%u.", + fw_msg.error.error_code); + break; + } + + strlcpy(msg->error.message, fw_msg.error.message, + min(sizeof(msg->error.message), + sizeof(fw_msg.error.message))); + + break; + } + case MVE_RESPONSE_CODE_STATE_CHANGE: { + msg->code = MVX_FW_CODE_STATE_CHANGE; + + if (fw_msg.state_change.new_state == MVE_STATE_STOPPED) + msg->state = MVX_FW_STATE_STOPPED; + else + msg->state = MVX_FW_STATE_RUNNING; + + fw->msg_pending--; + break; + } + case MVE_RESPONSE_CODE_DUMP: + msg->code = MVX_FW_CODE_DUMP; + fw->msg_pending--; + break; + case MVE_RESPONSE_CODE_DEBUG: + msg->code = MVX_FW_CODE_DEBUG; + fw->msg_pending--; + break; + case MVE_RESPONSE_CODE_IDLE: + msg->code = MVX_FW_CODE_IDLE; + break; + case MVE_RESPONSE_CODE_FRAME_ALLOC_PARAM: + msg->code = MVX_FW_CODE_ALLOC_PARAM; + msg->alloc_param.width = + fw_msg.alloc_param.planar_alloc_frame_width; + msg->alloc_param.height = + fw_msg.alloc_param.planar_alloc_frame_height; + msg->alloc_param.afbc_alloc_bytes = + fw_msg.alloc_param.afbc_alloc_bytes; + msg->alloc_param.afbc_width = + fw_msg.alloc_param.afbc_width_in_superblocks; + msg->alloc_param.afbc_alloc_bytes_downscaled = + fw_msg.alloc_param.afbc_alloc_bytes_downscaled; + msg->alloc_param.afbc_width_in_superblocks_downscaled = + fw_msg.alloc_param.afbc_width_in_superblocks_downscaled; + msg->alloc_param.cropx = + fw_msg.alloc_param.cropx; + msg->alloc_param.cropy = + fw_msg.alloc_param.cropy; + // For invalid streams parsing width/height, set msg as error + if (msg->alloc_param.width < 144 || msg->alloc_param.height < 144) { + msg->code = MVX_FW_CODE_ERROR; + msg->error.error_code = MVX_FW_ERROR_INVALID_PARAM; + strlcpy(msg->error.message, fw_msg.error.message, + min(sizeof(msg->error.message), + sizeof(fw_msg.error.message))); + } + break; + case MVE_RESPONSE_CODE_SEQUENCE_PARAMETERS: + msg->code = MVX_FW_CODE_SEQ_PARAM; + msg->seq_param.planar.buffers_min = + fw_msg.seq_param.num_buffers_planar; + msg->seq_param.afbc.buffers_min = + 
fw_msg.seq_param.num_buffers_afbc; + msg->seq_param.bitdepth_luma = fw_msg.seq_param.bitdepth_luma; + msg->seq_param.bitdepth_chroma = fw_msg.seq_param.bitdepth_chroma; + msg->seq_param.chroma_format = fw_msg.seq_param.chroma_format; + session->port[MVX_DIR_OUTPUT].interlaced = fw_msg.seq_param.interlace; + break; + case MVE_RESPONSE_CODE_EVENT: + if (fw_msg.event.event_code == MVE_EVENT_ERROR_STREAM_NOT_SUPPORTED) { + /* If critical error event is received, convert it to error message, so the session can be exit */ + msg->code = MVX_FW_CODE_ERROR; + msg->error.error_code = MVX_FW_ERROR_UNSUPPORTED; + strlcpy(msg->error.message, fw_msg.event.event_data.message, + min(sizeof(msg->error.message), + sizeof(fw_msg.error.message))); + } else { + MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_INFO, + "EVENT code=%d. %s", + fw_msg.event.event_code, + (MVE_EVENT_ERROR_STREAM_CORRUPT == fw_msg.event.event_code) ? fw_msg.event.event_data.message : ""); + } + if (fw_msg.event.event_code == MVE_EVENT_PROCESSED && (mvx_log_perf.enabled & MVX_LOG_PERF_UTILIZATION)) + log_time(session, &fw_msg.event.event_data.event_processed); + break; + case MVE_RESPONSE_CODE_REF_FRAME_UNUSED: + case MVE_RESPONSE_CODE_TRACE: + break; + default: + MVX_LOG_PRINT(&mvx_log_if, code == 0 ? MVX_WAR_LOG_LEVEL : MVX_LOG_WARNING, + "Unknown fw message code. 
code=%u, size=%u, rpos = %d, wpos = %d.", + code, size, + ((struct mve_comm_area_host *)(fw->msg_host))->out_rpos, + ((struct mve_comm_area_mve *)(fw->msg_mve))->out_wpos); + msg->code = MVX_FW_CODE_UNKNOWN; + ret = EAGAIN; + break; + } + + return ret; +} + +static int put_buffer_general(struct mvx_fw *fw, + struct mve_comm_area_host *host, + struct mve_comm_area_mve *mve, + struct mvx_fw_msg *msg, + enum mvx_log_fwif_channel channel) +{ + int ret; + struct mve_buffer_general g = { 0 }; + struct mvx_buffer *buf = msg->buf; + g.header.host_handle = (ptrdiff_t)buf; + g.header.user_data_tag = buf->user_data; + g.header.buffer_ptr = mvx_buffer_va(buf, 0); + g.header.buffer_size = buf->general.header.buffer_size; + g.header.config_size = buf->general.header.config_size; + g.header.type = buf->general.header.type; + + memcpy(&g.config, &buf->general.config, sizeof(g.config)); + ret = write_message(fw, host, mve, MVE_BUFFER_CODE_GENERAL, &g, + sizeof(g), channel); + + return ret; +} + +static int put_buffer_frame(struct mvx_fw *fw, + struct mve_comm_area_host *host, + struct mve_comm_area_mve *mve, + struct mvx_fw_msg *msg, + enum mvx_log_fwif_channel channel) +{ + struct mve_buffer_frame f = { 0 }; + struct mvx_buffer *buf = msg->buf; + struct mvx_session *session = fw->session; + int ret; + int stride_shift = 0, stride = 0; + unsigned int strideRot[MVX_BUFFER_NPLANES]; + unsigned int frame_max_width; + unsigned int frame_max_height; + int max_height; + uint32_t scaling_shift = 0; + uint32_t rotation = (buf->flags & MVX_BUFFER_FRAME_FLAG_ROTATION_MASK) >> 12; + scaling_shift = (buf->flags & MVX_BUFFER_FRAME_FLAG_SCALING_MASK) >> 14; + f.host_handle = (ptrdiff_t)buf; + f.user_data_tag = buf->user_data; + + if (buf->dir == MVX_DIR_INPUT) { + f.visible_frame_width = (session->visible_width > 0) && (session->visible_width <= buf->width) ? 
+ session->visible_width : buf->width; + f.visible_frame_height = (session->visible_height > 0) && (session->visible_height <= buf->height) ? + session->visible_height : buf->height; + + if (buf->flags & MVX_BUFFER_EOS) + f.frame_flags |= MVE_BUFFER_FRAME_FLAG_EOS; + + if (buf->planes[0].filled != 0) + f.frame_flags |= MVE_BUFFER_FRAME_FLAG_TOP_PRESENT; + + if (buf->flags & MVX_BUFFER_FRAME_FLAG_FORCE_IDR) + f.frame_flags |= MVE_BUFFER_FRAME_FLAG_FORCE_IDR; + if (buf->flags & MVX_BUFFER_FRAME_FLAG_RESET_RC) + f.frame_flags |= MVE_BUFFER_FRAME_FLAG_RESET_RC; + } + + if (buf->dir == MVX_DIR_OUTPUT && (session->dsl_ratio.hor != 1 || session->dsl_ratio.ver != 1)) { + f.frame_flags |= ((session->dsl_ratio.hor - 1) << 24 | (session->dsl_ratio.ver - 1) << 17); + } + if (buf->flags & MVX_BUFFER_INTERLACE) + f.frame_flags |= MVE_BUFFER_FRAME_FLAG_INTERLACE; + + f.frame_flags |= (buf->flags & MVX_BUFFER_FRAME_FLAG_ROTATION_MASK) >> 8; + f.frame_flags |= (buf->flags & MVX_BUFFER_FRAME_FLAG_MIRROR_MASK) >> 8; + f.frame_flags |= (buf->flags & MVX_BUFFER_FRAME_FLAG_SCALING_MASK) >> 8; + f.frame_flags |= (buf->flags & MVX_BUFFER_FRAME_FLAG_GOP_REST) ? MVE_BUFFER_FRAME_FLAG_RESET_GOP : 0; + f.frame_flags |= (buf->flags & MVX_BUFFER_FRAME_FLAG_LTR_REST) ? 
MVE_BUFFER_FRAME_FLAG_RESET_LTR_PEROID : 0; + if (buf->dir == MVX_DIR_OUTPUT && (rotation == 1 || rotation == 3)) { + uint8_t nplanes = 0; + unsigned int stride90[MVX_BUFFER_NPLANES][2]; + int i; + get_stride90(buf->format, &nplanes, stride90); + for (i = 0; i < buf->nplanes; i++) { + const unsigned int stride_align = 1; + unsigned int tmp = DIV_ROUND_UP(buf->height * stride90[i][0], 2); + strideRot[i] = round_up(tmp, stride_align); + } + } + + switch (buf->format) { + case MVX_FORMAT_YUV420_AFBC_8: + case MVX_FORMAT_Y_AFBC_8: + f.format = MVE_FORMAT_YUV420_AFBC_8; + break; + case MVX_FORMAT_YUV420_AFBC_10: + case MVX_FORMAT_Y_AFBC_10: + f.format = MVE_FORMAT_YUV420_AFBC_10; + break; + case MVX_FORMAT_YUV422_AFBC_8: + f.format = MVE_FORMAT_YUV422_AFBC_8; + break; + case MVX_FORMAT_YUV422_AFBC_10: + f.format = MVE_FORMAT_YUV422_AFBC_10; + break; + case MVX_FORMAT_YUV420_I420: + f.format = MVE_FORMAT_YUV420_I420; + break; + case MVX_FORMAT_YUV420_NV12: + f.format = MVE_FORMAT_YUV420_NV12; + break; + case MVX_FORMAT_YUV420_NV21: + f.format = MVE_FORMAT_YUV420_NV21; + break; + case MVX_FORMAT_YUV420_P010: + f.format = MVE_FORMAT_YUV420_P010; + break; + case MVX_FORMAT_YUV420_Y0L2: + f.format = MVE_FORMAT_YUV420_Y0L2; + break; + case MVX_FORMAT_YUV420_AQB1: + f.format = MVE_FORMAT_YUV420_AQB1; + break; + case MVX_FORMAT_YUV422_YUY2: + f.format = MVE_FORMAT_YUV422_YUY2; + break; + case MVX_FORMAT_YUV422_UYVY: + f.format = MVE_FORMAT_YUV422_UYVY; + break; + case MVX_FORMAT_YUV422_Y210: + f.format = MVE_FORMAT_YUV422_Y210; + break; + case MVX_FORMAT_RGBA_8888: + f.format = MVE_FORMAT_RGBA_8888; + break; + case MVX_FORMAT_BGRA_8888: + f.format = MVE_FORMAT_BGRA_8888; + break; + case MVX_FORMAT_ARGB_8888: + f.format = MVE_FORMAT_ARGB_8888; + break; + case MVX_FORMAT_ABGR_8888: + f.format = MVE_FORMAT_ABGR_8888; + break; + case MVX_FORMAT_RGB_888: + f.format = MVE_FORMAT_RGB_888; + break; + case MVX_FORMAT_BGR_888: + f.format = MVE_FORMAT_BGR_888; + break; + case 
MVX_FORMAT_RGB_888_3P: + f.format = MVE_FORMAT_RGB_3P; + break; + case MVX_FORMAT_ARGB_1555: + f.format = MVE_FORMAT_ARGB_1555; + break; + case MVX_FORMAT_ARGB_4444: + f.format = MVE_FORMAT_ARGB_4444; + break; + case MVX_FORMAT_RGB_565: + f.format = MVE_FORMAT_RGB_565; + break; + case MVX_FORMAT_Y: + f.format = MVE_FORMAT_Y; + break; + case MVX_FORMAT_Y_10: + f.format = MVE_FORMAT_Y_10; + break; + case MVX_FORMAT_YUV444: + f.format = MVE_FORMAT_YUV444; + break; + case MVX_FORMAT_YUV444_10: + f.format = MVE_FORMAT_YUV444_10; + break; + case MVX_FORMAT_YUV420_2P_10: + f.format = MVE_FORMAT_YUV420_2P_10; + break; + case MVX_FORMAT_YUV422_1P_10: + f.format = MVE_FORMAT_YUV422_1P_10; + break; + case MVX_FORMAT_YUV420_I420_10: + f.format = MVE_FORMAT_YUV420_I420_10; + break; + default: + MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_WARNING, + "Unsupported frame format. format=%u.", + buf->format); + return -EINVAL; + } + + if (is_afbc(f.format) == false) { + struct mve_buffer_frame_planar *planar = &f.data.planar; + int i; + + if (f.frame_flags & MVE_BUFFER_FRAME_FLAG_INTERLACE){ + max_height = buf->width; + stride_shift = 1; + max_height >>= 1; + } +#if 0 + for (i = 0; i < buf->nplanes; i++) { + struct mvx_buffer_plane *plane = &buf->planes[i]; + + if (plane->stride > 0) + planar->plane_top[i] = mvx_buffer_va(buf, i); + + planar->stride[i] = plane->stride; + planar->plane_bot[i] = 0; + + if (f.frame_flags & MVE_BUFFER_FRAME_FLAG_INTERLACE) + + planar->plane_bot[i] = planar->plane_top[i] + + DIV_ROUND_UP( + plane->filled, 2); + } +#else + for (i = 0; i < buf->nplanes; i++) { + struct mvx_buffer_plane *plane = &buf->planes[i]; + + if (plane->stride > 0) { + planar->plane_top[i] = mvx_buffer_va(buf, i); + } + + if (f.frame_flags & MVE_BUFFER_FRAME_FLAG_INTERLACE) { + // interlace mode + stride = plane->stride; + //stride_shift = 1; + if (stride_shift) { + stride = round_up(stride, 2) << stride_shift; + } + planar->stride[i] = stride; + planar->plane_bot[i] = planar->plane_top[i] + + 
(round_up(stride, 2) >> stride_shift); + if (buf->dir == MVX_DIR_OUTPUT && (rotation == 1 || rotation == 3)) { + planar->stride[i] = strideRot[i]; + } + } else { + // frame mode + if (buf->dir == MVX_DIR_OUTPUT && (rotation == 1 || rotation == 3)){ + planar->stride[i] = strideRot[i]; + } else { + planar->stride[i] = plane->stride; + } + planar->plane_bot[i] = 0; + + } + } + +#endif + mvx_buffer_max_resolution(buf, &frame_max_width, &frame_max_height); + if (buf->dir == MVX_DIR_OUTPUT && (rotation == 1 || rotation == 3)) { + planar->max_frame_width = (uint16_t)frame_max_height; + planar->max_frame_height = (uint16_t)frame_max_width; + } else { + planar->max_frame_width = (uint16_t)frame_max_width; + planar->max_frame_height = (uint16_t)frame_max_height; + } + f.mini_frame_y_start = (uint16_t)(buf->offset >> 16); + f.mini_frame_y_end = (uint16_t)(buf->offset & 0xFFFF); + } else { + struct mve_buffer_frame_afbc *afbc = &f.data.afbc; + + afbc->afbc_width_in_superblocks[0] = buf->planes[0].afbc_width; + afbc->plane[0] = mvx_buffer_va(buf, 0); + + if (f.frame_flags & MVE_BUFFER_FRAME_FLAG_INTERLACE) { + afbc->alloc_bytes[0] = + ALIGN((buf->planes[0].filled / 2), 32); + afbc->alloc_bytes[1] = + buf->planes[0].filled - afbc->alloc_bytes[0]; + afbc->plane[1] = + afbc->plane[0] + afbc->alloc_bytes[0]; + afbc->afbc_width_in_superblocks[1] = + afbc->afbc_width_in_superblocks[0]; + } else { + if (buf->nplanes > 1 && buf->planes[1].filled > 0) + { + // dual afbc dsl case: swap plane 0 and plane 1 for VPU input + #ifndef MVE_BUFFER_FRAME_FLAG_DUAL_AFBC_DOWNSCALED + #define MVE_BUFFER_FRAME_FLAG_DUAL_AFBC_DOWNSCALED (1 << 6) + #endif + afbc->afbc_width_in_superblocks[0] = buf->planes[1].afbc_width; + afbc->plane[0] = mvx_buffer_va(buf, 1); + afbc->alloc_bytes[0] = buf->planes[1].filled; + afbc->afbc_width_in_superblocks[1] = buf->planes[0].afbc_width; + afbc->plane[1] = mvx_buffer_va(buf, 0); + afbc->alloc_bytes[1] = buf->planes[0].filled; + f.frame_flags &= 
~MVE_BUFFER_FRAME_FLAG_SCALING_MASK; + f.frame_flags |= MVE_BUFFER_FRAME_FLAG_DUAL_AFBC_DOWNSCALED; //0 disable will not generate dual afbc dsl data,(1 << 6) enable scale half + } else { + afbc->alloc_bytes[0] = buf->planes[0].filled; + } + } + + afbc->afbc_params = 0; + //on av1->afbc case, let mve handle afbc stride + if (session->port[MVX_DIR_INPUT].format == MVX_FORMAT_AV1 && + mvx_is_afbc(session->port[MVX_DIR_OUTPUT].format) != false) { + afbc->afbc_params |= MVE_BUFFER_FRAME_AFBC_STRIDE_SET_BY_MVE; + afbc->afbc_params |= MVE_BUFFER_FRAME_AFBC_TILED_HEADER; + afbc->afbc_params |= MVE_BUFFER_FRAME_AFBC_TILED_BODY; + } + if (buf->flags & MVX_BUFFER_AFBC_TILED_HEADERS) + afbc->afbc_params |= MVE_BUFFER_FRAME_AFBC_TILED_HEADER; + + if (buf->flags & MVX_BUFFER_AFBC_TILED_BODY) + afbc->afbc_params |= MVE_BUFFER_FRAME_AFBC_TILED_BODY; + + if (buf->flags & MVX_BUFFER_AFBC_32X8_SUPERBLOCK) + afbc->afbc_params |= + MVE_BUFFER_FRAME_AFBC_32X8_SUPERBLOCK; + } + + if (buf->flags & MVX_BUFFER_FRAME_FLAG_OSD_1) { + f.visible_frame_width = f.data.planar.max_frame_width = session->osd_info.width_osd[0]; + f.visible_frame_height = f.data.planar.max_frame_height = session->osd_info.height_osd[0]; + f.data.planar.stride[0] = session->osd_info.width_osd[0] * 2; + f.data.planar.stride[1] = f.data.planar.stride[2] = 0; + f.data.planar.plane_top[1] = f.data.planar.plane_top[2] = 0; + f.user_data_tag = OSD0_TAG; + switch (session->osd_info.inputFormat_osd[0]){ + case MVX_FORMAT_ARGB_1555: + f.format = MVE_FORMAT_ARGB_1555; + break; + case MVX_FORMAT_ARGB_4444: + f.format = MVE_FORMAT_ARGB_1555; + break; + case MVX_FORMAT_RGB_565: + f.format = MVE_FORMAT_RGB_565; + break; + default: + f.format = MVE_FORMAT_ARGB_1555; + break; + } + } else if (buf->flags & MVX_BUFFER_FRAME_FLAG_OSD_2) { + f.visible_frame_width = f.data.planar.max_frame_width = session->osd_info.width_osd[1]; + f.visible_frame_height = f.data.planar.max_frame_height = session->osd_info.height_osd[1]; + 
f.data.planar.stride[0] = session->osd_info.width_osd[1] * 2; + f.data.planar.stride[1] = f.data.planar.stride[2] = 0; + f.data.planar.plane_top[1] = f.data.planar.plane_top[2] = 0; + f.user_data_tag = OSD1_TAG; + switch (session->osd_info.inputFormat_osd[1]){ + case MVX_FORMAT_ARGB_1555: + f.format = MVE_FORMAT_ARGB_1555; + break; + case MVX_FORMAT_ARGB_4444: + f.format = MVE_FORMAT_ARGB_1555; + break; + case MVX_FORMAT_RGB_565: + f.format = MVE_FORMAT_RGB_565; + break; + default: + f.format = MVE_FORMAT_ARGB_1555; + break; + } + } + + ret = write_message(fw, host, mve, MVE_BUFFER_CODE_FRAME, + &f, sizeof(f), channel); + + return ret; +} + +static int put_buffer_bitstream(struct mvx_fw *fw, + struct mve_comm_area_host *host, + struct mve_comm_area_mve *mve, + struct mvx_fw_msg *msg, + enum mvx_log_fwif_channel channel) +{ + struct mve_buffer_bitstream b = { 0 }; + struct mvx_buffer *buf = msg->buf; + int ret; + + if (buf->dir == MVX_DIR_INPUT) + b.bitstream_filled_len = buf->planes[0].filled; + + b.host_handle = (ptrdiff_t)buf; + b.user_data_tag = buf->user_data; + b.bitstream_alloc_bytes = mvx_buffer_size(buf, 0); + b.bitstream_buf_addr = mvx_buffer_va(buf, 0); + + if (buf->flags & MVX_BUFFER_EOS) + b.bitstream_flags |= MVE_BUFFER_BITSTREAM_FLAG_EOS; + + if (buf->flags & MVX_BUFFER_EOF){ + b.bitstream_flags |= MVE_BUFFER_BITSTREAM_FLAG_ENDOFFRAME; + b.bitstream_flags |= MVE_BUFFER_BITSTREAM_FLAG_BSEOF; + } + if (buf->flags & MVX_BUFFER_END_OF_SUB_FRAME) { + b.bitstream_flags |= MVE_BUFFER_BITSTREAM_FLAG_ENDOFSUBFRAME; + } + if (buf->flags & MVX_BUFFER_CODEC_CONFIG) { + b.bitstream_flags |= MVE_BUFFER_BITSTREAM_FLAG_CODECCONFIG; + b.bitstream_flags |= MVE_BUFFER_BITSTREAM_FLAG_ENDOFSUBFRAME; + } + + ret = write_message(fw, host, mve, MVE_BUFFER_CODE_BITSTREAM, &b, + sizeof(b), channel); + + return ret; +} + +static int to_mve_nalu_format(enum mvx_nalu_format fmt, + int *mve_val) +{ + switch (fmt) { + case MVX_NALU_FORMAT_START_CODES: + *mve_val = 
MVE_OPT_NALU_FORMAT_START_CODES; + break; + case MVX_NALU_FORMAT_ONE_NALU_PER_BUFFER: + *mve_val = MVE_OPT_NALU_FORMAT_ONE_NALU_PER_BUFFER; + break; + case MVX_NALU_FORMAT_ONE_BYTE_LENGTH_FIELD: + *mve_val = MVE_OPT_NALU_FORMAT_ONE_BYTE_LENGTH_FIELD; + break; + case MVX_NALU_FORMAT_TWO_BYTE_LENGTH_FIELD: + *mve_val = MVE_OPT_NALU_FORMAT_TWO_BYTE_LENGTH_FIELD; + break; + case MVX_NALU_FORMAT_FOUR_BYTE_LENGTH_FIELD: + *mve_val = MVE_OPT_NALU_FORMAT_FOUR_BYTE_LENGTH_FIELD; + break; + default: + return -EINVAL; + } + + return 0; +} + +int mvx_fw_to_mve_profile_v2(unsigned int mvx_profile, + uint16_t *mve_profile) +{ + switch (mvx_profile) { + case MVX_PROFILE_H264_BASELINE: + *mve_profile = MVE_OPT_PROFILE_H264_BASELINE; + break; + case MVX_PROFILE_H264_MAIN: + *mve_profile = MVE_OPT_PROFILE_H264_MAIN; + break; + case MVX_PROFILE_H264_HIGH: + *mve_profile = MVE_OPT_PROFILE_H264_HIGH; + break; + case MVX_PROFILE_H265_MAIN: + *mve_profile = MVE_OPT_PROFILE_H265_MAIN; + break; + case MVX_PROFILE_H265_MAIN_STILL: + *mve_profile = MVE_OPT_PROFILE_H265_MAIN_STILL; + break; + case MVX_PROFILE_H265_MAIN_INTRA: + *mve_profile = MVE_OPT_PROFILE_H265_MAIN_INTRA; + break; + case MVX_PROFILE_H265_MAIN_10: + *mve_profile = MVE_OPT_PROFILE_H265_MAIN_10; + break; + case MVX_PROFILE_VC1_SIMPLE: + *mve_profile = MVE_OPT_PROFILE_VC1_SIMPLE; + break; + case MVX_PROFILE_VC1_MAIN: + *mve_profile = MVE_OPT_PROFILE_VC1_MAIN; + break; + case MVX_PROFILE_VC1_ADVANCED: + *mve_profile = MVE_OPT_PROFILE_VC1_ADVANCED; + break; + case MVX_PROFILE_VP8_MAIN: + *mve_profile = MVE_OPT_PROFILE_VP8_MAIN; + break; + default: + return -EINVAL; + } + + return 0; +} + +int mvx_fw_to_mve_level_v2(unsigned int mvx_level, + unsigned int mvx_tier, + uint16_t *mve_level) +{ + switch (mvx_level) { + case MVX_LEVEL_NONE: + *mve_level = 0; + break; + case MVX_LEVEL_H264_1: + *mve_level = MVE_OPT_LEVEL_H264_1; + break; + case MVX_LEVEL_H264_1b: + *mve_level = MVE_OPT_LEVEL_H264_1b; + break; + case MVX_LEVEL_H264_11: + 
*mve_level = MVE_OPT_LEVEL_H264_11; + break; + case MVX_LEVEL_H264_12: + *mve_level = MVE_OPT_LEVEL_H264_12; + break; + case MVX_LEVEL_H264_13: + *mve_level = MVE_OPT_LEVEL_H264_13; + break; + case MVX_LEVEL_H264_2: + *mve_level = MVE_OPT_LEVEL_H264_2; + break; + case MVX_LEVEL_H264_21: + *mve_level = MVE_OPT_LEVEL_H264_21; + break; + case MVX_LEVEL_H264_22: + *mve_level = MVE_OPT_LEVEL_H264_22; + break; + case MVX_LEVEL_H264_3: + *mve_level = MVE_OPT_LEVEL_H264_3; + break; + case MVX_LEVEL_H264_31: + *mve_level = MVE_OPT_LEVEL_H264_31; + break; + case MVX_LEVEL_H264_32: + *mve_level = MVE_OPT_LEVEL_H264_32; + break; + case MVX_LEVEL_H264_4: + *mve_level = MVE_OPT_LEVEL_H264_4; + break; + case MVX_LEVEL_H264_41: + *mve_level = MVE_OPT_LEVEL_H264_41; + break; + case MVX_LEVEL_H264_42: + *mve_level = MVE_OPT_LEVEL_H264_42; + break; + case MVX_LEVEL_H264_5: + *mve_level = MVE_OPT_LEVEL_H264_5; + break; + case MVX_LEVEL_H264_51: + *mve_level = MVE_OPT_LEVEL_H264_51; + break; + case MVX_LEVEL_H264_52: + *mve_level = MVE_OPT_LEVEL_H264_52; + break; + case MVX_LEVEL_H264_6: + *mve_level = MVE_OPT_LEVEL_H264_6; + break; + case MVX_LEVEL_H264_61: + *mve_level = MVE_OPT_LEVEL_H264_61; + break; + default: + if (mvx_tier == MVX_TIER_HIGH) { + switch (mvx_level) { + case MVX_LEVEL_H265_1: + *mve_level = MVE_OPT_LEVEL_H265_HIGH_TIER_1; + break; + case MVX_LEVEL_H265_2: + *mve_level = MVE_OPT_LEVEL_H265_HIGH_TIER_2; + break; + case MVX_LEVEL_H265_21: + *mve_level = MVE_OPT_LEVEL_H265_HIGH_TIER_21; + break; + case MVX_LEVEL_H265_3: + *mve_level = MVE_OPT_LEVEL_H265_HIGH_TIER_3; + break; + case MVX_LEVEL_H265_31: + *mve_level = MVE_OPT_LEVEL_H265_HIGH_TIER_31; + break; + case MVX_LEVEL_H265_4: + *mve_level = MVE_OPT_LEVEL_H265_HIGH_TIER_4; + break; + case MVX_LEVEL_H265_41: + *mve_level = MVE_OPT_LEVEL_H265_HIGH_TIER_41; + break; + case MVX_LEVEL_H265_5: + *mve_level = MVE_OPT_LEVEL_H265_HIGH_TIER_5; + break; + case MVX_LEVEL_H265_51: + *mve_level = MVE_OPT_LEVEL_H265_HIGH_TIER_51; 
+ break; + case MVX_LEVEL_H265_52: + *mve_level = MVE_OPT_LEVEL_H265_HIGH_TIER_52; + break; + case MVX_LEVEL_H265_6: + *mve_level = MVE_OPT_LEVEL_H265_HIGH_TIER_6; + break; + case MVX_LEVEL_H265_61: + *mve_level = MVE_OPT_LEVEL_H265_HIGH_TIER_61; + break; + case MVX_LEVEL_H265_62: + *mve_level = MVE_OPT_LEVEL_H265_HIGH_TIER_62; + break; + default: + return -EINVAL; + } + + } else { + switch (mvx_level) { + case MVX_LEVEL_H265_1: + *mve_level = MVE_OPT_LEVEL_H265_MAIN_TIER_1; + break; + case MVX_LEVEL_H265_2: + *mve_level = MVE_OPT_LEVEL_H265_MAIN_TIER_2; + break; + case MVX_LEVEL_H265_21: + *mve_level = MVE_OPT_LEVEL_H265_MAIN_TIER_21; + break; + case MVX_LEVEL_H265_3: + *mve_level = MVE_OPT_LEVEL_H265_MAIN_TIER_3; + break; + case MVX_LEVEL_H265_31: + *mve_level = MVE_OPT_LEVEL_H265_MAIN_TIER_31; + break; + case MVX_LEVEL_H265_4: + *mve_level = MVE_OPT_LEVEL_H265_MAIN_TIER_4; + break; + case MVX_LEVEL_H265_41: + *mve_level = MVE_OPT_LEVEL_H265_MAIN_TIER_41; + break; + case MVX_LEVEL_H265_5: + *mve_level = MVE_OPT_LEVEL_H265_MAIN_TIER_5; + break; + case MVX_LEVEL_H265_51: + *mve_level = MVE_OPT_LEVEL_H265_MAIN_TIER_51; + break; + case MVX_LEVEL_H265_52: + *mve_level = MVE_OPT_LEVEL_H265_MAIN_TIER_52; + break; + case MVX_LEVEL_H265_6: + *mve_level = MVE_OPT_LEVEL_H265_MAIN_TIER_6; + break; + case MVX_LEVEL_H265_61: + *mve_level = MVE_OPT_LEVEL_H265_MAIN_TIER_61; + break; + case MVX_LEVEL_H265_62: + *mve_level = MVE_OPT_LEVEL_H265_MAIN_TIER_62; + break; + default: + return -EINVAL; + } + } + } + + return 0; +} + +static int to_mve_gop_type(enum mvx_gop_type gop, + unsigned int *mve_arg) +{ + switch (gop) { + case MVX_GOP_TYPE_BIDIRECTIONAL: + *mve_arg = MVE_OPT_GOP_TYPE_BIDIRECTIONAL; + break; + case MVX_GOP_TYPE_LOW_DELAY: + *mve_arg = MVE_OPT_GOP_TYPE_LOW_DELAY; + break; + case MVX_GOP_TYPE_PYRAMID: + *mve_arg = MVE_OPT_GOP_TYPE_PYRAMID; + break; + case MVX_GOP_TYPE_SVCT3: + *mve_arg = MVE_OPT_GOP_TYPE_SVCT3; + break; + case MVX_GOP_TYPE_GDR: + *mve_arg = 
MVE_OPT_GOP_TYPE_GDR; + break; + default: + return -EINVAL; + } + + return 0; +} + +static int to_mve_h264_cabac(enum mvx_entropy_mode entropy_mode, + unsigned int *mve_arg) +{ + switch (entropy_mode) { + case MVX_ENTROPY_MODE_CABAC: + *mve_arg = 1; + break; + case MVX_ENTROPY_MODE_CAVLC: + *mve_arg = 0; + break; + default: + return -EINVAL; + } + + return 0; +} + +static int to_mve_vp9_prob_update(enum mvx_vp9_prob_update prob_update, + unsigned int *mve_arg) +{ + switch (prob_update) { + case MVX_VP9_PROB_UPDATE_DISABLED: + *mve_arg = MVE_OPT_VP9_PROB_UPDATE_DISABLED; + break; + case MVX_VP9_PROB_UPDATE_IMPLICIT: + *mve_arg = MVE_OPT_VP9_PROB_UPDATE_IMPLICIT; + break; + case MVX_VP9_PROB_UPDATE_EXPLICIT: + *mve_arg = MVE_OPT_VP9_PROB_UPDATE_EXPLICIT; + break; + default: + return -EINVAL; + } + + return 0; +} + +static int put_fw_opt(struct mvx_fw *fw, + struct mve_request_set_option *opt, + size_t size) +{ + int ret; + + ret = write_message(fw, fw->msg_host, fw->msg_mve, + MVE_REQUEST_CODE_SET_OPTION, + opt, offsetof(typeof(*opt), data) + size, + MVX_LOG_FWIF_CHANNEL_MESSAGE); + + if (ret == 0) + fw->msg_pending++; + + return ret; +} + +static int put_fw_buf_param(struct mvx_fw *fw, + struct mve_buffer_param *param, + size_t size) +{ + return write_message(fw, fw->buf_in_host, fw->buf_in_mve, + MVE_BUFFER_CODE_PARAM, + param, offsetof(typeof(*param), data) + size, + MVX_LOG_FWIF_CHANNEL_MESSAGE); +} + +static int put_message_v2(struct mvx_fw *fw, + struct mvx_fw_msg *msg) +{ + int ret = 0; + + switch (msg->code) { + case MVX_FW_CODE_STATE_CHANGE: { + unsigned int code = msg->state == MVX_FW_STATE_STOPPED ? 
+ MVE_REQUEST_CODE_STOP : + MVE_REQUEST_CODE_GO; + + ret = write_message(fw, fw->msg_host, fw->msg_mve, + code, NULL, 0, + MVX_LOG_FWIF_CHANNEL_MESSAGE); + if (ret == 0) + fw->msg_pending++; + + break; + } + case MVX_FW_CODE_JOB: { + struct mve_request_job job; + + job.cores = msg->job.cores; + job.frames = msg->job.frames; + job.flags = 0; + + ret = write_message(fw, fw->msg_host, fw->msg_mve, + MVE_REQUEST_CODE_JOB, &job, sizeof(job), + MVX_LOG_FWIF_CHANNEL_MESSAGE); + break; + } + case MVX_FW_CODE_SWITCH_OUT: { + ret = write_message(fw, fw->msg_host, fw->msg_mve, + MVE_REQUEST_CODE_SWITCH, NULL, 0, + MVX_LOG_FWIF_CHANNEL_MESSAGE); + break; + } + case MVX_FW_CODE_PING: { + ret = write_message(fw, fw->msg_host, fw->msg_mve, + MVE_REQUEST_CODE_PING, NULL, 0, + MVX_LOG_FWIF_CHANNEL_MESSAGE); + break; + } + case MVX_FW_CODE_SET_OPTION: { + switch (msg->set_option.code) { + case MVX_FW_SET_FRAME_RATE: { + struct mve_buffer_param param; + + param.type = MVE_BUFFER_PARAM_TYPE_FRAME_RATE; + param.data.arg = msg->set_option.frame_rate; + ret = put_fw_buf_param(fw, &param, + sizeof(param.data.arg)); + break; + } + case MVX_FW_SET_TARGET_BITRATE: { + struct mve_buffer_param param; + + param.type = MVE_BUFFER_PARAM_TYPE_RATE_CONTROL; + if (msg->set_option.target_bitrate == 0) { + param.data.rate_control.rate_control_mode = + MVE_OPT_RATE_CONTROL_MODE_OFF; + param.data.rate_control.target_bitrate = 0; + } else { + param.data.rate_control.rate_control_mode = + MVE_OPT_RATE_CONTROL_MODE_STANDARD; + param.data.rate_control.target_bitrate = + msg->set_option.target_bitrate; + } + + ret = put_fw_buf_param(fw, &param, + sizeof(param.data.rate_control)); + break; + } + case MVX_FW_SET_RATE_CONTROL_JPEG: { + struct mve_request_set_option opt; + + opt.index = MVE_SET_OPT_INDEX_ENC_JPEG_RC; + + opt.data.jpeg_rate_control.fps = msg->set_option.jpeg_rc.fps; + opt.data.jpeg_rate_control.qscale = msg->set_option.jpeg_rc.qscale; + opt.data.jpeg_rate_control.qscale_luma = 
msg->set_option.jpeg_rc.qscale_luma; + opt.data.jpeg_rate_control.qscale_chroma = msg->set_option.jpeg_rc.qscale_chroma; + ret = put_fw_opt(fw, &opt, sizeof(opt.data.jpeg_rate_control)); + break; + } + case MVX_FW_SET_RATE_CONTROL: { + struct mve_buffer_param param; + + param.type = MVE_BUFFER_PARAM_TYPE_RATE_CONTROL; + if (msg->set_option.rate_control.target_bitrate == 0) { + param.data.rate_control.rate_control_mode = + MVE_OPT_RATE_CONTROL_MODE_OFF; + param.data.rate_control.target_bitrate = 0; + } else { + param.data.rate_control.rate_control_mode = + msg->set_option.rate_control.rate_control_mode; + param.data.rate_control.target_bitrate = + msg->set_option.rate_control.target_bitrate; + if (msg->set_option.rate_control.rate_control_mode == MVX_OPT_RATE_CONTROL_MODE_C_VARIABLE) { + param.data.rate_control.maximum_bitrate = + msg->set_option.rate_control.maximum_bitrate; + } + } + + ret = put_fw_buf_param(fw, &param, + sizeof(param.data.rate_control)); + break; + + } + case MVX_FW_SET_CROP_LEFT: { + struct mve_request_set_option opt; + + opt.index = MVE_SET_OPT_INDEX_ENC_CROP_RARAM_LEFT; + opt.data.arg = msg->set_option.crop_left; + + ret = put_fw_opt(fw, &opt, sizeof(opt.data.arg)); + break; + } + case MVX_FW_SET_CROP_RIGHT: { + struct mve_request_set_option opt; + + opt.index = MVE_SET_OPT_INDEX_ENC_CROP_RARAM_RIGHT; + opt.data.arg = msg->set_option.crop_right; + + ret = put_fw_opt(fw, &opt, sizeof(opt.data.arg)); + break; + } + case MVX_FW_SET_CROP_TOP: { + struct mve_request_set_option opt; + + opt.index = MVE_SET_OPT_INDEX_ENC_CROP_RARAM_TOP; + opt.data.arg = msg->set_option.crop_top; + + ret = put_fw_opt(fw, &opt, sizeof(opt.data.arg)); + + break; + } + case MVX_FW_SET_CROP_BOTTOM: { + struct mve_request_set_option opt; + + opt.index = MVE_SET_OPT_INDEX_ENC_CROP_RARAM_BOTTOM; + opt.data.arg = msg->set_option.crop_bottom; + + ret = put_fw_opt(fw, &opt, sizeof(opt.data.arg)); + + break; + } + case MVX_FW_SET_RC_BIT_I_MODE: { + struct mve_request_set_option opt; 
+ + opt.index = MVE_SET_OPT_INDEX_ENC_RC_I_BIT_MODE; + opt.data.arg = msg->set_option.rc_bit_i_mode; + + ret = put_fw_opt(fw, &opt, sizeof(opt.data.arg)); + break; + } + case MVX_FW_SET_RC_BIT_I_RATIO: { + struct mve_request_set_option opt; + + opt.index = MVE_SET_OPT_INDEX_ENC_RC_I_BIT_RATIO; + opt.data.arg = msg->set_option.rc_bit_i_ratio; + + ret = put_fw_opt(fw, &opt, sizeof(opt.data.arg)); + + break; + } + case MVX_FW_SET_HRD_BUF_SIZE: { + struct mve_buffer_param param; + + param.type = MVE_BUFFER_PARAM_TYPE_RATE_CONTROL_HRD_BUF_SIZE; + param.data.arg = msg->set_option.nHRDBufsize; + ret = put_fw_buf_param(fw, &param, + sizeof(param.data.arg)); + break; + } + case MVX_FW_SET_COLOUR_DESC: { + struct mve_buffer_param param; + + param.type = MVE_BUFFER_PARAM_TYPE_COLOUR_DESCRIPTION; + param.data.colour_description.flags = msg->set_option.colour_desc.flags; + + param.data.colour_description.colour_primaries = msg->set_option.colour_desc.colour_primaries; + param.data.colour_description.transfer_characteristics = msg->set_option.colour_desc.transfer_characteristics; + param.data.colour_description.matrix_coeff = msg->set_option.colour_desc.matrix_coeff; + param.data.colour_description.range = msg->set_option.colour_desc.range; + + param.data.colour_description.sar_height = msg->set_option.colour_desc.sar_height; + param.data.colour_description.sar_width = msg->set_option.colour_desc.sar_width; + if (msg->set_option.colour_desc.aspect_ratio_idc != 0) { + param.data.colour_description.aspect_ratio_idc = msg->set_option.colour_desc.aspect_ratio_idc; + param.data.colour_description.aspect_ratio_info_present_flag = 1; + } + if (msg->set_option.colour_desc.video_format != 0) { + param.data.colour_description.video_format = msg->set_option.colour_desc.video_format; + param.data.colour_description.video_format_present_flag = 1; + } + if (fw->session->port[MVX_DIR_OUTPUT].format == MVX_FORMAT_H264) { + param.data.colour_description.time_scale = fw->session->fps_n * 2; + 
param.data.colour_description.num_units_in_tick = fw->session->fps_d; + param.data.colour_description.timing_flag_info_present_flag = 1; + } else { + /* for HEVC, just use the time info in VPS, so clear the flag in VUI */ + param.data.colour_description.timing_flag_info_present_flag = 0; + } + if (msg->set_option.colour_desc.flags & MVX_FW_COLOR_DESC_CONTENT_VALID) { + param.data.colour_description.avg_content_light_level = + msg->set_option.colour_desc.content.luminance_average; + param.data.colour_description.max_content_light_level = + msg->set_option.colour_desc.content.luminance_max; + } + if (msg->set_option.colour_desc.flags & MVX_FW_COLOR_DESC_DISPLAY_VALID) { + param.data.colour_description.mastering_display_primaries_x[0] = + msg->set_option.colour_desc.display.r.x; + param.data.colour_description.mastering_display_primaries_x[1] = + msg->set_option.colour_desc.display.g.x; + param.data.colour_description.mastering_display_primaries_x[2] = + msg->set_option.colour_desc.display.b.x; + param.data.colour_description.mastering_display_primaries_y[0] = + msg->set_option.colour_desc.display.r.y; + param.data.colour_description.mastering_display_primaries_y[1] = + msg->set_option.colour_desc.display.g.y; + param.data.colour_description.mastering_display_primaries_y[2] = + msg->set_option.colour_desc.display.b.y; + param.data.colour_description.mastering_white_point_x = + msg->set_option.colour_desc.display.w.x; + param.data.colour_description.mastering_white_point_y = + msg->set_option.colour_desc.display.w.y; + param.data.colour_description.max_display_mastering_luminance = + msg->set_option.colour_desc.display.luminance_max; + param.data.colour_description.min_display_mastering_luminance = + msg->set_option.colour_desc.display.luminance_min; + } + ret = put_fw_buf_param(fw, &param, + sizeof(param.data.colour_description)); + + break; + + } + case MVX_FW_SET_OSD_CONFIG: { + struct mve_buffer_param param; + param.type = MVE_BUFFER_PARAM_TYPE_OSD_RECTANGLES; + 
memcpy(&param.data.osd_rectangles_buff, &msg->set_option.osd_config.osd_single_cfg, + sizeof(param.data.osd_rectangles_buff)); + ret = put_fw_buf_param(fw, &param, + sizeof(param.data.osd_rectangles_buff)); + break; + } + case MVX_FW_SET_SEI_USERDATA: { + struct mve_buffer_param param; + param.type = MVE_BUFFER_PARAM_TYPE_SEI_USER_DATA_UNREGISTERED; + param.data.user_data_unregistered.user_data_len = msg->set_option.userdata.user_data_len; + param.data.user_data_unregistered.flags = msg->set_option.userdata.flags; + memcpy(&param.data.user_data_unregistered.uuid, &msg->set_option.userdata.uuid, + sizeof(param.data.user_data_unregistered.uuid)); + memcpy(&param.data.user_data_unregistered.user_data, &msg->set_option.userdata.user_data, + sizeof(param.data.user_data_unregistered.user_data)); + ret = put_fw_buf_param(fw, &param, + sizeof(param.data.user_data_unregistered)); + break; + } + case MVX_FW_SET_NALU_FORMAT: { + struct mve_request_set_option opt; + + opt.index = MVE_SET_OPT_INDEX_NALU_FORMAT; + ret = to_mve_nalu_format(msg->set_option.nalu_format, + &opt.data.arg); + + if (ret == 0) + ret = put_fw_opt(fw, &opt, + sizeof(opt.data.arg)); + + break; + } + case MVX_FW_SET_STREAM_ESCAPING: { + struct mve_request_set_option opt; + + opt.index = MVE_SET_OPT_INDEX_STREAM_ESCAPING; + opt.data.arg = msg->set_option.stream_escaping ? 
1 : 0; + + ret = put_fw_opt(fw, &opt, sizeof(opt.data.arg)); + break; + } + case MVX_FW_SET_PROFILE_LEVEL: { + struct mve_request_set_option opt; + + opt.index = MVE_SET_OPT_INDEX_PROFILE_LEVEL; + ret = fw->ops_priv.to_mve_profile( + msg->set_option.profile_level.profile, + &opt.data.profile_level.profile); + if (ret != 0) + return ret; + + ret = fw->ops_priv.to_mve_level( + msg->set_option.profile_level.level, + msg->set_option.profile_level.tier, + &opt.data.profile_level.level); + if (ret != 0) + return ret; + + ret = put_fw_opt( + fw, &opt, + sizeof(opt.data.profile_level)); + + break; + } + case MVX_FW_SET_FSF_MODE: { + struct mve_request_set_option opt; + opt.index = MVE_SET_OPT_INDEX_DEC_AV1_FSF; + opt.data.arg = + msg->set_option.fsf_mode ; + ret = put_fw_opt(fw, &opt, sizeof(opt.data.arg)); + + break; + } + case MVX_FW_SET_IGNORE_STREAM_HEADERS: { + struct mve_request_set_option opt; + + opt.index = MVE_SET_OPT_INDEX_IGNORE_STREAM_HEADERS; + opt.data.arg = + msg->set_option.ignore_stream_headers ? 1 : 0; + ret = put_fw_opt(fw, &opt, sizeof(opt.data.arg)); + break; + } + case MVX_FW_SET_FRAME_REORDERING: { + struct mve_request_set_option opt; + + opt.index = MVE_SET_OPT_INDEX_FRAME_REORDERING; + opt.data.arg = msg->set_option.frame_reordering ? 
1 : 0; + ret = put_fw_opt(fw, &opt, sizeof(opt.data.arg)); + break; + } + case MVX_FW_SET_INTBUF_SIZE: { + struct mve_request_set_option opt; + + opt.index = MVE_SET_OPT_INDEX_INTBUF_SIZE; + opt.data.arg = msg->set_option.intbuf_size; + ret = put_fw_opt(fw, &opt, sizeof(opt.data.arg)); + break; + } + case MVX_FW_SET_P_FRAMES: { + struct mve_request_set_option opt; + + opt.index = MVE_SET_OPT_INDEX_ENC_P_FRAMES; + opt.data.arg = msg->set_option.pb_frames; + ret = put_fw_opt(fw, &opt, sizeof(opt.data.arg)); + break; + } + case MVX_FW_SET_PROFILING: { + struct mve_request_set_option opt; + + opt.index = MVE_SET_OPT_INDEX_PROFILING; + opt.data.arg = msg->set_option.profiling; + ret = put_fw_opt(fw, &opt, sizeof(opt.data.arg)); + break; + } + case MVX_FW_SET_B_FRAMES: { + struct mve_request_set_option opt; + + opt.index = MVE_SET_OPT_INDEX_ENC_B_FRAMES; + opt.data.arg = msg->set_option.pb_frames; + ret = put_fw_opt(fw, &opt, sizeof(opt.data.arg)); + break; + } + case MVX_FW_SET_GOP_TYPE: { + struct mve_request_set_option opt; + + opt.index = MVE_SET_OPT_INDEX_GOP_TYPE; + ret = to_mve_gop_type(msg->set_option.gop_type, + &opt.data.arg); + if (ret == 0) + ret = put_fw_opt(fw, &opt, + sizeof(opt.data.arg)); + + break; + } + case MVX_FW_SET_SVCT3_LEVEL1_PERIOD: { + struct mve_request_set_option opt; + + opt.index = MVE_SET_OPT_INDEX_ENC_SVCT3_LEVEL1_PEROID; + opt.data.arg = msg->set_option.svct3_level1_period; + if (ret == 0) + ret = put_fw_opt(fw, &opt, + sizeof(opt.data.arg)); + break; + } + case MVX_FW_SET_INTER_MED_BUF_SIZE: { + struct mve_request_set_option opt; + + opt.index = MVE_SET_OPT_INDEX_INTBUF_SIZE; + opt.data.arg = msg->set_option.inter_med_buf_size; + + if (ret == 0) + ret = put_fw_opt(fw, &opt, + sizeof(opt.data.arg)); + break; + } + case MVX_FW_SET_GOP_PFRAMES: { + struct mve_buffer_param param; + + param.type = MVE_BUFFER_PARAM_TYPE_GOP_RESET_DYNAMIC; + param.data.reset_gop_dynamic.reset_gop_pframes = msg->set_option.reset_gop_pframes; + ret = 
put_fw_buf_param(fw, &param, + sizeof(param.data.reset_gop_dynamic)); + break; + } + case MVX_FW_SET_LTR_PERIOD: { + struct mve_buffer_param param; + + param.type = MVE_BUFFER_PARAM_TYPE_GOP_RESET_LTR_PEROID_DYNAMIC; + param.data.reset_ltr_peroid_dynamic.reset_ltr_peroid_pframes = msg->set_option.reset_ltr_period; + ret = put_fw_buf_param(fw, &param, + sizeof(param.data.reset_ltr_peroid_dynamic)); + break; + } + case MVX_FW_SET_INTRA_MB_REFRESH: { + struct mve_request_set_option opt; + + opt.index = MVE_SET_OPT_INDEX_INTRA_MB_REFRESH; + opt.data.arg = msg->set_option.intra_mb_refresh; + ret = put_fw_opt(fw, &opt, sizeof(opt.data.arg)); + break; + } + case MVX_FW_SET_CONSTR_IPRED: { + struct mve_request_set_option opt; + + opt.index = MVE_SET_OPT_INDEX_ENC_CONSTR_IPRED; + opt.data.arg = msg->set_option.constr_ipred ? 1 : 0; + ret = put_fw_opt(fw, &opt, sizeof(opt.data.arg)); + break; + } + case MVX_FW_SET_ENTROPY_SYNC: { + struct mve_request_set_option opt; + + opt.index = MVE_SET_OPT_INDEX_ENC_ENTROPY_SYNC; + opt.data.arg = msg->set_option.entropy_sync ? 1 : 0; + ret = put_fw_opt(fw, &opt, sizeof(opt.data.arg)); + break; + } + case MVX_FW_SET_TEMPORAL_MVP: { + struct mve_request_set_option opt; + + opt.index = MVE_SET_OPT_INDEX_ENC_TEMPORAL_MVP; + opt.data.arg = msg->set_option.temporal_mvp ? 
1 : 0; + ret = put_fw_opt(fw, &opt, sizeof(opt.data.arg)); + break; + } + case MVX_FW_SET_TILES: { + struct mve_request_set_option opt; + + opt.index = MVE_SET_OPT_INDEX_TILES; + opt.data.tiles.tile_rows = msg->set_option.tile.rows; + opt.data.tiles.tile_cols = msg->set_option.tile.cols; + ret = put_fw_opt(fw, &opt, sizeof(opt.data.tiles)); + break; + } + case MVX_FW_SET_MIN_LUMA_CB_SIZE: { + struct mve_request_set_option opt; + + opt.index = MVE_SET_OPT_INDEX_ENC_MIN_LUMA_CB_SIZE; + opt.data.arg = msg->set_option.min_luma_cb_size; + ret = put_fw_opt(fw, &opt, sizeof(opt.data.arg)); + break; + } + case MVX_FW_SET_QP_RANGE: { + struct mve_buffer_param param; + + param.type = + MVE_BUFFER_PARAM_TYPE_RATE_CONTROL_QP_RANGE; + param.data.rate_control_qp_range.qp_min = + msg->set_option.qp_range.min; + param.data.rate_control_qp_range.qp_max = + msg->set_option.qp_range.max; + ret = put_fw_buf_param( + fw, &param, + sizeof(param.data.rate_control_qp_range)); + break; + } + case MVX_FW_SET_QP_RANGE_I: { + struct mve_buffer_param param; + + param.type = + MVE_BUFFER_PARAM_TYPE_RATE_CONTROL_QP_RANGE_I; + param.data.rate_control_qp_range.qp_min = + msg->set_option.qp_range.min; + param.data.rate_control_qp_range.qp_max = + msg->set_option.qp_range.max; + ret = put_fw_buf_param( + fw, &param, + sizeof(param.data.rate_control_qp_range)); + break; + } + case MVX_FW_SET_ENTROPY_MODE: { + struct mve_request_set_option opt; + + opt.index = MVE_SET_OPT_INDEX_ENC_H264_CABAC; + ret = to_mve_h264_cabac(msg->set_option.entropy_mode, + &opt.data.arg); + if (ret == 0) + ret = put_fw_opt(fw, &opt, + sizeof(opt.data.arg)); + + break; + } + case MVX_FW_SET_SLICE_SPACING_MB: { + struct mve_request_set_option opt; + + opt.index = MVE_SET_OPT_INDEX_ENC_SLICE_SPACING; + opt.data.arg = msg->set_option.slice_spacing_mb; + ret = put_fw_opt(fw, &opt, sizeof(opt.data.arg)); + break; + } + case MVX_FW_SET_VP9_PROB_UPDATE: { + struct mve_request_set_option opt; + + opt.index = 
MVE_SET_OPT_INDEX_ENC_VP9_PROB_UPDATE; + ret = to_mve_vp9_prob_update( + msg->set_option.vp9_prob_update, + &opt.data.arg); + if (ret == 0) + ret = put_fw_opt(fw, &opt, + sizeof(opt.data.arg)); + + break; + } + case MVX_FW_SET_MV_SEARCH_RANGE: { + struct mve_request_set_option opt; + + opt.index = MVE_SET_OPT_INDEX_MV_SEARCH_RANGE; + opt.data.motion_vector_search_range.mv_search_range_x = + msg->set_option.mv.x; + opt.data.motion_vector_search_range.mv_search_range_y = + msg->set_option.mv.y; + ret = put_fw_opt( + fw, &opt, + sizeof(opt.data.motion_vector_search_range)); + break; + } + case MVX_FW_SET_BITDEPTH: { + struct mve_request_set_option opt; + + opt.index = MVE_SET_OPT_INDEX_ENC_STREAM_BITDEPTH; + opt.data.bitdepth.luma_bitdepth = + msg->set_option.bitdepth.luma; + opt.data.bitdepth.chroma_bitdepth = + msg->set_option.bitdepth.chroma; + ret = put_fw_opt(fw, &opt, sizeof(opt.data.bitdepth)); + break; + } + case MVX_FW_SET_CHROMA_FORMAT: { + struct mve_request_set_option opt; + + opt.index = MVE_SET_OPT_INDEX_ENC_STREAM_CHROMA_FORMAT; + opt.data.arg = msg->set_option.chroma_format; + ret = put_fw_opt(fw, &opt, sizeof(opt.data.arg)); + break; + } + case MVX_FW_SET_RGB_TO_YUV_MODE: { + struct mve_request_set_option opt; + int i = 0; + opt.index = MVE_SET_OPT_INDEX_ENC_RGB_TO_YUV_MODE; + opt.data.rgb2yuv_params.rgb2yuv_mode = 0; // no use + for( ; i < 9; i++){ + opt.data.rgb2yuv_params.coef[i] = msg->set_option.rgb2yuv_params.coef[i]; + } + + opt.data.rgb2yuv_params.luma_range[0] = msg->set_option.rgb2yuv_params.luma_range[0]; + opt.data.rgb2yuv_params.luma_range[1] = msg->set_option.rgb2yuv_params.luma_range[1]; + opt.data.rgb2yuv_params.chroma_range[0] = msg->set_option.rgb2yuv_params.chroma_range[0]; + opt.data.rgb2yuv_params.chroma_range[1] = msg->set_option.rgb2yuv_params.chroma_range[1]; + opt.data.rgb2yuv_params.rgb_range[0] = msg->set_option.rgb2yuv_params.rgb_range[0]; + opt.data.rgb2yuv_params.rgb_range[1] = msg->set_option.rgb2yuv_params.rgb_range[1]; 
+ ret = put_fw_opt(fw, &opt, sizeof(opt.index) + sizeof(opt.data.rgb2yuv_params)); + break; + } + case MVX_FW_SET_BAND_LIMIT: { + struct mve_request_set_option opt; + + opt.index = MVE_SET_OPT_INDEX_ENC_BANDWIDTH_LIMIT; + opt.data.arg = msg->set_option.band_limit; + ret = put_fw_opt(fw, &opt, sizeof(opt.data.arg)); + break; + } + case MVX_FW_SET_CABAC_INIT_IDC: { + struct mve_request_set_option opt; + + opt.index = MVE_SET_OPT_INDEX_ENC_CABAC_INIT_IDC; + opt.data.arg = msg->set_option.cabac_init_idc; + ret = put_fw_opt(fw, &opt, sizeof(opt.data.arg)); + break; + } + case MVX_FW_SET_QP_I: { + struct mve_buffer_param param; + + param.type = MVE_BUFFER_PARAM_TYPE_QP_I; + param.data.qp.qp = msg->set_option.qp; + ret = put_fw_buf_param(fw, &param, + sizeof(param.data.qp)); + break; + } + case MVX_FW_SET_QP_P: { + struct mve_buffer_param param; + + param.type = MVE_BUFFER_PARAM_TYPE_QP_P; + param.data.qp.qp = msg->set_option.qp; + ret = put_fw_buf_param(fw, &param, + sizeof(param.data.qp)); + break; + } + case MVX_FW_SET_QP_B: { + struct mve_buffer_param param; + + param.type = MVE_BUFFER_PARAM_TYPE_QP_B; + param.data.qp.qp = msg->set_option.qp; + ret = put_fw_buf_param(fw, &param, + sizeof(param.data.qp)); + break; + } + case MVX_FW_SET_FIXED_QP: { + struct mve_request_set_option opt; + + opt.index = MVE_SET_OPT_INDEX_ENC_FIXED_QP; + opt.data.arg = msg->set_option.fixedqp; + ret = put_fw_opt(fw, &opt, sizeof(opt.data.arg)); + break; + + } + case MVX_FW_SET_INIT_QP_I: { + struct mve_request_set_option opt; + + opt.index = MVE_SET_OPT_INDEX_ENC_INIT_QPI; + opt.data.arg = msg->set_option.init_qpi; + ret = put_fw_opt(fw, &opt, sizeof(opt.data.arg)); + break; + } + case MVX_FW_SET_INIT_QP_P: { + struct mve_request_set_option opt; + + opt.index = MVE_SET_OPT_INDEX_ENC_INIT_QPP; + opt.data.arg = msg->set_option.init_qpp; + ret = put_fw_opt(fw, &opt, sizeof(opt.data.arg)); + break; + } + case MVX_FW_SET_SAO_LUMA: { + struct mve_request_set_option opt; + + opt.index = 
MVE_SET_OPT_INDEX_ENC_SAO_LUMA_EN; + opt.data.arg = msg->set_option.sao_luma; + ret = put_fw_opt(fw, &opt, sizeof(opt.data.arg)); + break; + } + case MVX_FW_SET_SAO_CHROMA: { + struct mve_request_set_option opt; + + opt.index = MVE_SET_OPT_INDEX_ENC_SAO_CHROMA_EN; + opt.data.arg = msg->set_option.sao_chroma; + ret = put_fw_opt(fw, &opt, sizeof(opt.data.arg)); + break; + } + case MVX_FW_SET_QP_DELTA_I_P: { + struct mve_request_set_option opt; + + opt.index = MVE_SET_OPT_INDEX_QP_DELTA_I_P; + opt.data.arg = msg->set_option.qp_delta_i_p; + ret = put_fw_opt(fw, &opt, sizeof(opt.data.arg)); + break; + } + case MVX_FW_SET_QP_REF_RB_EN: { + struct mve_request_set_option opt; + + opt.index = MVE_SET_OPT_INDEX_ENC_REF_RING_BUFFER; + opt.data.arg = msg->set_option.ref_rb_en; + ret = put_fw_opt(fw, &opt, sizeof(opt.data.arg)); + break; + } + case MVX_FW_SET_RC_CLIP_TOP: { + struct mve_request_set_option opt; + + opt.index = MVE_SET_OPT_INDEX_ENC_RC_CLIP_TOP; + opt.data.arg = msg->set_option.rc_qp_clip_top; + ret = put_fw_opt(fw, &opt, sizeof(opt.data.arg)); + break; + } + case MVX_FW_SET_RC_CLIP_BOT: { + struct mve_request_set_option opt; + + opt.index = MVE_SET_OPT_INDEX_ENC_RC_CLIP_BOTTOM; + opt.data.arg = msg->set_option.rc_qp_clip_bot; + ret = put_fw_opt(fw, &opt, sizeof(opt.data.arg)); + break; + } + case MVX_FW_SET_QP_MAP_CLIP_TOP: { + struct mve_request_set_option opt; + + opt.index = MVE_SET_OPT_INDEX_ENC_QPMAP_CLIP_TOP; + opt.data.arg = msg->set_option.qpmap_qp_clip_top; + ret = put_fw_opt(fw, &opt, sizeof(opt.data.arg)); + break; + } + case MVX_FW_SET_QP_MAP_CLIP_BOT: { + struct mve_request_set_option opt; + + opt.index = MVE_SET_OPT_INDEX_ENC_QPMAP_CLIP_BOTTOM; + opt.data.arg = msg->set_option.qpmap_qp_clip_bot; + ret = put_fw_opt(fw, &opt, sizeof(opt.data.arg)); + break; + } + case MVX_FW_SET_ENC_LAMBDA_SCALE: { + struct mve_request_set_option opt; + + opt.index = MVE_SET_OPT_INDEX_LAMBDA_SCALE; + memcpy(&opt.data.lambda_scale, &msg->set_option.lambda_scale, 
sizeof(opt.data.lambda_scale)); + ret = put_fw_opt(fw, &opt, sizeof(opt.data.lambda_scale)); + break; + } + case MVX_FW_SET_RESYNC_INTERVAL: { + struct mve_request_set_option opt; + + opt.index = MVE_SET_OPT_INDEX_RESYNC_INTERVAL; + opt.data.arg = msg->set_option.resync_interval; + ret = put_fw_opt(fw, &opt, sizeof(opt.data.arg)); + break; + } + case MVX_FW_SET_QUANT_TABLE: { + struct mve_request_set_option opt; + + opt.index = MVE_SET_OPT_INDEX_QUANT_TABLE; + + opt.data.quant_table.type = MVE_OPT_QUANT_TABLE_LUMA; + memcpy(opt.data.quant_table.matrix, + msg->set_option.quant_tbl.luma, + sizeof(opt.data.quant_table.matrix)); + ret = put_fw_opt(fw, &opt, + sizeof(opt.data.quant_table)); + if (ret != 0) + break; + + opt.data.quant_table.type = MVE_OPT_QUANT_TABLE_CHROMA; + memcpy(opt.data.quant_table.matrix, + msg->set_option.quant_tbl.chroma, + sizeof(opt.data.quant_table.matrix)); + ret = put_fw_opt(fw, &opt, + sizeof(opt.data.quant_table)); + break; + } + case MVX_FW_SET_HUFF_TABLE: { + struct mve_request_set_option opt; + opt.index = MVE_SET_OPT_INDEX_HUFFMAN_TABLE; + if (msg->set_option.huff_table.type & MVX_OPT_HUFFMAN_TABLE_DC_LUMA) { + opt.data.huffman_table.type = MVE_OPT_HUFFMAN_TABLE_DC_LUMA; + memcpy(opt.data.huffman_table.number_of_huffman_of_code_length, + msg->set_option.huff_table.dc_luma_code_lenght, + sizeof(msg->set_option.huff_table.dc_luma_code_lenght)); + memcpy(opt.data.huffman_table.table, + msg->set_option.huff_table.dc_luma_table, + sizeof(msg->set_option.huff_table.dc_luma_table)); + ret = put_fw_opt(fw, &opt, + sizeof(opt.index) + sizeof(opt.data.huffman_table.type) + + sizeof(opt.data.huffman_table.number_of_huffman_of_code_length) + 12); + } + if (msg->set_option.huff_table.type & MVX_OPT_HUFFMAN_TABLE_AC_LUMA) { + opt.data.huffman_table.type = MVE_OPT_HUFFMAN_TABLE_AC_LUMA; + memcpy(opt.data.huffman_table.number_of_huffman_of_code_length, + msg->set_option.huff_table.ac_luma_code_lenght, + 
sizeof(msg->set_option.huff_table.ac_luma_code_lenght)); + memcpy(opt.data.huffman_table.table, + msg->set_option.huff_table.ac_luma_table, + sizeof(msg->set_option.huff_table.ac_luma_table)); + ret = put_fw_opt(fw, &opt, + sizeof(opt.index) + sizeof(opt.data.huffman_table.type) + + sizeof(opt.data.huffman_table.number_of_huffman_of_code_length) + 162); + } + if (msg->set_option.huff_table.type & MVX_OPT_HUFFMAN_TABLE_DC_CHROMA) { + opt.data.huffman_table.type = MVE_OPT_HUFFMAN_TABLE_DC_CHROMA; + memcpy(opt.data.huffman_table.number_of_huffman_of_code_length, + msg->set_option.huff_table.dc_chroma_code_lenght, + sizeof(msg->set_option.huff_table.dc_chroma_code_lenght)); + memcpy(opt.data.huffman_table.table, + msg->set_option.huff_table.dc_chroma_table, + sizeof(msg->set_option.huff_table.dc_chroma_table)); + ret = put_fw_opt(fw, &opt, + sizeof(opt.index) + sizeof(opt.data.huffman_table.type) + + sizeof(opt.data.huffman_table.number_of_huffman_of_code_length) + 12); + } + if (msg->set_option.huff_table.type & MVX_OPT_HUFFMAN_TABLE_AC_CHROMA) { + opt.data.huffman_table.type = MVE_OPT_HUFFMAN_TABLE_AC_CHROMA; + memcpy(opt.data.huffman_table.number_of_huffman_of_code_length, + msg->set_option.huff_table.ac_chroma_code_lenght, + sizeof(msg->set_option.huff_table.ac_chroma_code_lenght)); + memcpy(opt.data.huffman_table.table, + msg->set_option.huff_table.ac_chroma_table, + sizeof(msg->set_option.huff_table.ac_chroma_table)); + ret = put_fw_opt(fw, &opt, + sizeof(opt.index) + sizeof(opt.data.huffman_table.type) + + sizeof(opt.data.huffman_table.number_of_huffman_of_code_length) + 162); + } + + break; + } + case MVX_FW_SET_WATCHDOG_TIMEOUT: { + struct mve_request_set_option opt; + + opt.index = MVE_SET_OPT_INDEX_WATCHDOG_TIMEOUT; + opt.data.arg = msg->set_option.watchdog_timeout; + + ret = put_fw_opt(fw, &opt, sizeof(opt.data.arg)); + break; + } + case MVX_FW_SET_ROI_REGIONS: { + struct mve_buffer_param param; + int i = 0; + param.type = MVE_BUFFER_PARAM_TYPE_REGIONS; + 
param.data.regions.n_regions = msg->set_option.roi_config.num_roi; + for (;i < msg->set_option.roi_config.num_roi; i++) { + param.data.regions.region[i].mbx_left = msg->set_option.roi_config.roi[i].mbx_left; + param.data.regions.region[i].mbx_right = msg->set_option.roi_config.roi[i].mbx_right; + param.data.regions.region[i].mby_top = msg->set_option.roi_config.roi[i].mby_top; + param.data.regions.region[i].mby_bottom = msg->set_option.roi_config.roi[i].mby_bottom; + param.data.regions.region[i].qp_delta = msg->set_option.roi_config.roi[i].qp_delta; + param.data.regions.region[i].prio = msg->set_option.roi_config.roi[i].prio; + param.data.regions.region[i].force_intra = msg->set_option.roi_config.roi[i].force_intra; + } + ret = put_fw_buf_param(fw, &param, + sizeof(param.data.regions)); + break; + } + case MVX_FW_SET_QP_REGION: { + struct mve_buffer_param param; + + param.type = MVE_BUFFER_PARAM_TYPE_QP; + param.data.qp.qp = msg->set_option.qp; + param.data.qp.epr_iframe_enable = 0; + ret = put_fw_buf_param(fw, &param, + sizeof(param.data.qp)); + break; + } + case MVX_FW_SET_EPR_QP: { + struct mve_buffer_param param; + param.type = MVE_BUFFER_PARAM_TYPE_QP; + param.data.qp.qp = msg->set_option.qp; + param.data.qp.epr_iframe_enable = msg->set_option.epr_qp.epr_iframe_enable; + ret = put_fw_buf_param(fw, &param, + sizeof(param.data.qp)); + break; + + } + case MVX_FW_SET_CHR_CFG: { + struct mve_buffer_param param; + param.type = MVE_BUFFER_PARAM_TYPE_CHANGE_RECTANGLES; + param.data.change_rectangles.n_rectangles = msg->set_option.chr_cfg.num_chr; + memcpy(param.data.change_rectangles.rectangles, msg->set_option.chr_cfg.rectangle, sizeof(msg->set_option.chr_cfg.rectangle)); + ret = put_fw_buf_param(fw, &param, + sizeof(param.data.regions)); + break; + } + case MVX_FW_SET_DSL_FRAME: { + struct mve_request_set_option opt; + opt.index = MVE_SET_OPT_INDEX_DEC_DOWNSCALE; + opt.data.downscaled_frame.width = msg->set_option.dsl_frame.width; + opt.data.downscaled_frame.height = 
msg->set_option.dsl_frame.height; + ret = put_fw_opt(fw, &opt, sizeof(opt.index) + sizeof(opt.data.downscaled_frame)); + break; + } + case MVX_FW_SET_LONG_TERM_REF: { + struct mve_request_set_option opt; + if (msg->set_option.ltr.mode >= 1 && msg->set_option.ltr.mode <= 8) { + opt.index = MVE_SET_OPT_INDEX_ENC_LTR_MODE; + opt.data.arg = msg->set_option.ltr.mode; + ret = put_fw_opt(fw, &opt, sizeof(opt.data.arg)); + } + if (msg->set_option.ltr.period >= 2 && msg->set_option.ltr.period <= 254) { + opt.index = MVE_SET_OPT_INDEX_ENC_LTR_PERIOD; + opt.data.arg = msg->set_option.ltr.period; + ret = put_fw_opt(fw, &opt, sizeof(opt.data.arg)); + } + break; + } + case MVX_FW_SET_DSL_MODE: { + struct mve_request_set_option opt; + opt.index = MVE_SET_OPT_INDEX_DEC_DOWNSCALE_POS_MODE; + opt.data.dsl_pos.mode = msg->set_option.dsl_pos_mode; + ret = put_fw_opt(fw, &opt, sizeof(opt.index) + sizeof(opt.data.dsl_pos)); + break; + } + case MVX_FW_SET_DSL_INTERP_MODE:{ + struct mve_request_set_option opt; + opt.index = MVE_SET_OPT_INDEX_DEC_DSL_INTERP_MODE; + opt.data.interp_mode.mode = msg->set_option.dsl_interp_mode; + ret = put_fw_opt(fw, &opt, sizeof(opt.index) + sizeof(opt.data.interp_mode)); + break; + } + case MVX_FW_SET_MINI_FRAME_CNT: { + struct mve_request_set_option opt; + opt.index = MVE_SET_OPT_INDEX_MINI_FRAME_MAX; + opt.data.arg = msg->set_option.mini_frame_cnt; + ret = put_fw_opt(fw, &opt, sizeof(opt.data.arg)); + break; + } + case MVX_FW_SET_STATS_MODE: { + struct mve_buffer_param param; + param.type = MVE_BUFFER_PARAM_TYPE_ENC_STATS; + param.data.enc_stats.mms_buffer_size = msg->set_option.enc_stats.mms_buffer_size; + param.data.enc_stats.bitcost_buffer_size = msg->set_option.enc_stats.bitcost_buffer_size; + param.data.enc_stats.qp_buffer_size = msg->set_option.enc_stats.qp_buffer_size; + param.data.enc_stats.flags = msg->set_option.enc_stats.flags; + ret = put_fw_buf_param(fw, &param, + sizeof(param.data.enc_stats)); + + break; + } + case MVX_FW_SET_GDR_NUMBER: { + 
struct mve_request_set_option opt; + opt.index = MVE_SET_OPT_INDEX_ENC_GDR_NUMBER; + opt.data.arg = msg->set_option.gdr_number; + ret = put_fw_opt(fw, &opt, sizeof(opt.data.arg)); + break; + } + case MVX_FW_SET_GDR_PERIOD: { + struct mve_request_set_option opt; + opt.index = MVE_SET_OPT_INDEX_ENC_GDR_PERIOD; + opt.data.arg = msg->set_option.gdr_period; + ret = put_fw_opt(fw, &opt, sizeof(opt.data.arg)); + break; + } + case MVX_FW_SET_MULIT_SPS_PPS:{ + struct mve_request_set_option opt; + opt.index = MVE_SET_OPT_INDEX_ENC_MULTI_SPS_PPS; + opt.data.arg = msg->set_option.mulit_sps_pps; + ret = put_fw_opt(fw, &opt, sizeof(opt.data.arg)); + break; + } + case MVX_FW_SET_VISUAL_ENABLE:{ + struct mve_request_set_option opt; + opt.index = MVE_SET_OPT_iNDEX_ENC_VISUAL_ENABLE; + opt.data.arg = msg->set_option.enable_visual; + ret = put_fw_opt(fw, &opt, sizeof(opt.data.arg)); + break; + } + case MVX_FW_SET_VISUAL_ENABLE_ADAPTIVE_INTRA_BLOCK:{ + struct mve_request_set_option opt; + opt.index = MVE_SET_OPT_INDEX_SCD_ADAPTIVE_I; + opt.data.arg = msg->set_option.adaptive_intra_block; + ret = put_fw_opt(fw, &opt, sizeof(opt.data.arg)); + break; + } + case MVX_FW_SET_ADPTIVE_QUANTISATION:{ + struct mve_request_set_option opt; + opt.index = MVE_SET_OPT_INDEX_ENC_ADPTIVE_QUANTISATION; + opt.data.arg = msg->set_option.adapt_qnt; + ret = put_fw_opt(fw, &opt, sizeof(opt.data.arg)); + break; + } + case MVX_FW_SET_DISABLE_FEATURES:{ + struct mve_request_set_option opt; + opt.index = MVE_SET_OPT_INDEX_DISABLE_FEATURES; + opt.data.arg = msg->set_option.disabled_features; + ret = put_fw_opt(fw, &opt, sizeof(opt.data.arg)); + break; + } + case MVX_FW_SET_SCD_ENABLE:{ + struct mve_request_set_option opt; + opt.index = MVE_SET_OPT_INDEX_SCD_ENABLE; + opt.data.arg = msg->set_option.scd_enable; + ret = put_fw_opt(fw, &opt, sizeof(opt.data.arg)); + break; + } + case MVX_FW_SET_SCD_PERCENT:{ + struct mve_request_set_option opt; + opt.index = MVE_SET_OPT_INDEX_SCD_PERCENT; + opt.data.arg = 
msg->set_option.scd_percent; + ret = put_fw_opt(fw, &opt, sizeof(opt.data.arg)); + break; + } + case MVX_FW_SET_SCD_THRESHOLD:{ + struct mve_request_set_option opt; + opt.index = MVE_SET_OPT_INDEX_SCD_THRESHOLD; + opt.data.arg = msg->set_option.scd_threshold; + ret = put_fw_opt(fw, &opt, sizeof(opt.data.arg)); + break; + } + case MVX_FW_SET_AQ_SSIM_EN:{ + struct mve_request_set_option opt; + opt.index = MVE_SET_OPT_INDEX_ENC_AQ_SSIM_EN; + opt.data.arg = msg->set_option.aq_ssim_en; + ret = put_fw_opt(fw, &opt, sizeof(opt.data.arg)); + break; + } + case MVX_FW_SET_AQ_NEG_RATIO:{ + struct mve_request_set_option opt; + opt.index = MVE_SET_OPT_INDEX_ENC_AQ_NEG_RATIO; + opt.data.arg = msg->set_option.aq_neg_ratio; + ret = put_fw_opt(fw, &opt, sizeof(opt.data.arg)); + break; + } + case MVX_FW_SET_AQ_POS_RATIO:{ + struct mve_request_set_option opt; + opt.index = MVE_SET_OPT_INDEX_ENC_AQ_POS_RATIO; + opt.data.arg = msg->set_option.aq_pos_ratio; + ret = put_fw_opt(fw, &opt, sizeof(opt.data.arg)); + break; + } + case MVX_FW_SET_AQ_QPDELTA_LMT:{ + struct mve_request_set_option opt; + opt.index = MVE_SET_OPT_INDEX_ENC_AQ_QPDELTA_LMT; + opt.data.arg = msg->set_option.aq_qpdelta_lmt; + ret = put_fw_opt(fw, &opt, sizeof(opt.data.arg)); + break; + } + case MVX_FW_SET_AQ_INIT_FRM_AVG_SVAR:{ + struct mve_request_set_option opt; + opt.index = MVE_SET_OPT_INDEX_ENC_AQ_INIT_FRM_AVG_SVAR; + opt.data.arg = msg->set_option.aq_init_frm_avg_svar; + ret = put_fw_opt(fw, &opt, sizeof(opt.data.arg)); + break; + } + case MVX_FW_SET_DEC_YUV2RGB_PARAMS:{ + struct mve_request_set_option opt; + opt.index = MVE_SET_OPT_INDEX_DEC_YUV2RGB_PARAMS; + memcpy(&opt.data.yuv2rgb_params,&msg->set_option.yuv2rbg_csc_coef,sizeof(struct mvx_color_conv_coef)); + ret = put_fw_opt(fw, &opt, sizeof(opt.index) + sizeof(opt.data.yuv2rgb_params)); + break; + } + case MVX_FW_SET_ENC_FORCED_UV_VAL:{ + struct mve_request_set_option opt; + opt.index = MVE_SET_OPT_INDEX_ENC_FORCED_UV_VAL; + opt.data.gray_uv_value.value = 
msg->set_option.forced_uv_value; + ret = put_fw_opt(fw, &opt, sizeof(opt.index) + sizeof(opt.data.gray_uv_value)); + break; + } + case MVX_FW_SET_ENC_SRC_CROPPING:{ + struct mve_request_set_option opt; + opt.index = MVE_SET_OPT_INDEX_ENC_SRC_CROPPING; + memcpy(&opt.data.enc_src_crop,&msg->set_option.crop,sizeof(struct mvx_crop_cfg)); + ret = put_fw_opt(fw, &opt, sizeof(opt.index) + sizeof(opt.data.enc_src_crop)); + break; + } + case MVX_FW_SET_DEC_DST_CROPPING:{ + struct mve_request_set_option opt; + opt.index = MVE_SET_OPT_INDEX_DEC_DST_CROPPING; + memcpy(&opt.data.dec_dst_crop,&msg->set_option.crop,sizeof(struct mvx_crop_cfg)); + ret = put_fw_opt(fw, &opt, sizeof(opt.index) + sizeof(opt.data.dec_dst_crop)); + break; + } + case MVX_FW_SET_ENC_INTRA_IPENALTY_ANGULAR: { + struct mve_request_set_option opt; + opt.index = MVE_SET_OPT_INDEX_ENC_IPENALTY_ANGULAR; + opt.data.arg = msg->set_option.intra_ipenalty_angular; + ret = put_fw_opt(fw, &opt, sizeof(opt.data.arg)); + break; + } + case MVX_FW_SET_ENC_INTRA_IPENALTY_PLANAR: { + struct mve_request_set_option opt; + opt.index = MVE_SET_OPT_INDEX_ENC_IPENALTY_PLANAR; + opt.data.arg = msg->set_option.intra_ipenalty_planar; + ret = put_fw_opt(fw, &opt, sizeof(opt.data.arg)); + break; + } + case MVX_FW_SET_ENC_INTRA_IPENALTY_DC: { + struct mve_request_set_option opt; + opt.index = MVE_SET_OPT_INDEX_ENC_IPENALTY_DC; + opt.data.arg = msg->set_option.intra_ipenalty_dc; + ret = put_fw_opt(fw, &opt, sizeof(opt.data.arg)); + break; + } + case MVX_FW_SET_ENC_INTER_IPENALTY_ANGULAR: { + struct mve_request_set_option opt; + opt.index = MVE_SET_OPT_INDEX_ENC_INTER_IPENALTY_ANGULAR; + opt.data.arg = msg->set_option.inter_ipenalty_angular; + ret = put_fw_opt(fw, &opt, sizeof(opt.data.arg)); + break; + } + case MVX_FW_SET_ENC_INTER_IPENALTY_PLANAR: { + struct mve_request_set_option opt; + opt.index = MVE_SET_OPT_INDEX_ENC_INTER_IPENALTY_PLANAR; + opt.data.arg = msg->set_option.inter_ipenalty_planar; + ret = put_fw_opt(fw, &opt, 
sizeof(opt.data.arg)); + break; + } + case MVX_FW_SET_ENC_INTER_IPENALTY_DC: { + struct mve_request_set_option opt; + opt.index = MVE_SET_OPT_INDEX_ENC_INTER_IPENALTY_DC; + opt.data.arg = msg->set_option.inter_ipenalty_dc; + ret = put_fw_opt(fw, &opt, sizeof(opt.data.arg)); + break; + } + default: + ret = -EINVAL; + } + + break; + } + case MVX_FW_CODE_FLUSH: { + switch (msg->flush.dir) { + case MVX_DIR_INPUT: + ret = write_message(fw, fw->msg_host, fw->msg_mve, + MVE_REQUEST_CODE_INPUT_FLUSH, NULL, + 0, MVX_LOG_FWIF_CHANNEL_MESSAGE); + break; + case MVX_DIR_OUTPUT: + ret = write_message(fw, fw->msg_host, fw->msg_mve, + MVE_REQUEST_CODE_OUTPUT_FLUSH, NULL, + 0, MVX_LOG_FWIF_CHANNEL_MESSAGE); + break; + default: + MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_WARNING, + "Invalid flush direction. dir=%d.", + msg->flush.dir); + return -EINVAL; + } + + if (ret == 0) + fw->msg_pending++; + + break; + } + case MVX_FW_CODE_BUFFER: { + struct mve_comm_area_host *host; + struct mve_comm_area_mve *mve; + enum mvx_log_fwif_channel channel; + + if (msg->buf->dir == MVX_DIR_INPUT) { + host = fw->buf_in_host; + mve = fw->buf_in_mve; + channel = MVX_LOG_FWIF_CHANNEL_INPUT_BUFFER; + } else { + host = fw->buf_out_host; + mve = fw->buf_out_mve; + channel = MVX_LOG_FWIF_CHANNEL_OUTPUT_BUFFER; + } + + if (mvx_is_frame(msg->buf->format)) + if ((msg->buf->flags & MVX_BUFFER_FRAME_FLAG_GENERAL) == MVX_BUFFER_FRAME_FLAG_GENERAL) { + ret = put_buffer_general(fw, host, mve, msg, channel); + } else { + ret = put_buffer_frame(fw, host, mve, msg, channel); + } + else + ret = put_buffer_bitstream(fw, host, mve, msg, channel); + + break; + } + case MVX_FW_CODE_IDLE_ACK: { + if (fw->ops_priv.send_idle_ack != NULL) + ret = fw->ops_priv.send_idle_ack(fw); + + break; + } + case MVX_FW_CODE_EOS: { + struct mve_comm_area_host *host; + struct mve_comm_area_mve *mve; + enum mvx_log_fwif_channel channel; + + /* The message is on the MVX_DIR_INPUT side. 
*/ + host = fw->buf_in_host; + mve = fw->buf_in_mve; + channel = MVX_LOG_FWIF_CHANNEL_INPUT_BUFFER; + + if (msg->eos_is_frame != false) { + struct mve_buffer_frame f = { + .host_handle = MVX_FW_CODE_EOS, + .frame_flags = MVE_BUFFER_FRAME_FLAG_EOS, + .format = MVE_FORMAT_YUV420_NV12 + }; + + ret = write_message(fw, host, mve, + MVE_BUFFER_CODE_FRAME, + &f, sizeof(f), channel); + } else { + struct mve_buffer_bitstream b = { + .host_handle = MVX_FW_CODE_EOS, + .bitstream_buf_addr = + MVE_MEM_REGION_PROTECTED_ADDR_BEGIN, + .bitstream_flags = + MVE_BUFFER_BITSTREAM_FLAG_EOS + }; + + ret = write_message(fw, host, mve, + MVE_BUFFER_CODE_BITSTREAM, &b, + sizeof(b), channel); + } + + break; + } + case MVX_FW_CODE_DUMP: { + ret = write_message(fw, fw->msg_host, fw->msg_mve, + MVE_REQUEST_CODE_DUMP, NULL, + 0, MVX_LOG_FWIF_CHANNEL_MESSAGE); + fw->msg_pending++; + break; + } + case MVX_FW_CODE_DEBUG: { + ret = write_message(fw, fw->msg_host, fw->msg_mve, + MVE_REQUEST_CODE_DEBUG, &msg->arg, + sizeof(msg->arg), MVX_LOG_FWIF_CHANNEL_MESSAGE); + fw->msg_pending++; + break; + } + default: { + ret = -EINVAL; + break; + } + } + + if (ret != 0) + MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_WARNING, + "Firmware put message failed. ret=%d.", ret); + + return ret; +} + +/** + * find_pages() - Find a page allocate in the map. + * @fw: Pointer to firmware object. + * @va: MVE virtual address. + * + * Return: Pointer to pages, NULL if not found. 
+ */ +static struct mvx_mmu_pages *find_pages(struct mvx_fw *fw, + mvx_mmu_va va) +{ + struct mvx_mmu_pages *pages; + + hash_for_each_possible(fw->rpc_mem, pages, node, va) { + if (pages->va == va) + return pages; + } + + return NULL; +} + +static void rpc_mem_alloc(struct mvx_fw *fw, + struct mve_rpc_communication_area *rpc_area) +{ + union mve_rpc_params *p = &rpc_area->params; + enum mvx_fw_region region; + struct mvx_mmu_pages *pages; + size_t npages; + size_t max_pages; + mvx_mmu_va va = 0; + mvx_mmu_va begin, end; + mvx_mmu_va start_va, stop_va; + int ret; + uint8_t log2_alignment; + uint32_t alignment_pages; + uint32_t alignment_bytes; + uint32_t size; + + if (IS_ENABLED(CONFIG_DEBUG_FS)) { + ret = mutex_lock_interruptible(&fw->rpcmem_mutex); + if (ret != 0) { + MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_ERROR, + "Cannot protect RPC alloc list."); + goto out; + } + } + + ret = mutex_lock_interruptible(&fw->mem_mutex); + if (ret != 0) { + MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_ERROR, + "Cannot protect va operation."); + goto unlock_rpc_mutex; + } + + switch (p->mem_alloc.region) { + case MVE_MEM_REGION_PROTECTED: + region = MVX_FW_REGION_PROTECTED; + start_va = fw->next_va_region_protected; + break; + case MVE_MEM_REGION_OUTBUF: + region = MVX_FW_REGION_FRAMEBUF; + start_va = fw->next_va_region_outbuf; + break; + default: + MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_WARNING, + "Unsupported RPC mem alloc region. 
region=%u.", + p->mem_alloc.region); + goto unlock_mutex; + } + + ret = fw->ops.get_region(region, &begin, &end); + if (ret != 0) + goto unlock_mutex; + + stop_va = end; + + npages = DIV_ROUND_UP(p->mem_alloc.size, MVE_PAGE_SIZE); + max_pages = DIV_ROUND_UP(p->mem_alloc.max_size, MVE_PAGE_SIZE); + + if (fw->fw_bin->securevideo != false) { + struct dma_buf *dmabuf; + + dmabuf = mvx_secure_mem_alloc(fw->fw_bin->secure.secure, + p->mem_alloc.size, p->mem_alloc.region); + if (IS_ERR(dmabuf)) + goto unlock_mutex; + + pages = mvx_mmu_alloc_pages_dma_buf(fw->dev, dmabuf, max_pages); + if (IS_ERR(pages)) { + dma_buf_put(dmabuf); + goto unlock_mutex; + } + } else { + pages = mvx_mmu_alloc_pages(fw->dev, npages, max_pages, GFP_KERNEL); + if (IS_ERR(pages)) + goto unlock_mutex; + } + + log2_alignment = p->mem_alloc.log2_alignment <= MVE_PAGE_SHIFT ? MVE_PAGE_SHIFT : p->mem_alloc.log2_alignment; + alignment_bytes = 1 << log2_alignment; + alignment_pages = alignment_bytes >> MVE_PAGE_SHIFT; + size = MVE_PAGE_SIZE * pages->capacity; + ret = -EINVAL; + va = (start_va + alignment_bytes - 1) & ~(alignment_bytes - 1); + do { + mvx_mmu_va tried_size = alignment_bytes; + if (end - va < size) { + /* The remaining VA space to the end of region is not enough, + * so rewind to the beginning of region. Set 'stop va' to + * the searching start point. */ + va = begin; + stop_va = min(start_va, end - size); + } + ret = mvx_mmu_map_pages(fw->mmu, va, pages, MVX_ATTR_SHARED_RW, + MVX_ACCESS_READ_WRITE, &tried_size); + if (ret == 0) + break; + + tried_size = (tried_size + alignment_bytes - 1) & ~(alignment_bytes - 1); + va += tried_size; + } while (va < stop_va); + + if (ret != 0) { + MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_WARNING, + "Failed to find memory region for RPC alloc."); + mvx_mmu_free_pages(pages); + va = 0; + goto unlock_mutex; + } + + start_va = (va + size) >= end ? 
begin : (va + size); + + switch (p->mem_alloc.region) { + case MVE_MEM_REGION_PROTECTED: + fw->next_va_region_protected = start_va; + break; + case MVE_MEM_REGION_OUTBUF: + fw->next_va_region_outbuf = start_va; + break; + default: + break; + } + + hash_add(fw->rpc_mem, &pages->node, pages->va); + + MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_INFO, + "RPC alloc memory. size=%u, max_size=%u, region=%u, npages=%zu, va=0x%x.", + p->mem_alloc.size, p->mem_alloc.max_size, + p->mem_alloc.region, npages, va); + +unlock_mutex: + mutex_unlock(&fw->mem_mutex); +unlock_rpc_mutex: + if (IS_ENABLED(CONFIG_DEBUG_FS)) + mutex_unlock(&fw->rpcmem_mutex); + +out: + rpc_area->size = sizeof(uint32_t); + p->data[0] = va; +} + +static void rpc_mem_resize(struct mvx_fw *fw, + struct mve_rpc_communication_area *rpc_area) +{ + union mve_rpc_params *p = &rpc_area->params; + struct mvx_mmu_pages *pages; + mvx_mmu_va va = 0; + int ret; + + if (IS_ENABLED(CONFIG_DEBUG_FS)) { + ret = mutex_lock_interruptible(&fw->rpcmem_mutex); + if (ret != 0) { + MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_ERROR, + "Cannot protect RPC alloc list."); + goto out; + } + } + + pages = find_pages(fw, p->mem_resize.ve_pointer); + if (pages != 0) { + size_t size; + size_t npages = 0; + int ret; + + if (fw->fw_bin->securevideo != false) { + size = mvx_mmu_size_pages(pages); + + /* The size of RPC memory is only increased. */ + if (size < p->mem_resize.new_size) { + struct dma_buf *dmabuf; + + size = p->mem_resize.new_size - size; + + /* Allocate a new secure DMA buffer. */ + dmabuf = mvx_secure_mem_alloc( + fw->fw_bin->secure.secure, size, + p->mem_alloc.region); + if (IS_ERR(dmabuf)) + goto unlock_mutex; + + ret = mvx_mmu_pages_append_dma_buf( + pages, dmabuf); + if (ret != 0) { + dma_buf_put(dmabuf); + goto unlock_mutex; + } + } + } else { + /* Resize the allocated pages. 
*/ + npages = DIV_ROUND_UP(p->mem_resize.new_size, + MVE_PAGE_SIZE); + ret = mvx_mmu_resize_pages(pages, npages); + if (ret != 0) { + MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_WARNING, + "Failed to resize RPC mapped pages. ret=%d.", + ret); + goto unlock_mutex; + } + } + + va = pages->va; + } else { + MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_WARNING, + "Could not find pages for RPC resize. va=0x%x.", + p->mem_resize.ve_pointer); + } + + fw->client_ops->flush_mmu(fw->csession); + + MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_INFO, + "RPC resize memory. va=0x%x, new_size=%u.", + p->mem_resize.ve_pointer, p->mem_resize.new_size); + +unlock_mutex: + if (IS_ENABLED(CONFIG_DEBUG_FS)) + mutex_unlock(&fw->rpcmem_mutex); + +out: + rpc_area->size = sizeof(uint32_t); + p->data[0] = va; +} + +static void rpc_mem_free(struct mvx_fw *fw, + struct mve_rpc_communication_area *rpc_area) +{ + union mve_rpc_params *p = &rpc_area->params; + struct mvx_mmu_pages *pages; + int ret; + + if (IS_ENABLED(CONFIG_DEBUG_FS)) { + ret = mutex_lock_interruptible(&fw->rpcmem_mutex); + if (ret != 0) { + MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_ERROR, + "Cannot protect RPC alloc list."); + return; + } + } + + pages = find_pages(fw, p->mem_free.ve_pointer); + if (pages != NULL) { + hash_del(&pages->node); + mvx_mmu_free_pages(pages); + } else { + MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_WARNING, + "Could not find pages for RPC free. va=0x%x.", + p->mem_free.ve_pointer); + } + + fw->client_ops->flush_mmu(fw->csession); + + MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_INFO, + "RPC free memory. va=0x%x.", p->mem_free.ve_pointer); + + rpc_area->size = 0; + if (IS_ENABLED(CONFIG_DEBUG_FS)) + mutex_unlock(&fw->rpcmem_mutex); +} + +/** + * rstrip() - Remove trailing chars from string. + * @s: String to be stripped. + * @t: String containing chars to be stripped. + * + * Return: Pointer to stripped string. 
+ */ +static char *rstrip(char *str, + char *trim) +{ + size_t l = strlen(str); + + while (l-- > 0) { + char *t; + + for (t = trim; *t != '\0'; t++) + if (str[l] == *t) { + str[l] = '\0'; + break; + } + + if (*t == '\0') + break; + } + + return str; +} + +static int handle_rpc_v2(struct mvx_fw *fw) +{ + struct mve_rpc_communication_area *rpc_area = fw->rpc; + int ret = 0; + + dma_sync_single_for_cpu(fw->dev, + virt_to_phys(rpc_area), sizeof(*rpc_area), + DMA_FROM_DEVICE); + + if (rpc_area->state == MVE_RPC_STATE_PARAM) { + ret = 1; + + /* Log RPC request. */ + MVX_LOG_EXECUTE(&mvx_log_fwif_if, MVX_LOG_INFO, + log_rpc(fw->session, + MVX_LOG_FWIF_DIRECTION_FIRMWARE_TO_HOST, + rpc_area)); + + switch (rpc_area->call_id) { + case MVE_RPC_FUNCTION_DEBUG_PRINTF: { + MVX_LOG_PRINT( + &mvx_log_if, MVX_LOG_INFO, + "RPC_PRINT=%s", + rstrip(rpc_area->params.debug_print.string, + "\n\r")); + break; + } + case MVE_RPC_FUNCTION_MEM_ALLOC: { + rpc_mem_alloc(fw, rpc_area); + break; + } + case MVE_RPC_FUNCTION_MEM_RESIZE: { + rpc_mem_resize(fw, rpc_area); + break; + } + case MVE_RPC_FUNCTION_MEM_FREE: { + rpc_mem_free(fw, rpc_area); + break; + } + default: + MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_INFO, + "Unsupported RPC request. call_id=%u.", + rpc_area->call_id); + ret = -EINVAL; + break; + } + + /* + * Make sure the whole RPC message body has been written before + * the RPC message area is returned to the firmware. + */ + wmb(); + rpc_area->state = MVE_RPC_STATE_RETURN; + + /* Make sure state is written before memory is flushed. */ + wmb(); + dma_sync_single_for_device( + fw->dev, + virt_to_phys(rpc_area), sizeof(*rpc_area), + DMA_TO_DEVICE); + + /* Log RPC response. 
*/ + MVX_LOG_EXECUTE(&mvx_log_fwif_if, MVX_LOG_INFO, + log_rpc(fw->session, + MVX_LOG_FWIF_DIRECTION_HOST_TO_FIRMWARE, + rpc_area)); + + fw->client_ops->send_irq(fw->csession); + } + + return ret; +} + +#define RAM_PRINTBUF_SIZE MVE_FW_PRINT_RAM_SIZE +#define RAM_PRINT_MAX_LEN (128) +#define RAM_PRINT_BUF_CNT ((RAM_PRINTBUF_SIZE / RAM_PRINT_MAX_LEN) - 1) +#define RAM_PRINT_FLAG (0x11223356) +static int handle_fw_ram_print_v2(struct mvx_fw *fw) +{ + struct mve_fw_ram_print_head_aera *rpt_area = fw->fw_print_ram; + int ret = 0; + uint32_t wr_cnt; + uint32_t rd_cnt = 0; + uint32_t cnt; + uint32_t rd_idx; + char *print_buf = NULL; + + dma_sync_single_for_cpu(fw->dev, + virt_to_phys(rpt_area), sizeof(*rpt_area), + DMA_FROM_DEVICE); + + wr_cnt = rpt_area->wr_cnt; + rd_cnt = rpt_area->rd_cnt; + cnt = (rd_cnt <= wr_cnt) ? wr_cnt - rd_cnt : wr_cnt - rd_cnt + (uint32_t)~0u; + + if(RAM_PRINT_FLAG == rpt_area->flag && RAM_PRINT_BUF_CNT > rpt_area->index && cnt){ + //printk("RPT:flag=%x, idx=%u, wr_cnt=%u, rd_cnt=%u.\n", rpt_area->flag, rpt_area->index, wr_cnt, rd_cnt); + + while(cnt--){ + rd_idx = rd_cnt % RAM_PRINT_BUF_CNT; + print_buf = (fw->fw_print_ram + RAM_PRINT_MAX_LEN ) + rd_idx * RAM_PRINT_MAX_LEN; + MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_WARNING, "FW-%u: %s\n", rd_cnt, print_buf); + rd_cnt++; + } + + rpt_area->rd_cnt = rd_cnt; + /* Make sure rpt_area->rd_cnt is written before memory is flushed. 
*/ + wmb(); + dma_sync_single_for_device( + fw->dev, + virt_to_phys(&rpt_area->rd_cnt), sizeof(rpt_area->rd_cnt), + DMA_TO_DEVICE); + + ret = 1; + } + + return ret; +} + +static void unmap_msq(struct mvx_fw *fw, + void **data, + enum mvx_fw_region region) +{ + int ret; + mvx_mmu_va begin; + mvx_mmu_va end; + + if (*data == NULL) + return; + + ret = fw->ops.get_region(region, &begin, &end); + if (ret == 0) + mvx_mmu_unmap_va(fw->mmu, begin, MVE_PAGE_SIZE); + + mvx_mmu_free_page(fw->dev, virt_to_phys(*data)); + + *data = NULL; +} + +static int map_msq(struct mvx_fw *fw, + void **data, + enum mvx_fw_region region) +{ + phys_addr_t page; + mvx_mmu_va begin; + mvx_mmu_va end; + int ret; + + /* Get virtual address where the message queue is to be mapped. */ + ret = fw->ops.get_region(region, &begin, &end); + if (ret != 0) + return ret; + + /* Allocate page and store Linux logical address in 'data'. */ + page = mvx_mmu_alloc_page(fw->dev, GFP_KERNEL | __GFP_ZERO); + if (page == 0) + return -ENOMEM; + + /* Memory map region. */ + ret = mvx_mmu_map_pa(fw->mmu, begin, page, MVE_PAGE_SIZE, + MVX_ATTR_SHARED_COHERENT, MVX_ACCESS_READ_WRITE); + if (ret != 0) { + mvx_mmu_free_page(fw->dev, page); + return ret; + } + + *data = phys_to_virt(page); + + return 0; +} + +static void unmap_fw_print_ram(struct mvx_fw *fw, + void **data, + enum mvx_fw_region region) +{ + int ret; + mvx_mmu_va begin; + mvx_mmu_va end; + + if (*data == NULL) + return; + + ret = fw->ops.get_region(region, &begin, &end); + if (ret == 0) { + mvx_mmu_unmap_pages(fw->print_ram_pages); + mvx_mmu_free_noncontiguous(fw->dev, fw->print_ram_pages, fw->print_ram_sgt, data, MVE_FW_PRINT_RAM_SIZE); + } + + *data = NULL; +} + +static int map_fw_print_ram(struct mvx_fw *fw, + void **data, + enum mvx_fw_region region) +{ + mvx_mmu_va begin; + mvx_mmu_va end; + int ret; + void* vmap; + + /* Get virtual address where the message queue is to be mapped. 
*/ + ret = fw->ops.get_region(region, &begin, &end); + if (ret != 0) + return ret; + + /* Allocate pages and store Linux logical address in 'data'. */ + vmap = mvx_mmu_alloc_noncontiguous(fw->dev, &fw->print_ram_pages, &fw->print_ram_sgt, + MVE_FW_PRINT_RAM_SIZE, GFP_KERNEL | __GFP_ZERO); + if (vmap == NULL) + return -ENOMEM; + + ret = mvx_mmu_map_pages(fw->mmu, begin, fw->print_ram_pages, + MVX_ATTR_SHARED_RW, MVX_ACCESS_READ_WRITE, NULL); + + *data = vmap; + + return 0; +} + +static void unmap_protocol_v2(struct mvx_fw *fw) +{ + struct mvx_mmu_pages *pages; + struct hlist_node *tmp; + int bkt; + + unmap_msq(fw, &fw->msg_host, MVX_FW_REGION_MSG_HOST); + unmap_msq(fw, &fw->msg_mve, MVX_FW_REGION_MSG_MVE); + unmap_msq(fw, &fw->buf_in_host, MVX_FW_REGION_BUF_IN_HOST); + unmap_msq(fw, &fw->buf_in_mve, MVX_FW_REGION_BUF_IN_MVE); + unmap_msq(fw, &fw->buf_out_host, MVX_FW_REGION_BUF_OUT_HOST); + unmap_msq(fw, &fw->buf_out_mve, MVX_FW_REGION_BUF_OUT_MVE); + unmap_msq(fw, &fw->rpc, MVX_FW_REGION_RPC); + unmap_fw_print_ram(fw, &fw->fw_print_ram, MVX_FW_REGION_PRINT_RAM); + + hash_for_each_safe(fw->rpc_mem, bkt, tmp, pages, node) { + hash_del(&pages->node); + mvx_mmu_free_pages(pages); + } +} + +static int map_protocol_v2(struct mvx_fw *fw) +{ + int ret; + + ret = map_msq(fw, &fw->msg_host, MVX_FW_REGION_MSG_HOST); + if (ret != 0) + goto unmap_fw; + + ret = map_msq(fw, &fw->msg_mve, MVX_FW_REGION_MSG_MVE); + if (ret != 0) + goto unmap_fw; + + ret = map_msq(fw, &fw->buf_in_host, MVX_FW_REGION_BUF_IN_HOST); + if (ret != 0) + goto unmap_fw; + + ret = map_msq(fw, &fw->buf_in_mve, MVX_FW_REGION_BUF_IN_MVE); + if (ret != 0) + goto unmap_fw; + + ret = map_msq(fw, &fw->buf_out_host, MVX_FW_REGION_BUF_OUT_HOST); + if (ret != 0) + goto unmap_fw; + + ret = map_msq(fw, &fw->buf_out_mve, MVX_FW_REGION_BUF_OUT_MVE); + if (ret != 0) + goto unmap_fw; + + ret = map_msq(fw, &fw->rpc, MVX_FW_REGION_RPC); + if (ret != 0) + goto unmap_fw; + + ret = map_fw_print_ram(fw, &fw->fw_print_ram, 
MVX_FW_REGION_PRINT_RAM); + if (ret != 0) + goto unmap_fw; + + return 0; + +unmap_fw: + unmap_protocol_v2(fw); + + return ret; +} + +static void print_pair(char *name_in, + char *name_out, + struct device *device, + struct mve_comm_area_host *host, + struct mve_comm_area_mve *mve, + int ind, + struct seq_file *s) +{ + dma_sync_single_for_cpu(device, virt_to_phys(mve), + MVE_PAGE_SIZE, DMA_FROM_DEVICE); + mvx_seq_printf(s, name_in, ind, "wr=%10d, rd=%10d, avail=%10d\n", + host->in_wpos, mve->in_rpos, + (uint16_t)(host->in_wpos - mve->in_rpos)); + mvx_seq_printf(s, name_out, ind, "wr=%10d, rd=%10d, avail=%10d\n", + mve->out_wpos, host->out_rpos, + (uint16_t)(mve->out_wpos - host->out_rpos)); +} + +static int print_stat_v2(struct mvx_fw *fw, + int ind, + struct seq_file *s) +{ + print_pair("Msg host->mve", "Msg host<-mve", + fw->dev, fw->msg_host, fw->msg_mve, + ind, s); + print_pair("Inbuf host->mve", "Inbuf host<-mve", + fw->dev, fw->buf_in_host, fw->buf_in_mve, + ind, s); + print_pair("Outbuf host->mve", "Outbuf host<-mve", + fw->dev, fw->buf_out_host, fw->buf_out_mve, + ind, s); + + return 0; +} + +static ssize_t get_capacity(int rpos, + int wpos) +{ + ssize_t capacity; + + capacity = wpos - rpos; + if (capacity < 0) + capacity += MVE_COMM_QUEUE_SIZE_IN_WORDS; + + return capacity * sizeof(uint32_t); +} + +static void print_debug_v2(struct mvx_fw *fw) +{ + struct mve_comm_area_host *msg_host = fw->msg_host; + struct mve_comm_area_mve *msg_mve = fw->msg_mve; + unsigned int rpos, wpos; + ssize_t capacity; + struct mve_msg_header header; + + dma_sync_single_for_cpu(fw->dev, virt_to_phys(msg_mve), + MVE_PAGE_SIZE, DMA_FROM_DEVICE); + + MVX_LOG_PRINT_SESSION(&mvx_log_session_if, MVX_LOG_WARNING, fw->session, + "Dump message queue. 
msg={host={out_rpos=%u, in_wpos=%u}, mve={out_wpos=%u, in_rpos=%u}}", + msg_host->out_rpos, msg_host->in_wpos, + msg_mve->out_wpos, msg_mve->in_rpos); + + rpos = msg_host->out_rpos; + wpos = msg_mve->out_wpos; + + while ((capacity = get_capacity(rpos, wpos)) >= sizeof(header)) { + unsigned int pos; + + pos = read32n(msg_mve->out_data, rpos, (uint32_t *)&header, + sizeof(header)); + + MVX_LOG_PRINT_SESSION(&mvx_log_session_if, MVX_LOG_WARNING, + fw->session, + "mve -> host queue={rpos=%u, wpos=%u, capacity=%u}, msg={code=%u, size=%u}", + rpos, wpos, capacity, + header.code, header.size); + + capacity = get_capacity(pos, wpos); + if (header.size > capacity) { + MVX_LOG_PRINT_SESSION( + &mvx_log_session_if, MVX_LOG_WARNING, + fw->session, + "Size is larger than capacity. capacity=%zd, size=%u.", + capacity, header.size); + return; + } + + rpos = (pos + DIV_ROUND_UP(header.size, sizeof(uint32_t))) % + MVE_COMM_QUEUE_SIZE_IN_WORDS; + } + + rpos = msg_mve->in_rpos; + wpos = msg_host->in_wpos; + while ((capacity = get_capacity(rpos, wpos)) >= sizeof(header)) { + unsigned int pos; + + pos = read32n(msg_host->in_data, rpos, (uint32_t *)&header, + sizeof(header)); + + MVX_LOG_PRINT_SESSION(&mvx_log_session_if, MVX_LOG_WARNING, + fw->session, + "host -> mve queue={rpos=%u, wpos=%u, capacity=%u}, msg={code=%u, size=%u}", + rpos, wpos, capacity, + header.code, header.size); + + capacity = get_capacity(pos, wpos); + if (header.size > capacity) { + MVX_LOG_PRINT_SESSION( + &mvx_log_session_if, MVX_LOG_WARNING, + fw->session, + "Size is larger than capacity. 
capacity=%zd, size=%u.", + capacity, header.size); + return; + } + + rpos = (pos + DIV_ROUND_UP(header.size, sizeof(uint32_t))) % + MVE_COMM_QUEUE_SIZE_IN_WORDS; + } +} + +int mvx_fw_send_idle_ack_v2(struct mvx_fw *fw) +{ + int ret = 0; + + ret = write_message(fw, fw->msg_host, fw->msg_mve, + MVE_REQUEST_CODE_IDLE_ACK, + NULL, 0, + MVX_LOG_FWIF_CHANNEL_MESSAGE); + + return ret; +} + +/**************************************************************************** + * Exported functions + ****************************************************************************/ + +int mvx_fw_construct_v2(struct mvx_fw *fw, + struct mvx_fw_bin *fw_bin, + struct mvx_mmu *mmu, + struct mvx_session *session, + struct mvx_client_ops *client_ops, + struct mvx_client_session *csession, + unsigned int core_mask, + unsigned char major, + unsigned char minor) +{ + int ret; + + ret = mvx_fw_construct(fw, fw_bin, mmu, session, client_ops, csession, + core_mask); + fw->next_va_region_protected = MVE_MEM_REGION_PROTECTED_ADDR_BEGIN; + fw->next_va_region_outbuf = MVE_MEM_REGION_FRAMEBUF_ADDR_BEGIN; + if (ret != 0) + return ret; + + fw->ops.map_protocol = map_protocol_v2; + fw->ops.unmap_protocol = unmap_protocol_v2; + fw->ops.get_region = get_region_v2; + fw->ops.get_message = get_message_v2; + fw->ops.put_message = put_message_v2; + fw->ops.handle_rpc = handle_rpc_v2; + fw->ops.handle_fw_ram_print = handle_fw_ram_print_v2; + fw->ops.print_stat = print_stat_v2; + fw->ops.print_debug = print_debug_v2; + fw->ops_priv.send_idle_ack = NULL; + fw->ops_priv.to_mve_profile = mvx_fw_to_mve_profile_v2; + fw->ops_priv.to_mve_level = mvx_fw_to_mve_level_v2; + + if (major == 2 && minor >= 4) + fw->ops_priv.send_idle_ack = mvx_fw_send_idle_ack_v2; + + return 0; +} diff --git a/drivers/media/platform/cix/cix_vpu/if/mvx_firmware_v3.c b/drivers/media/platform/cix/cix_vpu/if/mvx_firmware_v3.c new file mode 100755 index 000000000000..2854e731a254 --- /dev/null +++ 
b/drivers/media/platform/cix/cix_vpu/if/mvx_firmware_v3.c @@ -0,0 +1,173 @@ +/* + * The confidential and proprietary information contained in this file may + * only be used by a person authorised under and to the extent permitted + * by a subsisting licensing agreement from Arm Technology (China) Co., Ltd. + * + * (C) COPYRIGHT 2021-2021 Arm Technology (China) Co., Ltd. + * ALL RIGHTS RESERVED + * + * This entire notice must be reproduced on all copies of this file + * and copies of this file may only be made by a person if such person is + * permitted to do so under the terms of a subsisting license agreement + * from Arm Technology (China) Co., Ltd. + * + * SPDX-License-Identifier: GPL-2.0-only + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ * + */ + +/**************************************************************************** + * Includes + ****************************************************************************/ + +#include "mvx_firmware_priv.h" +#include "fw_v3/mve_protocol_def.h" + +/**************************************************************************** + * Static functions + ****************************************************************************/ + +static int get_region_v3(enum mvx_fw_region region, + uint32_t *begin, + uint32_t *end) +{ + switch (region) { + case MVX_FW_REGION_CORE_0: + *begin = MVE_MEM_REGION_FW_INSTANCE0_ADDR_BEGIN; + *end = MVE_MEM_REGION_FW_INSTANCE0_ADDR_END; + break; + case MVX_FW_REGION_CORE_1: + *begin = MVE_MEM_REGION_FW_INSTANCE1_ADDR_BEGIN; + *end = MVE_MEM_REGION_FW_INSTANCE1_ADDR_END; + break; + case MVX_FW_REGION_CORE_2: + *begin = MVE_MEM_REGION_FW_INSTANCE2_ADDR_BEGIN; + *end = MVE_MEM_REGION_FW_INSTANCE2_ADDR_END; + break; + case MVX_FW_REGION_CORE_3: + *begin = MVE_MEM_REGION_FW_INSTANCE3_ADDR_BEGIN; + *end = MVE_MEM_REGION_FW_INSTANCE3_ADDR_END; + break; + case MVX_FW_REGION_CORE_4: + *begin = MVE_MEM_REGION_FW_INSTANCE4_ADDR_BEGIN; + *end = MVE_MEM_REGION_FW_INSTANCE4_ADDR_END; + break; + case MVX_FW_REGION_CORE_5: + *begin = MVE_MEM_REGION_FW_INSTANCE5_ADDR_BEGIN; + *end = MVE_MEM_REGION_FW_INSTANCE5_ADDR_END; + break; + case MVX_FW_REGION_CORE_6: + *begin = MVE_MEM_REGION_FW_INSTANCE6_ADDR_BEGIN; + *end = MVE_MEM_REGION_FW_INSTANCE6_ADDR_END; + break; + case MVX_FW_REGION_CORE_7: + *begin = MVE_MEM_REGION_FW_INSTANCE7_ADDR_BEGIN; + *end = MVE_MEM_REGION_FW_INSTANCE7_ADDR_END; + break; + case MVX_FW_REGION_PROTECTED: + *begin = MVE_MEM_REGION_PROTECTED_ADDR_BEGIN; + *end = MVE_MEM_REGION_PROTECTED_ADDR_END; + break; + case MVX_FW_REGION_FRAMEBUF: + *begin = MVE_MEM_REGION_FRAMEBUF_ADDR_BEGIN; + *end = MVE_MEM_REGION_FRAMEBUF_ADDR_END; + break; + case MVX_FW_REGION_MSG_HOST: + *begin = MVE_COMM_MSG_INQ_ADDR; + *end = MVE_COMM_MSG_INQ_ADDR + 
MVE_PAGE_SIZE; + break; + case MVX_FW_REGION_MSG_MVE: + *begin = MVE_COMM_MSG_OUTQ_ADDR; + *end = MVE_COMM_MSG_OUTQ_ADDR + MVE_PAGE_SIZE; + break; + case MVX_FW_REGION_BUF_IN_HOST: + *begin = MVE_COMM_BUF_INQ_ADDR; + *end = MVE_COMM_BUF_INQ_ADDR + MVE_PAGE_SIZE; + break; + case MVX_FW_REGION_BUF_IN_MVE: + *begin = MVE_COMM_BUF_INRQ_ADDR; + *end = MVE_COMM_BUF_INRQ_ADDR + MVE_PAGE_SIZE; + break; + case MVX_FW_REGION_BUF_OUT_HOST: + *begin = MVE_COMM_BUF_OUTQ_ADDR; + *end = MVE_COMM_BUF_OUTQ_ADDR + MVE_PAGE_SIZE; + break; + case MVX_FW_REGION_BUF_OUT_MVE: + *begin = MVE_COMM_BUF_OUTRQ_ADDR; + *end = MVE_COMM_BUF_OUTRQ_ADDR + MVE_PAGE_SIZE; + break; + case MVX_FW_REGION_RPC: + *begin = MVE_COMM_RPC_ADDR; + *end = MVE_COMM_RPC_ADDR + MVE_PAGE_SIZE; + break; + case MVX_FW_REGION_PRINT_RAM: + *begin = MVE_FW_PRINT_RAM_ADDR; + *end = MVE_FW_PRINT_RAM_ADDR + MVE_FW_PRINT_RAM_SIZE; + break; + default: + return -EINVAL; + } + + return 0; +} + +static int to_mve_profile_v3(unsigned int mvx_profile, + uint16_t *mve_profile) +{ + int ret = 0; + + switch (mvx_profile) { + case MVX_PROFILE_H264_HIGH_10: + *mve_profile = MVE_OPT_PROFILE_H264_HIGH_10; + break; + default: + ret = mvx_fw_to_mve_profile_v2(mvx_profile, mve_profile); + } + + return ret; +} + +/**************************************************************************** + * Exported functions + ****************************************************************************/ + +int mvx_fw_construct_v3(struct mvx_fw *fw, + struct mvx_fw_bin *fw_bin, + struct mvx_mmu *mmu, + struct mvx_session *session, + struct mvx_client_ops *client_ops, + struct mvx_client_session *csession, + unsigned int core_mask, + unsigned char major, + unsigned char minor) +{ + int ret; + + ret = mvx_fw_construct_v2(fw, fw_bin, mmu, session, client_ops, + csession, core_mask, major, minor); + fw->next_va_region_protected = MVE_MEM_REGION_PROTECTED_ADDR_BEGIN; + fw->next_va_region_outbuf = MVE_MEM_REGION_FRAMEBUF_ADDR_BEGIN; + if (ret != 0) + return 
ret; + + fw->ops.get_region = get_region_v3; + fw->ops_priv.to_mve_profile = to_mve_profile_v3; + + if (major == 3 && minor >= 1) + fw->ops_priv.send_idle_ack = mvx_fw_send_idle_ack_v2; + + return 0; +} diff --git a/drivers/media/platform/cix/cix_vpu/if/mvx_if.c b/drivers/media/platform/cix/cix_vpu/if/mvx_if.c new file mode 100755 index 000000000000..8a9ce6aa23c3 --- /dev/null +++ b/drivers/media/platform/cix/cix_vpu/if/mvx_if.c @@ -0,0 +1,246 @@ +/* + * The confidential and proprietary information contained in this file may + * only be used by a person authorised under and to the extent permitted + * by a subsisting licensing agreement from Arm Technology (China) Co., Ltd. + * + * (C) COPYRIGHT 2021-2021 Arm Technology (China) Co., Ltd. + * ALL RIGHTS RESERVED + * + * This entire notice must be reproduced on all copies of this file + * and copies of this file may only be made by a person if such person is + * permitted to do so under the terms of a subsisting license agreement + * from Arm Technology (China) Co., Ltd. + * + * SPDX-License-Identifier: GPL-2.0-only + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ * + */ + +/**************************************************************************** + * Includes + ****************************************************************************/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include "mvx_ext_if.h" +#include "mvx_if.h" +#include "mvx_log_group.h" +#include "mvx_firmware.h" +#include "mvx_firmware_cache.h" +#include "mvx_secure.h" +#include "mvx_session.h" + +/**************************************************************************** + * Types + ****************************************************************************/ + +/** + * struct mvx_if_ctx - Device context. + * + * There is one instance of this structure for each device. + */ +struct mvx_if_ctx { + struct device *dev; + struct mvx_ext_if ext[MVX_EXT_IF_COUNT]; + struct mvx_fw_cache firmware; + struct mvx_client_ops *client_ops; + struct mvx_if_ops if_ops; + struct mvx_secure secure; + struct kobject kobj; + struct completion kobj_unregister; + struct dentry *dentry; +}; + +/**************************************************************************** + * Static variables and functions + ****************************************************************************/ + +/* Physical hardware can handle 40 physical bits. 
*/ +static uint64_t mvx_if_dma_mask = DMA_BIT_MASK(40); + +static struct mvx_if_ctx *if_ops_to_if_ctx(struct mvx_if_ops *ops) +{ + return container_of(ops, struct mvx_if_ctx, if_ops); +} + +static void if_release(struct kobject *kobj) +{ + struct mvx_if_ctx *ctx = container_of(kobj, struct mvx_if_ctx, kobj); + + complete(&ctx->kobj_unregister); +} + +static struct kobj_type if_ktype = { + .release = if_release, + .sysfs_ops = &kobj_sysfs_ops +}; + +/**************************************************************************** + * Exported variables and functions + ****************************************************************************/ + +struct mvx_if_ops *mvx_if_create(struct device *dev, + struct mvx_client_ops *client_ops, + void *priv) +{ + struct mvx_if_ctx *ctx; + int ret; + + MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_INFO, "probe"); + + dev->dma_mask = &mvx_if_dma_mask; + dev->coherent_dma_mask = mvx_if_dma_mask; + + /* + * This parameter is indirectly used by DMA-API to limit a lookup + * through a hash table with allocated DMA regions. If the value is + * not high enough, a lookup will be terminated too early and a false + * negative warning will be generated for every DMA operation. + * + * To prevent this behavior vb2-dma-contig allocator keeps this value + * set to the maximum requested buffer size. Unfortunately this is not + * done for vb2-dma-sg which we are using, so we have to implement the + * same logic. + * + * In this change I set a value permanently to 2Gb, but in the next + * commit a functionality similar to vb2-dma-contig will be added. + * + * Mentioned structure also has one more member: segment_boundary_mask. + * It has to be investigated if any value should be assigned to it. 
+ * + * See the following kernel commit for the reference: + * 3f03396918962b2f8b888d02b23cd1e0c88bf5e5 + */ + dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms), GFP_KERNEL); + if (dev->dma_parms == NULL) + return ERR_PTR(-ENOMEM); + + dma_set_max_seg_size(dev, SZ_2G); + +#if (KERNEL_VERSION(4, 1, 0) <= LINUX_VERSION_CODE) && IS_ENABLED(CONFIG_OF) + of_dma_configure_id(dev, dev->of_node, 0, NULL); +#endif + + /* Create device context and store pointer in device private data. */ + ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL); + if (ctx == NULL) { + ret = -ENOMEM; + goto free_dma_parms; + } + + if (IS_ENABLED(CONFIG_DEBUG_FS)) { + char name[20]; + + scnprintf(name, sizeof(name), "%s%u", MVX_IF_NAME, dev->id); + ctx->dentry = debugfs_create_dir(name, NULL); + if (IS_ERR_OR_NULL(ctx->dentry)) { + ret = -EINVAL; + goto free_ctx; + } + } + + /* Store context in device private data. */ + ctx->dev = dev; + ctx->client_ops = client_ops; + + /* Initialize if ops. */ + ctx->if_ops.irq = mvx_session_irq; + + init_completion(&ctx->kobj_unregister); + + /* Create sysfs entry for the device */ + ret = kobject_init_and_add(&ctx->kobj, &if_ktype, + kernel_kobj, "amvx%u", dev->id); + if (ret != 0) { + kobject_put(&ctx->kobj); + goto remove_debugfs; + } + + /* Initialize secure video. */ + ret = mvx_secure_construct(&ctx->secure, dev); + if (ret != 0) + goto delete_kobject; + + /* Initialize firmware cache. */ + ret = mvx_fw_cache_construct(&ctx->firmware, dev, &ctx->secure, + &ctx->kobj); + if (ret != 0) + goto destroy_secure; + + /* Create the external device interface. 
*/ + ret = mvx_ext_if_construct(ctx->ext, dev, &ctx->firmware, + ctx->client_ops, ctx->dentry); + if (ret != 0) + goto destroy_fw_cache; + + return &ctx->if_ops; + +destroy_fw_cache: + mvx_fw_cache_destruct(&ctx->firmware); + +destroy_secure: + mvx_secure_destruct(&ctx->secure); + +delete_kobject: + kobject_put(&ctx->kobj); + +remove_debugfs: + if (IS_ENABLED(CONFIG_DEBUG_FS)) + debugfs_remove_recursive(ctx->dentry); + +free_ctx: + devm_kfree(dev, ctx); + +free_dma_parms: + devm_kfree(dev, dev->dma_parms); + + return ERR_PTR(ret); +} + +void mvx_if_destroy(struct mvx_if_ops *if_ops) +{ + struct mvx_if_ctx *ctx = if_ops_to_if_ctx(if_ops); + struct device *dev = ctx->dev; + + MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_INFO, "remove"); + + mvx_ext_if_destruct(ctx->ext); + mvx_fw_cache_destruct(&ctx->firmware); + mvx_secure_destruct(&ctx->secure); + kobject_put(&ctx->kobj); + wait_for_completion(&ctx->kobj_unregister); + if (IS_ENABLED(CONFIG_DEBUG_FS)) + debugfs_remove_recursive(ctx->dentry); + + devm_kfree(dev, dev->dma_parms); + devm_kfree(dev, ctx); + + dev->dma_mask = NULL; + dev->coherent_dma_mask = 0; +} + +void mvx_if_flush_work(struct mvx_if_ops *if_ops) +{ + struct mvx_if_ctx *ctx = if_ops_to_if_ctx(if_ops); + + flush_workqueue(ctx->secure.workqueue); +} diff --git a/drivers/media/platform/cix/cix_vpu/if/mvx_if.h b/drivers/media/platform/cix/cix_vpu/if/mvx_if.h new file mode 100755 index 000000000000..6df4f53ea13a --- /dev/null +++ b/drivers/media/platform/cix/cix_vpu/if/mvx_if.h @@ -0,0 +1,590 @@ +/* + * The confidential and proprietary information contained in this file may + * only be used by a person authorised under and to the extent permitted + * by a subsisting licensing agreement from Arm Technology (China) Co., Ltd. + * + * (C) COPYRIGHT 2021-2021 Arm Technology (China) Co., Ltd. 
+ * ALL RIGHTS RESERVED + * + * This entire notice must be reproduced on all copies of this file + * and copies of this file may only be made by a person if such person is + * permitted to do so under the terms of a subsisting license agreement + * from Arm Technology (China) Co., Ltd. + * + * SPDX-License-Identifier: GPL-2.0-only + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + * + */ + +#ifndef _MVX_IF_H_ +#define _MVX_IF_H_ + +/**************************************************************************** + * Includes + ****************************************************************************/ + +#include +#include +#include "mvx_mmu.h" + +/**************************************************************************** + * Defines + ****************************************************************************/ + +/** + * The name of the device driver. 
+ */ +#define MVX_IF_NAME "amvx_if" + + +#define MVX_INVALID_VAL (-1) + +#define MVX_IS_LEGACY_FORMAT(format) \ + (!((format) == MVX_FORMAT_AV1 || (format) == MVX_FORMAT_HEVC || \ + (format) == MVX_FORMAT_H264 || (format) == MVX_FORMAT_VP9 || \ + (format) == MVX_FORMAT_AVS2)) + +/**************************************************************************** + * Types + ****************************************************************************/ + +struct device; +struct mvx_client_ops; +struct mvx_client_session; +struct mvx_if_ctx; +struct platform_device; + +/** + * enum mvx_direction - Direction from the point of view of the hardware block. + */ +enum mvx_direction { + MVX_DIR_INPUT, + MVX_DIR_OUTPUT, + MVX_DIR_MAX +}; + +/** + * enum mvx_tristate - Tristate boolean variable. + */ +enum mvx_tristate { + MVX_TRI_UNSET = -1, + MVX_TRI_TRUE = 0, + MVX_TRI_FALSE = 1 +}; + +/** + * enum mvx_format - List of compressed formats and frame formats. + * + * Enumeration of formats that are supported by all know hardware revisions. + * + * The enumeration should start at 0 and should not contain any gaps. + */ +enum mvx_format { + /* Compressed formats. */ + MVX_FORMAT_BITSTREAM_FIRST, + MVX_FORMAT_AVS = MVX_FORMAT_BITSTREAM_FIRST, + MVX_FORMAT_AVS2, + MVX_FORMAT_H263, + MVX_FORMAT_H264, + MVX_FORMAT_HEVC, + MVX_FORMAT_JPEG, + MVX_FORMAT_MPEG2, + MVX_FORMAT_MPEG4, + MVX_FORMAT_RV, + MVX_FORMAT_VC1, + MVX_FORMAT_VP8, + MVX_FORMAT_VP9, + MVX_FORMAT_AV1, + MVX_FORMAT_BITSTREAM_LAST = MVX_FORMAT_AV1, + + /* Uncompressed formats. 
*/ + MVX_FORMAT_FRAME_FIRST, + MVX_FORMAT_YUV420_AFBC_8 = MVX_FORMAT_FRAME_FIRST, + MVX_FORMAT_YUV420_AFBC_10, + MVX_FORMAT_Y_AFBC_8, + MVX_FORMAT_Y_AFBC_10, + MVX_FORMAT_YUV422_AFBC_8, + MVX_FORMAT_YUV422_AFBC_10, + MVX_FORMAT_YUV420_I420, + MVX_FORMAT_YUV420_NV12, + MVX_FORMAT_YUV420_NV21, + MVX_FORMAT_YUV420_P010, + MVX_FORMAT_YUV420_Y0L2, + MVX_FORMAT_YUV420_AQB1, + MVX_FORMAT_YUV422_YUY2, + MVX_FORMAT_YUV422_UYVY, + MVX_FORMAT_YUV422_Y210, + MVX_FORMAT_RGBA_8888, + MVX_FORMAT_BGRA_8888, + MVX_FORMAT_ARGB_8888, + MVX_FORMAT_ABGR_8888, + MVX_FORMAT_RGB_888, + MVX_FORMAT_BGR_888, + MVX_FORMAT_RGB_888_3P, + MVX_FORMAT_ARGB_1555, + MVX_FORMAT_ARGB_4444, + MVX_FORMAT_RGB_565, + MVX_FORMAT_Y, + MVX_FORMAT_Y_10, + MVX_FORMAT_YUV444, + MVX_FORMAT_YUV444_10, + MVX_FORMAT_YUV420_2P_10, + MVX_FORMAT_YUV422_1P_10, + MVX_FORMAT_YUV420_I420_10, + MVX_FORMAT_FRAME_LAST = MVX_FORMAT_YUV420_I420_10, + + MVX_FORMAT_MAX +}; + +/** + * enum mvx_hw_id - Enumeration of known hardware revisions. + */ +enum mvx_hw_id { + MVE_Unknown = 0x0, + MVE_v500 = 0x500, + MVE_v550 = 0x550, + MVE_v61 = 0x61, + MVE_v52_v76 = 0x5276 +}; + +/** + * enum mvx_hw_svnrev - Enumeration of hardware SVN revisions. + */ +enum mvx_hw_svnrev { + MVE_SVN_UNKNOWN = 0X0, + /* software control VPU core memory repair */ + MVE_SVN_ENPWOFF = 0xe0c1afe1, + /* the maximum resolution is 4096x4096 except for (M)JPEG */ + MVE_SVN_4K = 0x80e11117 +}; + +/** + * struct mvx_hw_ver - Hardware version. + */ +struct mvx_hw_ver { + enum mvx_hw_id id; + uint32_t revision; + uint32_t svn_revision; + uint32_t patch; +}; + +/** + * enum mvx_nalu_format - NALU format. + */ +enum mvx_nalu_format { + MVX_NALU_FORMAT_UNDEFINED, + MVX_NALU_FORMAT_START_CODES, + MVX_NALU_FORMAT_ONE_NALU_PER_BUFFER, + MVX_NALU_FORMAT_ONE_BYTE_LENGTH_FIELD, + MVX_NALU_FORMAT_TWO_BYTE_LENGTH_FIELD, + MVX_NALU_FORMAT_FOUR_BYTE_LENGTH_FIELD +}; + +/** + * enum mvx_profile - Profile for encoder. 
+ */ +enum mvx_profile { + MVX_PROFILE_NONE, + + MVX_PROFILE_H264_BASELINE, + MVX_PROFILE_H264_MAIN, + MVX_PROFILE_H264_HIGH, + MVX_PROFILE_H264_HIGH_10, + + MVX_PROFILE_H265_MAIN, + MVX_PROFILE_H265_MAIN_STILL, + MVX_PROFILE_H265_MAIN_INTRA, + MVX_PROFILE_H265_MAIN_10, + + MVX_PROFILE_VC1_SIMPLE, + MVX_PROFILE_VC1_MAIN, + MVX_PROFILE_VC1_ADVANCED, + + MVX_PROFILE_VP8_MAIN +}; + +/** + * enum mvx_level - Level for encoder. + */ +enum mvx_level { + MVX_LEVEL_NONE, + + MVX_LEVEL_H264_1, + MVX_LEVEL_H264_1b, + MVX_LEVEL_H264_11, + MVX_LEVEL_H264_12, + MVX_LEVEL_H264_13, + MVX_LEVEL_H264_2, + MVX_LEVEL_H264_21, + MVX_LEVEL_H264_22, + MVX_LEVEL_H264_3, + MVX_LEVEL_H264_31, + MVX_LEVEL_H264_32, + MVX_LEVEL_H264_4, + MVX_LEVEL_H264_41, + MVX_LEVEL_H264_42, + MVX_LEVEL_H264_5, + MVX_LEVEL_H264_51, + MVX_LEVEL_H264_52, + MVX_LEVEL_H264_6, + MVX_LEVEL_H264_61, + MVX_LEVEL_H264_62, + + MVX_LEVEL_H265_1, + MVX_LEVEL_H265_2, + MVX_LEVEL_H265_21, + MVX_LEVEL_H265_3, + MVX_LEVEL_H265_31, + MVX_LEVEL_H265_4, + MVX_LEVEL_H265_41, + MVX_LEVEL_H265_5, + MVX_LEVEL_H265_51, + MVX_LEVEL_H265_52, + MVX_LEVEL_H265_6, + MVX_LEVEL_H265_61, + MVX_LEVEL_H265_62 +}; + +/** + * enum mvx_tier - tier for HEVC encoder. + */ +enum mvx_tier { + MVX_TIER_NONE, + + MVX_TIER_MAIN, + MVX_TIER_HIGH +}; + +/** + * enum mvx_gop_type - GOP type for encoder. + */ +enum mvx_gop_type { + MVX_GOP_TYPE_NONE, + MVX_GOP_TYPE_BIDIRECTIONAL, + MVX_GOP_TYPE_LOW_DELAY, + MVX_GOP_TYPE_PYRAMID, + MVX_GOP_TYPE_SVCT3, + MVX_GOP_TYPE_GDR +}; + +/** + * enum mvx_entropy_mode - Entropy mode for encoder. + */ +enum mvx_entropy_mode { + MVX_ENTROPY_MODE_NONE, + MVX_ENTROPY_MODE_CAVLC, + MVX_ENTROPY_MODE_CABAC +}; + +/** + * enum mvx_multi_slice_mode - Multi slice mode. + */ +enum mvx_multi_slice_mode { + MVX_MULTI_SLICE_MODE_SINGLE, + MVX_MULTI_SLICE_MODE_MAX_MB +}; + +/** + * enum mvx_vp9_prob_update - Probability update method. 
 + */
+enum mvx_vp9_prob_update {
+ MVX_VP9_PROB_UPDATE_DISABLED,
+ MVX_VP9_PROB_UPDATE_IMPLICIT,
+ MVX_VP9_PROB_UPDATE_EXPLICIT
+};
+
+/**
+ * enum mvx_rgb_to_yuv_mode - RGB to YUV conversion mode.
+ */
+enum mvx_rgb_to_yuv_mode {
+ MVX_RGB_TO_YUV_MODE_BT601_STUDIO,
+ MVX_RGB_TO_YUV_MODE_BT601_FULL,
+ MVX_RGB_TO_YUV_MODE_BT709_STUDIO,
+ MVX_RGB_TO_YUV_MODE_BT709_FULL,
+ MVX_RGB_TO_YUV_MODE_BT2020_STUDIO,
+ MVX_RGB_TO_YUV_MODE_BT2020_FULL,
+ MVX_RGB_TO_YUV_MODE_MAX
+};
+
+/**
+ * enum mvx_yuv_to_rgb_mode - YUV to RGB conversion mode.
+ */
+enum mvx_yuv_to_rgb_mode {
+ MVX_YUV_TO_RGB_MODE_BT601_LIMT,
+ MVX_YUV_TO_RGB_MODE_BT601_FULL,
+ MVX_YUV_TO_RGB_MODE_BT709_LIMT,
+ MVX_YUV_TO_RGB_MODE_BT709_FULL,
+ MVX_YUV_TO_RGB_MODE_BT2020_LIMT,
+ MVX_YUV_TO_RGB_MODE_BT2020_FULL,
+ MVX_YUV_TO_RGB_MODE_MAX
+};
+
+/**
+ * enum use_cust_yuv_to_rgb_mode - YUV to RGB conversion mode.
+ */
+enum use_cust_yuv_to_rgb_mode {
+ MVX_CUST_YUV2RGB_MODE_UNSET,
+ MVX_CUST_YUV2RGB_MODE_STANDARD,
+ MVX_CUST_YUV2RGB_MODE_CUSTOMIZED
+};
+
+/**
+ * enum mvx_chroma_format - chroma formats.
+ */
+enum mvx_chroma_format {
+ MVX_CHROMA_FORMAT_MONO = 0,
+ MVX_CHROMA_FORMAT_420 = 1,
+ MVX_CHROMA_FORMAT_422 = 2
+};
+
+
+/**
+ * struct mvx_if_session - Structure holding members needed to map a session to
+ * a hardware device.
+ * @kref: Reference counter for the session object.
+ * @release: Function pointer that shall be passed to kref_put. If the
+ * reference count reaches 0 this function will be called to
+ * destruct and deallocate the object.
+ * @ncores: Number of cores this session has been mapped to.
+ * @core_mask: Core mask used by this session.
+ * @l0_pte: Level 0 page table entry. This value is written to the hardware
+ * MMU CTRL register to point out the location of the L1 page table
+ * and to set access permissions and bus attributes.
+ * @securevideo: Secure video enabled. 
+ */ +struct mvx_if_session { + struct kref kref; + struct mutex *mutex; + void (*release)(struct kref *kref); + unsigned int ncores; + unsigned int core_mask; + mvx_mmu_pte l0_pte; + bool securevideo; +}; + +/** + * struct mvx_if_ops - Functions pointers the registered device may use to call + * the if device. + */ +struct mvx_if_ops { + /** + * irq() - Handle IRQ sent from firmware to driver. + */ + void (*irq)(struct mvx_if_session *session); +}; + +/** + * struct mvx_client_ops - Functions pointers the if device may use to call + * the registered device. + */ +struct mvx_client_ops { + struct list_head list; + + /** + * get_hw_ver() - Get MVE hardware version + */ + void (*get_hw_ver)(struct mvx_client_ops *client, + struct mvx_hw_ver *version); + + /** + * get_formats() - Get list of supported formats. + * + * Return: 0 on success, else error code. + */ + void (*get_formats)(struct mvx_client_ops *client, + enum mvx_direction direction, + uint64_t *formats); + + /** + * get_core_mask() - Get core mask. + * + * Return: core mask, else error code. + */ + unsigned int (*get_core_mask)(struct mvx_client_ops *client); + + /* + * SESSION. + */ + + /** + * register_session() - Register if session with client. + * + * Return: Client session handle on success, else ERR_PTR. + */ + struct mvx_client_session + *(*register_session)(struct mvx_client_ops *client, + struct mvx_if_session *session); + + /** + * unregister_session() - Unregister session. + * + * Return: 0 on success, else error code. + */ + void (*unregister_session)(struct mvx_client_session *session); + + /** + * switch_in() - Switch in session. + * + * After a session has been switched in it must wait for a 'switched + * out' event before it is allowed to requested to be switched in again. + * Switching in a already switched in session is regarded as an error. + * + * Return: 0 on success, else error code. 
 + */
+ int (*switch_in)(struct mvx_client_session *session);
+
+ int (*switch_out_rsp)(struct mvx_client_session *session);
+
+ /**
+ * send_irq() - Send IRQ from driver to firmware.
+ *
+ * Return: 0 on success, else error code.
+ */
+ int (*send_irq)(struct mvx_client_session *session);
+
+ /**
+ * soft_irq() - Soft trigger an irq to fetch firmware messages again
+ * in case the driver misses a firmware message.
+ *
+ * Return: 0 on success, else error code.
+ */
+ int (*soft_irq)(struct mvx_client_session *session);
+
+ /**
+ * flush_mmu() - Flush MMU tables.
+ *
+ * Flushing MMU tables is only required if pages have been removed
+ * from the page tables.
+ *
+ * Return: 0 on success, else error code.
+ */
+ int (*flush_mmu)(struct mvx_client_session *session);
+
+ /**
+ * print_debug() - Print debug information.
+ *
+ * Return: 0 on success, else error code.
+ */
+ void (*print_debug)(struct mvx_client_session *session);
+
+ /**
+ * update_load() - Update vpu work loading.
+ *
+ * Return: 0 on success, else error code.
+ */
+ int (*update_load)(struct mvx_client_session *session);
+
+ /**
+ * terminate() - terminate a session.
+ *
+ * Return: None.
+ */
+ void (*terminate)(struct mvx_client_session *session);
+
+ /**
+ * reset_priority() - reset priority of session.
+ *
+ * Return: None.
+ */
+ void (*reset_priority)(struct mvx_client_session *session);
+
+};
+
+/****************************************************************************
+ * Static functions
+ ****************************************************************************/
+
+/**
+ * mvx_is_bitstream(): Detect if format is of type bitstream.
+ * @format: Format.
+ *
+ * Return: True if format is bitstream, else false.
+ */
+static inline bool mvx_is_bitstream(enum mvx_format format)
+{
+ return (format >= MVX_FORMAT_BITSTREAM_FIRST) &&
+ (format <= MVX_FORMAT_BITSTREAM_LAST);
+}
+
+/**
+ * mvx_is_frame(): Detect if format is of type frame.
+ * @format: Format. 
+ * + * Return: True if format is frame, else false. + */ +static inline bool mvx_is_frame(enum mvx_format format) +{ + return (format >= MVX_FORMAT_FRAME_FIRST) && + (format <= MVX_FORMAT_FRAME_LAST); +} + +/** + * mvx_is_rgb(): Detect if format is of type RGB. + * @format: Format. + * + * Return: True if format is RGB, else false. + */ +static inline bool mvx_is_rgb(enum mvx_format format) +{ + return (format >= MVX_FORMAT_RGBA_8888) && + (format <= MVX_FORMAT_ABGR_8888); +} + +/** + * mvx_is_rgb24(): Detect if format is of type RGB24. + * @format: Format. + * + * Return: True if format is RGB24, else false. + */ + +static inline bool mvx_is_rgb24(enum mvx_format format) +{ + return (format >= MVX_FORMAT_RGB_888) && + (format <= MVX_FORMAT_RGB_888_3P); +} + + +/** + * mvx_is_afbc(): Detect if format is of type AFBC. + * @format: Format. + * + * Return: True if format is AFBC, else false. + */ +static inline bool mvx_is_afbc(enum mvx_format format) +{ + return (format >= MVX_FORMAT_YUV420_AFBC_8) && + (format <= MVX_FORMAT_YUV422_AFBC_10); +} + +/**************************************************************************** + * Exported functions + ****************************************************************************/ + +/** + * mvx_if_create() - Create IF device. + */ +struct mvx_if_ops *mvx_if_create(struct device *dev, + struct mvx_client_ops *client_ops, + void *priv); + +/** + * mvx_if_destroy() - Destroy IF device. 
+ */ +void mvx_if_destroy(struct mvx_if_ops *if_ops); + +void mvx_if_flush_work(struct mvx_if_ops *if_ops); + +#endif /* _MVX_IF_H_ */ diff --git a/drivers/media/platform/cix/cix_vpu/if/mvx_mmu.c b/drivers/media/platform/cix/cix_vpu/if/mvx_mmu.c new file mode 100755 index 000000000000..e5d23cc3e0b5 --- /dev/null +++ b/drivers/media/platform/cix/cix_vpu/if/mvx_mmu.c @@ -0,0 +1,1346 @@ +/* + * The confidential and proprietary information contained in this file may + * only be used by a person authorised under and to the extent permitted + * by a subsisting licensing agreement from Arm Technology (China) Co., Ltd. + * + * (C) COPYRIGHT 2021-2021 Arm Technology (China) Co., Ltd. + * ALL RIGHTS RESERVED + * + * This entire notice must be reproduced on all copies of this file + * and copies of this file may only be made by a person if such person is + * permitted to do so under the terms of a subsisting license agreement + * from Arm Technology (China) Co., Ltd. + * + * SPDX-License-Identifier: GPL-2.0-only + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ * + */ + +/**************************************************************************** + * Includes + ****************************************************************************/ + +#include +#include +#include +#include +#include +#include +#include +#include "mvx_mmu.h" +#include "mvx_log_group.h" + +/**************************************************************************** + * Defines + ****************************************************************************/ + +/* Number of bits for the physical address space. */ +#define MVE_PA_BITS 40 +#define MVE_PA_MASK GENMASK_ULL(MVE_PA_BITS - 1, 0) + +/* Number of bits for the virtual address space. */ +#define MVE_VA_BITS 32 +#define MVE_VA_MASK GENMASK(MVE_VA_BITS - 1, 0) + +/* Number of bits from the VA used to index a PTE in a page. */ +#define MVE_INDEX_SHIFT 10 +#define MVE_INDEX_SIZE (1 << MVE_INDEX_SHIFT) +#define MVE_INDEX_MASK GENMASK(MVE_INDEX_SHIFT - 1, 0) + +/* Access permission defines. */ +#define MVE_PTE_AP_SHIFT 0 +#define MVE_PTE_AP_BITS 2 +#define MVE_PTE_AP_MASK ((1 << MVE_PTE_AP_BITS) - 1) + +/* Physical address defines. */ +#define MVE_PTE_PHYSADDR_SHIFT 2 +#define MVE_PTE_PHYSADDR_BITS 28 +#define MVE_PTE_PHYSADDR_MASK ((1 << MVE_PTE_PHYSADDR_BITS) - 1) + +/* Attributes defines. */ +#define MVE_PTE_ATTR_SHIFT 30 +#define MVE_PTE_ATTR_BITS 2 +#define MVE_PTE_ATTR_MASK ((1 << MVE_PTE_ATTR_BITS) - 1) + +/* Number of levels for Page Table Walk. */ +#define MVE_PTW_LEVELS 2 + +/* + * A Linux physical page can be equal in size or larger than the MVE page size. + * This define calculates how many MVE pages that fit in one Linux page. + */ +#define MVX_PAGES_PER_PAGE (PAGE_SIZE / MVE_PAGE_SIZE) + +/**************************************************************************** + * Types + ****************************************************************************/ + +/** + * struct mvx_mmu_dma_buf - MVX DMA buffer. + * + * Adds a list head to keep track of DMA buffers. 
+ */ +struct mvx_mmu_dma_buf { + struct list_head head; + struct dma_buf *dmabuf; +}; + +/**************************************************************************** + * Static functions + ****************************************************************************/ + +/** + * get_index() - Return the PTE index for a given level. + * @va: Virtual address. + * @level: Level (L1=0, L2=1). + * + * 22 12 0 + * +-------------------+-------------------+-----------------------+ + * | Level 1 | Level 2 | Page offset | + * +-------------------+-------------------+-----------------------+ + */ +static unsigned int get_index(const mvx_mmu_va va, + const unsigned int level) +{ + return (va >> (MVE_PAGE_SHIFT + (MVE_PTW_LEVELS - level - 1) * + MVE_INDEX_SHIFT)) & MVE_INDEX_MASK; +} + +/** + * get_offset() - Return the page offset. + * @va: Virtual address. + * + * 22 12 0 + * +-------------------+-------------------+-----------------------+ + * | Level 1 | Level 2 | Page offset | + * +-------------------+-------------------+-----------------------+ + */ +static unsigned int get_offset(const mvx_mmu_va va) +{ + return va & MVE_PAGE_MASK; +} + +/** + * get_pa() - Return physical address stored in PTE. + */ +static phys_addr_t get_pa(const mvx_mmu_pte pte) +{ + return (((phys_addr_t)pte >> MVE_PTE_PHYSADDR_SHIFT) & + MVE_PTE_PHYSADDR_MASK) << MVE_PAGE_SHIFT; +} + +/* LCOV_EXCL_START */ + +/** + * get_attr() - Return attributes stored in PTE. + */ +static enum mvx_mmu_attr get_attr(const mvx_mmu_pte pte) +{ + return (pte >> MVE_PTE_ATTR_SHIFT) & MVE_PTE_ATTR_MASK; +} + +/** + * get_ap() - Return access permissions stored in PTE. + */ +static enum mvx_mmu_access get_ap(const mvx_mmu_pte pte) +{ + return (pte >> MVE_PTE_AP_SHIFT) & MVE_PTE_AP_MASK; +} + +/* LCOV_EXCL_STOP */ + +/** + * ptw() - Perform Page Table Walk and return pointer to L2 PTE. + * @mmu: Pointer to MMU context. + * @va: Virtual address. + * @alloc: True if missing L2 page should be allocated. 
+ * + * Return: Pointer to PTE, ERR_PTR on error. + */ +static mvx_mmu_pte *ptw(struct mvx_mmu *mmu, + mvx_mmu_va va, + bool alloc) +{ + phys_addr_t l2; + mvx_mmu_pte *pte = mmu->page_table; + unsigned int index; + + /* Level 1. */ + index = get_index(va, 0); + l2 = get_pa(pte[index]); + + /* We should never perform a page table walk for a protected page. */ + if (test_bit(index, mmu->l2_page_is_external) != 0) { + MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_WARNING, + "PTW virtual address to secure L2 page. va=0x%x.", + va); + return ERR_PTR(-EINVAL); + } + + /* Map in L2 page if it is missing. */ + if (l2 == 0) { + if (alloc == false) { + MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_WARNING, + "Missing L2 page in PTW. va=0x%x.", + va); + return ERR_PTR(-EFAULT); + } + + l2 = mvx_mmu_alloc_page(mmu->dev, GFP_KERNEL | __GFP_ZERO); + if (l2 == 0) { + MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_WARNING, + "Failed to allocate L2 page. va=0x%x.", + va); + return ERR_PTR(-ENOMEM); + } + + pte[index] = mvx_mmu_set_pte(MVX_ATTR_PRIVATE, l2, + MVX_ACCESS_READ_ONLY); + dma_sync_single_for_device(mmu->dev, + virt_to_phys(&pte[index]), + sizeof(pte[index]), DMA_TO_DEVICE); + } + + /* Level 2. */ + index = get_index(va, 1); + pte = phys_to_virt(l2); + + return &pte[index]; +} + +/** + * map_page() - Map physical- to virtual address. + * @mmu: Pointer to MMU context. + * @va: MVE virtual address to map. + * @pa: Linux kernel physical address to map. + * @attr: MMU attributes. + * @access: MMU access permissions. + * + * Create new L1 and L2 entries if necessary. If mapping already exist, then + * error is returned. + * + * Return: 0 on success, else error code. + */ +static int map_page(struct mvx_mmu *mmu, + mvx_mmu_va va, + phys_addr_t pa, + enum mvx_mmu_attr attr, + enum mvx_mmu_access access) +{ + mvx_mmu_pte *pte; + phys_addr_t page; + + /* Check that both VA and PA are page aligned. */ + if ((va | pa) & MVE_PAGE_MASK) { + MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_WARNING, + "VA and PA must be page aligned. 
va=0x%x, pa=0x%llx.", + va, pa); + return -EFAULT; + } + + /* Check that VA is within valid address range. */ + if (va & ~MVE_VA_MASK) { + MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_WARNING, + "VA out of valid range. va=0x%x.", + va); + return -EFAULT; + } + + /* Check that PA is within valid address range. */ + if (pa & ~MVE_PA_MASK) { + MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_WARNING, + "PA out of valid range. pa=0x%llx.", + pa); + return -EFAULT; + } + + pte = ptw(mmu, va, true); + if (IS_ERR(pte)) + return PTR_ERR(pte); + + /* Return error if page already exists. */ + page = get_pa(*pte); + if (page != 0) + return -EAGAIN; + + /* Map in physical address and flush data. */ + *pte = mvx_mmu_set_pte(attr, pa, access); + dma_sync_single_for_device(mmu->dev, virt_to_phys(pte), sizeof(*pte), + DMA_TO_DEVICE); + + return 0; +} + +/** + * unmap_page() - Unmap a page from the virtual address space. + * @mmu: Pointer to MMU context. + * @va: Virtual address. + */ +static void unmap_page(struct mvx_mmu *mmu, + mvx_mmu_va va) +{ + mvx_mmu_pte *pte; + + pte = ptw(mmu, va, false); + if (IS_ERR(pte)) + return; + + /* Unmap virtual address and flush data. */ + *pte = 0; + dma_sync_single_for_device(mmu->dev, virt_to_phys(pte), sizeof(*pte), + DMA_TO_DEVICE); +} + +/** + * remap_page() - Remap virtual address. + * @mmu: Pointer to MMU context. + * @va: MVE virtual address to map. + * @pa: Linux kernel physical address to map. + * @attr: MMU attributes. + * @access: MMU access permissions. + * + * Return: 0 on success, else error code. + */ +static int remap_page(struct mvx_mmu *mmu, + mvx_mmu_va va, + phys_addr_t pa, + enum mvx_mmu_attr attr, + enum mvx_mmu_access access) +{ + unmap_page(mmu, va); + return map_page(mmu, va, pa, attr, access); +} + +/** + * remap_pages() - Remap virtual address range. + * @pages: Pointer to pages object. + * @oldcount: Count before object was resized. + * + * Return: 0 on success, else error code. 
+ */ +static int remap_pages(struct mvx_mmu_pages *pages, + size_t oldcount) +{ + int ret; + + if (pages->mmu == NULL) + return 0; + + /* Remap pages to no access if new count is smaller than old count. */ + while (pages->count < oldcount) { + oldcount--; + + ret = remap_page(pages->mmu, + pages->va + oldcount * MVE_PAGE_SIZE, + MVE_PAGE_SIZE, MVX_ATTR_PRIVATE, + MVX_ACCESS_NO); + if (ret != 0) + return ret; + } + + /* Map up pages if new count is larger than old count. */ + while (pages->count > oldcount) { + ret = remap_page(pages->mmu, + pages->va + oldcount * MVE_PAGE_SIZE, + pages->pages[oldcount], pages->attr, + pages->access); + if (ret != 0) + return ret; + + oldcount++; + } + + return 0; +} + +/** + * mapped_count() - Check if level 2 table entries point to mmu mapped pages. + * @pa: Physical address of the table entry to be checked. + * + * Return: the number of mapped pages found. + */ +static int mapped_count(phys_addr_t pa) +{ + int count = 0; + + if (pa != 0) { + int j; + phys_addr_t pa2; + mvx_mmu_pte *l2 = phys_to_virt(pa); + + for (j = 0; j < MVE_INDEX_SIZE; j++) { + pa2 = get_pa(l2[j]); + if (pa2 != 0 && pa2 != MVE_PAGE_SIZE) + count++; + } + } + + return count; +} + +/** + * get_sg_table_npages() - Count number of pages in SG table. + * @sgt: Pointer to scatter gather table. + * + * Return: Number of pages. + */ +static size_t get_sg_table_npages(struct sg_table *sgt) +{ + struct sg_page_iter piter; + size_t count = 0; + + for_each_sg_page(sgt->sgl, &piter, sgt->nents, 0) { + count++; + } + + return count; +} + +/** + * append_sg_table() - Append SG table to pages object. + * @pages: Pointer to pages object. + * @sgt: Pointer to scatter gather table. + * + * Return: 0 on success, else error code. 
+ */ +static int append_sg_table(struct mvx_mmu_pages *pages, + struct sg_table *sgt) +{ + size_t count; + struct sg_page_iter piter; + + count = get_sg_table_npages(sgt) * MVX_PAGES_PER_PAGE; + + if ((pages->count + count) > pages->capacity) { + MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_WARNING, + "Failed to append SG table. Pages capacity too small. count=%zu, capacity=%zu, append=%zu.", + pages->count, pages->capacity, count); + return -ENOMEM; + } + + for_each_sg_page(sgt->sgl, &piter, sgt->nents, 0) { + int j; + phys_addr_t base; + + base = (phys_addr_t)sg_page_iter_dma_address((struct sg_dma_page_iter *)(&piter)) & + PAGE_MASK; + + for (j = 0; j < MVX_PAGES_PER_PAGE; ++j) + pages->pages[pages->count++] = + base + j * MVE_PAGE_SIZE; + } + + return 0; +} + +/** + * stat_show() - Print debugfs info into seq-file. + * + * This is a callback used by debugfs subsystem. + * + * @s: Seq-file + * @v: Unused + * return: 0 on success, else error code. + */ +static int stat_show(struct seq_file *s, + void *v) +{ + struct mvx_mmu_pages *pages = s->private; + + seq_printf(s, "va: %08x\n", pages->va); + seq_printf(s, "capacity: %zu\n", pages->capacity); + seq_printf(s, "count: %zu\n", pages->count); + + if (pages->mmu != NULL) { + seq_printf(s, "attr: %d\n", pages->attr); + seq_printf(s, "access: %d\n", pages->access); + } + + return 0; +} + +/** + * stat_open() - Open debugfs file. + * + * This is a callback used by debugfs subsystem. + * + * @inode: Inode + * @file: File + * return: 0 on success, else error code. + */ +static int stat_open(struct inode *inode, + struct file *file) +{ + return single_open(file, stat_show, inode->i_private); +} + +/** + * File operations for debugfs entry. + */ +static const struct file_operations stat_fops = { + .open = stat_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release +}; + +/** + * pages_seq_start() - Iterator over pages list. 
+ */ +static void *pages_seq_start(struct seq_file *s, + loff_t *pos) +{ + struct mvx_mmu_pages *pages = s->private; + + if (*pos >= pages->count) + return NULL; + + seq_puts(s, + "#Page: [ va_start - va_end] -> [ pa_start - pa_end]\n"); + return pos; +} + +/** + * pages_seq_start() - Iterator over pages list. + */ +static void *pages_seq_next(struct seq_file *s, + void *v, + loff_t *pos) +{ + struct mvx_mmu_pages *pages = s->private; + + ++*pos; + if (*pos >= pages->count) + return NULL; + + return pos; +} + +/** + * pages_seq_start() - Iterator over pages list. + */ +static void pages_seq_stop(struct seq_file *s, + void *v) +{} + +/** + * pages_seq_start() - Iterator over pages list. + */ +static int pages_seq_show(struct seq_file *s, + void *v) +{ + struct mvx_mmu_pages *pages = s->private; + loff_t pos = *(loff_t *)v; + + mvx_mmu_va va_start = pages->va + pos * MVE_PAGE_SIZE; + mvx_mmu_va va_end = va_start + MVE_PAGE_SIZE - 1; + phys_addr_t pa_start = pages->pages[pos]; + phys_addr_t pa_end = pa_start + MVE_PAGE_SIZE - 1; + + seq_printf(s, "%5llu: [0x%08x - 0x%08x] -> [%pap - %pap]\n", pos, + va_start, va_end, &pa_start, &pa_end); + return 0; +} + +/** + * mpages_seq_ops - Callbacks used by an iterator over pages list. + */ +static const struct seq_operations pages_seq_ops = { + .start = pages_seq_start, + .next = pages_seq_next, + .stop = pages_seq_stop, + .show = pages_seq_show +}; + +/** + * list_open() - Callback for debugfs entry. + */ +static int list_open(struct inode *inode, + struct file *file) +{ + int ret; + struct seq_file *s; + + ret = seq_open(file, &pages_seq_ops); + if (ret != 0) + return ret; + + s = (struct seq_file *)file->private_data; + s->private = inode->i_private; + + return 0; +} + +/** + * File operations for a debugfs entry. 
+ */ +static const struct file_operations list_fops = { + .open = list_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release +}; + +/**************************************************************************** + * Exported functions + ****************************************************************************/ + +int mvx_mmu_construct(struct mvx_mmu *mmu, + struct device *dev) +{ + phys_addr_t page_table; + + mmu->dev = dev; + + /* Allocate Page Table Base (the L1 table). */ + page_table = mvx_mmu_alloc_page(dev, GFP_KERNEL | __GFP_ZERO); + if (page_table == 0) + return -ENOMEM; + + mmu->page_table = phys_to_virt(page_table); + + return 0; +} + +void mvx_mmu_destruct(struct mvx_mmu *mmu) +{ + mvx_mmu_pte *pte = mmu->page_table; + phys_addr_t pa; + int i; + int count = 0; + + for (i = 0; i < MVE_INDEX_SIZE; i++) { + pa = get_pa(pte[i]); + + /* Only free pages we have allocated ourselves. */ + if (test_bit(i, mmu->l2_page_is_external) == 0) { + count += mapped_count(pa); + mvx_mmu_free_page(mmu->dev, pa); + } + } + + pa = virt_to_phys(mmu->page_table); + mvx_mmu_free_page(mmu->dev, pa); + + WARN_ON(count > 0); +} + +phys_addr_t mvx_mmu_alloc_page(struct device *dev, gfp_t gfp_mask) +{ + struct page *page; + phys_addr_t pa; + dma_addr_t dma_handle; + + page = alloc_page(gfp_mask); + if (page == NULL) + return 0; + + dma_handle = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_BIDIRECTIONAL); + if (dma_mapping_error(dev, dma_handle) != 0) { + MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_WARNING, + "Cannot map page to DMA address space. 
page=%px.", + page); + goto free_page; + } + + pa = (phys_addr_t)dma_handle; + + if (gfp_mask & __GFP_ZERO) + dma_sync_single_for_device(dev, pa, PAGE_SIZE, DMA_TO_DEVICE); + + return pa; + +free_page: + __free_page(page); + return 0; +} + +void mvx_mmu_free_contiguous_pages(struct device *dev, phys_addr_t pa, size_t npages) +{ + struct page *page; + + if (pa == 0) + return; + + page = phys_to_page(pa); + + dma_unmap_page(dev, pa, npages << PAGE_SHIFT, DMA_BIDIRECTIONAL); + __free_pages(page, get_order(npages << PAGE_SHIFT)); +} + +phys_addr_t mvx_mmu_alloc_contiguous_pages(struct device *dev, size_t npages, gfp_t gfp_mask) +{ + struct page *page; + phys_addr_t pa; + dma_addr_t dma_handle; + size_t size = (npages << PAGE_SHIFT); + + page = alloc_pages(gfp_mask, get_order(size)); + if (page == NULL) + return 0; + + dma_handle = dma_map_page(dev, page, 0, size, DMA_BIDIRECTIONAL); + if (dma_mapping_error(dev, dma_handle) != 0) { + MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_WARNING, + "Cannot map page to DMA address space. 
page=%px.", + page); + goto free_pages; + } + + pa = (phys_addr_t)dma_handle; + + if (gfp_mask & __GFP_ZERO) + dma_sync_single_for_device(dev, pa, size, DMA_TO_DEVICE); + + return pa; + +free_pages: + __free_pages(page, get_order(size)); + return 0; +} + +void mvx_mmu_free_noncontiguous(struct device *dev, + struct mvx_mmu_pages *pages, struct sg_table *sgt, void **data, size_t size) +{ + dma_vunmap_noncontiguous(dev, *data); + dma_free_noncontiguous(dev, size, sgt, DMA_FROM_DEVICE); + vfree(pages); +} + +void* mvx_mmu_alloc_noncontiguous(struct device *dev, + struct mvx_mmu_pages **pages, struct sg_table **sgt, size_t size, gfp_t gfp_mask) +{ + void *data; + struct mvx_mmu_pages *tmp_pages; + struct sg_table *tmp_sgt; + + tmp_sgt = dma_alloc_noncontiguous(dev, + size, DMA_FROM_DEVICE, gfp_mask, DMA_ATTR_ALLOC_SINGLE_PAGES); + if (tmp_sgt == NULL) + return NULL; + + data = dma_vmap_noncontiguous(dev, size, tmp_sgt); + if (data == NULL) { + MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_WARNING, + "Cannot map sg_table to DMA address space"); + goto free_sg_table; + } + tmp_pages = mvx_mmu_alloc_pages_sg(dev, + tmp_sgt, DIV_ROUND_UP(size, MVE_PAGE_SIZE)); + if (IS_ERR(tmp_pages)) + goto free_sg_table; + + *pages = tmp_pages; + *sgt = tmp_sgt; + return data; + +free_sg_table: + dma_free_noncontiguous(dev, size, tmp_sgt, DMA_FROM_DEVICE); + return NULL; +} + +void mvx_mmu_free_page(struct device *dev, + phys_addr_t pa) +{ + struct page *page; + + if (pa == 0) + return; + + page = phys_to_page(pa); + + dma_unmap_page(dev, pa, PAGE_SIZE, DMA_BIDIRECTIONAL); + __free_page(page); +} + +struct mvx_mmu_pages *mvx_mmu_alloc_pages(struct device *dev, + size_t count, + size_t capacity, + gfp_t gfp_mask) +{ + struct mvx_mmu_pages *pages; + int ret; + + count = roundup(count, MVX_PAGES_PER_PAGE); + capacity = roundup(capacity, MVX_PAGES_PER_PAGE); + capacity = max(count, capacity); + + pages = vmalloc(sizeof(*pages) + sizeof(phys_addr_t) * capacity); + memset(pages, 0, sizeof(*pages) + 
sizeof(phys_addr_t) * capacity); + + if (pages == NULL) + return ERR_PTR(-ENOMEM); + + pages->dev = dev; + pages->capacity = capacity; + INIT_LIST_HEAD(&pages->dmabuf); + + for (pages->count = 0; pages->count < count; ) { + phys_addr_t page; + unsigned int i; + + /* + * Allocate a Linux page. It will typically be of the same size + * as the MVE page, but could also be larger. + */ + page = mvx_mmu_alloc_page(dev, gfp_mask); + if (page == 0) { + ret = -ENOMEM; + goto release_pages; + } + + /* + * If the Linux page is larger than the MVE page, then + * we iterate and add physical addresses with an offset from + * the Linux page. + */ + for (i = 0; i < MVX_PAGES_PER_PAGE; i++) + pages->pages[pages->count++] = + page + i * MVE_PAGE_SIZE; + } + + return pages; + +release_pages: + vfree(pages); + + return ERR_PTR(ret); +} + +struct mvx_mmu_pages *mvx_mmu_alloc_pages_sg(struct device *dev, + struct sg_table *sgt, + size_t capacity) +{ + struct mvx_mmu_pages *pages; + size_t count; + int ret; + + count = get_sg_table_npages(sgt) * MVX_PAGES_PER_PAGE; + capacity = roundup(capacity, MVX_PAGES_PER_PAGE); + capacity = max(count, capacity); + + pages = vmalloc(sizeof(*pages) + sizeof(phys_addr_t) * capacity); + if (pages == NULL) + return ERR_PTR(-ENOMEM); + memset(pages, 0, sizeof(*pages) + sizeof(phys_addr_t) * capacity); + + pages->dev = dev; + pages->capacity = capacity; + pages->is_external = true; + pages->offset = sgt->sgl != NULL ? 
sgt->sgl->offset : 0; + INIT_LIST_HEAD(&pages->dmabuf); + + ret = append_sg_table(pages, sgt); + if (ret != 0) { + vfree(pages); + return ERR_PTR(ret); + } + + return pages; +} + +struct mvx_mmu_pages *mvx_mmu_alloc_pages_dma_buf(struct device *dev, + struct dma_buf *dmabuf, + size_t capacity) +{ + struct mvx_mmu_pages *pages; + struct dma_buf_attachment *attach; + struct sg_table *sgt; + struct mvx_mmu_dma_buf *mbuf; + + attach = dma_buf_attach(dmabuf, dev); + if (IS_ERR(attach)) { + MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_WARNING, + "Failed to attach DMA buffer."); + return (struct mvx_mmu_pages *)attach; + } + + sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL); + if (IS_ERR(sgt)) { + MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_WARNING, + "Failed to get SG table from DMA buffer."); + pages = (struct mvx_mmu_pages *)sgt; + goto detach; + } + + pages = mvx_mmu_alloc_pages_sg(dev, sgt, capacity); + if (IS_ERR(pages)) + goto unmap; + + mbuf = devm_kzalloc(dev, sizeof(*mbuf), GFP_KERNEL); + if (mbuf == NULL) { + mvx_mmu_free_pages(pages); + pages = ERR_PTR(-ENOMEM); + goto unmap; + } + + mbuf->dmabuf = dmabuf; + list_add_tail(&mbuf->head, &pages->dmabuf); + +unmap: + dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL); + +detach: + dma_buf_detach(dmabuf, attach); + + return pages; +} + +int mvx_mmu_pages_append_dma_buf(struct mvx_mmu_pages *pages, + struct dma_buf *dmabuf) +{ + struct dma_buf_attachment *attach; + struct sg_table *sgt; + struct mvx_mmu_dma_buf *mbuf; + size_t oldcount = pages->count; + int ret; + + if (pages->is_external == false) { + MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_WARNING, + "Can't append DMA buffer to internal pages object."); + return -EINVAL; + } + + attach = dma_buf_attach(dmabuf, pages->dev); + if (IS_ERR(attach)) { + MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_WARNING, + "Failed to attach DMA buffer."); + return PTR_ERR(attach); + } + + sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL); + if (IS_ERR(sgt)) { + MVX_LOG_PRINT(&mvx_log_if, 
MVX_LOG_WARNING, + "Failed to get SG table from DMA buffer."); + ret = PTR_ERR(sgt); + goto detach; + } + + ret = append_sg_table(pages, sgt); + if (ret != 0) + goto unmap; + + ret = remap_pages(pages, oldcount); + if (ret != 0) + goto unmap; + + mbuf = devm_kzalloc(pages->dev, sizeof(*mbuf), GFP_KERNEL); + if (mbuf == NULL) { + ret = -ENOMEM; + goto unmap; + } + + mbuf->dmabuf = dmabuf; + list_add_tail(&mbuf->head, &pages->dmabuf); + +unmap: + dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL); + +detach: + dma_buf_detach(dmabuf, attach); + + return ret; +} + +int mvx_mmu_resize_pages(struct mvx_mmu_pages *pages, + size_t npages) +{ + size_t oldcount = pages->count; + + if (pages->is_external != false) { + MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_WARNING, + "MMU with externally managed pages cannot be resized."); + return -EINVAL; + } + + npages = roundup(npages, MVX_PAGES_PER_PAGE); + + if (npages > pages->capacity) { + MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_WARNING, + "New MMU pages size is larger than capacity. npages=%zu, capacity=%zu.", + npages, pages->capacity); + return -ENOMEM; + } + + /* Free pages if npage is smaller than allocated pages. */ + while (pages->count > npages) { + pages->count--; + + if ((pages->count % MVX_PAGES_PER_PAGE) == 0) + mvx_mmu_free_page(pages->dev, + pages->pages[pages->count]); + + pages->pages[pages->count] = 0; + } + + /* Allocate pages if npage is larger than allocated pages. 
*/ + while (pages->count < npages) { + phys_addr_t page; + unsigned int i; + + page = mvx_mmu_alloc_page(pages->dev, GFP_KERNEL); + if (page == 0) + return -ENOMEM; + + for (i = 0; i < MVX_PAGES_PER_PAGE; i++) + pages->pages[pages->count++] = + page + i * MVE_PAGE_SIZE; + } + + return remap_pages(pages, oldcount); +} + +void mvx_mmu_free_pages(struct mvx_mmu_pages *pages) +{ + struct mvx_mmu_dma_buf *mbuf; + struct mvx_mmu_dma_buf *tmp; + unsigned int i; + + mvx_mmu_unmap_pages(pages); + + if (pages->is_external == false) + for (i = 0; i < pages->count; i += MVX_PAGES_PER_PAGE) + mvx_mmu_free_page(pages->dev, pages->pages[i]); + + list_for_each_entry_safe(mbuf, tmp, &pages->dmabuf, head) { + dma_buf_put(mbuf->dmabuf); + devm_kfree(pages->dev, mbuf); + } + + vfree(pages); +} + +size_t mvx_mmu_size_pages(struct mvx_mmu_pages *pages) +{ + return pages->count * MVE_PAGE_SIZE; +} + +int mvx_mmu_synch_pages(struct mvx_mmu_pages *pages, + enum dma_data_direction dir) +{ + size_t i; + + if (dir == DMA_FROM_DEVICE) { + for (i = 0; i < pages->count; i += MVX_PAGES_PER_PAGE) + dma_sync_single_for_cpu(pages->dev, pages->pages[i], + PAGE_SIZE, DMA_FROM_DEVICE); + } else if (dir == DMA_TO_DEVICE) { + for (i = 0; i < pages->count; i += MVX_PAGES_PER_PAGE) + dma_sync_single_for_device(pages->dev, pages->pages[i], + PAGE_SIZE, DMA_TO_DEVICE); + } else { + MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_WARNING, + "Unsupported MMU flush direction. dir=%u.", + dir); + return -EINVAL; + } + + return 0; +} + +int mvx_mmu_map_pages(struct mvx_mmu *mmu, + mvx_mmu_va va, + struct mvx_mmu_pages *pages, + enum mvx_mmu_attr attr, + enum mvx_mmu_access access, + mvx_mmu_va *tried_size) +{ + size_t i; + int ret; + + /* Map the allocated pages. */ + for (i = 0; i < pages->count; i++) { + ret = map_page(mmu, va + i * MVE_PAGE_SIZE, pages->pages[i], + attr, access); + if (ret != 0) + goto unmap_pages; + } + + /* + * Reserve the rest of the address range. 
Adding a dummy page with + * physical address 'PAGE_SIZE' should not lead to memory corruption, + * because the page is marked as 'no access'. + */ + for (; i < pages->capacity; i++) { + ret = map_page(mmu, va + i * MVE_PAGE_SIZE, MVE_PAGE_SIZE, + MVX_ATTR_PRIVATE, MVX_ACCESS_NO); + if (ret != 0) + goto unmap_pages; + } + + pages->mmu = mmu; + pages->va = va; + pages->attr = attr; + pages->access = access; + + return 0; + +unmap_pages: + if (tried_size != NULL) + *tried_size = (i + 1) * MVE_PAGE_SIZE; + while (i-- > 0) + unmap_page(mmu, va + i * MVE_PAGE_SIZE); + + return ret; +} + +void mvx_mmu_unmap_pages(struct mvx_mmu_pages *pages) +{ + size_t i; + + if (pages->mmu == NULL) + return; + + for (i = 0; i < pages->capacity; i++) + unmap_page(pages->mmu, pages->va + i * MVE_PAGE_SIZE); + + pages->mmu = NULL; + pages->va = 0; +} + +int mvx_mmu_map_pa(struct mvx_mmu *mmu, + mvx_mmu_va va, + phys_addr_t pa, + size_t size, + enum mvx_mmu_attr attr, + enum mvx_mmu_access access) +{ + int ret; + size_t offset; + + for (offset = 0; offset < size; offset += MVE_PAGE_SIZE) { + ret = map_page(mmu, va + offset, pa + offset, + attr, access); + if (ret != 0) + goto unmap_pages; + } + + return 0; + +unmap_pages: + /* Unroll mapped pages. */ + while (offset > 0) { + offset -= MVE_PAGE_SIZE; + unmap_page(mmu, va + offset); + } + + return ret; +} + +int mvx_mmu_map_l2(struct mvx_mmu *mmu, + mvx_mmu_va va, + phys_addr_t pa) +{ + phys_addr_t l2; + mvx_mmu_pte *pte = mmu->page_table; + unsigned int index; + + /* Level 1. */ + index = get_index(va, 0); + l2 = get_pa(pte[index]); + + if (l2 != 0) { + MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_ERROR, + "Failed to map L2 page. 
Page already exists."); + return -EINVAL; + } + + set_bit(index, mmu->l2_page_is_external); + + pte[index] = mvx_mmu_set_pte(MVX_ATTR_PRIVATE, pa, + MVX_ACCESS_READ_ONLY); + dma_sync_single_for_device(mmu->dev, + virt_to_phys(&pte[index]), + sizeof(pte[index]), DMA_TO_DEVICE); + + return 0; +} + +void mvx_mmu_unmap_va(struct mvx_mmu *mmu, + mvx_mmu_va va, + size_t size) +{ + size_t offset; + + for (offset = 0; offset < size; offset += MVE_PAGE_SIZE) + unmap_page(mmu, va + offset); +} + +int mvx_mmu_va_to_pa(struct mvx_mmu *mmu, + mvx_mmu_va va, + phys_addr_t *pa) +{ + mvx_mmu_pte *pte; + phys_addr_t page; + + pte = ptw(mmu, va, false); + if (IS_ERR(pte)) + return PTR_ERR(pte); + + page = get_pa(*pte); + if (page == 0) + return -EFAULT; + + *pa = page | get_offset(va); + + return 0; +} + +/* LCOV_EXCL_START */ +int mvx_mmu_read(struct mvx_mmu *mmu, + mvx_mmu_va va, + void *data, + size_t size) +{ + mvx_mmu_va end = va + size; + + while (va < end) { + int ret; + size_t n; + phys_addr_t pa = 0; + void *src; + + /* Calculate number of bytes to be copied. */ + n = min(end - va, MVE_PAGE_SIZE - (va & MVE_PAGE_MASK)); + + /* Translate virtual- to physical address. */ + ret = mvx_mmu_va_to_pa(mmu, va, &pa); + if (ret != 0) + return ret; + + /* Invalidate the data range. */ + dma_sync_single_for_cpu(mmu->dev, pa, n, DMA_FROM_DEVICE); + + /* Convert from physical to Linux logical address. */ + src = phys_to_virt(pa); + memcpy(data, src, n); + + va += n; + data += n; + } + + return 0; +} + +/* LCOV_EXCL_STOP */ + +int mvx_mmu_write(struct mvx_mmu *mmu, + mvx_mmu_va va, + const void *data, + size_t size) +{ + mvx_mmu_va end = va + size; + + while (va < end) { + int ret; + size_t n; + phys_addr_t pa = 0; + void *dst; + + /* Calculate number of bytes to be copied. */ + n = min(end - va, MVE_PAGE_SIZE - (va & MVE_PAGE_MASK)); + + /* Translate virtual- to physical address. 
*/ + ret = mvx_mmu_va_to_pa(mmu, va, &pa); + if (ret != 0) + return ret; + + /* Convert from physical to Linux logical address. */ + dst = phys_to_virt(pa); + memcpy(dst, data, n); + + /* Flush the data to memory. */ + dma_sync_single_for_device(mmu->dev, pa, n, DMA_TO_DEVICE); + + va += n; + data += n; + } + + return 0; +} + +mvx_mmu_pte mvx_mmu_set_pte(enum mvx_mmu_attr attr, + phys_addr_t pa, + enum mvx_mmu_access access) +{ + return (attr << MVE_PTE_ATTR_SHIFT) | + ((pa >> MVE_PAGE_SHIFT) << MVE_PTE_PHYSADDR_SHIFT) | + (access << MVE_PTE_AP_SHIFT); +} + +/* LCOV_EXCL_START */ +void mvx_mmu_print(struct mvx_mmu *mmu) +{ + unsigned int i; + mvx_mmu_pte *l1 = mmu->page_table; + + for (i = 0; i < MVE_INDEX_SIZE; i++) { + phys_addr_t pa = get_pa(l1[i]); + unsigned int j; + + if (pa != 0) { + mvx_mmu_pte *l2 = phys_to_virt(pa); + + MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_INFO, + "%-4u: PA=0x%llx, ATTR=%u, ACC=%u", + i, pa, get_attr(l1[i]), get_ap(l1[i])); + + for (j = 0; j < MVE_INDEX_SIZE; j++) { + pa = get_pa(l2[j]); + if (pa != 0) { + mvx_mmu_va va; + + va = (i << (MVE_INDEX_SHIFT + + MVE_PAGE_SHIFT)) | + (j << MVE_PAGE_SHIFT); + MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_INFO, + "|------ %-4u: VA=0x%08x, PA=0x%llx, ATTR=%u, ACC=%u", + j, + va, + pa, + get_attr(l2[j]), + get_ap(l2[j])); + } + } + } + } +} + +/* LCOV_EXCL_STOP */ + +int mvx_mmu_pages_debugfs_init(struct mvx_mmu_pages *pages, + char *name, + struct dentry *parent) +{ + struct dentry *dpages; + struct dentry *dentry; + + dpages = debugfs_create_dir(name, parent); + if (IS_ERR_OR_NULL(dpages)) + return -ENOMEM; + + dentry = debugfs_create_file("stat", 0400, dpages, pages, + &stat_fops); + if (IS_ERR_OR_NULL(dentry)) + return -ENOMEM; + + dentry = debugfs_create_file("list", 0400, dpages, pages, + &list_fops); + if (IS_ERR_OR_NULL(dentry)) + return -ENOMEM; + + return 0; +} diff --git a/drivers/media/platform/cix/cix_vpu/if/mvx_mmu.h b/drivers/media/platform/cix/cix_vpu/if/mvx_mmu.h new file mode 100755 index 
000000000000..5aab846f5993 --- /dev/null +++ b/drivers/media/platform/cix/cix_vpu/if/mvx_mmu.h @@ -0,0 +1,481 @@ +/* + * The confidential and proprietary information contained in this file may + * only be used by a person authorised under and to the extent permitted + * by a subsisting licensing agreement from Arm Technology (China) Co., Ltd. + * + * (C) COPYRIGHT 2021-2021 Arm Technology (China) Co., Ltd. + * ALL RIGHTS RESERVED + * + * This entire notice must be reproduced on all copies of this file + * and copies of this file may only be made by a person if such person is + * permitted to do so under the terms of a subsisting license agreement + * from Arm Technology (China) Co., Ltd. + * + * SPDX-License-Identifier: GPL-2.0-only + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + * + */ + +#ifndef _MVX_MMU_H_ +#define _MVX_MMU_H_ + +/**************************************************************************** + * Includes + ****************************************************************************/ + +#include +#include +#include +#include + +/**************************************************************************** + * Defines + ****************************************************************************/ + +/* Page size in bits. 2^12 = 4kB. 
*/ +#define MVE_PAGE_SHIFT 12 +#define MVE_PAGE_SIZE (1 << MVE_PAGE_SHIFT) +#define MVE_PAGE_MASK (MVE_PAGE_SIZE - 1) + +/* Number of page table entries per page. */ +#define MVE_PAGE_PTE_PER_PAGE (MVE_PAGE_SIZE / sizeof(mvx_mmu_pte)) + +/**************************************************************************** + * Types + ****************************************************************************/ + +struct device; +struct dma_buf; +struct mvx_mmu; +struct page; + +/** + * typedef mvx_mmu_va - 32 bit virtual address. + * + * This is the address the firmware/MVE will use. + */ +typedef uint32_t mvx_mmu_va; + +/** + * typedef mvx_mmu_pte - Page table entry. + * + * A PTE pointer should always point at a Linux kernel virtual address. + * + * AT - Attribute. + * PA - Physical address. + * AP - Access permission. + * + * 30 2 0 + * +---+-------------------------------------------------------+---+ + * | AT| PA 39:12 | AP| + * +---+-------------------------------------------------------+---+ + */ +typedef uint32_t mvx_mmu_pte; + +enum mvx_mmu_attr { + MVX_ATTR_PRIVATE = 0, + MVX_ATTR_SHARED_COHERENT = 1, + MVX_ATTR_SHARED_RO = 2, + MVX_ATTR_SHARED_RW = 3 +}; + +enum mvx_mmu_access { + MVX_ACCESS_NO = 0, + MVX_ACCESS_READ_ONLY = 1, + MVX_ACCESS_EXECUTABLE = 2, + MVX_ACCESS_READ_WRITE = 3 +}; + +/** + * struct mvx_mmu_pages - Structure used to allocate an array of pages. + * @dev: Pointer to device. + * @node: Hash table node. Used to keep track of allocated pages objects. + * @mmu: Pointer to MMU instance. + * @va: MVE virtual address. Set to 0 if objects is unmapped. + * @offset: Offset from mapped VA to where the data begins. + * @attr: Page table attributes. + * @access: Page table access. + * @capacity: Maximum number of MVE pages this object can hold. + * @count: Current number of allocated pages. + * @is_external:If the physical pages have been externally allocated. + * @dmabuf: List of DMA buffers. + * @pages: Array of pages. 
+ */ +struct mvx_mmu_pages { + struct device *dev; + struct hlist_node node; + struct mvx_mmu *mmu; + mvx_mmu_va va; + size_t offset; + enum mvx_mmu_attr attr; + enum mvx_mmu_access access; + size_t capacity; + size_t count; + bool is_external; + struct list_head dmabuf; + phys_addr_t pages[0]; +}; + +/** + * struct mvx_mmu - MMU context. + * @dev: Pointer to device. + * @page_table: Virtual address to L1 page. + * @l2_page_is_external: Bitmap of which L2 pages that have been mapped + * externally. + */ +struct mvx_mmu { + struct device *dev; + mvx_mmu_pte *page_table; + DECLARE_BITMAP(l2_page_is_external, MVE_PAGE_PTE_PER_PAGE); +}; + +/**************************************************************************** + * Static functions + ****************************************************************************/ + +#ifndef phys_to_page + +/** + * phys_to_page() - Convert a physical address to a pointer to a page. + * @pa: Physical address. + * + * Return: Pointer to page struct. + */ +static inline struct page *phys_to_page(unsigned long pa) +{ + return pfn_to_page(__phys_to_pfn(pa)); +} + +#endif + +/**************************************************************************** + * Exported functions + ****************************************************************************/ + +/** + * mvx_mmu_construct() - Construct the MMU object. + * @mmu: Pointer to MMU object. + * @dev: Pointer to device. + * + * Return: 0 on success, else error code. + */ +int mvx_mmu_construct(struct mvx_mmu *mmu, + struct device *dev); + +/** + * mvx_mmu_destruct() - Destroy the MMU object. + * @mmu: Pointer to MMU object. + */ +void mvx_mmu_destruct(struct mvx_mmu *mmu); + +/** + * mvx_mmu_alloc_noncontiguous() - Allocate pages with noncontiguous virt addr. + * dev: Pointer to device. + * pages: mmu pages to alloc + * sgt: sg_table to alloc + * size: Number of bytes to allocate. + * gfp_mask: GFP bitmasks. + * Return: Physical page address on success, else 0. 
+ */ +void* mvx_mmu_alloc_noncontiguous(struct device *dev, + struct mvx_mmu_pages **pages, struct sg_table **sgt, size_t size, gfp_t gfp_mask); + +/* + * mvx_mmu_free_noncontiguous() - Free noncontiguous virt addr pages. + * + * dev: device to free memory + * pages: mmu pages to free + * sgt: sg_table to free + * data: mapped data + * size: allocated size for free + */ +void mvx_mmu_free_noncontiguous(struct device *dev, + struct mvx_mmu_pages *pages, struct sg_table *sgt, void **data, size_t size); + +/** + * mvx_mmu_alloc_contiguous_pages() - Allocate contiguous pages. + * dev: Pointer to device. + * npages: Number of pages to allocate. + * gfp_mask: GFP bitmasks. + * Return: Physical page address on success, else 0. + */ +phys_addr_t mvx_mmu_alloc_contiguous_pages(struct device *dev, size_t npages, gfp_t gfp_mask); + +/* + * mvx_mmu_free_contiguous_pages() - Free contiguous pages. + * + * dev: Pointer to device. + * pa: Physical page address or 0. + * npages: Number of pages to free. + */ +void mvx_mmu_free_contiguous_pages(struct device *dev, phys_addr_t pa, size_t npages); + +/** + * mvx_mmu_alloc_page() - Allocate one page. + * dev: Pointer to device. + * gfp_mask: GFP bitmasks. + * Return: Physical page address on success, else 0. + */ +phys_addr_t mvx_mmu_alloc_page(struct device *dev, gfp_t gfp_mask); + +/* + * mvx_mmu_free_page() - Free one page. + * + * dev: Pointer to device. + * pa: Physical page address or 0. + */ +void mvx_mmu_free_page(struct device *dev, + phys_addr_t pa); + +/** + * mvx_mmu_alloc_pages() - Allocate array of pages. + * @dev: Pointer to device. + * @npages Number of pages to allocate. + * @capacity: Maximum number of pages this allocation can be resized + * to. If this value is 0 or smaller than npages, then it will be + * set to npages. + *@gfp_mask: GFP bitmasks. + * + * Pages are not guaranteed to be physically continuous. + * + * Return: Valid pointer on success, else ERR_PTR. 
+ */
+struct mvx_mmu_pages *mvx_mmu_alloc_pages(struct device *dev,
+					  size_t npages,
+					  size_t capacity,
+					  gfp_t gfp_mask);
+
+/**
+ * mvx_mmu_alloc_pages_sg() - Allocate array of pages from SG table.
+ * @dev:	Pointer to device.
+ * @sgt:	Scatter-gather table with pre-allocated memory pages.
+ * @capacity:	Maximum number of pages this allocation can be resized
+ *		to. If this value is 0 or smaller than number of pages
+ *		in scatter gather table, then it will be rounded up
+ *		to SG table size.
+ *
+ * Pages are not guaranteed to be physically contiguous.
+ *
+ * Return: Valid pointer on success, else ERR_PTR.
+ */
+struct mvx_mmu_pages *mvx_mmu_alloc_pages_sg(struct device *dev,
+					     struct sg_table *sgt,
+					     size_t capacity);
+
+/**
+ * mvx_mmu_alloc_pages_dma_buf() - Allocate pages object from DMA buffer.
+ * @dev:	Pointer to device.
+ * @dmabuf:	Pointer to DMA buffer.
+ * @capacity:	Maximum number of pages this allocation can be resized
+ *		to. If this value is 0 or smaller than number of pages
+ *		in DMA buffer, then it will be rounded up to DMA buffer
+ *		size.
+ *
+ * The pages object will take ownership of the DMA buffer and call
+ * dma_buf_put() when the pages object is destructed.
+ *
+ * Return: Valid pointer on success, else ERR_PTR.
+ */
+struct mvx_mmu_pages *mvx_mmu_alloc_pages_dma_buf(struct device *dev,
+						  struct dma_buf *dmabuf,
+						  size_t capacity);
+
+/**
+ * mvx_mmu_pages_append_dma_buf() - Append DMA buffer to pages object.
+ * @pages:	Pointer to pages object.
+ * @dmabuf:	Pointer to DMA buffer.
+ *
+ * Return: 0 on success, else error code.
+ */
+int mvx_mmu_pages_append_dma_buf(struct mvx_mmu_pages *pages,
+				 struct dma_buf *dmabuf);
+
+/**
+ * mvx_mmu_resize_pages() - Resize the page allocation.
+ * @pages:	Pointer to pages object.
+ * @npages:	Number of pages to allocate.
+ *
+ * If the number of pages is smaller, then pages will be freed.
+ *
+ * If the number of pages is larger, then additional memory will be allocated.
+ * The already allocated pages will keep their physical addresses.
+ *
+ * Return: 0 on success, else error code.
+ */
+int mvx_mmu_resize_pages(struct mvx_mmu_pages *pages,
+			 size_t npages);
+
+/**
+ * mvx_mmu_free_pages() - Free pages.
+ * @pages:	Pointer to pages object.
+ */
+void mvx_mmu_free_pages(struct mvx_mmu_pages *pages);
+
+/**
+ * mvx_mmu_size_pages() - Get number of allocated bytes.
+ * @pages:	Pointer to pages object.
+ *
+ * Return: Size in bytes of pages.
+ */
+size_t mvx_mmu_size_pages(struct mvx_mmu_pages *pages);
+
+/**
+ * mvx_mmu_synch_pages() - Synch data caches.
+ * @pages:	Pointer to pages object.
+ * @dir:	Which direction to synch.
+ *
+ * Return: 0 on success, else error code.
+ */
+int mvx_mmu_synch_pages(struct mvx_mmu_pages *pages,
+			enum dma_data_direction dir);
+
+/**
+ * mvx_mmu_map_pages() - Map an array of pages to a virtual address.
+ * @mmu:	Pointer to MMU object.
+ * @va:		Virtual address.
+ * @pages:	Pointer to pages object.
+ * @attr:	Bus attributes.
+ * @access:	Access permission.
+ * @tried_size: Pointer to tried bytes when map failed.
+ *
+ * Return: 0 on success, else error code.
+ */
+int mvx_mmu_map_pages(struct mvx_mmu *mmu,
+		      mvx_mmu_va va,
+		      struct mvx_mmu_pages *pages,
+		      enum mvx_mmu_attr attr,
+		      enum mvx_mmu_access access,
+		      mvx_mmu_va *tried_size);
+
+/**
+ * mvx_mmu_unmap_pages() - Unmap pages object.
+ * @pages:	Pointer to pages object.
+ */
+void mvx_mmu_unmap_pages(struct mvx_mmu_pages *pages);
+
+/**
+ * mvx_mmu_map_pa() - Map a physical- to a virtual address.
+ * @mmu:	Pointer to MMU object.
+ * @va:		Virtual address.
+ * @pa:		Physical address.
+ * @size:	Size of area to map.
+ * @attr:	Bus attributes.
+ * @access:	Access permission.
+ *
+ * Both the VA and PA must be page aligned.
+ *
+ * Return: 0 on success, else error code.
+ */ +int mvx_mmu_map_pa(struct mvx_mmu *mmu, + mvx_mmu_va va, + phys_addr_t pa, + size_t size, + enum mvx_mmu_attr attr, + enum mvx_mmu_access access); + +/** + * mvx_mmu_map_l2() - Map a L2 page. + * @mmu: Pointer to MMU object. + * @va: Virtual address. + * @pa: Physical address. + * + * Return: 0 on success, else error code. + */ +int mvx_mmu_map_l2(struct mvx_mmu *mmu, + mvx_mmu_va va, + phys_addr_t pa); + +/** + * mvx_mmu_unmap_va() - Unmap a virtual address range. + * @mmu: Pointer to MMU object. + * @va: Virtual address. + * @size: Size of area to unmap. + */ +void mvx_mmu_unmap_va(struct mvx_mmu *mmu, + mvx_mmu_va va, + size_t size); + +/** + * mvx_mmu_va_to_pa() - Map a virtual- to a physical address. + * @mmu: Pointer to MMU object. + * @va: Virtual address. + * @pa: Pointer to physical address. + * + * Return: 0 on success, else error code. + */ +int mvx_mmu_va_to_pa(struct mvx_mmu *mmu, + mvx_mmu_va va, + phys_addr_t *pa); + +/** + * mvx_mmu_read() - Read size bytes from virtual address. + * @mmu: Pointer to MMU object. + * @va: Source virtual address. + * @data: Pointer to destination data. + * @size: Number of bytes to copy. + * + * Return: 0 on success, else error code. + */ +int mvx_mmu_read(struct mvx_mmu *mmu, + mvx_mmu_va va, + void *data, + size_t size); + +/** + * mvx_mmu_write() - Write size bytes to virtual address. + * @mmu: Pointer to MMU object. + * @va: Destination virtual address. + * @data: Pointer to source data. + * @size: Number of bytes to copy. + * + * Return: 0 on success, else error code. + */ +int mvx_mmu_write(struct mvx_mmu *mmu, + mvx_mmu_va va, + const void *data, + size_t size); + +/** + * mvx_mmu_set_pte() - Construct PTE and return PTE value. + * @attr: Bus attributes. + * @pa: Physical address. + * @access: Access permission. + * + * Return: Page table entry. + */ +mvx_mmu_pte mvx_mmu_set_pte(enum mvx_mmu_attr attr, + phys_addr_t pa, + enum mvx_mmu_access access); + +/** + * mvx_mmu_print() - Print the MMU table. 
+ * @mmu: Pointer to MMU object. + */ +void mvx_mmu_print(struct mvx_mmu *mmu); + +/** + * mvx_mmu_pages_debugfs_init() - Init debugfs entry. + * @pages: Pointer to MMU pages. + * @name: Name of debugfs entry. + * @parent: Parent debugfs entry. + * + * Return: 0 on success, else error code. + */ +int mvx_mmu_pages_debugfs_init(struct mvx_mmu_pages *pages, + char *name, + struct dentry *parent); + +#endif /* _MVX_MMU_H_ */ diff --git a/drivers/media/platform/cix/cix_vpu/if/mvx_secure.c b/drivers/media/platform/cix/cix_vpu/if/mvx_secure.c new file mode 100755 index 000000000000..16939c5b944a --- /dev/null +++ b/drivers/media/platform/cix/cix_vpu/if/mvx_secure.c @@ -0,0 +1,412 @@ +/* + * The confidential and proprietary information contained in this file may + * only be used by a person authorised under and to the extent permitted + * by a subsisting licensing agreement from Arm Technology (China) Co., Ltd. + * + * (C) COPYRIGHT 2021-2021 Arm Technology (China) Co., Ltd. + * ALL RIGHTS RESERVED + * + * This entire notice must be reproduced on all copies of this file + * and copies of this file may only be made by a person if such person is + * permitted to do so under the terms of a subsisting license agreement + * from Arm Technology (China) Co., Ltd. + * + * SPDX-License-Identifier: GPL-2.0-only + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + * + */ + +/**************************************************************************** + * Includes + ****************************************************************************/ + +#include +#include +#include +#include +#include +#include "mvx_log_group.h" +#include "mvx_secure.h" + +/**************************************************************************** + * Types + ****************************************************************************/ + +#pragma pack(push, 1) +struct secure_firmware_desc { + int32_t fd; + uint64_t l2pages; + struct + { + uint32_t major; + uint32_t minor; + } protocol; +}; +#pragma pack(pop) + +struct mvx_secure_firmware_priv { + struct device *dev; + struct kobject kobj; + struct work_struct work; + wait_queue_head_t wait_queue; + struct mvx_secure_firmware fw; + mvx_secure_firmware_done done; + void *done_arg; +}; + +struct mvx_secure_mem { + struct device *dev; + struct kobject kobj; + wait_queue_head_t wait_queue; + struct dma_buf *dmabuf; +}; + +/**************************************************************************** + * Secure + ****************************************************************************/ + +int mvx_secure_construct(struct mvx_secure *secure, + struct device *dev) +{ + secure->dev = dev; + secure->kset = kset_create_and_add("securevideo", NULL, &dev->kobj); + if (secure->kset == NULL) { + MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_WARNING, + "Failed to create securevideo kset."); + return -EINVAL; + } + + secure->workqueue = alloc_workqueue("mvx_securevideo", + WQ_UNBOUND | WQ_MEM_RECLAIM, 1); + if (secure->workqueue == NULL) { + kset_unregister(secure->kset); + return -EINVAL; + } + + return 0; +} + +void mvx_secure_destruct(struct mvx_secure *secure) +{ + 
destroy_workqueue(secure->workqueue); + kset_unregister(secure->kset); +} + +/**************************************************************************** + * Secure firmware + ****************************************************************************/ + +/** + * firmware_store() - Firmware sysfs store function. + * + * Store values from firmware descriptor, get the DMA handle and wake up any + * waiting process. + */ +static ssize_t firmware_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buf, + size_t size) +{ + struct mvx_secure_firmware_priv *securefw = + container_of(kobj, struct mvx_secure_firmware_priv, kobj); + const struct secure_firmware_desc *desc = (const void *)buf; + + MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_INFO, + "Loaded secure firmware. fd=%d, l2=0x%llx, major=%u, minor=%u.", + desc->fd, desc->l2pages, desc->protocol.major, + desc->protocol.minor); + + securefw->fw.l2pages = desc->l2pages; + securefw->fw.protocol.major = desc->protocol.major; + securefw->fw.protocol.minor = desc->protocol.minor; + securefw->fw.dmabuf = dma_buf_get(desc->fd); + if (IS_ERR_OR_NULL(securefw->fw.dmabuf)) + MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_WARNING, + "Failed to get DMA buffer from fd. fd=%d.", + desc->fd); + + wake_up_interruptible(&securefw->wait_queue); + + return size; +} + +/** + * secure_firmware_release() - Release secure firmware. + * kobj: Pointer to kobject. + */ +static void secure_firmware_release(struct kobject *kobj) +{ + struct mvx_secure_firmware_priv *securefw = + container_of(kobj, struct mvx_secure_firmware_priv, kobj); + + if (IS_ERR_OR_NULL(securefw->fw.dmabuf) == false) + dma_buf_put(securefw->fw.dmabuf); + + devm_kfree(securefw->dev, securefw); +} + +/** + * secure_firmware_wait() - Wait for firmware load. + * @work: Pointer to work member in mvx_secure_firmware_priv. + * + * Worker thread used to wait for a secure firmware load to complete. 
+ */ +static void secure_firmware_wait(struct work_struct *work) +{ + struct mvx_secure_firmware_priv *securefw = + container_of(work, struct mvx_secure_firmware_priv, work); + int ret; + + ret = wait_event_interruptible_timeout(securefw->wait_queue, + securefw->fw.dmabuf != NULL, + msecs_to_jiffies(10000)); + if (ret == 0) + MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_WARNING, + "Firmware load timed out."); + + kobject_del(&securefw->kobj); + + if (securefw->done != NULL) + securefw->done(&securefw->fw, securefw->done_arg); +} + +/** + * secure_firmware_create() - Create a secure firmware object. + * @secure: Pointer to secure context. + * @name: Name for secure firmware binary. + * @ncores: Number of cores to setup. + * @arg: User argument to callback routine. + * @done: Firware load callback routine. + * + * Return: Valid pointer on success, else ERR_PTR. + */ +static struct mvx_secure_firmware_priv *secure_firmware_create( + struct mvx_secure *secure, + const char *name, + unsigned int ncores, + void *arg, + mvx_secure_firmware_done done) +{ + static struct kobj_attribute attr = __ATTR_WO(firmware); + static struct attribute *mvx_secure_fw_attrs[] = { + &attr.attr, + NULL + }; + ATTRIBUTE_GROUPS(mvx_secure_fw); + + static struct kobj_type secure_ktype = { + .sysfs_ops = &kobj_sysfs_ops, + .release = secure_firmware_release, + .default_groups = mvx_secure_fw_groups + }; + struct mvx_secure_firmware_priv *securefw; + char numcores_env[32]; + char fw_env[140]; + char *env[] = { "TYPE=firmware", numcores_env, fw_env, NULL }; + size_t n; + int ret; + + n = snprintf(fw_env, sizeof(fw_env), "FIRMWARE=%s.enc", name); + if (n >= sizeof(fw_env)) + return ERR_PTR(-EINVAL); + + n = snprintf(numcores_env, sizeof(numcores_env), "NUMCORES=%u", ncores); + if (n >= sizeof(numcores_env)) + return ERR_PTR(-EINVAL); + + /* Allocate and initialize the secure firmware object. 
*/ + securefw = devm_kzalloc(secure->dev, sizeof(*securefw), GFP_KERNEL); + if (securefw == NULL) + return ERR_PTR(-ENOMEM); + + securefw->dev = secure->dev; + securefw->kobj.kset = secure->kset; + securefw->fw.ncores = ncores; + securefw->done = done; + securefw->done_arg = arg; + init_waitqueue_head(&securefw->wait_queue); + + /* Create kobject that the user space helper can interact with. */ + ret = kobject_init_and_add(&securefw->kobj, &secure_ktype, NULL, "%px", + securefw); + if (ret != 0) + goto put_kobject; + + /* Notify user space helper about the secure firmware load. */ + ret = kobject_uevent_env(&securefw->kobj, KOBJ_ADD, env); + if (ret != 0) { + MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_WARNING, + "Failed to send secure firmware uevent. ret=%d.", + ret); + goto put_kobject; + } + + return securefw; + +put_kobject: + kobject_put(&securefw->kobj); + devm_kfree(secure->dev, securefw); + + return ERR_PTR(ret); +} + +int mvx_secure_request_firmware_nowait(struct mvx_secure *secure, + const char *name, + unsigned int ncores, + void *arg, + mvx_secure_firmware_done done) +{ + struct mvx_secure_firmware_priv *securefw; + + MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_INFO, + "Request secure firmware load nowait. firmware=%s.enc.", + name); + + securefw = secure_firmware_create(secure, name, ncores, arg, done); + if (IS_ERR(securefw)) + return PTR_ERR(securefw); + + INIT_WORK(&securefw->work, secure_firmware_wait); + queue_work(secure->workqueue, &securefw->work); + + return 0; +} + +void mvx_secure_release_firmware(struct mvx_secure_firmware *securefw) +{ + struct mvx_secure_firmware_priv *sfw = + container_of(securefw, struct mvx_secure_firmware_priv, fw); + + kobject_put(&sfw->kobj); +} + +/**************************************************************************** + * Secure memory + ****************************************************************************/ + +/** + * secure_mem_release() - Release the secure memory object. 
+ */ +static void secure_mem_release(struct kobject *kobj) +{ + struct mvx_secure_mem *smem = + container_of(kobj, struct mvx_secure_mem, kobj); + + devm_kfree(smem->dev, smem); +} + +/** + * memory_store() - Memory sysfs store function. + * + * Store values from memory descriptor, get the DMA handle and wake up any + * waiting process. + */ +static ssize_t memory_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buf, + size_t size) +{ + struct mvx_secure_mem *smem = + container_of(kobj, struct mvx_secure_mem, kobj); + const int32_t *fd = (const int32_t *)buf; + + MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_INFO, + "Secure memory allocated. fd=%d.", + *fd); + + smem->dmabuf = dma_buf_get(*fd); + if (IS_ERR_OR_NULL(smem->dmabuf)) + MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_WARNING, + "Failed to get DMA buffer."); + + wake_up_interruptible(&smem->wait_queue); + + return size; +} + +struct dma_buf *mvx_secure_mem_alloc(struct mvx_secure *secure, + size_t size, + int region) +{ + static struct kobj_attribute attr = __ATTR_WO(memory); + static struct attribute *mvx_secure_mem_attrs[] = { + &attr.attr, + NULL + }; + ATTRIBUTE_GROUPS(mvx_secure_mem); + static struct kobj_type secure_mem_ktype = { + .release = secure_mem_release, + .sysfs_ops = &kobj_sysfs_ops, + .default_groups = mvx_secure_mem_groups + }; + struct mvx_secure_mem *smem; + char size_env[32]; + char region_env[32]; + char *env[] = { "TYPE=memory", size_env, region_env, NULL }; + struct dma_buf *dmabuf = ERR_PTR(-EINVAL); + size_t n; + int ret; + + MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_INFO, + "Request secure memory. 
size=%zu, region=%d.", size, region); + + n = snprintf(size_env, sizeof(size_env), "SIZE=%zu", size); + if (n >= sizeof(size_env)) + return ERR_PTR(-EINVAL); + + n = snprintf(region_env, sizeof(region_env), "REGION=%d", region); + if (n >= sizeof(region_env)) + return ERR_PTR(-EINVAL); + + smem = devm_kzalloc(secure->dev, sizeof(*smem), GFP_KERNEL); + if (smem == NULL) + return ERR_PTR(-ENOMEM); + + smem->dev = secure->dev; + smem->kobj.kset = secure->kset; + init_waitqueue_head(&smem->wait_queue); + + /* Create kobject that the user space helper can interact with. */ + ret = kobject_init_and_add(&smem->kobj, &secure_mem_ktype, NULL, "%px", + &smem->kobj); + if (ret != 0) + goto put_kobject; + + /* Notify user space helper about the secure firmware load. */ + ret = kobject_uevent_env(&smem->kobj, KOBJ_ADD, env); + if (ret != 0) { + MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_WARNING, + "Failed to send secure memory uevent. ret=%d.", + ret); + goto put_kobject; + } + + ret = wait_event_interruptible_timeout(smem->wait_queue, + smem->dmabuf != NULL, + msecs_to_jiffies(1000)); + if (ret == 0) { + MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_WARNING, + "Secure memory allocation timed out."); + goto put_kobject; + } + + dmabuf = smem->dmabuf; + +put_kobject: + kobject_put(&smem->kobj); + + return dmabuf; +} diff --git a/drivers/media/platform/cix/cix_vpu/if/mvx_secure.h b/drivers/media/platform/cix/cix_vpu/if/mvx_secure.h new file mode 100755 index 000000000000..f85eb539d77c --- /dev/null +++ b/drivers/media/platform/cix/cix_vpu/if/mvx_secure.h @@ -0,0 +1,139 @@ +/* + * The confidential and proprietary information contained in this file may + * only be used by a person authorised under and to the extent permitted + * by a subsisting licensing agreement from Arm Technology (China) Co., Ltd. + * + * (C) COPYRIGHT 2021-2021 Arm Technology (China) Co., Ltd. 
+ * ALL RIGHTS RESERVED + * + * This entire notice must be reproduced on all copies of this file + * and copies of this file may only be made by a person if such person is + * permitted to do so under the terms of a subsisting license agreement + * from Arm Technology (China) Co., Ltd. + * + * SPDX-License-Identifier: GPL-2.0-only + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + * + */ + +#ifndef _MVX_SECURE_H_ +#define _MVX_SECURE_H_ + +/**************************************************************************** + * Includes + ****************************************************************************/ + +#include + +/**************************************************************************** + * Types + ****************************************************************************/ + +struct device; +struct dma_buf; +struct kset; +struct mvx_secure_firmware; +struct workqueue_struct; + +/** + * struct mvx_secure - Secure video. + * @dev: Pointer to device. + * @kset: Kset that allows uevents to be sent. + * @workqueue: Work queue used to wait for firmware load. + */ +struct mvx_secure { + struct device *dev; + struct kset *kset; + struct workqueue_struct *workqueue; +}; + +/** + * typedef firmware_done - Firmware load callback. 
+ */ +typedef void (*mvx_secure_firmware_done)(struct mvx_secure_firmware *, + void *arg); + +/** + * struct mvx_secure_firmware - Secure firmware. + * @dmabuf: Pointer to DMA buffer. + * @l2pages: Array of L2 pages. One per core. + * @ncores: Maximum number of cores. + * @major: Firmware protocol major version. + * @minor: Firmware protocol minor version. + */ +struct mvx_secure_firmware { + struct dma_buf *dmabuf; + phys_addr_t l2pages; + unsigned int ncores; + struct { + unsigned int major; + unsigned int minor; + } protocol; +}; + +/**************************************************************************** + * Exported functions + ****************************************************************************/ + +/** + * mvx_secure_construct() - Construct the secure object. + * @secure: Pointer to secure object. + * @dev: Pointer to device. + * + * Return: 0 on success, else error code. + */ +int mvx_secure_construct(struct mvx_secure *secure, + struct device *dev); + +/** + * mvx_secure_destruct() - Destruct the secure object. + * @secure: Pointer to secure object. + */ +void mvx_secure_destruct(struct mvx_secure *secure); + +/** + * mvx_secure_request_firmware_nowait() - Request secure firmware. + * @secure: Pointer to secure object. + * @name: Name of firmware binary. + * @ncores: Number of cores to setup. + * @arg: Callback argument. + * @done: Done callback. + * + * Return: 0 on success, else error code. + */ +int mvx_secure_request_firmware_nowait(struct mvx_secure *secure, + const char *name, + unsigned int ncores, + void *arg, + mvx_secure_firmware_done done); + +/** + * mvx_secure_release_firmware() - Release secure firmware. + * @securefw: Pointer to secure firmware. + */ +void mvx_secure_release_firmware(struct mvx_secure_firmware *securefw); + +/** + * mvx_secure_mem_alloc() - Secure memory allocation. + * @secure: Pointer to secure object. + * @size: Size in bytes to allocate. + * + * Return: Valid pointer on success, else ERR_PTR. 
+ */ +struct dma_buf *mvx_secure_mem_alloc(struct mvx_secure *secure, + size_t size, int region); + +#endif /* _MVX_SECURE_H_ */ diff --git a/drivers/media/platform/cix/cix_vpu/if/mvx_session.c b/drivers/media/platform/cix/cix_vpu/if/mvx_session.c new file mode 100755 index 000000000000..8cc41a8384ad --- /dev/null +++ b/drivers/media/platform/cix/cix_vpu/if/mvx_session.c @@ -0,0 +1,6889 @@ +/* + * The confidential and proprietary information contained in this file may + * only be used by a person authorised under and to the extent permitted + * by a subsisting licensing agreement from Arm Technology (China) Co., Ltd. + * + * (C) COPYRIGHT 2021-2021 Arm Technology (China) Co., Ltd. + * ALL RIGHTS RESERVED + * + * This entire notice must be reproduced on all copies of this file + * and copies of this file may only be made by a person if such person is + * permitted to do so under the terms of a subsisting license agreement + * from Arm Technology (China) Co., Ltd. + * + * SPDX-License-Identifier: GPL-2.0-only + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ * + */ + +/**************************************************************************** + * Includes + ****************************************************************************/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "mvx_bitops.h" +#include "mvx_firmware.h" +#include "mvx_firmware_cache.h" +#include "mvx_session.h" +#include "mvx_seq.h" +#include "mvx_secure.h" + +/****************************************************************************** + * Defines + ******************************************************************************/ + +#define FRAC_TO_Q16(num, den) ((uint32_t)((((uint64_t)(num)) << 16) / (den))) + +/* Limit bitstream size to 256M as VPU VA space is only 1280M for INTBUFS and BITBUFS */ +#define MAX_BITSTREAM_BUFFER_SIZE (1 << 28) + +#define MAX_RT_FPS_FRAMES (1 << 9) +#define FPS_SKIP_FRAMES 200 + +/**************************************************************************** + * Private variables + ****************************************************************************/ + +static int session_watchdog_timeout = 100; +module_param(session_watchdog_timeout, int, 0660); + +static int fw_watchdog_timeout; +module_param(fw_watchdog_timeout, int, 0660); + +static int wait_pending_timeout = 3000; /* 3s should be enough in worst case - 32Kx32K decode */ +module_param(wait_pending_timeout, int, 0660); + +static bool enable_buffer_dump = 0; +module_param(enable_buffer_dump, bool, 0660); + +/**************************************************************************** + * Private functions and and variables + ****************************************************************************/ +static struct mvx_session_format_map mvx_compressed_fmts[] = { + { .format = MVX_FORMAT_AVS, + .flags = V4L2_FMT_FLAG_COMPRESSED, + .pixelformat = V4L2_PIX_FMT_AVS, + .description = "AVS" }, + { .format = MVX_FORMAT_AVS2, + .flags = V4L2_FMT_FLAG_COMPRESSED, + .pixelformat = V4L2_PIX_FMT_AVS2, 
+ .description = "AVS2" }, + { .format = MVX_FORMAT_H263, + .flags = V4L2_FMT_FLAG_COMPRESSED, + .pixelformat = V4L2_PIX_FMT_H263, + .description = "H.263" }, + { .format = MVX_FORMAT_H264, + .flags = V4L2_FMT_FLAG_COMPRESSED, + .pixelformat = V4L2_PIX_FMT_H264, + .description = "H.264" }, + { .format = MVX_FORMAT_HEVC, + .flags = V4L2_FMT_FLAG_COMPRESSED, + .pixelformat = V4L2_PIX_FMT_HEVC, + .description = "HEVC" }, + { .format = MVX_FORMAT_MPEG2, + .flags = V4L2_FMT_FLAG_COMPRESSED, + .pixelformat = V4L2_PIX_FMT_MPEG2, + .description = "MPEG-2 ES" }, + { .format = MVX_FORMAT_MPEG4, + .flags = V4L2_FMT_FLAG_COMPRESSED, + .pixelformat = V4L2_PIX_FMT_MPEG4, + .description = "MPEG-4 part 2 ES" }, + { .format = MVX_FORMAT_VC1, + .flags = V4L2_FMT_FLAG_COMPRESSED, + .pixelformat = V4L2_PIX_FMT_VC1_ANNEX_G, + .description = "VC-1 (SMPTE 412M Annex G)" }, + { .format = MVX_FORMAT_VC1, + .flags = V4L2_FMT_FLAG_COMPRESSED, + .pixelformat = V4L2_PIX_FMT_VC1_ANNEX_L, + .description = "VC-1 (SMPTE 412M Annex L)" }, + { .format = MVX_FORMAT_VP8, + .flags = V4L2_FMT_FLAG_COMPRESSED, + .pixelformat = V4L2_PIX_FMT_VP8, + .description = "VP8" }, + { .format = MVX_FORMAT_VP9, + .flags = V4L2_FMT_FLAG_COMPRESSED, + .pixelformat = V4L2_PIX_FMT_VP9, + .description = "VP9" }, + { .format = MVX_FORMAT_AV1, + .flags = V4L2_FMT_FLAG_COMPRESSED, + .pixelformat = V4L2_PIX_FMT_AV1, + .description = "AV1" }, + { .format = MVX_FORMAT_JPEG, + .flags = V4L2_FMT_FLAG_COMPRESSED, + .pixelformat = V4L2_PIX_FMT_JPEG, + .description = "JPEG" }, + { .format = MVX_FORMAT_JPEG, + .flags = V4L2_FMT_FLAG_COMPRESSED, + .pixelformat = V4L2_PIX_FMT_MJPEG, + .description = "MJPEG" } +}; + +static struct mvx_session_format_map mvx_raw_fmts[] = { + { .format = MVX_FORMAT_YUV420_AFBC_8, + .flags = 0, + .pixelformat = V4L2_PIX_FMT_YUV420_AFBC_8, + .description = "YUV420 AFBC 8 bit", + .bitdepth = 8, + .bpp = 0, + .to10_format = MVX_FORMAT_YUV420_AFBC_10, + .to10_pixelformat = V4L2_PIX_FMT_YUV420_AFBC_10}, + { 
.format = MVX_FORMAT_YUV420_AFBC_10, + .flags = 0, + .pixelformat = V4L2_PIX_FMT_YUV420_AFBC_10, + .description = "YUV420 AFBC 10 bit", + .bitdepth = 10, + .bpp = 0, + .to8_format = MVX_FORMAT_YUV420_AFBC_8, + .to8_pixelformat = V4L2_PIX_FMT_YUV420_AFBC_8}, + { .format = MVX_FORMAT_YUV422_AFBC_8, + .flags = 0, + .pixelformat = V4L2_PIX_FMT_YUV422_AFBC_8, + .description = "YUV422 AFBC 8 bit", + .bitdepth = 8, + .bpp = 0, + .to10_format = MVX_FORMAT_YUV422_AFBC_10, + .to10_pixelformat = V4L2_PIX_FMT_YUV422_AFBC_10}, + { .format = MVX_FORMAT_YUV422_AFBC_10, + .flags = 0, + .pixelformat = V4L2_PIX_FMT_YUV422_AFBC_10, + .description = "YUV422 AFBC 10 bit", + .bitdepth = 10, + .bpp = 0, + .to8_format = MVX_FORMAT_YUV422_AFBC_8, + .to8_pixelformat = V4L2_PIX_FMT_YUV422_AFBC_8}, + { .format = MVX_FORMAT_Y_AFBC_8, + .flags = 0, + .pixelformat = V4L2_PIX_FMT_Y_AFBC_8, + .description = "GREY AFBC 8 bit", + .bitdepth = 8, + .bpp = 0, + .to10_format = MVX_FORMAT_Y_AFBC_10, + .to10_pixelformat = V4L2_PIX_FMT_Y_AFBC_10}, + { .format = MVX_FORMAT_Y_AFBC_10, + .flags = 0, + .pixelformat = V4L2_PIX_FMT_Y_AFBC_10, + .description = "GREY AFBC 10 bit", + .bitdepth = 10, + .bpp = 0, + .to8_format = MVX_FORMAT_Y_AFBC_8, + .to8_pixelformat = V4L2_PIX_FMT_Y_AFBC_8}, + { .format = MVX_FORMAT_YUV420_NV12, + .flags = 0, + .pixelformat = V4L2_PIX_FMT_NV12M, + .description = "Y/CbCr 4:2:0 (N-C)", + .bitdepth = 8, + .bpp = 12, + .to10_format = MVX_FORMAT_YUV420_P010, + .to10_pixelformat = V4L2_PIX_FMT_P010M }, + { .format = MVX_FORMAT_YUV420_NV12, + .flags = 0, + .pixelformat = V4L2_PIX_FMT_NV12, + .description = "Y/CbCr 4:2:0", + .bitdepth = 8, + .bpp = 12, + .to10_format = MVX_FORMAT_YUV420_P010, + .to10_pixelformat = V4L2_PIX_FMT_P010 }, + { .format = MVX_FORMAT_YUV420_I420, + .flags = 0, + .pixelformat = V4L2_PIX_FMT_YUV420M, + .description = "Planar YUV 4:2:0 (N-C)", + .bitdepth = 8, + .bpp = 12, + .to10_format = MVX_FORMAT_YUV420_P010, + .to10_pixelformat = V4L2_PIX_FMT_P010M }, + { 
.format = MVX_FORMAT_YUV420_I420, + .flags = 0, + .pixelformat = V4L2_PIX_FMT_YUV420, + .description = "Planar YUV 4:2:0", + .bitdepth = 8, + .bpp = 12, + .to10_format = MVX_FORMAT_YUV420_P010, + .to10_pixelformat = V4L2_PIX_FMT_P010 }, + { .format = MVX_FORMAT_YUV420_NV21, + .flags = 0, + .pixelformat = V4L2_PIX_FMT_NV21M, + .description = "Y/CrCb 4:2:0 (N-C)", + .bitdepth = 8, + .bpp = 12, + .to10_format = MVX_FORMAT_YUV420_P010, + .to10_pixelformat = V4L2_PIX_FMT_P010M }, + { .format = MVX_FORMAT_YUV420_NV21, + .flags = 0, + .pixelformat = V4L2_PIX_FMT_NV21, + .description = "Y/CrCb 4:2:0", + .bitdepth = 8, + .bpp = 12, + .to10_format = MVX_FORMAT_YUV420_P010, + .to10_pixelformat = V4L2_PIX_FMT_P010 }, + { .format = MVX_FORMAT_YUV420_P010, + .flags = 0, + .pixelformat = V4L2_PIX_FMT_P010M, + .description = "YUV 4:2:0 P010 (Microsoft format, N-C)", + .bitdepth = 10, + .bpp = 24, + .to8_format = MVX_FORMAT_YUV420_NV12, + .to8_pixelformat = V4L2_PIX_FMT_NV12M }, + { .format = MVX_FORMAT_YUV420_P010, + .flags = 0, + .pixelformat = V4L2_PIX_FMT_P010, + .description = "YUV 4:2:0 P010 (Microsoft format)", + .bitdepth = 10, + .bpp = 24, + .to8_format = MVX_FORMAT_YUV420_NV12, + .to8_pixelformat = V4L2_PIX_FMT_NV12 }, + { .format = MVX_FORMAT_YUV420_Y0L2, + .flags = 0, + .pixelformat = V4L2_PIX_FMT_Y0L2, + .description = "YUV 4:2:0 Y0L2 (ARM format)", + .bitdepth = 10, + .bpp = 24, + .to8_format = MVX_FORMAT_YUV420_NV12, + .to8_pixelformat = V4L2_PIX_FMT_NV12M }, + { .format = MVX_FORMAT_YUV420_AQB1, + .flags = 0, + .pixelformat = v4l2_fourcc('Y', '0', 'A', 'B'), + .description = "YUV 4:2:0 AQB1 (ARM format)", + .bitdepth = 10, + .bpp = 24, + .to8_format = MVX_FORMAT_YUV420_NV12, + .to8_pixelformat = V4L2_PIX_FMT_NV12M }, + { .format = MVX_FORMAT_YUV422_YUY2, + .flags = 0, + .pixelformat = V4L2_PIX_FMT_YUYV, + .description = "YYUV 4:2:2", + .bitdepth = 8, + .bpp = 16, + .to10_format = MVX_FORMAT_YUV422_1P_10, + .to10_pixelformat = V4L2_PIX_FMT_YUV422_1P_10 }, + { .format 
= MVX_FORMAT_YUV422_UYVY, + .flags = 0, + .pixelformat = V4L2_PIX_FMT_UYVY, + .description = "UYVY 4:2:2", + .bitdepth = 8, + .bpp = 16, + .to10_format = MVX_FORMAT_YUV422_1P_10, + .to10_pixelformat = V4L2_PIX_FMT_YUV422_1P_10 }, + { .format = MVX_FORMAT_YUV422_Y210, + .flags = 0, + .pixelformat = V4L2_PIX_FMT_Y210, + .description = "YUV 4:2:2 Y210 (Microsoft format)", + .bitdepth = 8, + .bpp = 16, + .to10_format = MVX_FORMAT_YUV422_1P_10, + .to10_pixelformat = V4L2_PIX_FMT_YUV422_1P_10 }, + + /* ARGB */ + { .format = MVX_FORMAT_ARGB_8888, + .flags = 0, + .pixelformat = V4L2_PIX_FMT_ARGB32, + .description = "32-bit ARGB 8-8-8-8", + .bitdepth = 8, + .bpp = 32, + .to10_format = MVX_FORMAT_YUV420_P010, + .to10_pixelformat = V4L2_PIX_FMT_P010M }, + { .format = MVX_FORMAT_ARGB_8888, + .flags = 0, + .pixelformat = V4L2_PIX_FMT_RGB32, + .description = "32-bit ARGB 8-8-8-8", + .bitdepth = 8, + .bpp = 32, + .to10_format = MVX_FORMAT_YUV420_P010, + .to10_pixelformat = V4L2_PIX_FMT_P010M }, + + /* ABGR */ + { .format = MVX_FORMAT_ABGR_8888, + .flags = 0, + .pixelformat = V4L2_PIX_FMT_BGRA32, + .description = "32-bit ABGR-8-8-8-8", + .bitdepth = 8, + .bpp = 32, + .to10_format = MVX_FORMAT_YUV420_P010, + .to10_pixelformat = V4L2_PIX_FMT_P010M }, + + /* RGBA */ + { .format = MVX_FORMAT_RGBA_8888, + .flags = 0, + .pixelformat = V4L2_PIX_FMT_RGBA32, + .description = "32-bit RGBA 8-8-8-8", + .bitdepth = 8, + .bpp = 32, + .to10_format = MVX_FORMAT_YUV420_P010, + .to10_pixelformat = V4L2_PIX_FMT_P010M }, + + /* BGRA (new and legacy format) */ + { .format = MVX_FORMAT_BGRA_8888, + .flags = 0, + .pixelformat = V4L2_PIX_FMT_ABGR32, + .description = "32-bit BGRA 8-8-8-8", + .bitdepth = 8, + .bpp = 32, + .to10_format = MVX_FORMAT_YUV420_P010, + .to10_pixelformat = V4L2_PIX_FMT_P010M }, + { .format = MVX_FORMAT_BGRA_8888, + .flags = 0, + .pixelformat = V4L2_PIX_FMT_BGR32, + .description = "32-bit BGRA 8-8-8-8", + .bitdepth = 8, + .bpp = 32, + .to10_format = MVX_FORMAT_YUV420_P010, + 
.to10_pixelformat = V4L2_PIX_FMT_P010M }, + + /* RGB888 */ + { .format = MVX_FORMAT_RGB_888, + .flags = 0, + .pixelformat = V4L2_PIX_FMT_RGB24, + .description = "24-bit RGB 8-8-8", + .bitdepth = 8, + .bpp = 24, + .to10_format = MVX_FORMAT_YUV420_P010, + .to10_pixelformat = V4L2_PIX_FMT_P010M }, + + /*BGR888 */ + { .format = MVX_FORMAT_BGR_888, + .flags = 0, + .pixelformat = V4L2_PIX_FMT_BGR24, + .description = "24-bit BGR 8-8-8", + .bitdepth = 8, + .bpp = 32, + .to10_format = MVX_FORMAT_YUV420_P010, + .to10_pixelformat = V4L2_PIX_FMT_P010M }, + + /*RGB888 3 PLANNER */ + { .format = MVX_FORMAT_RGB_888_3P, + .flags = 0, + .pixelformat = V4L2_PIX_FMT_RGB_3P, + .description = "24-bit RGB 8-8-8 3PLANNER", + .bitdepth = 8, + .bpp = 32, + .to10_format = MVX_FORMAT_YUV420_P010, + .to10_pixelformat = V4L2_PIX_FMT_P010M }, + + /*ARGB1555 1 PLANNER */ + { .format = MVX_FORMAT_ARGB_1555, + .flags = 0, + .pixelformat = V4L2_PIX_FMT_ARGB555, + .description = "16-bit ARGB 1-5-5-5 1PLANNER", + .bitdepth = 8, + .bpp = 16, + .to10_format = MVX_FORMAT_YUV420_P010, + .to10_pixelformat = V4L2_PIX_FMT_P010M }, + + /*ARGB 1 PLANNER */ + { .format = MVX_FORMAT_ARGB_4444, + .flags = 0, + .pixelformat = V4L2_PIX_FMT_ARGB444, + .description = "16-bit ARGB 4-4-4-4 1PLANNER", + .bitdepth = 8, + .bpp = 16, + .to10_format = MVX_FORMAT_YUV420_P010, + .to10_pixelformat = V4L2_PIX_FMT_P010M }, + + /*RGB888 3 PLANNER */ + { .format = MVX_FORMAT_RGB_565, + .flags = 0, + .pixelformat = V4L2_PIX_FMT_RGB565, + .description = "16-bit RGB 5-6-5 1PLANNER", + .bitdepth = 8, + .bpp = 16, + .to10_format = MVX_FORMAT_YUV420_P010, + .to10_pixelformat = V4L2_PIX_FMT_P010M }, + + /*MVX_FORMAT_Y 1 PLANNER */ + { .format = MVX_FORMAT_Y, + .flags = 0, + .pixelformat = V4L2_PIX_FMT_GREY, + .description = "8-bit GREY 1PLANNER", + .bitdepth = 8, + .bpp = 8, + .to10_format = MVX_FORMAT_Y_10, + .to10_pixelformat = V4L2_PIX_FMT_Y10_LE }, + + /*MVX_FORMAT_Y_10 1 PLANNER */ + { .format = MVX_FORMAT_Y_10, + .flags = 0, + 
.pixelformat = V4L2_PIX_FMT_Y10_LE, + .description = "10-bit GREY 16BIT LSB 1PLANNER", + .bitdepth = 10, + .bpp = 16, + .to8_format = MVX_FORMAT_Y, + .to8_pixelformat = V4L2_PIX_FMT_GREY }, + + /*MVX_FORMAT_YUV444 3 PLANNER */ + { .format = MVX_FORMAT_YUV444, + .flags = 0, + .pixelformat = V4L2_PIX_FMT_YUV444M, + .description = "8-bit YUV444 3PLANNER", + .bitdepth = 8, + .bpp = 24, + .to10_format = MVX_FORMAT_YUV444_10, + .to10_pixelformat = V4L2_PIX_FMT_YUV444_10 }, + + /*MVX_FORMAT_YUV444_10 3 PLANNER */ + { .format = MVX_FORMAT_YUV444_10, + .flags = 0, + .pixelformat = V4L2_PIX_FMT_YUV444_10, + .description = "10-bit YUV444 16BIT LSB 3PLANNER", + .bitdepth = 10, + .bpp = 48, + .to8_format = MVX_FORMAT_YUV444, + .to8_pixelformat = V4L2_PIX_FMT_YUV444M }, + + /*MVX_FORMAT_YUV420_2P_10 2 PLANNER */ + { .format = MVX_FORMAT_YUV420_2P_10, + .flags = 0, + .pixelformat = V4L2_PIX_FMT_YUV420_2P_10, + .description = "10-bit YUV420 16BIT LSB 2PLANNER", + .bitdepth = 10, + .bpp = 24, + .to8_format = MVX_FORMAT_YUV420_NV12, + .to8_pixelformat = V4L2_PIX_FMT_NV12M }, + + /*MVX_FORMAT_YUV422_1P_10 1 PLANNER */ + { .format = MVX_FORMAT_YUV422_1P_10, + .flags = 0, + .pixelformat = V4L2_PIX_FMT_YUV422_1P_10, + .description = "10-bit YUV422 16BIT LSB 1PLANNER", + .bitdepth = 10, + .bpp = 32, + .to8_format = MVX_FORMAT_YUV422_YUY2, + .to8_pixelformat = V4L2_PIX_FMT_YUYV }, + + /*MVX_FORMAT_YUV420_I420_10 3 PLANNER */ + { .format = MVX_FORMAT_YUV420_I420_10, + .flags = 0, + .pixelformat = V4L2_PIX_FMT_YUV420_I420_10, + .description = "10-bit YUV420 16BIT LSB 3PLANNER", + .bitdepth = 10, + .bpp = 24, + .to8_format = MVX_FORMAT_YUV420_I420, + .to8_pixelformat = V4L2_PIX_FMT_YUV420M } +}; + +static void watchdog_start(struct mvx_session *session, + unsigned int timeout_ms, bool reset_count) +{ + int ret; + + if (session->error != 0) + return; + + MVX_SESSION_DEBUG(session, "Watchdog start. 
timeout_ms=%u, reset_count=%d", + timeout_ms, reset_count); + + ret = mod_timer(&session->watchdog_timer, + jiffies + msecs_to_jiffies(timeout_ms)); + if (ret != 0) + return; + + if (reset_count) + session->watchdog_count = 0; + + kref_get(&session->isession.kref); +} + +static void watchdog_stop(struct mvx_session *session) +{ + int ret; + + ret = del_timer_sync(&session->watchdog_timer); + + /* ret: 0=watchdog expired, 1=watchdog still running */ + MVX_SESSION_DEBUG(session, "Watchdog stop. ret=%d", ret); + + /* Decrement the kref if the watchdog was still running. */ + if (ret != 0) + kref_put(&session->isession.kref, session->isession.release); +} + +static void watchdog_update(struct mvx_session *session, + unsigned int timeout_ms) +{ + int ret; + + ret = mod_timer_pending(&session->watchdog_timer, + jiffies + msecs_to_jiffies(timeout_ms)); + + /* ret: 0=no restart, 1=restarted */ + MVX_SESSION_DEBUG(session, "Watchdog update. ret=%d, timeout_ms=%u.", + ret, timeout_ms); +} + +static bool is_fw_loaded(struct mvx_session *session) +{ + return (IS_ERR_OR_NULL(session->fw_bin) == false); +} + +static void print_debug(struct mvx_session *session) +{ + MVX_SESSION_INFO(session, "Print debug."); + + if (session->csession != NULL) + session->client_ops->print_debug(session->csession); + + if (is_fw_loaded(session)) + session->fw.ops.print_debug(&session->fw); +} + +static void dump_ivf_header(struct mvx_session *session) +{ + struct mvx_session_port *input = &session->port[MVX_DIR_INPUT]; + bool is_ivf = (input->format == MVX_FORMAT_AV1 || + input->format == MVX_FORMAT_VP8 || + input->format == MVX_FORMAT_VP9); + + if (!current->fs) { + MVX_SESSION_ERR(session, "process exit abnormally,fs has been released!!!"); + return; + } + + if (is_ivf) { + uint32_t alloc_bytes = MVE_PAGE_SIZE; + struct mvx_session_port *output = &session->port[MVX_DIR_OUTPUT]; + struct mvx_ivf_header ivf_header = { + .signature = v4l2_fourcc('D', 'K', 'I', 'F'), + .version = 0, + .length = 
sizeof(struct mvx_ivf_header), + .fourcc = input->pixelformat, + .width = output->width, + .height = output->height, + .frameRate = (30 << 16), + .timeScale = (1 << 16), + .frameCount = input->dump_count, + .padding = 0 + }; + uint8_t *ivf_header_buf = (uint8_t*)(&ivf_header); + struct file *ivf_fp = NULL; + char ivf_file[64]; + char data_file[64]; + scnprintf(ivf_file, sizeof(ivf_file) - 1, + "/data/input_session_%p.ivf", session); + scnprintf(data_file, sizeof(data_file) - 1, + "/data/input_session_%p.bin", session); + + ivf_fp = filp_open(ivf_file, O_RDWR | O_CREAT, 0644); + if (IS_ERR(ivf_fp)) { + MVX_SESSION_WARN(session, "warning: open file(%s) fail", ivf_file); + return; + } + // write ivf header into new file at the beginning + kernel_write(ivf_fp, ivf_header_buf, sizeof(struct mvx_ivf_header), &(ivf_fp->f_pos)); + + session->data_fp = filp_open(data_file, O_RDONLY, 0644); + if (IS_ERR(session->data_fp)) { + MVX_SESSION_WARN(session, "warning: open file(%s) fail", data_file); + filp_close(ivf_fp, NULL); + return; + } else { + char *ivf_data = kmalloc(alloc_bytes, GFP_KERNEL); + if (ivf_data != NULL) { + size_t read_bytes; + // write ivf data from data file read + while (read_bytes = kernel_read(session->data_fp, ivf_data, alloc_bytes, &(session->data_fp->f_pos)), read_bytes > 0) { + kernel_write(ivf_fp, ivf_data, read_bytes, &(ivf_fp->f_pos)); + if (read_bytes < alloc_bytes) + break; + } + kfree(ivf_data); + } + } + filp_close(session->data_fp, NULL); + filp_close(ivf_fp, NULL); + } +} + +static void send_event_error(struct mvx_session *session, + long error) +{ + session->error = error; + wake_up(&session->waitq); + session->event(session, MVX_SESSION_EVENT_ERROR, + (void *)session->error); +} + +static void mvx_session_construct_average_fps_msg(struct mvx_session *session, + int fps, uint64_t frame_count, time64_t start_sec, time64_t end_sec) +{ + if (mvx_log_perf.avgfps) { + struct rtc_time start, end; + enum mvx_direction dir = session->is_encoder ? 
MVX_DIR_OUTPUT : MVX_DIR_INPUT; + struct mvx_session_port *p = &session->port[dir]; + struct mvx_session_format_map *map = mvx_session_find_format(p->pixelformat); + + rtc_time64_to_tm(start_sec, &start); + rtc_time64_to_tm(end_sec, &end); + + mutex_lock(&mvx_log_perf.mutex); + + snprintf(mvx_log_perf.avgfps + MVX_LOG_FPS_MSG_UNIT_SIZE * mvx_log_perf.fps_msg_w, + MVX_LOG_FPS_MSG_UNIT_SIZE, + "%02d:%02d:%02d ~ %02d:%02d:%02d [%px] P%d %s %s %dx%d %lld frames, average fps %d.%02d\n", + start.tm_hour, start.tm_min, start.tm_sec, + end.tm_hour, end.tm_min, end.tm_sec, + session, session->priority, map->description, session->is_encoder ? "encoder" : "decoder", + p->width, p->height, frame_count, fps / 100, fps % 100); + + mvx_log_perf.fps_msg_w = (mvx_log_perf.fps_msg_w + 1) & 31; + mvx_log_perf.has_update = true; + + mutex_unlock(&mvx_log_perf.mutex); + } +} + +static int mvx_session_calculate_average_fps(struct mvx_session *session, + int ts_index, uint64_t frame_count) +{ + struct timespec64 delta; + uint64_t delta_us; + + frame_count -= FPS_SKIP_FRAMES; + delta = timespec64_sub(session->ts[ts_index], session->start); + delta_us = timespec64_to_ns(&delta) / 1000; + return (int)((frame_count * 100 * 1000 * 1000) / delta_us); +} + +static void mvx_session_update_average_fps(struct mvx_session *session) +{ + int ts_index, fps; + uint64_t frame_count; + + if (!(mvx_log_perf.enabled & MVX_LOG_PERF_FPS) || !session->ts) + return; + + mutex_lock(&session->fps_mutex); + ts_index = session->ts_index; + frame_count = session->frame_count; + mutex_unlock(&session->fps_mutex); + + if (frame_count <= FPS_SKIP_FRAMES) + return; + + ts_index = ts_index == 0 ? 
MAX_RT_FPS_FRAMES - 1 : ts_index - 1; + fps = mvx_session_calculate_average_fps(session, ts_index, frame_count); + mvx_session_construct_average_fps_msg(session, + fps, frame_count, session->start.tv_sec, session->ts[ts_index].tv_sec); +} + +static void session_unregister(struct mvx_session *session) +{ + if (!IS_ERR_OR_NULL(session->csession)) { + if (session->frame_count > FPS_SKIP_FRAMES && session->ts) { + mvx_session_update_average_fps(session); + } + session->client_ops->unregister_session(session->csession); + session->csession = NULL; + } +} + +static void release_fw_bin(struct mvx_session *session) +{ + if (is_fw_loaded(session) != false) { + MVX_SESSION_INFO(session, "Release firmware binary."); + + mvx_fw_destruct(&session->fw); + mvx_fw_cache_put(session->cache, session->fw_bin); + session->fw_bin = NULL; + complete(&session->fw_loaded); + } + + watchdog_stop(session); + session_unregister(session); +} + +static struct mvx_session *kref_to_session(struct kref *kref) +{ + return container_of(kref, struct mvx_session, isession.kref); +} + +static void session_destructor(struct kref *kref) +{ + struct mvx_session *session = kref_to_session(kref); + + session->destructor(session); +} + +static const char *state_to_string(enum mvx_fw_state state) +{ + switch (state) { + case MVX_FW_STATE_STOPPED: + return "Stopped"; + case MVX_FW_STATE_RUNNING: + return "Running"; + default: + return "Unknown"; + } +} + +static enum mvx_direction get_bitstream_port(struct mvx_session *session) +{ + if (mvx_is_bitstream(session->port[MVX_DIR_INPUT].format) && + mvx_is_frame(session->port[MVX_DIR_OUTPUT].format)) + return MVX_DIR_INPUT; + else if (mvx_is_frame(session->port[MVX_DIR_INPUT].format) && + mvx_is_bitstream(session->port[MVX_DIR_OUTPUT].format)) + return MVX_DIR_OUTPUT; + + return MVX_DIR_MAX; +} + +static bool is_stream_on(struct mvx_session *session) +{ + if (session->is_encoder) + return session->port[MVX_DIR_INPUT].stream_on && + 
session->port[MVX_DIR_OUTPUT].stream_on; + else + return session->port[MVX_DIR_INPUT].stream_on; +} + +/** + * wait_pending() - Wait for procedure to finish. + * + * Wait for the number of pending firmware messages to reach 0, or for an error + * to happen. + * + * Return: 0 on success, else error code. + */ +static int wait_pending(struct mvx_session *session) +{ + int ret = 0; + + while (is_fw_loaded(session) != false && + session->fw.msg_pending > 0 && + session->error == 0) { + mutex_unlock(session->isession.mutex); + + ret = wait_event_timeout( + session->waitq, + is_fw_loaded(session) == false || + session->fw.msg_pending == 0 || + session->error != 0, + msecs_to_jiffies(wait_pending_timeout)); + + if (ret == 0) { + MVX_SESSION_WARN(session, + "Wait pending returned timeout, msg_pending=%d. try again.", + session->fw.msg_pending); + session->client_ops->soft_irq(session->csession); + ret = wait_event_timeout( + session->waitq, + is_fw_loaded(session) == false || + session->fw.msg_pending == 0 || + session->error != 0, + msecs_to_jiffies(wait_pending_timeout)); + + if (ret == 0) { + send_event_error(session, -ETIME); + ret = -ETIME; + goto lock_mutex; + } + } + + if (ret < 0) + goto lock_mutex; + + mutex_lock(session->isession.mutex); + } + + return session->error; + +lock_mutex: + mutex_lock(session->isession.mutex); + + if (ret < 0) + MVX_SESSION_WARN(session, + "Wait pending returned error. ret=%d, error=%d, msg_pending=%d.", + ret, session->error, session->fw.msg_pending); + + return ret; +} + +static int send_irq(struct mvx_session *session) +{ + if (IS_ERR_OR_NULL(session->csession)) + return -EINVAL; + + return session->client_ops->send_irq(session->csession); +} + +/** + * switch_in() - Request the client device to switch in the session. + * + * Return: 0 on success, else error code. 
+ */ +static int switch_in(struct mvx_session *session) +{ + int ret; + + session->idle_count = 0; + + if (session->switched_in != false) + return 0; + + MVX_SESSION_INFO(session, "Switch in."); + watchdog_start(session, session_watchdog_timeout, true); + + ret = session->client_ops->switch_in(session->csession); + if (ret != 0) { + MVX_SESSION_WARN(session, "Failed to switch in session."); + send_event_error(session, ret); + return ret; + } + + session->switched_in = true; + + return 0; +} + +static int switch_out_rsp(struct mvx_session *session) +{ + session->switched_in = false; + session->client_ops->switch_out_rsp(session->csession); + + return 0; +} + +/** + * fw_send_msg() - Send firmware message and signal IRQ. + * + * Return: 0 on success, else error code. + */ +static int fw_send_msg(struct mvx_session *session, + struct mvx_fw_msg *msg) +{ + int ret; + + if (session->error != 0) + return session->error; + + ret = session->fw.ops.put_message(&session->fw, msg); + if (ret != 0) { + MVX_SESSION_WARN(session, + "Failed to queue firmware message."); + goto send_error; + } + + ret = send_irq(session); + if (ret != 0) { + MVX_SESSION_WARN(session, "Failed to send irq."); + goto send_error; + } + + return switch_in(session); + +send_error: + send_event_error(session, ret); + return ret; +} + +static int fw_send_msg_simple(struct mvx_session *session, + enum mvx_fw_code code, + const char *str) +{ + struct mvx_fw_msg msg = { .code = code }; + + MVX_SESSION_INFO(session, "Firmware req: %s.", str); + + return fw_send_msg(session, &msg); +} + +static int fw_flush(struct mvx_session *session, + enum mvx_direction dir) +{ + struct mvx_fw_msg msg = { .code = MVX_FW_CODE_FLUSH, .flush.dir = dir }; + int ret = 0; + + MVX_SESSION_INFO(session, "Firmware req: Flush. 
dir=%d.", dir); + + ret = fw_send_msg(session, &msg); + if (ret != 0) + return ret; + + session->port[dir].is_flushing = true; + + return 0; +} + +static int fw_state_change(struct mvx_session *session, + enum mvx_fw_state state) +{ + struct mvx_fw_msg msg = { + .code = MVX_FW_CODE_STATE_CHANGE, + .state = state + }; + int ret = 0; + + if (state != session->fw_state) { + MVX_SESSION_INFO(session, + "Firmware req: State change. current=%d, new=%d.", + session->fw_state, state); + ret = fw_send_msg(session, &msg); + } + + return ret; +} + +static int fw_job(struct mvx_session *session, + unsigned int frames) +{ + struct mvx_fw_msg msg = { + .code = MVX_FW_CODE_JOB, + .job.cores = session->isession.ncores, + .job.frames = frames + }; + + MVX_SESSION_INFO(session, "Firmware req: Job. frames=%u.", frames); + + return fw_send_msg(session, &msg); +} + +static int fw_switch_out(struct mvx_session *session) +{ + unsigned int idle_count = session->idle_count; + int ret; + + ret = fw_send_msg_simple(session, MVX_FW_CODE_SWITCH_OUT, + "Switch out"); + + /* + * Restore idle count. Switch out is the only message where we do not + * want to reset the idle counter. + */ + session->idle_count = idle_count; + + return ret; +} + +static int fw_ping(struct mvx_session *session) +{ + return fw_send_msg_simple(session, MVX_FW_CODE_PING, "Ping"); +} + +static int fw_dump(struct mvx_session *session) +{ + return fw_send_msg_simple(session, MVX_FW_CODE_DUMP, "Dump"); +} + +static int fw_set_debug(struct mvx_session *session, uint32_t debug_level) +{ + struct mvx_fw_msg msg = { + .code = MVX_FW_CODE_DEBUG, + .arg = debug_level + }; + + MVX_SESSION_INFO(session, "Firmware req: Set debug. 
debug_level=%d.", debug_level); + + return fw_send_msg(session, &msg); +} + +static int fw_set_option(struct mvx_session *session, + struct mvx_fw_set_option *option) +{ + struct mvx_fw_msg msg = { + .code = MVX_FW_CODE_SET_OPTION, + .set_option = *option + }; + + MVX_SESSION_INFO(session, "Firmware req: Set option. code=%d.", + option->code); + + return fw_send_msg(session, &msg); +} + +static int fw_eos(struct mvx_session *session) +{ + struct mvx_fw_msg msg = { + .code = MVX_FW_CODE_EOS, + .eos_is_frame = session->is_encoder + }; + int ret = 0; + + MVX_SESSION_INFO(session, "Firmware req: Buffer EOS."); + + ret = fw_send_msg(session, &msg); + if (ret != 0) + return ret; + + session->port[MVX_DIR_INPUT].flushed = false; + + return 0; +} + +static int fw_set_epr_qp(struct mvx_session *session, + int code, + struct mvx_buffer_param_qp qp) +{ + struct mvx_fw_set_option option; + int ret; + + if (qp.qp < 0) + return -EINVAL; + + if (qp.qp == 0) + return 0; + + option.code = code; + option.epr_qp = qp; + ret = fw_set_option(session, &option); + if (ret != 0) { + MVX_SESSION_WARN(session, + "Failed to set EPR QP. code=%d, ret=%d.", + code, ret); + return ret; + } + + return 0; +} +static int fw_set_qp(struct mvx_session *session, + int code, + int qp) +{ + struct mvx_fw_set_option option; + int ret; + + if (qp < 0) + return -EINVAL; + + if (qp == 0) + return 0; + + option.code = code; + option.qp = qp; + ret = fw_set_option(session, &option); + if (ret != 0) { + MVX_SESSION_WARN(session, + "Failed to set QP. code=%d, ret=%d.", + code, ret); + return ret; + } + + return 0; +} + +static int fw_set_osd_config(struct mvx_session *session, + int code, + struct mvx_osd_config *osd) +{ + struct mvx_fw_set_option option; + int ret; + + option.code = code; + option.osd_config = *osd; + ret = fw_set_option(session, &option); + if (ret != 0) { + MVX_SESSION_WARN(session, + "Failed to set OSD config. 
code=%d, ret=%d.", + code, ret); + return ret; + } + + return 0; +} + +static int fw_set_roi_regions(struct mvx_session *session, + int code, + struct mvx_roi_config *roi) +{ + struct mvx_fw_set_option option; + int ret; + + if (roi->num_roi < 0) + return -EINVAL; + + if (roi->num_roi == 0) + return 0; + + option.code = code; + option.roi_config = *roi; + ret = fw_set_option(session, &option); + if (ret != 0) { + MVX_SESSION_WARN(session, + "Failed to set ROI. code=%d, ret=%d.", + code, ret); + return ret; + } + + return 0; +} + +static int fw_set_chr_cfg(struct mvx_session *session, + int code, + struct mvx_chr_cfg *chr) +{ + struct mvx_fw_set_option option; + int ret; + + if (chr->num_chr < 0) + return -EINVAL; + + if (chr->num_chr == 0) + return 0; + + option.code = code; + option.chr_cfg = *chr; + ret = fw_set_option(session, &option); + if (ret != 0) { + MVX_SESSION_WARN(session, + "Failed to set CHR CFG. code=%d, ret=%d.", + code, ret); + return ret; + } + + return 0; +} + +static int fw_set_enc_stats(struct mvx_session *session, + int code, + struct mvx_enc_stats *stats) +{ + struct mvx_fw_set_option option; + int ret; + + if (stats->flags == 0) + return 0; + + option.code = code; + option.enc_stats = *stats; + ret = fw_set_option(session, &option); + if (ret != 0) { + MVX_SESSION_WARN(session, + "Failed to set enc stats param. 
code=%d, ret=%d.", + code, ret); + return ret; + } + + return 0; +} + +static int fw_common_setup(struct mvx_session *session) +{ + int ret = 0; + struct mvx_fw_set_option option; + + if (session->nalu_format != MVX_NALU_FORMAT_UNDEFINED && + session->port[MVX_DIR_INPUT].format != MVX_FORMAT_AV1) { + option.code = MVX_FW_SET_NALU_FORMAT; + option.nalu_format = session->nalu_format; + ret = fw_set_option(session, &option); + if (ret != 0) { + MVX_SESSION_WARN(session, + "Failed to set NALU format."); + return ret; + } + } + + if (session->stream_escaping != MVX_TRI_UNSET) { + option.code = MVX_FW_SET_STREAM_ESCAPING; + option.stream_escaping = session->stream_escaping; + ret = fw_set_option(session, &option); + if (ret != 0) { + MVX_SESSION_WARN(session, + "Failed to set stream escaping."); + return ret; + } + } + + if (mvx_log_perf.enabled & MVX_LOG_PERF_UTILIZATION) { + option.code = MVX_FW_SET_PROFILING; + option.profiling = 1; + ret = fw_set_option(session, &option); + if (ret != 0) { + MVX_SESSION_WARN(session, + "Failed to enable FW profiling."); + return ret; + } + } + return ret; +} + +/* JPEG standard, Annex K */ +static const uint8_t qtbl_chroma_ref[MVX_FW_QUANT_LEN] = { + 17, 18, 24, 47, 99, 99, 99, 99, + 18, 21, 26, 66, 99, 99, 99, 99, + 24, 26, 56, 99, 99, 99, 99, 99, + 47, 66, 99, 99, 99, 99, 99, 99, + 99, 99, 99, 99, 99, 99, 99, 99, + 99, 99, 99, 99, 99, 99, 99, 99, + 99, 99, 99, 99, 99, 99, 99, 99, + 99, 99, 99, 99, 99, 99, 99, 99 +}; + +static const uint8_t qtbl_luma_ref[MVX_FW_QUANT_LEN] = { + 16, 11, 10, 16, 24, 40, 51, 61, + 12, 12, 14, 19, 26, 58, 60, 55, + 14, 13, 16, 24, 40, 57, 69, 56, + 14, 17, 22, 29, 51, 87, 80, 62, + 18, 22, 37, 56, 68, 109, 103, 77, + 24, 35, 55, 64, 81, 104, 113, 92, + 49, 64, 78, 87, 103, 121, 120, 101, + 72, 92, 95, 98, 112, 100, 103, 99 +}; + +void generate_quant_tbl(int quality, + const uint8_t qtbl_ref[MVX_FW_QUANT_LEN], + uint8_t qtbl[MVX_FW_QUANT_LEN]) +{ + int i; + int q; + int t; + + q = (quality < 50) ? 
(5000 / quality) : (200 - 2 * quality); + + for (i = 0; i < MVX_FW_QUANT_LEN; ++i) { + t = ((qtbl_ref[i] * q) + 50) / 100; + t = min_t(int, t, 255); + t = max_t(int, t, 1); + qtbl[i] = t; + } +} + +static int generate_standards_yuv2rgb_coef(enum mvx_yuv_to_rgb_mode mode,struct mvx_color_conv_coef *color_conv_coef) +{ + static struct mvx_color_conv_coef color_standards[] = + { + { {{4769, 4769, 4769}, {0, -1605, 8263}, {6537, -3330, 0}}, {16, 128, 128}}, + { {{4096, 4096, 4096}, {0, -1410, 7258}, {5743, -2925, 0}}, {0 , 128, 128}}, + { {{4769, 4769, 4769}, {0, -873, 8652}, {7343, -2183, 0}}, {16, 128, 128}}, + { {{4096, 4096, 4096}, {0, -767, 7601}, {6450, -1917, 0}}, {0 , 128, 128}}, + { {{4769, 4769, 4769}, {0, -767, 8773}, {6876, -2664, 0}}, {16, 128, 128}}, + { {{4096, 4096, 4096}, {0, -674, 7706}, {6040, -2340, 0}}, {0 , 128, 128}}, + }; + + if(mode =MVX_YUV_TO_RGB_MODE_MAX) + { + mode =MVX_YUV_TO_RGB_MODE_BT601_LIMT; + //return -EINVAL; + } + + memcpy(color_conv_coef,&color_standards[mode],sizeof(struct mvx_color_conv_coef)); + + MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_DEBUG, + "generate_standards_yuv2rgb_coef.mode indx=%d 3x3=[%d %d %d, %d %d %d,%d %d %d],offset=[%d %d %d]",mode, + color_conv_coef->coef[0][0], + color_conv_coef->coef[0][1], + color_conv_coef->coef[0][2], + color_conv_coef->coef[1][0], + color_conv_coef->coef[1][1], + color_conv_coef->coef[1][2], + color_conv_coef->coef[2][0], + color_conv_coef->coef[2][1], + color_conv_coef->coef[2][2], + color_conv_coef->offset[0], + color_conv_coef->offset[1], + color_conv_coef->offset[2]); + + return 0; +} + +static int generate_standards_rgb2yuv_coef(enum mvx_rgb_to_yuv_mode mode,struct mvx_rgb2yuv_color_conv_coef *color_conv_coef) +{ + static struct mvx_rgb2yuv_color_conv_coef color_standards[] = + { + {{1052, 2065, 401, -607, -1192, 1799, 1799, -1506, -293}, {16,235},{16,240}, {0, 255}}, + {{1225, 2404, 467, -691, -1357, 2048, 2048, -1715, -333}, { 0,255},{ 0,255}, {0, 255}}, + {{ 748, 2516, 254, -412, 
-1387, 1799, 1799, -1634, -165}, {16,235},{16,240}, {0, 255}}, + {{ 871, 2929, 296, -469, -1579, 2048, 2048, -1860, -188}, { 0,255},{ 0,255}, {0, 255}}, + {{ 924, 2385, 209, -502, -1297, 1799, 1799, -1654, -145}, {16,235},{16,240}, {0, 255}}, + {{1076, 2777, 243, -572, -1476, 2048, 2048, -1883, -165}, { 0,255},{ 0,255}, {0, 255}}, + }; + + if(mode =MVX_RGB_TO_YUV_MODE_MAX) + { + mode =MVX_RGB_TO_YUV_MODE_BT601_STUDIO; + } + + memcpy(color_conv_coef,&color_standards[mode],sizeof(struct mvx_rgb2yuv_color_conv_coef)); + + return 0; +} + +static int fw_encoder_setup(struct mvx_session *session) +{ + int ret; + enum mvx_format codec; + struct mvx_fw_set_option option; + enum mvx_direction dir; + + dir = get_bitstream_port(session); + codec = session->port[dir].format; + + if (session->profile[codec] != MVX_PROFILE_NONE) { + option.code = MVX_FW_SET_PROFILE_LEVEL; + option.profile_level.profile = session->profile[codec]; + option.profile_level.level = session->level[codec]; + option.profile_level.tier = session->tier[codec]; + ret = fw_set_option(session, &option); + if (ret != 0) { + MVX_SESSION_WARN(session, + "Failed to set profile/level."); + return ret; + } + } + + if (session->rc_enabled && session->rc_type) { + option.code = MVX_FW_SET_RATE_CONTROL; + option.rate_control.target_bitrate = + session->rc_type ? 
session->target_bitrate:0; + option.rate_control.rate_control_mode = session->rc_type; + if (session->rc_type == MVX_OPT_RATE_CONTROL_MODE_C_VARIABLE) { + option.rate_control.maximum_bitrate = session->maximum_bitrate; + } + ret = fw_set_option(session, &option); + if (ret != 0) { + MVX_SESSION_WARN(session, + "Failed to put target bitrate."); + return ret; + } + } + + if (codec != MVX_FORMAT_JPEG) { + option.code = MVX_FW_SET_FRAME_RATE; + option.frame_rate = FRAC_TO_Q16(session->fps_n, session->fps_d); + ret = fw_set_option(session, &option); + if (ret != 0) { + MVX_SESSION_WARN(session, + "Failed to put frame rate."); + return ret; + } + + if (session->rc_bit_i_mode != 0) { + option.code = MVX_FW_SET_RC_BIT_I_MODE; + option.rc_bit_i_mode = session->rc_bit_i_mode; + ret = fw_set_option(session, &option); + if (ret != 0) { + MVX_SESSION_WARN(session, + "Failed to put rc bit i mode."); + return ret; + } + } + if (session->rc_bit_i_ratio != 0) { + option.code = MVX_FW_SET_RC_BIT_I_RATIO; + option.rc_bit_i_ratio = session->rc_bit_i_ratio; + ret = fw_set_option(session, &option); + if (ret != 0) { + MVX_SESSION_WARN(session, + "Failed to put rc bit i ratio."); + return ret; + } + } + + if (session->mulit_sps_pps != 0) { + option.code = MVX_FW_SET_MULIT_SPS_PPS; + option.mulit_sps_pps = session->mulit_sps_pps; + ret = fw_set_option(session, &option); + if (ret != 0) { + MVX_SESSION_WARN(session, + "Failed to support multi SPS PSS."); + return ret; + } + } + + if (session->scd_enable != 0) { + option.code = MVX_FW_SET_SCD_ENABLE; + option.scd_enable = session->scd_enable; + ret = fw_set_option(session, &option); + if (ret != 0) { + MVX_SESSION_WARN(session, + "Failed to enable SCD."); + return ret; + } + } + + if (session->scd_enable != 0 && session->scd_percent >= 0 && session->scd_percent <= 10) { + option.code = MVX_FW_SET_SCD_PERCENT; + option.scd_percent = session->scd_percent; + ret = fw_set_option(session, &option); + if (ret != 0) { + MVX_SESSION_WARN(session, + 
"Failed to set SCD percent."); + return ret; + } + } + + if (session->scd_enable != 0 && session->scd_threshold >=0 && session->scd_threshold <= 2047) { + option.code = MVX_FW_SET_SCD_THRESHOLD; + option.scd_threshold = session->scd_threshold; + ret = fw_set_option(session, &option); + if (ret != 0) { + MVX_SESSION_WARN(session, + "Failed to set SCD threshold."); + return ret; + } + } + + if (session->aq_ssim_en != 0 && + (codec == MVX_FORMAT_H264 || + codec == MVX_FORMAT_HEVC)) { + option.code = MVX_FW_SET_AQ_SSIM_EN; + option.aq_ssim_en = session->aq_ssim_en; + ret = fw_set_option(session, &option); + if (ret != 0) { + MVX_SESSION_WARN(session, + "Failed to enable SSIM."); + return ret; + } + } + + if (session->aq_ssim_en != 0 && session->aq_neg_ratio >= 0 && + session->aq_neg_ratio <= 63 && + (codec == MVX_FORMAT_H264 || + codec == MVX_FORMAT_HEVC)) { + option.code = MVX_FW_SET_AQ_NEG_RATIO; + option.aq_neg_ratio = session->aq_neg_ratio; + ret = fw_set_option(session, &option); + if (ret != 0) { + MVX_SESSION_WARN(session, + "Failed to set AQ negative ratio."); + return ret; + } + } + + if (session->aq_ssim_en != 0 && session->aq_pos_ratio >= 0 && + session->aq_pos_ratio <= 63 && + (codec == MVX_FORMAT_H264 || + codec == MVX_FORMAT_HEVC)) { + option.code = MVX_FW_SET_AQ_POS_RATIO; + option.aq_pos_ratio = session->aq_pos_ratio; + ret = fw_set_option(session, &option); + if (ret != 0) { + MVX_SESSION_WARN(session, + "Failed to set AQ positive ratio."); + return ret; + } + } + + if (session->aq_ssim_en != 0 && session->aq_qpdelta_lmt >= 0 && + session->aq_qpdelta_lmt <= 7 && + (codec == MVX_FORMAT_H264 || + codec == MVX_FORMAT_HEVC)) { + option.code = MVX_FW_SET_AQ_QPDELTA_LMT; + option.aq_qpdelta_lmt = session->aq_qpdelta_lmt; + ret = fw_set_option(session, &option); + if (ret != 0) { + MVX_SESSION_WARN(session, + "Failed to set AQ QPDelta LMT."); + return ret; + } + } + + if (session->aq_ssim_en != 0 && session->aq_init_frm_avg_svar >= 0 && + 
session->aq_init_frm_avg_svar <=15 && + (codec == MVX_FORMAT_H264 || + codec == MVX_FORMAT_HEVC)) { + option.code = MVX_FW_SET_AQ_INIT_FRM_AVG_SVAR; + option.aq_init_frm_avg_svar = session->aq_init_frm_avg_svar; + ret = fw_set_option(session, &option); + if (ret != 0) { + MVX_SESSION_WARN(session, + "Failed to initial frame variance."); + return ret; + } + } + + if (session->enable_visual != 0) { + option.code = MVX_FW_SET_VISUAL_ENABLE; + option.enable_visual = session->enable_visual; + ret = fw_set_option(session, &option); + if (ret != 0) { + MVX_SESSION_WARN(session, + "Failed to enable visual."); + return ret; + } + option.code = MVX_FW_SET_ADPTIVE_QUANTISATION; + option.adapt_qnt = 3;//set to 3 if enable visual + ret = fw_set_option(session, &option); + if (ret != 0) { + MVX_SESSION_WARN(session, + "Failed to set adaptive quantisation."); + return ret; + } + } + + if (session->adaptive_intra_block != 0) { + option.code = MVX_FW_SET_VISUAL_ENABLE_ADAPTIVE_INTRA_BLOCK; + option.adaptive_intra_block = session->adaptive_intra_block; + ret = fw_set_option(session, &option); + if (ret != 0) { + MVX_SESSION_WARN(session, + "Failed to enable adaptive intra block."); + return ret; + } + } + + if (session->rc_enabled != false) { + if (session->qp[codec].min <= session->qp[codec].max) { + option.code = MVX_FW_SET_QP_RANGE; + option.qp_range.min = session->qp[codec].min; + option.qp_range.max = session->qp[codec].max; + ret = fw_set_option(session, &option); + if (ret != 0) { + MVX_SESSION_WARN(session, + "Failed to set qp range."); + return ret; + } + } + } + if (session->fixedqp != 0) { + ret = fw_set_qp(session, MVX_FW_SET_FIXED_QP, + session->fixedqp); + if (ret != 0) + return ret; + } else { + if (session->qp[codec].i_frame != 0) { + ret = fw_set_qp(session, MVX_FW_SET_QP_I, + session->qp[codec].i_frame); + if (ret != 0) + return ret; + } + if (session->qp[codec].p_frame != 0) { + ret = fw_set_qp(session, MVX_FW_SET_QP_P, + session->qp[codec].p_frame); + if (ret != 
0) + return ret; + } + if (session->qp[codec].b_frame != 0) { + ret = fw_set_qp(session, MVX_FW_SET_QP_B, + session->qp[codec].b_frame); + if (ret != 0) + return ret; + } + } + + if ((session->min_qp_i <= session->max_qp_i) && (session->max_qp_i != 0)) { + option.code = MVX_FW_SET_QP_RANGE_I; + option.qp_range.min = session->min_qp_i; + option.qp_range.max = session->max_qp_i; + ret = fw_set_option(session, &option); + if (ret != 0) { + MVX_SESSION_WARN(session, + "Failed to set qp range."); + return ret; + } + } + + if (session->b_frames != 0) { + if (session->port[MVX_DIR_OUTPUT].format == MVX_FORMAT_VP8 || + (session->port[MVX_DIR_OUTPUT].format == MVX_FORMAT_H264 && + session->profile[MVX_FORMAT_H264] == MVX_PROFILE_H264_BASELINE)) { + MVX_SESSION_WARN(session, "The target format or profile does not support set B frames"); + + // Rest B-frames and update P-frames + session->b_frames = 0; + session->port[MVX_DIR_INPUT].buffer_min = 1; + session->p_frames = session->gop_size - 1; + } else { + option.code = MVX_FW_SET_B_FRAMES; + option.pb_frames = session->b_frames; + ret = fw_set_option(session, &option); + if (ret != 0) { + MVX_SESSION_WARN(session, "Failed to set B frames."); + return ret; + } + } + } + + option.code = MVX_FW_SET_P_FRAMES; + option.pb_frames = session->p_frames; + ret = fw_set_option(session, &option); + if (ret != 0) { + MVX_SESSION_WARN(session, "Failed to set P frames."); + return ret; + } + + if (session->gop_type != MVX_GOP_TYPE_NONE) { + option.code = MVX_FW_SET_GOP_TYPE; + option.gop_type = session->gop_type; + ret = fw_set_option(session, &option); + if (ret != 0) { + MVX_SESSION_WARN(session, + "Failed to set GOP type."); + return ret; + } + } + + if (session->inter_med_buf_size != 0) { + option.code = MVX_FW_SET_INTER_MED_BUF_SIZE; + option.inter_med_buf_size = session->inter_med_buf_size; + ret = fw_set_option(session, &option); + if (ret != 0) { + MVX_SESSION_WARN(session, + "Failed to set inter mediate buffer size."); + return 
ret; + } + } + + if (session->svct3_level1_period != 0) { + option.code = MVX_FW_SET_SVCT3_LEVEL1_PERIOD; + option.svct3_level1_period = session->svct3_level1_period; + ret = fw_set_option(session, &option); + if (ret != 0) { + MVX_SESSION_WARN(session, + "Failed to set svct3_level1_period."); + return ret; + } + } + + if (session->reset_ltr_period != 0) { + option.code = MVX_FW_SET_LTR_PERIOD; + option.reset_ltr_period = session->reset_ltr_period; + ret = fw_set_option(session, &option); + if (ret != 0) { + MVX_SESSION_WARN(session, + "Failed to set ltr period."); + return ret; + } + } + + if (session->reset_gop_pframes != 0) { + option.code = MVX_FW_SET_GOP_PFRAMES; + option.reset_gop_pframes = session->reset_gop_pframes; + ret = fw_set_option(session, &option); + if (ret != 0) { + MVX_SESSION_WARN(session, + "Failed to set gop pframes."); + return ret; + } + } + + if (session->cyclic_intra_refresh_mb != 0) { + option.code = MVX_FW_SET_INTRA_MB_REFRESH; + option.intra_mb_refresh = + session->cyclic_intra_refresh_mb; + ret = fw_set_option(session, &option); + if (ret != 0) { + MVX_SESSION_WARN(session, + "Failed to set cyclic intra refresh Mb."); + return ret; + } + } + + if (session->constr_ipred != MVX_TRI_UNSET && + (codec == MVX_FORMAT_H264 || codec == MVX_FORMAT_HEVC)) { + option.code = MVX_FW_SET_CONSTR_IPRED; + option.constr_ipred = session->constr_ipred; + ret = fw_set_option(session, &option); + if (ret != 0) { + MVX_SESSION_WARN(session, + "Failed to set constr ipred."); + return ret; + } + } + } else { + /* + //handle JPEG rc + if (session->rc_type) { + option.code = MVX_FW_SET_RATE_CONTROL_JPEG; + + option.jpeg_rc.fps = FRAC_TO_Q16(session->fps_n, session->fps_d); + option.jpeg_rc.qscale = session->jpeg_quality; + option.jpeg_rc.qscale_chroma = session->jpeg_quality_chroma; + option.jpeg_rc.qscale_luma = session->jpeg_quality_luma; + + ret = fw_set_option(session, &option); + if (ret != 0) { + MVX_SESSION_WARN(session, + "Failed to put target 
bitrate."); + return ret; + } + } + */ + } + + if (codec == MVX_FORMAT_HEVC) { + if (session->entropy_sync != MVX_TRI_UNSET) { + option.code = MVX_FW_SET_ENTROPY_SYNC; + option.entropy_sync = session->entropy_sync; + ret = fw_set_option(session, &option); + if (ret != 0) { + MVX_SESSION_WARN(session, + "Failed to set entropy sync."); + return ret; + } + } + + if (session->temporal_mvp != MVX_TRI_UNSET) { + option.code = MVX_FW_SET_TEMPORAL_MVP; + option.temporal_mvp = session->temporal_mvp; + ret = fw_set_option(session, &option); + if (ret != 0) { + MVX_SESSION_WARN(session, + "Failed to set temporal mvp."); + return ret; + } + } + if (session->min_luma_cb_size != 0) { + option.code = MVX_FW_SET_MIN_LUMA_CB_SIZE; + option.min_luma_cb_size = session->min_luma_cb_size; + ret = fw_set_option(session, &option); + if (ret != 0) { + MVX_SESSION_WARN(session, + "Failed to set min luma cb size."); + return ret; + } + } + } + + if (codec == MVX_FORMAT_HEVC || + codec == MVX_FORMAT_VP9) { + option.code = MVX_FW_SET_TILES; + option.tile.rows = session->tile_rows > 0 ? session->tile_rows : 1; + option.tile.cols = session->tile_cols > 0 ? 
session->tile_cols : 1; + ret = fw_set_option(session, &option); + if (ret != 0) { + MVX_SESSION_WARN(session, + "Failed to set tile dims."); + return ret; + } + } + + if (session->entropy_mode != MVX_ENTROPY_MODE_NONE && + codec == MVX_FORMAT_H264) { + option.code = MVX_FW_SET_ENTROPY_MODE; + option.entropy_mode = session->entropy_mode; + ret = fw_set_option(session, &option); + if (ret != 0) { + MVX_SESSION_WARN(session, + "Failed to set entropy mode."); + return ret; + } + } + + if (session->intra_ipenalty_angular != MVX_INVALID_VAL){ + option.code = MVX_FW_SET_ENC_INTRA_IPENALTY_ANGULAR; + option.intra_ipenalty_angular = session->intra_ipenalty_angular; + ret = fw_set_option(session, &option); + if (ret != 0) + { + MVX_SESSION_WARN(session, + "Failed to set intra ipenalty angular"); + return ret; + } + } + + if (session->intra_ipenalty_planar != MVX_INVALID_VAL){ + option.code = MVX_FW_SET_ENC_INTRA_IPENALTY_PLANAR; + option.intra_ipenalty_planar = session->intra_ipenalty_planar; + ret = fw_set_option(session, &option); + if (ret != 0) + { + MVX_SESSION_WARN(session, + "Failed to set intra ipenalty planar"); + return ret; + } + } + + if (session->intra_ipenalty_dc != MVX_INVALID_VAL){ + option.code = MVX_FW_SET_ENC_INTRA_IPENALTY_DC; + option.intra_ipenalty_dc = session->intra_ipenalty_dc; + ret = fw_set_option(session, &option); + if (ret != 0) + { + MVX_SESSION_WARN(session, + "Failed to set intra ipenalty dc"); + return ret; + } + } + + if (session->inter_ipenalty_angular != MVX_INVALID_VAL){ + option.code = MVX_FW_SET_ENC_INTER_IPENALTY_ANGULAR; + option.inter_ipenalty_angular = session->inter_ipenalty_angular; + ret = fw_set_option(session, &option); + if (ret != 0) + { + MVX_SESSION_WARN(session, + "Failed to set inter ipenalty angular"); + return ret; + } + } + + if (session->inter_ipenalty_planar != MVX_INVALID_VAL){ + option.code = MVX_FW_SET_ENC_INTER_IPENALTY_PLANAR; + option.inter_ipenalty_planar = session->inter_ipenalty_planar; + ret = 
fw_set_option(session, &option); + if (ret != 0) + { + MVX_SESSION_WARN(session, + "Failed to set inter ipenalty planar"); + return ret; + } + } + + if (session->inter_ipenalty_dc != MVX_INVALID_VAL){ + option.code = MVX_FW_SET_ENC_INTER_IPENALTY_DC; + option.inter_ipenalty_dc = session->inter_ipenalty_dc; + ret = fw_set_option(session, &option); + if (ret != 0) + { + MVX_SESSION_WARN(session, + "Failed to set inter ipenalty dc"); + return ret; + } + } + + if (codec == MVX_FORMAT_H264 || + codec == MVX_FORMAT_HEVC) { + option.code = MVX_FW_SET_SLICE_SPACING_MB; + if (session->multi_slice_mode == + MVX_MULTI_SLICE_MODE_SINGLE) + option.slice_spacing_mb = 0; + else + option.slice_spacing_mb = + session->multi_slice_max_mb; + + ret = fw_set_option(session, &option); + if (ret != 0) { + MVX_SESSION_WARN(session, + "Failed to set slice spacing."); + return ret; + } + + option.code = MVX_FW_SET_CABAC_INIT_IDC; + option.cabac_init_idc = session->cabac_init_idc; + ret = fw_set_option(session, &option); + if (ret != 0) { + MVX_SESSION_WARN(session, + "Failed to set CABAC init IDC."); + return ret; + } + if (session->crop_left != 0) { + option.code = MVX_FW_SET_CROP_LEFT; + option.crop_left = session->crop_left; + ret = fw_set_option(session, &option); + if (ret != 0) { + MVX_SESSION_WARN(session, + "Failed to set crop left"); + return ret; + } + } + if (session->crop_right != 0) { + option.code = MVX_FW_SET_CROP_RIGHT; + option.crop_right = session->crop_right; + ret = fw_set_option(session, &option); + if (ret != 0) { + MVX_SESSION_WARN(session, + "Failed to set crop right"); + return ret; + } + } + if (session->crop_top != 0) { + option.code = MVX_FW_SET_CROP_TOP; + option.crop_top = session->crop_top; + ret = fw_set_option(session, &option); + if (ret != 0) { + MVX_SESSION_WARN(session, + "Failed to set crop top"); + return ret; + } + } + if (session->crop_bottom != 0) { + option.code = MVX_FW_SET_CROP_BOTTOM; + option.crop_bottom = session->crop_bottom; + ret = 
fw_set_option(session, &option); + if (ret != 0) { + MVX_SESSION_WARN(session, + "Failed to set crop bottom"); + return ret; + } + } + + if (session->color_desc.range != 0 || session->color_desc.matrix_coeff != 2 || + session->color_desc.colour_primaries != 2 || + session->color_desc.transfer_characteristics != 2 || + session->color_desc.sar_height != 0 || session->color_desc.sar_width != 0 || + session->color_desc.aspect_ratio_idc != 0 || + session->color_desc.flags != 0) { + struct mvx_fw_set_option option; + + option.code = MVX_FW_SET_COLOUR_DESC; + option.colour_desc = session->color_desc; + ret = fw_set_option(session, &option); + if (ret != 0) { + MVX_SESSION_WARN(session, + "Failed to set vui colour description"); + return ret; + } + } + + if (session->sei_userdata.flags) { + option.code = MVX_FW_SET_SEI_USERDATA; + option.userdata = session->sei_userdata; + ret = fw_set_option(session, &option); + if (ret != 0) { + MVX_SESSION_WARN(session, + "Failed to set sei userdata"); + return ret; + } + } + + if (session->mvx_ltr.mode != 0 || session->mvx_ltr.period != 0){ + option.code = MVX_FW_SET_LONG_TERM_REF; + option.ltr.mode = session->mvx_ltr.mode; + option.ltr.period = session->mvx_ltr.period; + ret = fw_set_option(session, &option); + if (ret != 0) { + MVX_SESSION_WARN(session, + "Failed to set ltr mode/period"); + return ret; + } + } + + if (session->gdr_number > 1 && session->gdr_period > 1){ + option.code = MVX_FW_SET_GDR_NUMBER; + option.gdr_number = session->gdr_number; + ret = fw_set_option(session, &option); + if (ret != 0) { + MVX_SESSION_WARN(session, + "Failed to set gdr number"); + return ret; + } + option.code = MVX_FW_SET_GDR_PERIOD; + option.gdr_period = session->gdr_period; + ret = fw_set_option(session, &option); + if (ret != 0) { + MVX_SESSION_WARN(session, + "Failed to set gdr period"); + return ret; + } + } + + } + + if (session->nHRDBufsize != 0) { + option.code = MVX_FW_SET_HRD_BUF_SIZE; + option.nHRDBufsize = session->nHRDBufsize; + ret 
= fw_set_option(session, &option); + if (ret != 0) { + MVX_SESSION_WARN(session, "Failed to set HRD Buffer Size"); + return ret; + } + } + + if (codec == MVX_FORMAT_VP9) { + option.code = MVX_FW_SET_VP9_PROB_UPDATE; + option.vp9_prob_update = session->vp9_prob_update; + ret = fw_set_option(session, &option); + if (ret != 0) { + MVX_SESSION_WARN(session, + "Failed to set VP9 prob update mode."); + return ret; + } + } + + if (session->mv_h_search_range != 0 && + session->mv_v_search_range != 0) { + option.code = MVX_FW_SET_MV_SEARCH_RANGE; + option.mv.x = session->mv_h_search_range; + option.mv.y = session->mv_v_search_range; + ret = fw_set_option(session, &option); + if (ret != 0) { + MVX_SESSION_WARN(session, + "Failed to set motion vector search range."); + return ret; + } + } + + if (session->bitdepth_chroma != 0 && + session->bitdepth_luma != 0) { + option.code = MVX_FW_SET_BITDEPTH; + option.bitdepth.chroma = session->bitdepth_chroma; + option.bitdepth.luma = session->bitdepth_luma; + ret = fw_set_option(session, &option); + if (ret != 0) { + MVX_SESSION_WARN(session, + "Failed to set bitdepth."); + return ret; + } + } + + if (session->force_chroma_format != 0) { + option.code = MVX_FW_SET_CHROMA_FORMAT; + option.chroma_format = session->force_chroma_format; + ret = fw_set_option(session, &option); + if (ret != 0) { + MVX_SESSION_WARN(session, + "Failed to set chroma format."); + return ret; + } + } + + if( session->use_cust_rgb_to_yuv_mode == MVX_CUST_YUV2RGB_MODE_STANDARD ) + { + option.code = MVX_FW_SET_RGB_TO_YUV_MODE; + generate_standards_rgb2yuv_coef(session->rgb_to_yuv,&option.rgb2yuv_params); + ret = fw_set_option(session, &option); + if (ret != 0) { + MVX_SESSION_WARN(session, + "Failed to set rgb2yuv color conversion mode."); + return ret; + } + } + else if(session->use_cust_rgb_to_yuv_mode == MVX_CUST_YUV2RGB_MODE_CUSTOMIZED){ + option.code = MVX_FW_SET_RGB_TO_YUV_MODE; + memcpy(&option.rgb2yuv_params,&session->rgb2yuv_color_conv_coef,sizeof(struct 
mvx_rgb2yuv_color_conv_coef)); + ret = fw_set_option(session, &option); + if (ret != 0) { + MVX_SESSION_WARN(session, + "Failed to set rgb2yuv color conversion mode."); + return ret; + } + } + + if (session->band_limit != 0) { + option.code = MVX_FW_SET_BAND_LIMIT; + option.band_limit = session->band_limit; + ret = fw_set_option(session, &option); + if (ret != 0) { + MVX_SESSION_WARN(session, + "Failed to set bandwidth limit."); + return ret; + } + } + + if (session->init_qpi != 0){ + option.code = MVX_FW_SET_INIT_QP_I; + option.init_qpi = session->init_qpi; + ret = fw_set_option(session, &option); + if (ret != 0) { + MVX_SESSION_WARN(session, + "Failed to set init qp for I frame."); + return ret; + } + } + if (session->init_qpp != 0){ + option.code = MVX_FW_SET_INIT_QP_P; + option.init_qpp = session->init_qpp; + ret = fw_set_option(session, &option); + if (ret != 0) { + MVX_SESSION_WARN(session, + "Failed to set init qp for P frame."); + return ret; + } + } + if (session->sao_luma != 0){ + option.code = MVX_FW_SET_SAO_LUMA; + option.sao_luma = session->sao_luma; + ret = fw_set_option(session, &option); + if (ret != 0) { + MVX_SESSION_WARN(session, + "Failed to set sao luma."); + return ret; + } + } + if (session->sao_chroma != 0){ + option.code = MVX_FW_SET_SAO_CHROMA; + option.sao_chroma = session->sao_chroma; + ret = fw_set_option(session, &option); + if (ret != 0) { + MVX_SESSION_WARN(session, + "Failed to set sao chroma."); + return ret; + } + } + if (session->qp_delta_i_p != 0){ + option.code = MVX_FW_SET_QP_DELTA_I_P; + option.qp_delta_i_p = session->qp_delta_i_p; + ret = fw_set_option(session, &option); + if (ret != 0) { + MVX_SESSION_WARN(session, + "Failed to set delta qp for I frame and P frame."); + return ret; + } + } + if (session->ref_rb_en != 0){ + option.code = MVX_FW_SET_QP_REF_RB_EN; + option.ref_rb_en = session->ref_rb_en; + ret = fw_set_option(session, &option); + if (ret != 0) { + MVX_SESSION_WARN(session, + "Failed to set ref_rb_en."); + 
return ret; + } + } + if (session->rc_qp_clip_top != 0){ + option.code = MVX_FW_SET_RC_CLIP_TOP; + option.rc_qp_clip_top = session->rc_qp_clip_top; + ret = fw_set_option(session, &option); + if (ret != 0) { + MVX_SESSION_WARN(session, + "Failed to set rc_qp_clip_top."); + return ret; + } + } + if (session->rc_qp_clip_bot != 0){ + option.code = MVX_FW_SET_RC_CLIP_BOT; + option.rc_qp_clip_bot = session->rc_qp_clip_bot; + ret = fw_set_option(session, &option); + if (ret != 0) { + MVX_SESSION_WARN(session, + "Failed to set rc_qp_clip_bot."); + return ret; + } + } + if (session->qpmap_qp_clip_top != 0){ + option.code = MVX_FW_SET_QP_MAP_CLIP_TOP; + option.qpmap_qp_clip_top = session->qpmap_qp_clip_top; + ret = fw_set_option(session, &option); + if (ret != 0) { + MVX_SESSION_WARN(session, + "Failed to set qpmap_qp_clip_top."); + return ret; + } + } + if (session->qpmap_qp_clip_bot != 0){ + option.code = MVX_FW_SET_QP_MAP_CLIP_BOT; + option.qpmap_qp_clip_bot = session->qpmap_qp_clip_bot; + ret = fw_set_option(session, &option); + if (ret != 0) { + MVX_SESSION_WARN(session, + "Failed to set qpmap_qp_clip_bot."); + return ret; + } + } + if (session->lambda_scale.lambda_scale_i_q8 != 0 || + session->lambda_scale.lambda_scale_sqrt_i_q8 != 0 || + session->lambda_scale.lambda_scale_p_q8 != 0 || + session->lambda_scale.lambda_scale_sqrt_p_q8 != 0 || + session->lambda_scale.lambda_scale_b_ref_q8 != 0 || + session->lambda_scale.lambda_scale_sqrt_b_ref_q8 != 0 || + session->lambda_scale.lambda_scale_b_nonref_q8 != 0 || + session->lambda_scale.lambda_scale_sqrt_b_nonref_q8 != 0){ + option.code = MVX_FW_SET_ENC_LAMBDA_SCALE; + + memset(&option.lambda_scale, 0, sizeof(option.lambda_scale)); + option.lambda_scale.lambda_scale_i_q8 = (uint16_t)0x0100; // default is 1.0 + option.lambda_scale.lambda_scale_sqrt_i_q8 = (uint16_t)0x0100; + option.lambda_scale.lambda_scale_p_q8 = (uint16_t)0x0100; // default is 1.0 + option.lambda_scale.lambda_scale_sqrt_p_q8 = (uint16_t)0x0100; + 
option.lambda_scale.lambda_scale_b_ref_q8 = (uint16_t)0x0100; // default is 1.0 + option.lambda_scale.lambda_scale_sqrt_b_ref_q8 = (uint16_t)0x0100; + option.lambda_scale.lambda_scale_b_nonref_q8 = (uint16_t)0x0100; // default is 1.0 + option.lambda_scale.lambda_scale_sqrt_b_nonref_q8 = (uint16_t)0x0100; + + if( session->lambda_scale.lambda_scale_i_q8 != 0) + { + option.lambda_scale.lambda_scale_i_q8 = session->lambda_scale.lambda_scale_i_q8; + } + if( session->lambda_scale.lambda_scale_sqrt_i_q8 != 0) + { + option.lambda_scale.lambda_scale_sqrt_i_q8 = session->lambda_scale.lambda_scale_sqrt_i_q8; + } + if( session->lambda_scale.lambda_scale_p_q8 != 0) + { + option.lambda_scale.lambda_scale_p_q8 = session->lambda_scale.lambda_scale_p_q8; + } + if( session->lambda_scale.lambda_scale_sqrt_p_q8 != 0) + { + option.lambda_scale.lambda_scale_sqrt_p_q8 = session->lambda_scale.lambda_scale_sqrt_p_q8; + } + if( session->lambda_scale.lambda_scale_b_ref_q8 != 0) + { + option.lambda_scale.lambda_scale_b_ref_q8 = session->lambda_scale.lambda_scale_b_ref_q8; + } + if( session->lambda_scale.lambda_scale_sqrt_b_ref_q8 != 0) + { + option.lambda_scale.lambda_scale_sqrt_b_ref_q8 = session->lambda_scale.lambda_scale_sqrt_b_ref_q8; + } + if( session->lambda_scale.lambda_scale_b_nonref_q8 != 0) + { + option.lambda_scale.lambda_scale_b_nonref_q8 = session->lambda_scale.lambda_scale_b_nonref_q8; + } + if( session->lambda_scale.lambda_scale_sqrt_b_nonref_q8 != 0) + { + option.lambda_scale.lambda_scale_sqrt_b_nonref_q8 = session->lambda_scale.lambda_scale_sqrt_b_nonref_q8; + } + ret = fw_set_option(session, &option); + if (ret != 0) { + MVX_SESSION_WARN(session, + "Failed to set lambda scale."); + return ret; + } + } + + if (codec == MVX_FORMAT_JPEG) { + if (session->resync_interval >= 0) { + option.code = MVX_FW_SET_RESYNC_INTERVAL; + option.resync_interval = session->resync_interval; + ret = fw_set_option(session, &option); + if (ret != 0) { + MVX_SESSION_WARN(session, + "Failed to set 
resync interval."); + return ret; + } + } + + if (session->jpeg_quality != 0 || session->jpeg_quality_luma != 0 || session->jpeg_quality_chroma != 0) { + uint8_t qtbl_chroma[MVX_FW_QUANT_LEN]; + uint8_t qtbl_luma[MVX_FW_QUANT_LEN]; + uint32_t quality_luma = session->jpeg_quality_luma != 0 ? session->jpeg_quality_luma : session->jpeg_quality; + uint32_t quality_chroma = session->jpeg_quality_chroma != 0 ? session->jpeg_quality_chroma : session->jpeg_quality; + option.code = MVX_FW_SET_QUANT_TABLE; + if(quality_luma) { + generate_quant_tbl(quality_luma, + qtbl_luma_ref, qtbl_luma); + option.quant_tbl.luma = qtbl_luma; + } + if (quality_chroma) { + generate_quant_tbl(quality_chroma, + qtbl_chroma_ref, qtbl_chroma); + option.quant_tbl.chroma = qtbl_chroma; + } + ret = fw_set_option(session, &option); + if (ret != 0) { + MVX_SESSION_WARN(session, + "Failed to set quantization table."); + return ret; + } + } + if (session->huff_table.type != 0) { + option.code = MVX_FW_SET_HUFF_TABLE; + memcpy(&option.huff_table, &session->huff_table, sizeof(struct mvx_huff_table)); + ret = fw_set_option(session, &option); + if (ret != 0) { + MVX_SESSION_WARN(session, + "Failed to set huff table."); + return ret; + } + } + } + + if ((session->port[MVX_DIR_INPUT].format ==MVX_FORMAT_YUV444 + ||session->port[MVX_DIR_INPUT].format ==MVX_FORMAT_YUV444_10 + ||session->port[MVX_DIR_INPUT].format ==MVX_FORMAT_YUV420_I420 + ||session->port[MVX_DIR_INPUT].format ==MVX_FORMAT_YUV420_I420_10 + ||session->port[MVX_DIR_INPUT].format ==MVX_FORMAT_Y + ||session->port[MVX_DIR_INPUT].format ==MVX_FORMAT_Y_10) + && session->forced_uv_value >= 0 && session->forced_uv_value < 0x400 ) { + option.code = MVX_FW_SET_ENC_FORCED_UV_VAL; + option.forced_uv_value = session->forced_uv_value; + ret = fw_set_option(session, &option); + if (ret != 0) { + MVX_SESSION_WARN(session, + "Failed to set forced to uv value."); + return ret; + } + } + + if (session->crop.width != 0 + && session->crop.height !=0 + && 
session->crop.crop_en !=0) { + if (session->crop.width != session->port[MVX_DIR_INPUT].width + || session->crop.height != session->port[MVX_DIR_INPUT].height) { + option.code = MVX_FW_SET_ENC_SRC_CROPPING; + memcpy(&option.crop,&session->crop,sizeof(struct mvx_crop_cfg)); + ret = fw_set_option(session, &option); + if (ret != 0) { + MVX_SESSION_WARN(session, + "Failed to set enc src crop."); + return ret; + } + } + } + + if (session->mini_frame_cnt >= 2) { + option.code = MVX_FW_SET_MINI_FRAME_CNT; + option.mini_frame_cnt = session->mini_frame_cnt; + ret = fw_set_option(session, &option); + if (ret != 0) { + MVX_SESSION_WARN(session, + "Failed to set mini frame buffer cnt."); + return ret; + } + } + + ret = fw_common_setup(session); + + return ret; +} + +static int fw_decoder_setup(struct mvx_session *session) +{ + int ret; + struct mvx_fw_set_option option; + + enum mvx_format codec; + enum mvx_direction dir; + + dir = get_bitstream_port(session); + codec = session->port[dir].format; + + if (codec == MVX_FORMAT_VC1 && + session->profile[codec] != MVX_PROFILE_NONE) { + option.code = MVX_FW_SET_PROFILE_LEVEL; + option.profile_level.profile = session->profile[codec]; + option.profile_level.level = session->level[codec]; + ret = fw_set_option(session, &option); + if (ret != 0) { + MVX_SESSION_WARN(session, + "Failed to set profile/level."); + return ret; + } + } + + if (codec == MVX_FORMAT_AV1 && + session->fsf_mode != MVX_INVALID_VAL) { + option.code = MVX_FW_SET_FSF_MODE; + option.fsf_mode = session->fsf_mode; + ret = fw_set_option(session, &option); + if (ret != 0) { + MVX_SESSION_WARN(session, + "Failed to set av1 fsf mode."); + return ret; + } + } + + if (session->ignore_stream_headers != MVX_TRI_UNSET) { + option.code = MVX_FW_SET_IGNORE_STREAM_HEADERS; + option.ignore_stream_headers = session->ignore_stream_headers; + ret = fw_set_option(session, &option); + if (ret != 0) { + MVX_SESSION_WARN(session, + "Failed to set ignore stream headers."); + return ret; + } 
+ } + + if (session->frame_reordering != MVX_TRI_UNSET) { + option.code = MVX_FW_SET_FRAME_REORDERING; + option.frame_reordering = session->frame_reordering; + ret = fw_set_option(session, &option); + if (ret != 0) { + MVX_SESSION_WARN(session, + "Failed to set frame reordering."); + return ret; + } + } + + if (session->intbuf_size != 0) { + option.code = MVX_FW_SET_INTBUF_SIZE; + option.intbuf_size = session->intbuf_size; + ret = fw_set_option(session, &option); + if (ret != 0) { + MVX_SESSION_WARN(session, + "Failed to set internal buffer size."); + return ret; + } + } + + if (session->dsl_frame.width != 0 && session->dsl_frame.height != 0) { + option.code = MVX_FW_SET_DSL_FRAME; + option.dsl_frame.width = session->dsl_frame.width; + option.dsl_frame.height = session->dsl_frame.height; + ret = fw_set_option(session, &option); + if (ret != 0) { + MVX_SESSION_WARN(session, + "Failed to set DSL frame width/height."); + return ret; + } + } + + if (session->dsl_pos_mode >= 0 && session->dsl_pos_mode <= 2) { + option.code = MVX_FW_SET_DSL_MODE; + option.dsl_pos_mode = session->dsl_pos_mode; + ret = fw_set_option(session, &option); + if (ret != 0) { + MVX_SESSION_WARN(session, + "Failed to set DSL mode."); + return ret; + } + } + + if (session->dsl_interp_mode >= 0 && session->dsl_interp_mode <= 1) { + option.code = MVX_FW_SET_DSL_INTERP_MODE; + option.dsl_interp_mode = session->dsl_interp_mode; + ret = fw_set_option(session, &option); + if (ret != 0) { + MVX_SESSION_WARN(session, + "Failed to set DSL INTERP mode."); + return ret; + } + } + + if( mvx_is_rgb24(session->port[MVX_DIR_OUTPUT].format)) + { + option.code = MVX_FW_SET_DEC_YUV2RGB_PARAMS; + + if(session->use_cust_color_conv_coef) + { + ret =0; + memcpy(&option.yuv2rbg_csc_coef,&session->color_conv_coef,sizeof(struct mvx_color_conv_coef)); + } + else + { + ret =generate_standards_yuv2rgb_coef(session->color_conv_mode,&option.yuv2rbg_csc_coef); + } + if(0==ret) + ret = fw_set_option(session, &option); + if (ret 
!= 0) { + MVX_SESSION_WARN(session, + "Failed to set yuv2rgb color conversion mode."); + return ret; + } + } + if (session->disabled_features != 0 || codec == MVX_FORMAT_AV1) { + option.code = MVX_FW_SET_DISABLE_FEATURES; + option.disabled_features = session->disabled_features; + //disable MVE_OPT_DISABLE_FEATURE_AFBC_LEGACY_REF for av1 + option.disabled_features |= codec == MVX_FORMAT_AV1 ? 0x100 : 0; + ret = fw_set_option(session, &option); + if (ret != 0) { + MVX_SESSION_WARN(session, + "Failed to set disabled features:%x.", option.disabled_features); + return ret; + } + } + + if (session->crop.crop_en !=0 + && session->crop.width > 0 + && session->crop.height > 0) { + option.code = MVX_FW_SET_DEC_DST_CROPPING; + memcpy(&option.crop,&session->crop,sizeof(struct mvx_crop_cfg)); + ret = fw_set_option(session, &option); + if (ret != 0) { + MVX_SESSION_WARN(session, + "Failed to set dec dst crop."); + return ret; + } + } + + ret = fw_common_setup(session); + + return ret; +} + +static int fw_initial_setup(struct mvx_session *session) +{ + int ret; + enum mvx_direction dir; + enum mvx_format codec; + struct mvx_fw_set_option option; + + MVX_SESSION_INFO(session, "Firmware initial setup."); + + fw_set_debug(session, 5); + + option.code = MVX_FW_SET_WATCHDOG_TIMEOUT; + option.watchdog_timeout = fw_watchdog_timeout; + ret = fw_set_option(session, &option); + if (ret != 0) + return ret; + + dir = get_bitstream_port(session); + codec = session->port[dir].format; + + ret = fw_job(session, session->job_frames); + if (ret != 0) + return ret; + + if (session->is_encoder) + ret = fw_encoder_setup(session); + else + ret = fw_decoder_setup(session); + + if (ret != 0) { + MVX_SESSION_WARN(session, + "Failed to perform initial setup.\n"); + return ret; + } + + ret = fw_state_change(session, MVX_FW_STATE_RUNNING); + if (ret != 0) { + MVX_SESSION_WARN(session, "Failed to queue state change."); + return ret; + } + + ret = fw_ping(session); + if (ret != 0) { + 
MVX_SESSION_WARN(session, "Failed to put ping message."); + send_event_error(session, ret); + return ret; + } + + return ret; +} + +/** + * map_buffer() - Memory map buffer to MVE address space. + * + * Return 0 on success, else error code. + */ +static int map_buffer(struct mvx_session *session, + enum mvx_direction dir, + struct mvx_buffer *buf) +{ + mvx_mmu_va begin; + mvx_mmu_va end; + enum mvx_fw_region region; + int ret; + mvx_mmu_va *next_va; + ret = mutex_lock_interruptible(&session->fw.mem_mutex); + if (ret != 0) { + MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_ERROR, + "Cannot protect buffer VA."); + return ret; + } + + if (mvx_is_bitstream(session->port[dir].format)) { + region = MVX_FW_REGION_PROTECTED; + next_va = &session->fw.next_va_region_protected; + } + else if (mvx_is_frame(session->port[dir].format)) { + region = MVX_FW_REGION_FRAMEBUF; + next_va = &session->fw.next_va_region_outbuf; + } + else { + mutex_unlock(&session->fw.mem_mutex); + return -EINVAL; + } + + ret = session->fw.ops.get_region(region, &begin, &end); + if (ret != 0) { + mutex_unlock(&session->fw.mem_mutex); + return ret; + } + + ret = mvx_buffer_map(buf, begin, end, next_va, + session->port[dir].size); + if (ret != 0) { + mutex_unlock(&session->fw.mem_mutex); + return ret; + } + + mutex_unlock(&session->fw.mem_mutex); + return 0; +} +static int queue_osd_config(struct mvx_session *session, + struct mvx_osd_config *osd_cfg) +{ + int ret = 0; + ret = fw_set_osd_config(session, MVX_FW_SET_OSD_CONFIG, + osd_cfg); + return ret; +} + +static int queue_roi_regions(struct mvx_session *session, + struct mvx_roi_config *roi_cfg) +{ + int ret = 0; + if ( roi_cfg->qp_present ) { + ret = fw_set_qp(session, MVX_FW_SET_QP_REGION, + roi_cfg->qp); + } + if ( roi_cfg->roi_present ) { + ret = fw_set_roi_regions(session, MVX_FW_SET_ROI_REGIONS, + roi_cfg); + } + return ret; +} + +static int queue_qp_epr(struct mvx_session *session, + struct mvx_buffer_param_qp *qp) +{ + int ret = 0; + ret = fw_set_epr_qp(session, MVX_FW_SET_EPR_QP, + *qp); + + return 
ret; +} + +static int queue_chr_cfg(struct mvx_session *session, + struct mvx_chr_cfg *chr_cfg) +{ + int ret = 0; + + ret = fw_set_chr_cfg(session, MVX_FW_SET_CHR_CFG, + chr_cfg); + return ret; +} + +static int queue_enc_stats(struct mvx_session *session, + struct mvx_enc_stats *stats) +{ + int ret = 0; + + ret = fw_set_enc_stats(session, MVX_FW_SET_STATS_MODE, + stats); + return ret; +} + +static struct mvx_session_format_map *get_format_map_by_mvx_format(enum mvx_format format) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(mvx_raw_fmts); i++) + if (mvx_raw_fmts[i].format == format) + return &mvx_raw_fmts[i]; + + for (i = 0; i < ARRAY_SIZE(mvx_compressed_fmts); i++) + if (mvx_compressed_fmts[i].format == format) + return &mvx_compressed_fmts[i]; + + return NULL; +} + +static void revert_frame_format(struct mvx_session *session, + enum mvx_direction dir, + struct mvx_buffer *buf) +{ + struct mvx_session_port *port = &session->port[dir]; + struct mvx_session_format_map *new_format = get_format_map_by_mvx_format(port->format); + struct mvx_session_format_map *old_format = get_format_map_by_mvx_format(port->old_format); + if (new_format != NULL && old_format != NULL && old_format->bpp <= new_format->bpp) { + unsigned int size[MVX_BUFFER_NPLANES]; + unsigned int stride[MVX_BUFFER_NPLANES]; + int ret; + int i; + + for (i = 0; i < buf->nplanes; i++) + stride[i] = buf->planes[i].stride; + ret = mvx_buffer_frame_dim(port->old_format, port->width, port->height, &port->nplanes, + stride, size, session->setting_stride); + if (ret != 0) + return; // just return here as nothing is changed so far + + buf->format = port->old_format; + port->format = port->old_format; + port->pixelformat = port->old_pixelformat; + port->nplanes = buf->nplanes; + memcpy(port->stride, stride, sizeof(*stride) * MVX_BUFFER_NPLANES); + memcpy(port->size, size, sizeof(*size) * MVX_BUFFER_NPLANES); + session->event(session, MVX_SESSION_EVENT_PORT_CHANGED, (void *)MVX_DIR_OUTPUT); + } +} + +/** + * 
queue_buffer() - Put buffer to firmware queue. + * + * Return: 0 on success, else error code. + */ +static int queue_buffer(struct mvx_session *session, + enum mvx_direction dir, + struct mvx_buffer *buf) +{ + struct mvx_session_port *port = &session->port[dir]; + struct mvx_fw_msg msg; + struct mvx_seamless_target *seamless = &session->seamless_target; + unsigned int width; + unsigned int height; + unsigned int stride[MVX_BUFFER_NPLANES]; + unsigned i; + /* + * Vb2 cannot allocate buffers with bidirectional mapping, therefore + * proper direction should be set. + */ + // enum dma_data_direction dma_dir = + // (dir == MVX_DIR_OUTPUT) ? DMA_FROM_DEVICE : DMA_TO_DEVICE; + + int ret; + + if (dir == MVX_DIR_OUTPUT) { + port->scaling_shift = (buf->flags & MVX_BUFFER_FRAME_FLAG_SCALING_MASK) >> 14; + } + if (mvx_buffer_is_mapped(buf) == false) { + /* + * In dual AFBC downscaling case, allocate a buffer for AFBC frame in + * original resolution and attach to mvx_buffer as plane 1. + */ + if (session->dual_afbc_downscaled && dir == MVX_DIR_OUTPUT && + port->interlaced == 0 && buf->nplanes == 1 && port->size[1] > 0) { + size_t npages; + struct mvx_mmu_pages *pages; + + npages = DIV_ROUND_UP(port->size[1], MVE_PAGE_SIZE); + + if (session->fw.fw_bin->securevideo != false) { + struct dma_buf *dmabuf; + + dmabuf = mvx_secure_mem_alloc(session->fw.fw_bin->secure.secure, + port->size[1], MVX_FW_REGION_FRAMEBUF); + if (IS_ERR(dmabuf)) + return -ENOMEM; + + pages = mvx_mmu_alloc_pages_dma_buf(session->dev, dmabuf, npages); + if (IS_ERR(pages)) { + dma_buf_put(dmabuf); + return -ENOMEM; + } + } else { + pages = mvx_mmu_alloc_pages(session->dev, npages, npages, GFP_KERNEL); + if (IS_ERR(pages)) + return -ENOMEM; + } + + buf->planes[1].pages = pages; + buf->nplanes++; + } + + ret = map_buffer(session, dir, buf); + if (ret != 0) + return ret; + } + if (dir == MVX_DIR_OUTPUT && port->buffer_allocated < port->buffer_min) { + buf->flags |= MVX_BUFFER_FRAME_NEED_REALLOC; + return -EAGAIN; 
+ } + /* + * In case of port format and buffer format mismatch, it means + * driver updated format after received SEQ_PARAM message from + * fw, but client didn't get format and reallocate buffers. Try + * to change the port format to buffer foramt if the format is + * supported by VPU post-processing. + */ + if (mvx_is_frame(port->format) != false && !session->is_encoder && + port->format != port->old_format) + revert_frame_format(session, dir, buf); + /* + * Update frame dimensions. They might have changed due to a resolution + * change. + */ + if(MVX_DIR_OUTPUT == dir + && session->port[MVX_DIR_INPUT].format <= MVX_FORMAT_BITSTREAM_LAST + && seamless->seamless_mode !=0) + { + width = seamless->target_width < port->width ? port->width : seamless->target_width; + height = seamless->target_height < port->height ? port->height : seamless->target_height; + for(i=0;itarget_stride[i] < port->stride[i] ? port->stride[i] : seamless->target_stride[i]; + } + } + else + { + width = port->width; + height = port->height; + for(i=0;istride[i]; + } + } + if (mvx_is_afbc(port->format) != false) { + i = 0; + if(session->dual_afbc_downscaled && dir == MVX_DIR_OUTPUT && port->interlaced == 0 && buf->nplanes > 1) + { + ret = mvx_buffer_afbc_set(buf, port->format, width, + height, port->afbc_width_in_superblocks_downscaled, + port->size[i], 0, i); + if (ret != 0) + return ret; + i++; + } + ret = mvx_buffer_afbc_set(buf, port->format, width, + height, port->afbc_width, + port->size[i], port->interlaced, i); + if (ret != 0) + return ret; + } else if (mvx_is_frame(port->format) != false) { + ret = mvx_buffer_frame_set(buf, port->format, width, + height, stride, + port->size, + port->interlaced); + if (ret != 0) + return ret; + + } + + // if (!session->isession.securevideo && dma_dir == DMA_TO_DEVICE) { + // ret = mvx_buffer_synch(buf, dma_dir); + // if (ret != 0) + // return ret; + // } + if (session->pending_switch_out) { + session->pending_switch_out = false; + fw_switch_out(session); 
+ } + + msg.code = MVX_FW_CODE_BUFFER; + msg.buf = buf; + + MVX_SESSION_INFO(session, + "Firmware req: Buffer. dir=%u, len=[%u, %u, %u], flags=0x%08x, eos=%u, interlace=%u", + buf->dir, + buf->planes[0].filled, + buf->planes[1].filled, + buf->planes[2].filled, + buf->flags, + (buf->flags & MVX_BUFFER_EOS) != 0, + (buf->flags & MVX_BUFFER_INTERLACE) != 0); + + ret = session->fw.ops.put_message(&session->fw, &msg); + if (ret != 0) + goto send_error; + + port->buffer_count++; + port->buffers_in_window++; + port->flushed = false; + ret = send_irq(session); + if (ret != 0) + goto send_error; + + return 0; + +send_error: + send_event_error(session, ret); + return ret; +} + +/** + * queue_pending_buffers() - Queue pending buffers. + * + * Buffer that are queued when the port is still stream off will be put in the + * pending queue. Once both input- and output ports are stream on the pending + * buffers will be forwarded to the firmware. + * + * Return: 0 on success, else error code. + */ +static int queue_pending_buffers(struct mvx_session *session, + enum mvx_direction dir) +{ + struct mvx_buffer *buf; + struct mvx_buffer *tmp; + int roi_config_num = 0; + int roi_config_index = 0; + int qp_num = 0; + int qp_index = 0; + int chr_cfg_num = 0; + int chr_cfg_index = 0; + int enc_stats_num = 0; + int enc_stats_index = 0; + int osd_cfg_index = 0; + int osd_cfg_num = 0; + int pending_buf_idx = 0; + int osd_buffer_idx = 0; + struct mvx_roi_config roi_config; + int ret = 0; + + if (dir == MVX_DIR_INPUT && session->port[dir].roi_config_num > 0) { + roi_config_num = session->port[dir].roi_config_num; + } + if (dir == MVX_DIR_INPUT && session->port[dir].qp_num > 0) { + qp_num = session->port[dir].qp_num; + } + if (dir == MVX_DIR_INPUT && session->port[dir].chr_cfg_num > 0) { + chr_cfg_num = session->port[dir].chr_cfg_num; + } + if (dir == MVX_DIR_INPUT && session->port[dir].enc_stats_num > 0) { + enc_stats_num = session->port[dir].enc_stats_num; + } + if (dir == MVX_DIR_INPUT && 
session->port[dir].osd_cfg_num > 0) { + osd_cfg_num = session->port[dir].osd_cfg_num; + } + list_for_each_entry_safe(buf, tmp, &session->port[dir].buffer_queue, + head) { + buf->flags = buf->in_flags; + if ((buf->flags & MVX_BUFFER_FRAME_FLAG_ROI) == MVX_BUFFER_FRAME_FLAG_ROI && + roi_config_index < roi_config_num) { + roi_config = session->port[dir].roi_config_queue[roi_config_index]; + ret = queue_roi_regions(session, &roi_config); + roi_config_index++; + } + if ((buf->flags & MVX_BUFFER_FRAME_FLAG_GENERAL) == MVX_BUFFER_FRAME_FLAG_GENERAL && + qp_index < qp_num) { + ret = queue_qp_epr(session, &session->port[dir].qp_queue[qp_index]); + qp_index++; + } + if ((buf->flags & MVX_BUFFER_FRAME_FLAG_CHR) == MVX_BUFFER_FRAME_FLAG_CHR && + chr_cfg_index < chr_cfg_num) { + ret = queue_chr_cfg(session, &session->port[dir].chr_cfg_queue[chr_cfg_index]); + chr_cfg_index++; + } + if (enc_stats_index < enc_stats_num && + session->port[dir].enc_stats_queue[enc_stats_index].pic_index == pending_buf_idx ) { + ret = queue_enc_stats(session, &session->port[dir].enc_stats_queue[enc_stats_index]); + enc_stats_index++; + } + if (osd_cfg_index < osd_cfg_num && + session->port[dir].osd_cfg_queue[osd_cfg_index].pic_index == osd_buffer_idx ) { + ret = queue_osd_config(session, &session->port[dir].osd_cfg_queue[osd_cfg_index]); + osd_cfg_index++; + } + ret = queue_buffer(session, dir, buf); + pending_buf_idx++; + if (!(buf->flags & MVX_BUFFER_FRAME_FLAG_OSD_MASK)) { + osd_buffer_idx++;//check for yuv bffer + } + if ((buf->flags & MVX_BUFFER_FRAME_NEED_REALLOC) == MVX_BUFFER_FRAME_NEED_REALLOC) { + session->event(session, MVX_SESSION_EVENT_BUFFER, buf); + } else if (ret != 0) { + break; + } + list_del(&buf->head); + } + + session->port[dir].roi_config_num = 0; + session->port[dir].qp_num = 0; + session->port[dir].chr_cfg_num = 0; + session->port[dir].enc_stats_num = 0; + return ret; +} + +/** + * return_done_buffers() - Return buffers in done_queue to client. 
+ * + * When resolution changed in non-keyframe, keep frames with the former resolution + * in session->buffer_done_queue. + * + * In alloc_param msg handler, add the former resolution buffers and return them + * to client and send PORT_CHANGED EVENT later. + * + */ +static void return_done_buffers(struct mvx_session *session, + enum mvx_direction dir) +{ + struct mvx_buffer *buf; + struct mvx_buffer *tmp; + list_for_each_entry_safe(buf, tmp, + &session->port[MVX_DIR_OUTPUT].buffer_done_queue, head) { + list_del(&buf->head); + session->event(session, MVX_SESSION_EVENT_BUFFER, buf); + } +} + +/** + * fw_bin_ready() - Complete firmware configuration. + * + * The firmware binary load has completed and the firmware configuration can + * begin. + * + * If the session is no longer 'stream on' (someone issued 'stream off' before + * the firmware load completed) the firmware binary is put back to the cache. + * + * Else the the client session is registered and the firmware instance is + * constructed. + */ +static void fw_bin_ready(struct mvx_fw_bin *bin, + void *arg, + bool same_thread) +{ + struct mvx_session *session = arg; + int lock_failed = 1; + int ret; + + /* + * Only lock the mutex if the firmware binary was loaded by a + * background thread. + */ + if (same_thread == false) { + lock_failed = mutex_lock_interruptible(session->isession.mutex); + if (lock_failed != 0) { + send_event_error(session, lock_failed); + goto put_fw_bin; + } + } + + /* Return firmware binary if session is no longer 'stream on'. */ + if (!is_stream_on(session)) + goto put_fw_bin; + + /* Create client session. 
*/ + session->isession.core_mask = session->client_ops->get_core_mask( + session->client_ops); + session->isession.ncores = hweight32(session->isession.core_mask); + session->isession.l0_pte = mvx_mmu_set_pte( + MVX_ATTR_PRIVATE, virt_to_phys(session->mmu.page_table), + MVX_ACCESS_READ_WRITE); + + session->csession = session->client_ops->register_session( + session->client_ops, &session->isession); + if (IS_ERR(session->csession)) { + ret = PTR_ERR(session->csession); + send_event_error(session, ret); + goto put_fw_bin; + } + + /* Construct the firmware instance. */ + ret = mvx_fw_factory(&session->fw, bin, &session->mmu, + session, session->client_ops, session->csession, + session->isession.core_mask, + session->dentry); + if (ret != 0) { + send_event_error(session, ret); + goto unregister_csession; + } + + session->fw_bin = bin; + complete(&session->fw_loaded); + + mvx_fw_cache_log(bin, session->csession); + + ret = fw_initial_setup(session); + if (ret != 0) + goto unregister_csession; + + ret = queue_pending_buffers(session, MVX_DIR_INPUT); + if (ret != 0) + goto unregister_csession; + + ret = queue_pending_buffers(session, MVX_DIR_OUTPUT); + if (ret != 0) + goto unregister_csession; + + if (lock_failed == 0) + mutex_unlock(session->isession.mutex); + + mvx_session_put(session); + + return; + +unregister_csession: + session->client_ops->unregister_session(session->csession); + session->csession = NULL; + +put_fw_bin: + mvx_fw_cache_put(session->cache, bin); + session->fw_bin = NULL; + complete(&session->fw_loaded); + + if (lock_failed == 0) + mutex_unlock(session->isession.mutex); + + mvx_session_put(session); +} + +static int calc_afbc_size(struct mvx_session *session, + enum mvx_format format, + unsigned int width, + unsigned int height, + bool tiled_headers, + bool tiled_body, + bool superblock, + bool interlaced) +{ + static const unsigned int mb_header_size = 16; + unsigned int payload_align = 128; + unsigned int mb_size; + int size; + + /* Calculate width 
and height in super blocks. */ + /** + * FIXME: Add superblock calculation back. + * So far, doesn't find a good way to transfer superblock info when set/try + * format, in 16x16 case, the calculated size of 32x8 tiled may larger than + * actually allocated by client(e.g gralloc). And there's no actually 32x8 encode + * requirement. So, remove superblock calculation temproarily. + **/ + if (/* superblock != false */ false) { + width = DIV_ROUND_UP(width, 32); + height = DIV_ROUND_UP(height, 8) + 1; + } else { + width = DIV_ROUND_UP(width, 16); + height = DIV_ROUND_UP(height, 16) + 1; + } + + /* Round up size to 8x8 tiles. */ + if (tiled_headers != false || tiled_body != false) { + width = roundup(width, 8); + height = roundup(height, 8); + } + + switch (format) { + case MVX_FORMAT_YUV420_AFBC_8: + mb_size = 384; + break; + case MVX_FORMAT_YUV420_AFBC_10: + mb_size = 480; + break; + case MVX_FORMAT_YUV422_AFBC_8: + mb_size = 512; + break; + case MVX_FORMAT_YUV422_AFBC_10: + mb_size = 656; + break; + case MVX_FORMAT_Y_AFBC_8: + mb_size = 256; + break; + case MVX_FORMAT_Y_AFBC_10: + mb_size = 320; + break; + default: + MVX_SESSION_WARN(session, + "Unsupported AFBC format. format=%u.", + format); + return -EINVAL; + } + + /* Round up tiled body to 128 byte boundary. */ + if (tiled_body != false) + mb_size = roundup(mb_size, payload_align); + + if (interlaced != false) + height = DIV_ROUND_UP(height, 2); + + /* Calculate size of AFBC makroblock headers. 
*/ + size = roundup(width * height * mb_header_size, payload_align); + size += roundup(width * height * mb_size, payload_align); + + if (interlaced != false) + size *= 2; + + return size; +} + +static size_t divRoundUp(size_t value, size_t round) +{ + return (value + round - 1) / round; +} + +static size_t roundUp(size_t value, size_t round) +{ + return divRoundUp(value, round) * round; +} + +static int try_format(struct mvx_session *session, + enum mvx_direction dir, + enum mvx_format format, + unsigned int *width, + unsigned int *height, + uint8_t *nplanes, + unsigned int *stride, + unsigned int *size, + bool *interlaced) +{ + int ret = 0; + + if ((session->is_encoder && dir == MVX_DIR_INPUT && !mvx_is_frame(format)) || + (!session->is_encoder && dir == MVX_DIR_OUTPUT && !mvx_is_frame(format))) + return -EINVAL; + + if (dir == MVX_DIR_INPUT && format == MVX_FORMAT_JPEG) { + /* Limit width and height to 32k for jpeg decode. */ + *width = min_t(unsigned int, *width, 32768); + *height = min_t(unsigned int, *height, 32768); + } + else if ((dir == MVX_DIR_OUTPUT && format == MVX_FORMAT_JPEG) || + (dir == MVX_DIR_INPUT && mvx_is_frame(format))) { + /* Limit output width and height to 16k for jpeg encode. */ + *width = min_t(unsigned int, *width, 16384); + *height = min_t(unsigned int, *height, 16384); + } + else { + /* Limit width and height to 8k. */ + *width = min_t(unsigned int, *width, 8192); + *height = min_t(unsigned int, *height, 8192); + } + + /* Limit minimum width and height. 
*/ + *width = max_t(unsigned int, *width, 2); + *height = max_t(unsigned int, *height, 2); + + if (mvx_is_frame(format) && !mvx_is_afbc(format) && dir == MVX_DIR_OUTPUT) { + *width = session->orig_width >> session->port[MVX_DIR_OUTPUT].scaling_shift; + *height = session->orig_height >> session->port[MVX_DIR_OUTPUT].scaling_shift; + if (session->dsl_frame.width >= 16 && session->dsl_frame.height >= 16) { + *width = session->dsl_frame.width; + *height = session->dsl_frame.height; + } else if (session->dsl_ratio.hor != 1 || session->dsl_ratio.ver != 1) { + *width = session->orig_width / session->dsl_ratio.hor; + *height = session->orig_height / session->dsl_ratio.ver; + *width &= ~(1); + *height &= ~(1); + } + } else if (dir == MVX_DIR_OUTPUT) { + *width = session->orig_width; + *height = session->orig_height; + } + + if( dir == MVX_DIR_OUTPUT + && !mvx_is_afbc(format) + && session->crop.crop_en !=0 + && session->crop.width >0 + && session->crop.height >0 + && session->crop.width +session->crop.x <= session->orig_width + && session->crop.height +session->crop.y <= session->orig_height) + { + *width=session->crop.width; + *height=session->crop.height; + } + /* Interlaced input is not supported by the firmware. 
*/ + if (dir == MVX_DIR_INPUT && mvx_is_frame(format)) + *interlaced = false; + + if (mvx_is_afbc(format) != false) { + unsigned int afbc_alloc_bytes = + session->port[dir].afbc_alloc_bytes; + if (*nplanes <= 0) + size[0] = 0; + size[1] = 0; + + if(dir == MVX_DIR_OUTPUT + && session->dual_afbc_downscaled + && *interlaced ==0 + && session->port[dir].afbc_alloc_bytes_downscaled !=0 ) + { + unsigned int afbc_alloc_bytes_downscaled = + session->port[dir].afbc_alloc_bytes_downscaled; + + size[0] = roundup(afbc_alloc_bytes_downscaled, PAGE_SIZE); + size[1] = max_t(unsigned int, PAGE_SIZE, + afbc_alloc_bytes); + size[1] = roundup(size[1], PAGE_SIZE); + + *width = session->orig_width >> 1; + *height = session->orig_height >> 1; + } else { + if (dir == MVX_DIR_INPUT) { + /* it is basically a worst-case calcualtion based on a size rounded up to tile size*/ + int s1 = calc_afbc_size(session, format, *width, + *height, true, true, false, //*height, false, false, false, + *interlaced); + int s2 = calc_afbc_size(session, format, *width, + *height, true, true, true, //*height, false, false, false, + *interlaced); + int s = max_t(unsigned int, s1, s2); + if (s < 0) + return s; + + size[0] = max_t(unsigned int, size[0], s); + } + + if (*interlaced != false) + afbc_alloc_bytes *= 2; + + /* Size should be at least one page. */ + size[0] = max_t(unsigned int, size[0], + PAGE_SIZE); + size[0] = max_t(unsigned int, size[0], + afbc_alloc_bytes); + size[0] = roundup(size[0], PAGE_SIZE); + } + + *nplanes = 1; + } else if (mvx_is_frame(format) != false) { + uint32_t tmp_height = session->mini_frame_cnt >= 2? roundUp(divRoundUp(*height, session->mini_frame_cnt), 64) : *height; + ret = mvx_buffer_frame_dim(format, *width, tmp_height, nplanes, + stride, size, session->setting_stride); + } else { + /* + * For compressed formats the size should be the maximum number + * of bytes an image is expected to become. This is calculated + * as width * height * 2 B/px / 2. 
Size should be at least one + * page. For decode, limit to 256MB. For encode, some client like + * gstreamer may allocate 5 bitstream buffers, so limit to 192MB + * which is big enough for 16Kx16K random pixel JPEG encoding + * with quality level 90. + */ + + stride[0] = 0; + + if (*nplanes <= 0) + size[0] = 0; + + size[0] = max_t(unsigned int, size[0], PAGE_SIZE); + + if ((*width) * (*height) < 720 * 480) + size[0] = max_t(unsigned int, size[0], (*width) * (*height) * 3); + else + size[0] = max_t(unsigned int, size[0], (*width) * (*height)); + + if (dir == MVX_DIR_OUTPUT) + size[0] = min_t(unsigned int, size[0], (MAX_BITSTREAM_BUFFER_SIZE * 3) >> 2); + else + size[0] = min_t(unsigned int, size[0], MAX_BITSTREAM_BUFFER_SIZE); + size[0] = roundup(size[0], PAGE_SIZE); + + *nplanes = 1; + } + + MVX_SESSION_INFO(session, + "%s(), dir=%d fmt=%d w=%d h=%d planes=%d interlaced=%d stride=[%d %d %d] size=[%d %d %d]", + __func__, dir, format, *width, *height, *nplanes, *interlaced, + stride[0], stride[1], stride[2], + size[0], size[1], size[2]); + return ret; +} + +static void watchdog_work(struct work_struct *work) +{ + struct mvx_session *session = + container_of(work, struct mvx_session, watchdog_work); + int log_level = session->watchdog_count > 2 ? MVX_WAR_LOG_LEVEL : MVX_LOG_INFO; + int ret; + + mutex_lock(session->isession.mutex); + + MVX_SESSION_LOG(log_level, session, "Watchdog timeout. count=%u.", + session->watchdog_count); + + /* watchdog_count < 15 means total timeout is 12s */ + if (session->watchdog_count++ < 15) { + if (session->switched_in) { + /* Restart watchdog. */ + unsigned int timeout_ms = session_watchdog_timeout * session->watchdog_count; + watchdog_start(session, timeout_ms, false); + } + } else { + /* Print debug information. */ + print_debug(session); + /* Request firmware to dump its state. 
*/ + fw_dump(session); + session->client_ops->terminate(session->csession); + switch_out_rsp(session); + send_event_error(session, -ETIME); + } + + ret = kref_put(&session->isession.kref, session->isession.release); + if (ret != 0) + return; + + mutex_unlock(session->isession.mutex); + + session->client_ops->soft_irq(session->csession); +} + +static void watchdog_timeout(struct timer_list *timer) +{ + struct mvx_session *session = + container_of(timer, struct mvx_session, watchdog_timer); + + queue_work(system_unbound_wq, &session->watchdog_work); +} + +#if KERNEL_VERSION(4, 14, 0) > LINUX_VERSION_CODE +static void watchdog_timeout_legacy(unsigned long data) +{ + watchdog_timeout((struct timer_list *)data); +} + +#endif + +static void filter_decode_output_formats(enum mvx_format compressed_format, + int bitdepth, int chroma_format, uint64_t *formats) +{ + mvx_clear_bit(MVX_FORMAT_RGBA_8888, formats); + mvx_clear_bit(MVX_FORMAT_BGRA_8888, formats); + mvx_clear_bit(MVX_FORMAT_ARGB_8888, formats); + mvx_clear_bit(MVX_FORMAT_ABGR_8888, formats); + + if (compressed_format == MVX_FORMAT_JPEG) { + mvx_clear_bit(MVX_FORMAT_YUV420_AFBC_10, formats); + mvx_clear_bit(MVX_FORMAT_YUV422_AFBC_10, formats); + mvx_clear_bit(MVX_FORMAT_Y_AFBC_10, formats); + mvx_clear_bit(MVX_FORMAT_YUV420_P010, formats); + mvx_clear_bit(MVX_FORMAT_YUV422_Y210, formats); + mvx_clear_bit(MVX_FORMAT_Y_10, formats); + if (chroma_format == MVX_CHROMA_FORMAT_MONO || + chroma_format == MVX_CHROMA_FORMAT_420) { + mvx_clear_bit(MVX_FORMAT_YUV422_YUY2, formats); + mvx_clear_bit(MVX_FORMAT_YUV422_UYVY, formats); + mvx_clear_bit(MVX_FORMAT_YUV422_AFBC_8, formats); + if (chroma_format == MVX_CHROMA_FORMAT_MONO) + mvx_clear_bit(MVX_FORMAT_YUV420_AFBC_8, formats); + else + mvx_clear_bit(MVX_FORMAT_Y_AFBC_8, formats); + } + } else if (compressed_format == MVX_FORMAT_AVS2 || + compressed_format == MVX_FORMAT_H264 || + compressed_format == MVX_FORMAT_HEVC || + compressed_format == MVX_FORMAT_VP9 || + 
compressed_format == MVX_FORMAT_AV1) { + mvx_clear_bit(MVX_FORMAT_YUV422_AFBC_8, formats); + mvx_clear_bit(MVX_FORMAT_YUV422_AFBC_10, formats); + mvx_clear_bit(MVX_FORMAT_YUV422_YUY2, formats); + mvx_clear_bit(MVX_FORMAT_YUV422_UYVY, formats); + mvx_clear_bit(MVX_FORMAT_YUV422_Y210, formats); + /* 8bit to 10bit post-processing is not supported */ + if (bitdepth == 8) { + mvx_clear_bit(MVX_FORMAT_YUV420_AFBC_10, formats); + mvx_clear_bit(MVX_FORMAT_YUV420_P010, formats); + mvx_clear_bit(MVX_FORMAT_Y_10, formats); + if (chroma_format == MVX_CHROMA_FORMAT_MONO) + mvx_clear_bit(MVX_FORMAT_YUV420_AFBC_8, formats); + else + mvx_clear_bit(MVX_FORMAT_Y_AFBC_8, formats); + } else if (bitdepth == 10) { + if (chroma_format == MVX_CHROMA_FORMAT_MONO) + mvx_clear_bit(MVX_FORMAT_YUV420_AFBC_10, formats); + else + mvx_clear_bit(MVX_FORMAT_Y_AFBC_10, formats); + } + } else if (compressed_format < MVX_FORMAT_BITSTREAM_LAST) { + mvx_clear_bit(MVX_FORMAT_YUV420_AFBC_10, formats); + mvx_clear_bit(MVX_FORMAT_YUV422_AFBC_10, formats); + mvx_clear_bit(MVX_FORMAT_Y_AFBC_10, formats); + mvx_clear_bit(MVX_FORMAT_YUV420_P010, formats); + mvx_clear_bit(MVX_FORMAT_YUV422_Y210, formats); + mvx_clear_bit(MVX_FORMAT_Y_10, formats); + mvx_clear_bit(MVX_FORMAT_YUV422_AFBC_8, formats); + mvx_clear_bit(MVX_FORMAT_YUV422_YUY2, formats); + mvx_clear_bit(MVX_FORMAT_YUV422_UYVY, formats); + if (chroma_format == MVX_CHROMA_FORMAT_MONO) + mvx_clear_bit(MVX_FORMAT_YUV420_AFBC_8, formats); + else + mvx_clear_bit(MVX_FORMAT_Y_AFBC_8, formats); + } +} + +static void reset_resolution(struct mvx_session *session, + unsigned int *width, unsigned int *height, enum mvx_direction dir) +{ + if (dir == MVX_DIR_INPUT && *width >= 0 && *height >= 0 && + session->orig_width < 144 && session->orig_height < 144) { + session->orig_width = *width; + session->orig_height = *height; + } else if (session->orig_width >= 144 && session->orig_height >= 144) { + *width = session->orig_width; + *height = session->orig_height; + } +} 
+ +static int flush_and_qbufs(struct mvx_session *session, enum mvx_direction dir) +{ + int ret = 0; + + if (session->port[MVX_DIR_OUTPUT].received_seq_param || + session->fw_state == MVX_FW_STATE_STOPPED) { + session->port[MVX_DIR_OUTPUT].received_seq_param = false; + ret = fw_flush(session, dir); + if (ret != 0) + return ret; + ret = wait_pending(session); + if (ret != 0) + return ret; + } + ret = queue_pending_buffers(session, dir); + if (ret != 0) + return ret; + + return switch_in(session); +} + +static void flush_and_qbufs_work(struct work_struct *work) +{ + struct mvx_session *session = + container_of(work, struct mvx_session, flush_and_queue_work); + int ret; + + ret = mutex_lock_interruptible(session->isession.mutex); + if (ret != 0) + return; + flush_and_qbufs(session, MVX_DIR_OUTPUT); + mutex_unlock(session->isession.mutex); +} + +/**************************************************************************** + * Exported functions + ****************************************************************************/ + +int mvx_session_construct(struct mvx_session *session, + struct device *dev, + struct mvx_client_ops *client_ops, + struct mvx_fw_cache *cache, + struct mutex *mutex, + void (*destructor)(struct mvx_session *session), + void (*event)(struct mvx_session *session, + enum mvx_session_event event, + void *arg), + struct dentry *dentry, + bool is_encoder) +{ + int i; + int ret; + + if (event == NULL || destructor == NULL) + return -EINVAL; + + memset(session, 0, sizeof(*session)); + memset(session->setting_stride, 0, sizeof(session->setting_stride)); + memset(session->port[MVX_DIR_INPUT].display_size, 0, sizeof(session->port[MVX_DIR_INPUT].display_size)); + memset(session->port[MVX_DIR_OUTPUT].display_size, 0, sizeof(session->port[MVX_DIR_OUTPUT].display_size)); + session->dev = dev; + session->client_ops = client_ops; + session->cache = cache; + kref_init(&session->isession.kref); + session->isession.release = session_destructor; + 
session->isession.mutex = mutex; + session->destructor = destructor; + session->event = event; + session->fw_event.fw_bin_ready = fw_bin_ready; + session->fw_event.arg = session; + session->fw_state = MVX_FW_STATE_STOPPED; + init_waitqueue_head(&session->waitq); + session->dentry = dentry; + session->port[MVX_DIR_INPUT].buffer_min = 1; + session->port[MVX_DIR_OUTPUT].buffer_min = 1; + session->port[MVX_DIR_OUTPUT].buffer_max = VIDEO_MAX_FRAME; + session->port[MVX_DIR_INPUT].buffer_allocated = 0;//1; + session->port[MVX_DIR_OUTPUT].buffer_allocated = 0;//1; + session->port[MVX_DIR_INPUT].scaling_shift = 0; + session->port[MVX_DIR_OUTPUT].scaling_shift = 0; + session->port[MVX_DIR_INPUT].afbc_alloc_bytes = 0; + session->port[MVX_DIR_OUTPUT].afbc_alloc_bytes = 0; + session->port[MVX_DIR_INPUT].afbc_width = 0; + session->port[MVX_DIR_OUTPUT].afbc_width = 0; + session->port[MVX_DIR_INPUT].afbc_alloc_bytes_downscaled = 0; + session->port[MVX_DIR_OUTPUT].afbc_alloc_bytes_downscaled = 0; + session->port[MVX_DIR_INPUT].afbc_width_in_superblocks_downscaled = 0; + session->port[MVX_DIR_OUTPUT].afbc_width_in_superblocks_downscaled = 0; + session->port[MVX_DIR_INPUT].last_interlaced_from_sps = 0; + session->port[MVX_DIR_OUTPUT].last_interlaced_from_sps = 0; + session->stream_escaping = MVX_TRI_UNSET; + session->ignore_stream_headers = MVX_TRI_UNSET; + session->frame_reordering = MVX_TRI_UNSET; + session->constr_ipred = MVX_TRI_UNSET; + session->entropy_sync = MVX_TRI_UNSET; + session->temporal_mvp = MVX_TRI_UNSET; + session->resync_interval = -1; + session->port[MVX_DIR_OUTPUT].roi_config_num = 0; + session->port[MVX_DIR_INPUT].roi_config_num = 0; + session->port[MVX_DIR_OUTPUT].qp_num = 0; + session->port[MVX_DIR_INPUT].qp_num = 0; + session->crop_left = 0; + session->crop_right = 0; + session->crop_top = 0; + session->crop_bottom = 0; + session->dsl_ratio.hor = 1; + session->dsl_ratio.ver = 1; + session->dsl_pos_mode = -1;//disable by default + session->rc_bit_i_mode = 0; + 
session->rc_bit_i_ratio = 0; + session->mulit_sps_pps = 0; + session->enable_visual = 0; + session->forced_uv_value =0x400; + session->dsl_interp_mode =0xffff; + session->color_conv_mode = MVX_YUV_TO_RGB_MODE_BT601_LIMT; + session->use_cust_color_conv_coef =false; + session->use_cust_rgb_to_yuv_mode = MVX_CUST_YUV2RGB_MODE_UNSET; + session->dual_afbc_downscaled=0; + session->job_frames = 1; + session->fps_n = 60; // 60fps by default + session->fps_d = 1; + session->coded_chroma_format = MVX_CHROMA_FORMAT_422; + session->is_encoder = is_encoder; + session->enable_buffer_dump = enable_buffer_dump; + session->priority = V4L2_SESSION_PRIORITY_DEFAULT; + session->intra_ipenalty_angular = MVX_INVALID_VAL; + session->intra_ipenalty_planar = MVX_INVALID_VAL; + session->intra_ipenalty_dc = MVX_INVALID_VAL; + session->inter_ipenalty_angular = MVX_INVALID_VAL; + session->inter_ipenalty_planar = MVX_INVALID_VAL; + session->inter_ipenalty_dc = MVX_INVALID_VAL; + + if (session->enable_buffer_dump == true) { + char input_file[64]; + scnprintf(input_file, sizeof(input_file) - 1, + "/data/input_session_%p.bin", session); + session->data_fp = filp_open(input_file, O_RDWR | O_CREAT, 0644); + if (IS_ERR(session->data_fp)) { + MVX_SESSION_WARN(session, "warning: open dump file(%s) fail", input_file); + session->data_fp = NULL; + } + } + + memset(&session->color_conv_coef,0,sizeof(struct mvx_color_conv_coef)); + memset(&session->crop,0,sizeof(struct mvx_crop_cfg)); + memset(&session->seamless_target,0,sizeof(struct mvx_seamless_target)); + memset(&session->lambda_scale, 0, sizeof(session->lambda_scale)); + init_completion(&session->fw_loaded); + + ret = mvx_mmu_construct(&session->mmu, session->dev); + if (ret != 0) + return ret; + + for (i = 0; i < MVX_DIR_MAX; i++) { + INIT_LIST_HEAD(&session->port[i].buffer_queue); + INIT_LIST_HEAD(&session->port[i].buffer_done_queue); + } + +#if KERNEL_VERSION(4, 14, 0) <= LINUX_VERSION_CODE + timer_setup(&session->watchdog_timer, watchdog_timeout, 
0); +#else + setup_timer(&session->watchdog_timer, watchdog_timeout_legacy, + (uintptr_t)&session->watchdog_timer); +#endif + INIT_WORK(&session->watchdog_work, watchdog_work); + INIT_WORK(&session->flush_and_queue_work, flush_and_qbufs_work); + + if (mvx_log_perf.enabled & MVX_LOG_PERF_FPS) { + session->ts = vmalloc(sizeof(struct timespec64) * MAX_RT_FPS_FRAMES); + mutex_init(&session->fps_mutex); + } + + return 0; +} + +void mvx_session_destruct(struct mvx_session *session) +{ + /* Destruct the session object. */ + + MVX_SESSION_INFO(session, "Destroy session."); + + // dump input IVF header into a new file in the same dir with bitstream file + if (session->data_fp != NULL) { + filp_close(session->data_fp, NULL); + dump_ivf_header(session); + } + release_fw_bin(session); + mvx_mmu_destruct(&session->mmu); + if (session->ts) + vfree(session->ts); +} + +void mvx_session_get(struct mvx_session *session) +{ + kref_get(&session->isession.kref); +} + +int mvx_session_put(struct mvx_session *session) +{ + return kref_put(&session->isession.kref, + session->isession.release); +} + +unsigned int mvx_session_ref_read(struct mvx_session *session) +{ + return kref_read(&session->isession.kref); +} + +struct mvx_session_format_map *mvx_session_enum_format(struct mvx_session *session, + enum mvx_direction dir, + int index) +{ + struct mvx_session_format_map *mvx_fmts = NULL; + bool is_encoder = session->is_encoder; + uint64_t formats; + int idx; + int i; + int array_size; + + mvx_session_get_formats(session, dir, &formats); + if (!is_encoder && dir == MVX_DIR_OUTPUT) + filter_decode_output_formats(session->port[MVX_DIR_INPUT].format, + max(session->bitdepth_luma, session->bitdepth_chroma), + session->coded_chroma_format, &formats); + + if ((is_encoder && dir == MVX_DIR_OUTPUT) || + (!is_encoder && dir == MVX_DIR_INPUT)) { + mvx_fmts = mvx_compressed_fmts; + array_size = ARRAY_SIZE(mvx_compressed_fmts); + } else { + mvx_fmts = mvx_raw_fmts; + array_size = 
ARRAY_SIZE(mvx_raw_fmts); + } + + for (i = 0, idx = 0; i < array_size; i++) { + if (mvx_test_bit(mvx_fmts[i].format, &formats)) { + if (index == idx) + return &mvx_fmts[i]; + + idx++; + } + } + + return NULL; +} + +void mvx_session_get_formats(struct mvx_session *session, + enum mvx_direction dir, + uint64_t *formats) +{ + uint64_t fw_formats; + + session->client_ops->get_formats(session->client_ops, dir, formats); + mvx_fw_cache_get_formats(session->cache, dir, &fw_formats); + mvx_clear_bit(MVX_FORMAT_YUV420_Y0L2, &fw_formats); + mvx_clear_bit(MVX_FORMAT_YUV420_AQB1, &fw_formats); + mvx_clear_bit(MVX_FORMAT_YUV444, &fw_formats); + mvx_clear_bit(MVX_FORMAT_YUV444_10, &fw_formats); + mvx_clear_bit(MVX_FORMAT_YUV420_2P_10, &fw_formats); + mvx_clear_bit(MVX_FORMAT_YUV422_1P_10, &fw_formats); + mvx_clear_bit(MVX_FORMAT_YUV420_I420_10, &fw_formats); + mvx_clear_bit(MVX_FORMAT_BGR_888, &fw_formats); + mvx_clear_bit(MVX_FORMAT_ARGB_1555, &fw_formats); + mvx_clear_bit(MVX_FORMAT_ARGB_4444, &fw_formats); + mvx_clear_bit(MVX_FORMAT_RGB_565, &fw_formats); + mvx_clear_bit(MVX_FORMAT_VC1, &fw_formats); + mvx_clear_bit(MVX_FORMAT_RV, &fw_formats); + mvx_clear_bit(MVX_FORMAT_AVS, &fw_formats); + mvx_clear_bit(MVX_FORMAT_AVS2, &fw_formats); + *formats &= fw_formats; +} + +int mvx_session_try_format(struct mvx_session *session, + enum mvx_direction dir, + enum mvx_format format, + unsigned int *width, + unsigned int *height, + uint8_t *nplanes, + unsigned int *stride, + unsigned int *size, + bool *interlaced) +{ + return try_format(session, dir, format, width, height, nplanes, + stride, size, interlaced); +} + +int mvx_session_set_format(struct mvx_session *session, + enum mvx_direction dir, + enum mvx_format format, + unsigned int pixelformat, + unsigned int *width, + unsigned int *height, + uint8_t *nplanes, + unsigned int *stride, + unsigned int *size, + bool *interlaced) +{ + struct mvx_session_port *port = &session->port[dir]; + int ret; + + if (session->error != 0) + return 
session->error; + + if (mvx_is_afbc(format) != false && + session->coded_chroma_format == MVX_CHROMA_FORMAT_MONO && + (format != MVX_FORMAT_Y_AFBC_8 && format != MVX_FORMAT_Y_AFBC_10)) + return -EINVAL; + + if (session->port[dir].stream_on != false) + return -EBUSY; + + reset_resolution(session, width, height, dir); + + ret = try_format(session, dir, format, width, height, nplanes, + stride, size, interlaced); + if (ret != 0) + return ret; + + /* + * If the bitstream format changes, then the firmware binary must be + * released. + */ + if (mvx_is_bitstream(port->format) != false && + format != port->format) { + if (IS_ERR(session->fw_bin) != false) { + MVX_SESSION_WARN(session, + "Can't set format when firmware binary is pending. dir=%d.", + dir); + return -EINVAL; + } + + release_fw_bin(session); + } + + /* Update port settings. */ + port->format = format; + port->old_format = format; + port->pixelformat = pixelformat; + port->old_pixelformat = pixelformat; + port->width = *width; + port->height = *height; + port->nplanes = *nplanes; + port->interlaced = *interlaced; + memcpy(port->stride, stride, sizeof(*stride) * MVX_BUFFER_NPLANES); + memcpy(port->size, size, sizeof(*size) * MVX_BUFFER_NPLANES); + + /* TODO AFBC width will have to be provided by user space. */ + if (dir == MVX_DIR_INPUT) + port->afbc_width = DIV_ROUND_UP(*width, 16); + + /* Input dimensions dictate output dimensions. 
*/ + if (dir == MVX_DIR_INPUT) { + struct mvx_session_port *p = &session->port[MVX_DIR_OUTPUT]; + (void)try_format(session, MVX_DIR_OUTPUT, p->format, &p->width, + &p->height, &p->nplanes, p->stride, p->size, + &p->interlaced); + } + + if (mvx_is_afbc(format) != false) { + if (dir == MVX_DIR_OUTPUT && + session->port[dir].afbc_width >= AFBC_MIN_WIDTH_IN_SUPERBLOCKS) { + if (session->dual_afbc_downscaled) + *width = session->port[dir].afbc_width_in_superblocks_downscaled << AFBC_SUPERBLOCK_SHIFT; + else + *width = session->port[dir].afbc_width << AFBC_SUPERBLOCK_SHIFT; + } else { + *width += session->port[dir].afbc_crop_left >> session->dual_afbc_downscaled; + } + + *height += session->port[dir].afbc_crop_top >> session->dual_afbc_downscaled; + } + + if (!session->is_encoder && dir == MVX_DIR_OUTPUT && + format <= MVX_FORMAT_BITSTREAM_LAST) { + MVX_SESSION_WARN(session, + "Decode session, compressed format %d is not supported for output.", + session->port[MVX_DIR_OUTPUT].format); + return -EINVAL; + } + + if (pixelformat == V4L2_PIX_FMT_H264_NO_SC && + session->nalu_format == MVX_NALU_FORMAT_UNDEFINED) + mvx_session_set_nalu_format(session, + MVX_NALU_FORMAT_FOUR_BYTE_LENGTH_FIELD); + + return 0; +} + +int mvx_session_qbuf(struct mvx_session *session, + enum mvx_direction dir, + struct mvx_buffer *buf) +{ + int ret; + if (session->error != 0) + return session->error; + + buf->in_flags = buf->flags; + + if (is_fw_loaded(session) == false || + session->port[dir].is_flushing != false || + (session->port[dir].in_port_changing == true && session->port[dir].pending_source_change_event == false)) { + list_add_tail(&buf->head, &session->port[dir].buffer_queue); + return 0; + } + + ret = queue_buffer(session, dir, buf); + if (ret != 0) + return ret; + + ret = switch_in(session); + if (ret != 0) + return ret; + + return 0; +} + +int mvx_session_send_eos(struct mvx_session *session) +{ + struct mvx_session_port *port = &session->port[MVX_DIR_OUTPUT]; + struct mvx_buffer *buf; 
+ + if (session->error != 0) + return session->error; + + if (is_fw_loaded(session) != false) + return fw_eos(session); + + if (list_empty(&port->buffer_queue) != false) { + MVX_SESSION_WARN(session, + "Unable to signal EOS. Output buffer queue empty."); + return 0; + } + + buf = list_first_entry(&port->buffer_queue, struct mvx_buffer, head); + list_del(&buf->head); + + mvx_buffer_clear(buf); + buf->flags |= MVX_BUFFER_EOS; + + session->event(session, MVX_SESSION_EVENT_BUFFER, buf); + + return 0; +} + +int mvx_session_streamon(struct mvx_session *session, + enum mvx_direction dir) +{ + enum mvx_direction bdir; + struct mvx_hw_ver hw_ver; + enum mvx_direction i; + int ret; + + MVX_SESSION_INFO(session, "Stream on. dir=%u.", dir); + + /* Verify that we don't enable an already activated port. */ + if (session->port[dir].stream_on != false) + return 0; + + session->port[dir].stream_on = true; + + /* Check that both ports are stream on. */ + if (!is_stream_on(session)) + return 0; + + /* Verify that a firmware binary load is not in progress. */ + if (IS_ERR(session->fw_bin)) { + /* Unlock during wait time as the same mutex may be locked in fw_bin_ready() */ + mutex_unlock(session->isession.mutex); + if (!wait_for_completion_timeout(&session->fw_loaded, msecs_to_jiffies(3000))) { + mutex_lock(session->isession.mutex); + ret = IS_ERR(session->fw_bin); + MVX_SESSION_WARN(session, "Wait for firmware loading timeout."); + goto disable_port; + } + mutex_lock(session->isession.mutex); + } + + /* + * If capture port is streamed on without stream-off during port changing, + * need to send flush message to VPU here to finish port changing. + * Otherwise, VPU firmware will get hang. 
+ */ + if (dir == MVX_DIR_OUTPUT && session->port[dir].in_port_changing) { + return_done_buffers(session, dir); + flush_and_qbufs(session, dir); + session->port[dir].in_port_changing = false; + session->port[dir].hold_off_buffer_done = false; + } + + /* If a firmware binary is already loaded, then we are done. */ + if (session->fw_bin != NULL) { + ret = wait_pending(session); + if (ret != 0) + goto disable_port; + + ret = fw_state_change(session, MVX_FW_STATE_RUNNING); + if (ret != 0) + goto disable_port; + + return 0; + } + + bdir = get_bitstream_port(session); + if (bdir >= MVX_DIR_MAX) { + MVX_SESSION_WARN(session, + "Session only support decoding and encoding, but not transcoding. input_format=%u, output_format=%u.", + session->port[MVX_DIR_INPUT].format, + session->port[MVX_DIR_OUTPUT].format); + ret = -EINVAL; + goto disable_port; + } + + /* Verify that client can handle input and output formats. */ + for (i = MVX_DIR_INPUT; i < MVX_DIR_MAX; i++) { + uint64_t formats; + + session->client_ops->get_formats(session->client_ops, + i, &formats); + + if (!mvx_test_bit(session->port[i].format, &formats)) { + MVX_SESSION_WARN(session, + "Client cannot support requested formats. input_format=%u, output_format=%u.", + session->port[MVX_DIR_INPUT].format, + session->port[MVX_DIR_OUTPUT].format); + ret = -ENODEV; + goto disable_port; + } + } + + /* Increment session reference count and flag fw bin as pending. */ + mvx_session_get(session); + session->fw_bin = ERR_PTR(-EINPROGRESS); + session->client_ops->get_hw_ver(session->client_ops, &hw_ver); + + /* Requesting firmware binary to be loaded. 
*/ + ret = mvx_fw_cache_get(session->cache, session->port[bdir].format, + bdir, &session->fw_event, &hw_ver, + session->isession.securevideo); + if (ret != 0) { + session->port[dir].stream_on = false; + session->fw_bin = NULL; + complete(&session->fw_loaded); + mvx_session_put(session); + return ret; + } + + return 0; + +disable_port: + session->port[dir].stream_on = false; + + return ret; +} + +int mvx_session_streamoff(struct mvx_session *session, + enum mvx_direction dir) +{ + struct mvx_session_port *port = &session->port[dir]; + struct mvx_buffer *buf; + struct mvx_buffer *tmp; + int ret = 0; + + MVX_SESSION_INFO(session, "Stream off. dir=%u.", dir); + + port->stream_on = false; + + if (is_fw_loaded(session) != false) { + /* + * Flush the ports if at least one buffer has been queued + * since last flush. + */ + if (port->flushed == false && port->is_flushing == false) { + ret = wait_pending(session); + if (ret != 0) + goto dequeue_buffers; + + if (!(dir == MVX_DIR_OUTPUT && port->in_port_changing == true) || + port->received_seq_param == false) { + ret = fw_state_change(session, MVX_FW_STATE_STOPPED); + if (ret != 0) + goto dequeue_buffers; + } + + ret = fw_flush(session, dir); + if (ret != 0) + goto dequeue_buffers; + ret = wait_pending(session); + if (ret != 0) + goto dequeue_buffers; + + if (dir == MVX_DIR_OUTPUT) { + port->hold_off_buffer_done = false; + port->in_port_changing = false; + } + } + } +dequeue_buffers: + /* Return buffers in pending queue. 
*/ + list_for_each_entry_safe(buf, tmp, &port->buffer_queue, head) { + list_del(&buf->head); + buf->flags |= MVX_BUFFER_CORRUPT; + session->event(session, MVX_SESSION_EVENT_BUFFER, buf); + } + + return 0; +} + +int mvx_session_start(struct mvx_session *session) +{ + if (session->port[MVX_DIR_OUTPUT].in_port_changing == true) { + session->port[MVX_DIR_OUTPUT].in_port_changing = false; + session->port[MVX_DIR_OUTPUT].hold_off_buffer_done = false; + return_done_buffers(session, MVX_DIR_OUTPUT); + return flush_and_qbufs(session, MVX_DIR_OUTPUT); + } + + return 0; +} + +static void mvx_handle_alloc_param (struct mvx_session *session, struct mvx_fw_msg *msg) +{ + struct mvx_session_port *input = &session->port[MVX_DIR_INPUT]; + struct mvx_session_port *output = &session->port[MVX_DIR_OUTPUT]; + + output->in_port_changing = msg->alloc_param.width != session->orig_width; + output->in_port_changing |= msg->alloc_param.height != session->orig_height; + if (mvx_is_afbc(output->format)) + output->in_port_changing |= msg->alloc_param.afbc_alloc_bytes != output->afbc_alloc_bytes; + if (session->dual_afbc_downscaled && output->interlaced== 0) + output->in_port_changing |= + msg->alloc_param.afbc_alloc_bytes_downscaled != output->afbc_alloc_bytes_downscaled; + if (output->in_port_changing) { + output->received_seq_param = false; + output->pending_source_change_event = true; + } + + session->orig_width = msg->alloc_param.width; + session->orig_height = msg->alloc_param.height; + /* Update input port. */ + input->width = msg->alloc_param.width; + input->height = msg->alloc_param.height; + + try_format(session, MVX_DIR_INPUT, input->format, &input->width, + &input->height, &input->nplanes, input->stride, + input->size, &input->interlaced); + + /* + * Update output port. Set number of valid planes to 0 to force + * stride to be recalculated. 
+ */ + + output->nplanes = 0; + if (input->format == MVX_FORMAT_AV1 && + mvx_is_afbc(output->format) != false) { + output->afbc_alloc_bytes = + max(output->afbc_alloc_bytes, msg->alloc_param.afbc_alloc_bytes); + output->afbc_width = + max(output->afbc_width, msg->alloc_param.afbc_width); + } else { + output->afbc_alloc_bytes = msg->alloc_param.afbc_alloc_bytes; + output->afbc_width = msg->alloc_param.afbc_width; + } + + if(session->dual_afbc_downscaled && output->interlaced== 0) { + output->afbc_alloc_bytes_downscaled = msg->alloc_param.afbc_alloc_bytes_downscaled; + output->afbc_width_in_superblocks_downscaled = msg->alloc_param.afbc_width_in_superblocks_downscaled; + } else { + output->afbc_alloc_bytes_downscaled = 0; + output->afbc_width_in_superblocks_downscaled = 0; + } + + output->last_interlaced_from_sps =output->interlaced; + output->afbc_crop_left = msg->alloc_param.cropx; + output->afbc_crop_top = msg->alloc_param.cropy; + + try_format(session, MVX_DIR_OUTPUT, output->format, + &output->new_width, &output->new_height, &output->nplanes, + output->stride, output->size, + &output->interlaced); + + session->client_ops->update_load(session->csession); + + MVX_SESSION_INFO(session, + "Firmware rsp: Alloc param. 
width=%u, height=%u, nplanes=%u, size=[%u, %u, %u], stride=[%u, %u, %u], interlaced=%d.", + msg->alloc_param.width, + msg->alloc_param.height, + output->nplanes, + output->size[0], + output->size[1], + output->size[2], + output->stride[0], + output->stride[1], + output->stride[2], + output->interlaced); +} + +static void mvx_handle_buffer_general(struct mvx_session *session, struct mvx_fw_msg *msg) +{ + struct mvx_buffer *buf = msg->buf; + + session->port[buf->dir].buffer_count--; + session->event(session, MVX_SESSION_EVENT_BUFFER, buf); +} + +static void mvx_handle_buffer(struct mvx_session *session, struct mvx_fw_msg *msg) +{ + uint32_t i; + uint32_t stride[MVX_BUFFER_NPLANES]; + bool send_buffer_event = true; + struct mvx_buffer *buf = msg->buf; + struct mvx_session_port *output = &session->port[MVX_DIR_OUTPUT]; + + /* + * There is no point to flush or invalidate input buffer + * after it was returned from the HW. + */ + if(buf->dir == MVX_DIR_OUTPUT && mvx_is_frame(buf->format)) { + if (!(buf->flags & MVX_BUFFER_FRAME_PRESENT)) { + int i=0; + for(i=0;inplanes;i++) { + if (output->size[i] > mvx_buffer_size(buf, i) || + session->port[buf->dir].buffer_allocated < session->port[buf->dir].buffer_min) + buf->flags |= MVX_BUFFER_FRAME_NEED_REALLOC; + } + } + } + + if (buf->dir == MVX_DIR_OUTPUT && session->port[MVX_DIR_INPUT].format == MVX_FORMAT_AV1 && + (buf->width != output->width || buf->height != output->height) && + mvx_is_afbc(session->port[MVX_DIR_OUTPUT].format) == false) { + uint32_t i; + uint32_t filled[MVX_BUFFER_NPLANES]; + uint32_t stride[MVX_BUFFER_NPLANES]; + + output->nplanes = 0; + memset(stride, 0, sizeof(stride)); + mvx_buffer_frame_dim(output->format, buf->width, buf->height, &output->nplanes, + stride, filled, session->setting_stride); + for (i = 0; i < buf->nplanes; i++) + (void)mvx_buffer_filled_set(buf, i, filled[i], 0); + + MVX_SESSION_INFO(session, + "Firmware rsp: Buffer. 
dir=%u, len=[%u, %u, %u], flags=0x%08x, eos=%u", + buf->dir, + filled[0], + filled[1], + filled[2], + buf->flags, + (buf->flags & MVX_BUFFER_EOS) != 0); + } + + session->port[buf->dir].buffer_count--; + + MVX_SESSION_INFO(session, + "Firmware rsp: Buffer. dir=%u, len=[%u, %u, %u], flags=0x%08x, eos=%u", + buf->dir, + buf->planes[0].filled, + buf->planes[1].filled, + buf->planes[2].filled, + buf->flags, + (buf->flags & MVX_BUFFER_EOS) != 0); + + // if (buf->dir == MVX_DIR_OUTPUT && !session->isession.securevideo) + // mvx_buffer_synch(buf, DMA_FROM_DEVICE); + + /* + * During port changing, driver doesn't dequeue the output frame buffer + * to client but add it to pending queue. If client doesn't re-allocate + * and intends to re-use the buffer, it should issue V4L2_DEC_CMD_START. + * And the buffer will be enqueued to FW in V4L2_DEC_CMD_START handler. + * + * For empty buffer with flags = 0, it doesn't contain valid information + * to client, so just put it to buffer_queue which can be re-queued to + * VPU. Also, some clients, like gstreamer v4l2 plug-in, always treat + * empty buffer as an eos signal, which is not expected when buffer flags + * doesn't have EOS. So most likely it should be re-queued silently. 
+ */ + if (buf->dir == MVX_DIR_OUTPUT && mvx_is_frame(buf->format)) { + output->frames_since_last_buffer_rejected++; + if (output->frames_since_last_buffer_rejected > MVX_DECODE_MAX_REJECTED_BUFFER_INTERVAL) + output->buffer_rejected_flag = false; + if (buf->planes[0].filled == 0 && + (buf->flags == 0 || (buf->flags & MVX_BUFFER_FRAME_NEED_REALLOC) != 0)) { + list_add_tail(&buf->head, &output->buffer_queue); + send_buffer_event = false; + } else if (output->in_port_changing == true) { + output->nplanes = 0; + for (i = 0; i < buf->nplanes; i++) + stride[i] = buf->planes[i].stride; + // update output buffer size for dump when resolution changed + mvx_buffer_frame_dim(output->format, buf->width, buf->height, &output->nplanes, + stride, output->size, session->setting_stride); + if (buf->width != output->width || buf->height != output->height) { + // When resolution changed only in ALLOC_PARAM msg + // Hold these output buffers in port->buffer_done_queue + // Then send MVX_SESSION_EVENT_PORT_CHANGED to client + output->hold_off_buffer_done = true; + session->event(session, MVX_SESSION_EVENT_PORT_CHANGED, (void *)MVX_DIR_OUTPUT); + } + if (output->hold_off_buffer_done) { + list_add_tail(&buf->head, &output->buffer_done_queue); + send_buffer_event = false; + } + } + + if (buf->width > 0 && buf->height > 0 && (buf->flags & MVX_BUFFER_FRAME_PRESENT)) { + for (i = 0; i < buf->nplanes; i++) + (void)mvx_buffer_filled_set(buf, i, output->size[i], 0); + } + } + if (send_buffer_event) + session->event(session, MVX_SESSION_EVENT_BUFFER, buf); + + /* + * If buffer is too small, fw will send ALLOC_PARAM msg first, so + * flag in_port_changing should be set. And driver should send PORT_CHANGED + * event to client to request buffer re-allocation. + */ + if (buf->flags & MVX_BUFFER_REJECTED && output->in_port_changing == true) { + /* + * some rejected buffer may be last buffer with previous resolution + * or ineffective. 
+ * so we judge non-key frame resolution change by both rejected flag + * and variable width or height. + */ + if (buf->width != output->width || buf->height != output->height) { + if (!output->buffer_rejected_flag ) { + output->buffer_rejected_flag = true; + output->frames_since_last_buffer_rejected = 0; + output->last_buffer_width = output->width; + output->last_buffer_height = output->height; + session->event(session, MVX_SESSION_EVENT_PORT_CHANGED, (void *)MVX_DIR_OUTPUT); + } else { + if ( output->last_buffer_width == output->width && output->last_buffer_height == output->height) { + output->buffer_rejected_flag = true; + output->frames_since_last_buffer_rejected = 0; + } else { + send_event_error(session, -EINVAL); + return; + } + } + } + } + + if ((mvx_log_perf.enabled & MVX_LOG_PERF_FPS) && session->ts && + buf->planes[0].filled > 0 && buf->dir == MVX_DIR_OUTPUT) { + if ((session->is_encoder && (buf->flags & MVX_BUFFER_EOF)) || + !session->is_encoder) { + struct timespec64 *ts = session->ts + session->ts_index; + mutex_lock(&session->fps_mutex); + ktime_get_real_ts64(ts); + session->ts_index = (session->ts_index + 1) & (MAX_RT_FPS_FRAMES - 1); + session->frame_count++; + if (session->frame_count == FPS_SKIP_FRAMES) { + session->start.tv_sec = ts->tv_sec; + session->start.tv_nsec = ts->tv_nsec; + } + mutex_unlock(&session->fps_mutex); + } + } +} + +static void mvx_handle_display_size(struct mvx_session *session, struct mvx_fw_msg *msg) +{ + uint32_t stride[MVX_BUFFER_NPLANES]; + uint32_t i; + struct mvx_session_port *output = &session->port[MVX_DIR_OUTPUT]; + + if (session->port[MVX_DIR_INPUT].format < MVX_FORMAT_BITSTREAM_FIRST || + session->port[MVX_DIR_INPUT].format > MVX_FORMAT_BITSTREAM_LAST) + return; + + if (mvx_is_afbc(output->format) == false) { + output->nplanes = 0; + memset(stride, 0, sizeof(stride)); + mvx_buffer_frame_dim(output->format, msg->disp_size.display_width, msg->disp_size.display_height, + &output->nplanes, stride, 
output->display_size, session->setting_stride); + for (i = 0; i < MVX_BUFFER_NPLANES; i++) + session->setting_stride[i] = max_t(unsigned int, session->setting_stride[i], stride[i]); + + MVX_SESSION_INFO(session, "Firmware rsp: display size. len=[%u, %u, %u]", + output->display_size[0], + output->display_size[1], + output->display_size[2]); + } else if (session->port[MVX_DIR_INPUT].format == MVX_FORMAT_AV1) { + //handle for av1 afbc for now + int s1 = calc_afbc_size(session, output->format, msg->disp_size.display_width, + msg->disp_size.display_height, true, true, false, + false); + int s2 = calc_afbc_size(session, output->format, msg->disp_size.display_width, + msg->disp_size.display_height, true, true, true, + false); + int s = max_t(unsigned int, s1, s2); + output->afbc_alloc_bytes = max_t(unsigned int, output->afbc_alloc_bytes, s); + } + output->display_size_format = output->format; +} + +static void mvx_handle_color_desc(struct mvx_session *session, struct mvx_fw_msg *msg) +{ + MVX_SESSION_INFO(session, + "Firmware rsp: Color desc."); + session->color_desc = msg->color_desc; + session->event(session, MVX_SESSION_EVENT_COLOR_DESC, NULL); +} + +static void mvx_handle_error(struct mvx_session *session, struct mvx_fw_msg *msg) +{ + MVX_SESSION_WARN(session, "Firmware rsp: Error. code=%u, message=%s.", + msg->error.error_code, msg->error.message); + + /* + * Release the dev session. It will prevent a dead session from + * blocking the scheduler. + */ + watchdog_stop(session); + /* + * Terminate this session so other session can be switched in. + * Client might not respond to the error and terminate session properly. + */ + if (session->switched_in) + session->client_ops->terminate(session->csession); + /* + * Firmware could be hung or in unexpected state, and the session should exit now. + * Just switch out the session so suspending can move forward. 
+ */ + switch_out_rsp(session); + send_event_error(session, -EINVAL); +} + +static void mvx_handle_flush(struct mvx_session *session, struct mvx_fw_msg *msg) +{ + MVX_SESSION_INFO(session, "Firmware rsp: Flushed. dir=%d.", + msg->flush.dir); + session->port[msg->flush.dir].is_flushing = false; + session->port[msg->flush.dir].flushed = true; +} + +static void mvx_handle_idle(struct mvx_session *session, struct mvx_fw_msg *msg) +{ + struct mvx_fw_msg msg_ack = { .code = MVX_FW_CODE_IDLE_ACK }; + int ret; + + MVX_SESSION_INFO(session, "Firmware rsp: Idle."); + + session->idle_count++; + + if (session->idle_count == 2) + fw_switch_out(session); + + ret = session->fw.ops.put_message(&session->fw, &msg_ack); + if (ret == 0) + ret = send_irq(session); + if (ret != 0) + send_event_error(session, ret); +} + +static void mvx_handle_job(struct mvx_session *session, struct mvx_fw_msg *msg) +{ + MVX_SESSION_INFO(session, "Firmware rsp: Job."); + session->client_ops->reset_priority(session->csession); + (void)fw_job(session, session->job_frames); +} + +static void mvx_handle_pong(struct mvx_session *session, struct mvx_fw_msg *msg) +{ + MVX_SESSION_INFO(session, "Firmware rsp: Pong."); +} + +static void mvx_update_bitdepth(struct mvx_session *session, uint32_t bitdepth) +{ + struct mvx_session_port *p = &session->port[MVX_DIR_OUTPUT]; + struct mvx_session_format_map *map; + + if (!mvx_is_frame(p->format)) + return; + + map = mvx_session_find_format(p->pixelformat); + if (IS_ERR(map)) { + MVX_SESSION_ERR(session, "Find pixelformat(0x%x) fail", p->pixelformat); + return; + } + + p->old_format = p->format; + p->old_pixelformat = p->pixelformat; + + if (bitdepth == map->bitdepth) { + return; + } else if (bitdepth > map->bitdepth) { + p->format = map->to10_format; + p->pixelformat = map->to10_pixelformat; + } else { + p->format = map->to8_format; + p->pixelformat = map->to8_pixelformat; + } + + MVX_SESSION_INFO(session, "Stream's bitdepth(%d) is different from setting(%d), update 
format from %x to %x", + bitdepth, map->bitdepth, map->format, p->format); + + try_format(session, MVX_DIR_OUTPUT, p->format, + &p->width, &p->height, &p->nplanes, + p->stride, p->size, + &p->interlaced); + + return; +} + +static void mvx_handle_seq_param(struct mvx_session *session, struct mvx_fw_msg *msg) +{ + struct mvx_session_port *p = &session->port[MVX_DIR_OUTPUT]; + int ret; + + MVX_SESSION_INFO(session, + "Firmware rsp: Seq param. planar={buffers_min=%u}, afbc={buffers_min=%u}, interlaced=%d.", + msg->seq_param.planar.buffers_min, + msg->seq_param.afbc.buffers_min, + p->interlaced); + + if (mvx_is_afbc(p->format) != false) { + p->in_port_changing |= msg->seq_param.afbc.buffers_min != p->buffer_min; + p->buffer_min = msg->seq_param.afbc.buffers_min; + //for MVX_FW_CODE_ALLOC_PARAM is send before MVX_FW_CODE_SEQ_PARAM msg + if(p->last_interlaced_from_sps != p->interlaced) { + //auto detected interlace streams from sps,need update try_format + if(p->interlaced) { + p->afbc_alloc_bytes_downscaled = 0; + p->afbc_width_in_superblocks_downscaled = 0; + if(session->dual_afbc_downscaled) { + session->dual_afbc_downscaled =0; + MVX_SESSION_INFO(session, + "detect interlaced changed %d->%d from sps for afbc out. disbale dual_afbc_downscaled", + p->last_interlaced_from_sps, + p->interlaced); + } + } + + try_format(session, MVX_DIR_OUTPUT, p->format, + &p->width, &p->height, &p->nplanes, + p->stride, p->size, + &p->interlaced); + MVX_SESSION_INFO(session, + "detect interlaced changed %d->%d from sps for afbc out. call try_format", + p->last_interlaced_from_sps, + p->interlaced); + + p->last_interlaced_from_sps =p->interlaced; + } + } else { + p->in_port_changing |= msg->seq_param.planar.buffers_min != p->buffer_min; + p->buffer_min = session->port[MVX_DIR_INPUT].format == MVX_FORMAT_AV1 ? + MVX_DECODE_AV1_PLANNAR_BUFFER_NUM : MVX_DECODE_PLANNAR_BUFFER_NUM; + p->buffer_min = p->new_width * p->new_height <= 8192 * 8192 ? 
+ max(msg->seq_param.planar.buffers_min, p->buffer_min) : + msg->seq_param.planar.buffers_min; + } + + p->in_port_changing |= msg->seq_param.bitdepth_luma != session->bitdepth_luma; + p->in_port_changing |= msg->seq_param.bitdepth_chroma != session->bitdepth_chroma; + p->in_port_changing |= msg->seq_param.chroma_format != session->coded_chroma_format; + mvx_update_bitdepth(session, msg->seq_param.bitdepth_chroma); + session->bitdepth_luma = msg->seq_param.bitdepth_luma; + session->bitdepth_chroma = msg->seq_param.bitdepth_chroma; + session->coded_chroma_format = msg->seq_param.chroma_format; + + p->received_seq_param = true; + + // update frame buffer VA as begin when received seq_param msg + ret = mutex_lock_interruptible(&session->fw.mem_mutex); + if (ret == 0) { + enum mvx_fw_region region = MVX_FW_REGION_FRAMEBUF; + mvx_mmu_va begin; + mvx_mmu_va end; + mvx_mmu_va available_length; + session->fw.ops.get_region(region, &begin, &end); + session->fw.next_va_region_outbuf = begin; + available_length = end - begin - msg->seq_param.afbc.buffers_min * p->afbc_alloc_bytes; + p->rest_frame_map_size = available_length > 0 ? 
available_length : 0; + mutex_unlock(&session->fw.mem_mutex); + } + + if (mvx_is_frame(p->format) && !mvx_is_afbc(p->format)) { + int i; + uint32_t frame_size = 0; + unsigned int dummy_setting_stride[MVX_BUFFER_NPLANES] = {0, 0, 0}; + unsigned int dummy_stride[MVX_BUFFER_NPLANES] = {0, 0, 0}; + unsigned int tmp_size[MVX_BUFFER_NPLANES] = {0, 0, 0}; + ret = mvx_buffer_frame_dim(p->format, p->width, p->height, &p->nplanes, + dummy_stride, tmp_size, dummy_setting_stride); + for (i = 0; i < p->nplanes; i++) + frame_size += tmp_size[i]; + // keep buffer_min from firmware to ensure decode will not block + // when buffer_max got from VA region is too small + p->buffer_max = clamp_t(uint32_t, p->rest_frame_map_size / frame_size, + msg->seq_param.planar.buffers_min, VIDEO_MAX_FRAME); + } + + if (p->in_port_changing == true) + session->event(session, MVX_SESSION_EVENT_PORT_CHANGED, (void *)MVX_DIR_OUTPUT); + else + queue_work(system_unbound_wq, &session->flush_and_queue_work); + + session->client_ops->update_load(session->csession); +} + +static void mvx_handle_set_option(struct mvx_session *session, struct mvx_fw_msg *msg) +{ + MVX_SESSION_INFO(session, "Firmware rsp: Set option."); +} + +static void mvx_handle_state_change(struct mvx_session *session, struct mvx_fw_msg *msg) +{ + MVX_SESSION_INFO(session, "Firmware rsp: State changed. 
old=%s, new=%s.", + state_to_string(session->fw_state), + state_to_string(msg->state)); + session->fw_state = msg->state; + + session->client_ops->update_load(session->csession); +} + +static void mvx_handle_switch_in(struct mvx_session *session, struct mvx_fw_msg *msg) +{ + watchdog_start(session, session_watchdog_timeout, true); +} + +static void mvx_handle_switch_out(struct mvx_session *session, struct mvx_fw_msg *msg) +{ + MVX_SESSION_INFO(session, "Firmware rsp: Switched out."); + + watchdog_stop(session); + switch_out_rsp(session); + + if ((session->fw_state == MVX_FW_STATE_RUNNING && session->idle_count < 2) || session->fw.msg_pending > 0) + switch_in(session); +} + +static void mvx_handle_dump(struct mvx_session *session, struct mvx_fw_msg *msg) +{ + MVX_SESSION_INFO(session, "Firmware rsp: dump."); +} + +static void mvx_handle_debug(struct mvx_session *session, struct mvx_fw_msg *msg) +{ + MVX_SESSION_INFO(session, "Firmware rsp: debug."); +} + +static void mvx_handle_unknown(struct mvx_session *session, struct mvx_fw_msg *msg) +{ + print_debug(session); +} + +struct mvx_fw_msg_handler { + uint32_t code; + void (*done)(struct mvx_session *session, struct mvx_fw_msg *msg); +}; + +static struct mvx_fw_msg_handler handlers[] = { + {MVX_FW_CODE_ALLOC_PARAM, mvx_handle_alloc_param}, + {MVX_FW_CODE_BUFFER_GENERAL, mvx_handle_buffer_general}, + {MVX_FW_CODE_BUFFER, mvx_handle_buffer}, + {MVX_FW_CODE_DISPLAY_SIZE, mvx_handle_display_size}, + {MVX_FW_CODE_COLOR_DESC, mvx_handle_color_desc}, + {MVX_FW_CODE_ERROR, mvx_handle_error}, + {MVX_FW_CODE_FLUSH, mvx_handle_flush}, + {MVX_FW_CODE_IDLE, mvx_handle_idle}, + {MVX_FW_CODE_JOB, mvx_handle_job}, + {MVX_FW_CODE_PONG, mvx_handle_pong}, + {MVX_FW_CODE_SEQ_PARAM, mvx_handle_seq_param}, + {MVX_FW_CODE_SET_OPTION, mvx_handle_set_option}, + {MVX_FW_CODE_STATE_CHANGE, mvx_handle_state_change}, + {MVX_FW_CODE_SWITCH_IN, mvx_handle_switch_in}, + {MVX_FW_CODE_SWITCH_OUT, mvx_handle_switch_out}, + {MVX_FW_CODE_DUMP, 
mvx_handle_dump}, + {MVX_FW_CODE_DEBUG, mvx_handle_debug}, + {MVX_FW_CODE_UNKNOWN, mvx_handle_unknown}, +}; + +static void handle_fw_message(struct mvx_session *session, + struct mvx_fw_msg *msg) +{ + uint32_t i; + struct mvx_fw_msg_handler *handler = NULL; + + for (i = 0; i < ARRAY_SIZE(handlers); i++) { + if (handlers[i].code == msg->code) { + handler = &handlers[i]; + break; + } + } + + if (handler && handler->done) + handler->done(session, msg); +} + +void mvx_session_irq(struct mvx_if_session *isession) +{ + struct mvx_session *session = mvx_if_session_to_session(isession); + int ret; + int retry; + + if (is_fw_loaded(session) == false) + return; + + ret = session->fw.ops.handle_rpc(&session->fw); + if (ret < 0) { + send_event_error(session, ret); + return; + } + +#define GET_MSG_MAX_RETRY 10 + retry = 0; + do { + struct mvx_fw_msg msg; + unsigned int timeout_ms = session->watchdog_count > 0 ? + session_watchdog_timeout * session->watchdog_count : + session_watchdog_timeout; + + watchdog_update(session, timeout_ms); + + ret = session->fw.ops.get_message(&session->fw, &msg); + if (ret < 0) { + send_event_error(session, ret); + return; + } else if (ret == EAGAIN) { + retry++; + if (retry > GET_MSG_MAX_RETRY) { + MVX_LOG_PRINT(&mvx_log_if, MVX_WAR_LOG_LEVEL, + "Unknown fw message code."); + ret = -EINVAL; + } else { + MVX_LOG_PRINT(&mvx_log_if, MVX_WAR_LOG_LEVEL, + "Retry %d", retry); + continue; + } + } + + retry = 0; + + if (ret > 0) + handle_fw_message(session, &msg); + } while (ret > 0 && session->error == 0); + + ret = session->fw.ops.handle_fw_ram_print(&session->fw); + if (ret < 0) { + send_event_error(session, ret); + return; + } + + wake_up(&session->waitq); +} + +void mvx_session_port_show(struct mvx_session_port *port, + struct seq_file *s) +{ + mvx_seq_printf(s, "mvx_session_port", 0, "%px\n", port); + mvx_seq_printf(s, "format", 1, "%08x\n", port->format); + mvx_seq_printf(s, "width", 1, "%u\n", port->width); + mvx_seq_printf(s, "height", 1, "%u\n", 
port->height); + mvx_seq_printf(s, "buffer_min", 1, "%u\n", port->buffer_min); + mvx_seq_printf(s, "buffer_count", 1, "%u\n", port->buffer_count); +} + +int mvx_session_set_securevideo(struct mvx_session *session, + bool securevideo) +{ + if (session->error != 0) + return session->error; + + if (is_fw_loaded(session) != false) + return -EBUSY; + + session->isession.securevideo = securevideo; + + return 0; +} + +int mvx_session_set_frame_rate(struct mvx_session *session, + uint32_t frame_rate_n, uint32_t frame_rate_d) +{ + int ret; + if (session->error != 0) + return session->error; + + if (frame_rate_n != 0 && frame_rate_d != 0) { + session->fps_n = frame_rate_n; + session->fps_d = frame_rate_d; + } + + if (is_fw_loaded(session) != false && session->is_encoder) { + struct mvx_fw_set_option option; + + option.code = MVX_FW_SET_FRAME_RATE; + option.frame_rate = FRAC_TO_Q16(session->fps_n, session->fps_d); + ret = fw_set_option(session, &option); + if (ret != 0) + return ret; + } + + return 0; +} + +int mvx_session_set_rate_control(struct mvx_session *session, + bool enabled) +{ + if (session->error != 0) + return session->error; + + session->rc_enabled = enabled; + + return 0; +} + +int mvx_session_set_bitrate_mode(struct mvx_session *session, + int mode) +{ + if (session->error != 0) + return session->error; + + if (mode == V4L2_MPEG_VIDEO_BITRATE_MODE_VBR) + session->rc_type = MVX_OPT_RATE_CONTROL_MODE_VARIABLE; + else if (mode == V4L2_MPEG_VIDEO_BITRATE_MODE_CBR) + session->rc_type = MVX_OPT_RATE_CONTROL_MODE_CONSTANT; + else if (mode == V4L2_MPEG_VIDEO_BITRATE_MODE_CVBR) + session->rc_type = MVX_OPT_RATE_CONTROL_MODE_C_VARIABLE; + else if (mode == V4L2_MPEG_VIDEO_BITRATE_MODE_STANDARD) + session->rc_type = MVX_OPT_RATE_CONTROL_MODE_STANDARD; + else + return -EINVAL; + + return 0; +} + +int mvx_session_set_bitrate(struct mvx_session *session, + int bitrate) +{ + int ret; + + if (session->error != 0) + return session->error; + + session->target_bitrate = bitrate; + 
if (session->rc_type == MVX_OPT_RATE_CONTROL_MODE_C_VARIABLE && + session->maximum_bitrate < bitrate) + session->maximum_bitrate = bitrate; + + if (is_fw_loaded(session) != false && session->rc_enabled != false && + session->port[get_bitstream_port(session)].format != MVX_FORMAT_JPEG) { + struct mvx_fw_set_option option; + + option.code = MVX_FW_SET_RATE_CONTROL; + option.rate_control.target_bitrate = session->target_bitrate; + option.rate_control.rate_control_mode = session->rc_type; + + ret = fw_set_option(session, &option); + if (ret != 0) + return ret; + } + + return 0; +} + +int mvx_session_set_max_bitrate(struct mvx_session *session, + int bitrate) +{ + int ret; + + if (session->error != 0) + return session->error; + + if (session->rc_type != MVX_OPT_RATE_CONTROL_MODE_C_VARIABLE) + return 0; + + session->maximum_bitrate = bitrate; + if (session->maximum_bitrate < session->target_bitrate) + session->maximum_bitrate = session->target_bitrate; + + if (is_fw_loaded(session) != false && session->rc_enabled != false && + session->port[get_bitstream_port(session)].format != MVX_FORMAT_JPEG) { + struct mvx_fw_set_option option; + + option.code = MVX_FW_SET_RATE_CONTROL; + option.rate_control.target_bitrate = session->target_bitrate; + option.rate_control.rate_control_mode = session->rc_type; + option.rate_control.maximum_bitrate = session->maximum_bitrate; + + ret = fw_set_option(session, &option); + if (ret != 0) + return ret; + } + + return 0; +} + +int mvx_session_set_crop_left(struct mvx_session * session, int32_t left){ + + if (session->error != 0) + return session->error; + + session->crop_left = left; + + if (is_fw_loaded(session) != false) { + return -EBUSY; + } + return 0; + +} + +int mvx_session_set_crop_right(struct mvx_session * session, int32_t right){ + + if (session->error != 0) + return session->error; + + session->crop_right = right; + + if (is_fw_loaded(session) != false) { + return -EBUSY; + } + return 0; + +} + +int mvx_session_set_crop_top(struct 
mvx_session * session, int32_t top){ + + if (session->error != 0) + return session->error; + + session->crop_top = top; + + if (is_fw_loaded(session) != false) { + return -EBUSY; + } + return 0; + +} + +int mvx_session_set_crop_bottom(struct mvx_session * session, int32_t bottom){ + + if (session->error != 0) + return session->error; + + session->crop_bottom = bottom; + + if (is_fw_loaded(session) != false) { + return -EBUSY; + } + return 0; + +} + +int mvx_session_set_rc_bit_i_mode(struct mvx_session *session, + int val) +{ + if (session->error != 0) + return session->error; + + session->rc_bit_i_mode = val; + + if (is_fw_loaded(session) != false) { + return -EBUSY; + } + return 0; +} + +int mvx_session_set_rc_bit_i_ratio(struct mvx_session *session, + int val) +{ + if (session->error != 0) + return session->error; + + session->rc_bit_i_ratio = val; + + if (is_fw_loaded(session) != false) { + return -EBUSY; + } + return 0; +} + +int mvx_session_set_inter_med_buf_size(struct mvx_session *session, + int val) +{ + if (session->error != 0) + return session->error; + + session->inter_med_buf_size = val; + + if (is_fw_loaded(session) != false) { + return -EBUSY; + } + return 0; +} + +int mvx_session_set_svct3_level1_period(struct mvx_session *session, + int val) +{ + if (session->error != 0) + return session->error; + + session->svct3_level1_period = val; + + if (is_fw_loaded(session) != false) { + return -EBUSY; + } + return 0; +} + +int mvx_session_set_nalu_format(struct mvx_session *session, + enum mvx_nalu_format fmt) +{ + if (session->error != 0) + return session->error; + + if (is_fw_loaded(session) != false) + return -EBUSY; + + session->nalu_format = fmt; + + return 0; +} + +int mvx_session_set_stream_escaping(struct mvx_session *session, + enum mvx_tristate status) +{ + if (session->error != 0) + return session->error; + + if (is_fw_loaded(session) != false) + return -EBUSY; + + session->stream_escaping = status; + + return 0; +} + +int 
mvx_session_set_profile(struct mvx_session *session, + enum mvx_format format, + enum mvx_profile profile) +{ + if (session->error != 0) + return session->error; + + if (is_fw_loaded(session) != false) + return -EBUSY; + + session->profile[format] = profile; + + return 0; +} + +int mvx_session_set_level(struct mvx_session *session, + enum mvx_format format, + enum mvx_level level) +{ + if (session->error != 0) + return session->error; + + if (is_fw_loaded(session) != false) + return -EBUSY; + + session->level[format] = level; + + return 0; +} + +int mvx_session_set_tier(struct mvx_session *session, + enum mvx_format format, + enum mvx_tier tier) +{ + if (session->error != 0) + return session->error; + + if (is_fw_loaded(session) != false) + return -EBUSY; + + session->tier[format] = tier; + + return 0; +} + +int mvx_session_set_ignore_stream_headers(struct mvx_session *session, + enum mvx_tristate status) +{ + if (session->error != 0) + return session->error; + + if (is_fw_loaded(session) != false) + return -EBUSY; + + session->ignore_stream_headers = status; + + return 0; +} + +int mvx_session_set_frame_reordering(struct mvx_session *session, + enum mvx_tristate status) +{ + if (session->error != 0) + return session->error; + + if (is_fw_loaded(session) != false) + return -EBUSY; + + session->frame_reordering = status; + + return 0; +} + +int mvx_session_set_intbuf_size(struct mvx_session *session, + int size) +{ + if (session->error != 0) + return session->error; + + if (is_fw_loaded(session) != false) + return -EBUSY; + + session->intbuf_size = size; + + return 0; +} + +int mvx_session_set_b_frames(struct mvx_session *session, + int val) +{ + if (session->error != 0) + return session->error; + + if (is_fw_loaded(session) != false) + return -EBUSY; + + session->b_frames = val; + + return 0; +} + +int mvx_session_set_gop_size(struct mvx_session *session, + int val) +{ + if (session->error != 0) + return session->error; + + if (is_fw_loaded(session) != false) + 
return -EBUSY; + + session->gop_size = val; + + return 0; +} + +int mvx_session_set_gop_type(struct mvx_session *session, + enum mvx_gop_type gop_type) +{ + if (session->error != 0) + return session->error; + + if (is_fw_loaded(session) != false) + return -EBUSY; + + session->gop_type = gop_type; + + return 0; +} + +int mvx_session_set_cyclic_intra_refresh_mb(struct mvx_session *session, + int val) +{ + if (session->error != 0) + return session->error; + + if (is_fw_loaded(session) != false) + return -EBUSY; + + session->cyclic_intra_refresh_mb = val; + return 0; +} + +int mvx_session_set_constr_ipred(struct mvx_session *session, + enum mvx_tristate status) +{ + if (session->error != 0) + return session->error; + + if (is_fw_loaded(session) != false) + return -EBUSY; + + session->constr_ipred = status; + + return 0; +} + +int mvx_session_set_entropy_sync(struct mvx_session *session, + enum mvx_tristate status) +{ + if (session->error != 0) + return session->error; + + if (is_fw_loaded(session) != false) + return -EBUSY; + + session->entropy_sync = status; + + return 0; +} + +int mvx_session_set_temporal_mvp(struct mvx_session *session, + enum mvx_tristate status) +{ + if (session->error != 0) + return session->error; + + if (is_fw_loaded(session) != false) + return -EBUSY; + + session->temporal_mvp = status; + + return 0; +} + +int mvx_session_set_tile_rows(struct mvx_session *session, + int val) +{ + if (session->error != 0) + return session->error; + + if (is_fw_loaded(session) != false) + return -EBUSY; + + session->tile_rows = val; + + return 0; +} + +int mvx_session_set_tile_cols(struct mvx_session *session, + int val) +{ + if (session->error != 0) + return session->error; + + if (is_fw_loaded(session) != false) + return -EBUSY; + + session->tile_cols = val; + + return 0; +} + +int mvx_session_set_min_luma_cb_size(struct mvx_session *session, + int val) +{ + if (session->error != 0) + return session->error; + + if (is_fw_loaded(session) != false) + return 
-EBUSY; + if (val == 8 || val == 16){ + session->min_luma_cb_size = val; + } else { + session->min_luma_cb_size = 0; + } + return 0; +} + +int mvx_session_set_mb_mask(struct mvx_session *session, + int val) +{ + /* + * This controls is not implemented. + */ + if (session->error != 0) + return session->error; + + if (is_fw_loaded(session) != false) + return -EBUSY; + + session->mb_mask = val; + + return 0; +} + +int mvx_session_set_entropy_mode(struct mvx_session *session, + enum mvx_entropy_mode mode) +{ + if (session->error != 0) + return session->error; + + if (is_fw_loaded(session) != false) + return -EBUSY; + + session->entropy_mode = mode; + + return 0; +} + +int mvx_session_set_multi_slice_mode(struct mvx_session *session, + enum mvx_multi_slice_mode mode) +{ + if (session->error != 0) + return session->error; + + if (is_fw_loaded(session) != false) + return -EBUSY; + + session->multi_slice_mode = mode; + + return 0; +} + +int mvx_session_set_multi_slice_max_mb(struct mvx_session *session, + int val) +{ + if (session->error != 0) + return session->error; + + if (is_fw_loaded(session) != false) + return -EBUSY; + + session->multi_slice_max_mb = val; + + return 0; +} + +int mvx_session_set_vp9_prob_update(struct mvx_session *session, + enum mvx_vp9_prob_update mode) +{ + if (session->error != 0) + return session->error; + + if (is_fw_loaded(session) != false) + return -EBUSY; + + session->vp9_prob_update = mode; + + return 0; +} + +int mvx_session_set_mv_h_search_range(struct mvx_session *session, + int val) +{ + if (session->error != 0) + return session->error; + + if (is_fw_loaded(session) != false) + return -EBUSY; + + session->mv_h_search_range = val; + + return 0; +} + +int mvx_session_set_mv_v_search_range(struct mvx_session *session, + int val) +{ + if (session->error != 0) + return session->error; + + if (is_fw_loaded(session) != false) + return -EBUSY; + + session->mv_v_search_range = val; + + return 0; +} + +int mvx_session_set_bitdepth_chroma(struct 
mvx_session *session, + int val) +{ + if (session->error != 0) + return session->error; + + if (is_fw_loaded(session) != false) + return -EBUSY; + + session->bitdepth_chroma = val; + + return 0; +} + +int mvx_session_set_bitdepth_luma(struct mvx_session *session, + int val) +{ + if (session->error != 0) + return session->error; + + if (is_fw_loaded(session) != false) + return -EBUSY; + + session->bitdepth_luma = val; + + return 0; +} + +int mvx_session_set_force_chroma_format(struct mvx_session *session, + int val) +{ + if (session->error != 0) + return session->error; + + if (is_fw_loaded(session) != false) + return -EBUSY; + + session->force_chroma_format = val; + + return 0; +} + +int mvx_session_set_rgb_to_yuv_mode(struct mvx_session *session, + enum mvx_rgb_to_yuv_mode mode) +{ + if(mode == MVX_RGB_TO_YUV_MODE_MAX) + return 0; + if (session->error != 0) + return session->error; + + if (is_fw_loaded(session) != false) + return -EBUSY; + + session->rgb_to_yuv = mode; + session->use_cust_rgb_to_yuv_mode = MVX_CUST_YUV2RGB_MODE_STANDARD; + + return 0; +} + +int mvx_session_set_band_limit(struct mvx_session *session, + int val) +{ + if (session->error != 0) + return session->error; + + if (is_fw_loaded(session) != false) + return -EBUSY; + + session->band_limit = val; + + return 0; +} + +int mvx_session_set_cabac_init_idc(struct mvx_session *session, + int val) +{ + if (session->error != 0) + return session->error; + + if (is_fw_loaded(session) != false) + return -EBUSY; + + session->cabac_init_idc = val; + + return 0; +} + +static int mvx_session_get_fmt_qp_range(enum mvx_format fmt, + int *qp_min, + int *qp_max) +{ + switch (fmt) { + case MVX_FORMAT_H263: + *qp_min = 0; + *qp_max = 31; + break; + case MVX_FORMAT_H264: + case MVX_FORMAT_HEVC: + *qp_min = 0; + *qp_max = 51; + break; + case MVX_FORMAT_VP9: + case MVX_FORMAT_VP8: + *qp_min = 0; + *qp_max = 63; + break; + default: + *qp_min = 0; + *qp_max = 63; + break; + } + + return 0; +} + +int 
mvx_session_set_i_frame_qp(struct mvx_session *session, + enum mvx_format fmt, + int qp) +{ + int ret; + + if (session->error != 0) + return session->error; + + if (is_fw_loaded(session) != false) { + enum mvx_direction dir = get_bitstream_port(session); + + fmt = session->port[dir].format; + ret = fw_set_qp(session, MVX_FW_SET_QP_I, qp); + if (ret != 0) + return ret; + } + + session->qp[fmt].i_frame = qp; + if (fmt == MVX_FORMAT_VP9) + session->qp[MVX_FORMAT_VP8].i_frame = qp; + + return 0; +} + +int mvx_session_set_p_frame_qp(struct mvx_session *session, + enum mvx_format fmt, + int qp) +{ + int ret; + + if (session->error != 0) + return session->error; + + if (is_fw_loaded(session) != false) { + enum mvx_direction dir = get_bitstream_port(session); + + fmt = session->port[dir].format; + ret = fw_set_qp(session, MVX_FW_SET_QP_P, qp); + if (ret != 0) + return ret; + } + + session->qp[fmt].p_frame = qp; + if (fmt == MVX_FORMAT_VP9) + session->qp[MVX_FORMAT_VP8].p_frame = qp; + + return 0; +} + +int mvx_session_set_b_frame_qp(struct mvx_session *session, + enum mvx_format fmt, + int qp) +{ + int ret; + + if (session->error != 0) + return session->error; + + if (is_fw_loaded(session) != false) { + enum mvx_direction dir = get_bitstream_port(session); + + fmt = session->port[dir].format; + ret = fw_set_qp(session, MVX_FW_SET_QP_B, qp); + if (ret != 0) + return ret; + } + + session->qp[fmt].b_frame = qp; + + return 0; +} + +int mvx_session_set_min_qp(struct mvx_session *session, + enum mvx_format fmt, + int qp_min) +{ + int ret; + + if (session->error != 0) + return session->error; + + if (is_fw_loaded(session) != false) { + struct mvx_fw_set_option option; + enum mvx_direction dir = get_bitstream_port(session); + int codec = session->port[dir].format; + int qp_max = session->qp[codec].max; + + if (qp_min > qp_max) { + int _qp_min, _qp_max; + + mvx_session_get_fmt_qp_range(fmt, &_qp_min, &_qp_max); + qp_max = _qp_max; + session->qp[fmt].max = qp_max; + if (fmt == 
MVX_FORMAT_VP9) + session->qp[MVX_FORMAT_VP8].max = qp_max; + } + + option.code = MVX_FW_SET_QP_RANGE; + option.qp_range.min = qp_min; + option.qp_range.max = qp_max; + ret = fw_set_option(session, &option); + if (ret != 0) + return ret; + } + + session->qp[fmt].min = qp_min; + if (fmt == MVX_FORMAT_VP9) + session->qp[MVX_FORMAT_VP8].min = qp_min; + + return 0; +} + +int mvx_session_set_max_qp(struct mvx_session *session, + enum mvx_format fmt, + int qp_max) +{ + int ret; + + if (session->error != 0) + return session->error; + + if (is_fw_loaded(session) != false) { + struct mvx_fw_set_option option; + enum mvx_direction dir = get_bitstream_port(session); + int codec = session->port[dir].format; + int qp_min = session->qp[codec].min; + + if (qp_min > qp_max) { + int _qp_min, _qp_max; + + mvx_session_get_fmt_qp_range(fmt, &_qp_min, &_qp_max); + qp_min = _qp_min; + session->qp[fmt].min = qp_min; + if (fmt == MVX_FORMAT_VP9) + session->qp[MVX_FORMAT_VP8].min = qp_min; + } + + option.code = MVX_FW_SET_QP_RANGE; + option.qp_range.min = qp_min; + option.qp_range.max = qp_max; + ret = fw_set_option(session, &option); + if (ret != 0) + return ret; + } + + session->qp[fmt].max = qp_max; + if (fmt == MVX_FORMAT_VP9) + session->qp[MVX_FORMAT_VP8].max = qp_max; + + return 0; +} + +int mvx_session_set_resync_interval(struct mvx_session *session, + int val) +{ + if (session->error != 0) + return session->error; + + if (is_fw_loaded(session) != false) + return -EBUSY; + + session->resync_interval = val; + + return 0; +} + +int mvx_session_set_jpeg_quality(struct mvx_session *session, + int val) +{ + if (session->error != 0) + return session->error; + + if (is_fw_loaded(session) != false) + return -EBUSY; + + session->jpeg_quality = val; + + return 0; +} + +int mvx_session_set_jpeg_quality_luma(struct mvx_session *session, + int val) +{ + if (session->error != 0) + return session->error; + + if (is_fw_loaded(session) != false) + return -EBUSY; + + session->jpeg_quality_luma = val; 
+
+ return 0;
+}
+
+int mvx_session_set_jpeg_quality_chroma(struct mvx_session *session,
+ int val)
+{
+ if (session->error != 0)
+ return session->error;
+
+ if (is_fw_loaded(session) != false)
+ return -EBUSY;
+
+ session->jpeg_quality_chroma = val;
+
+ return 0;
+}
+
+
+int mvx_session_get_color_desc(struct mvx_session *session,
+ struct mvx_fw_color_desc *color_desc)
+{
+ *color_desc = session->color_desc;
+ return 0;
+}
+
+int mvx_session_set_color_desc(struct mvx_session *session,
+ struct mvx_fw_color_desc *color_desc)
+{
+ int ret = 0;
+ if (session->error != 0)
+ return session->error;
+
+ session->color_desc = *color_desc;
+ if (is_fw_loaded(session) != false) {
+ struct mvx_fw_set_option option;
+
+ option.code = MVX_FW_SET_COLOUR_DESC;
+ option.colour_desc = *color_desc;
+ ret = fw_set_option(session, &option);
+ if (ret != 0)
+ return ret;
+ }
+ return 0;
+}
+
+int mvx_session_set_osd_config(struct mvx_session *session,
+ struct mvx_osd_config *osd)
+{
+ int ret = 0;
+ int osd_cfg_num = 0;
+ if (is_fw_loaded(session) == false ||
+ session->port[MVX_DIR_INPUT].is_flushing != false) {
+ osd_cfg_num = session->port[MVX_DIR_INPUT].osd_cfg_num;
+ if (osd_cfg_num < MVX_ROI_QP_NUMS) {
+ MVX_SESSION_INFO(session, "fw is not ready!!!, pending osd num:%d",osd_cfg_num);
+ session->port[MVX_DIR_INPUT].osd_cfg_queue[osd_cfg_num] = *osd;
+ session->port[MVX_DIR_INPUT].osd_cfg_num++;
+ } else {
+ MVX_SESSION_ERR(session, "fw is not ready for long time, too many osd pending:%d",osd_cfg_num);
+ }
+ return 0;
+ }
+ ret = queue_osd_config(session, osd);
+ return ret;
+}
+
+/* Store the OSD info on the session for later use.
+ * (Fix: removed the stray ';' that followed the closing brace — it was a
+ * spurious empty top-level declaration.)
+ */
+int mvx_session_set_osd_info(struct mvx_session *session,
+ struct mvx_osd_info *osd)
+{
+ session->osd_info = *osd;
+ return 0;
+}
+
+int mvx_session_set_roi_regions(struct mvx_session *session,
+ struct mvx_roi_config *roi)
+{
+ int ret = 0;
+ int roi_config_num = 0;
+ if (is_fw_loaded(session) == false ||
+ session->port[MVX_DIR_INPUT].is_flushing != false) {
+ roi_config_num =
session->port[MVX_DIR_INPUT].roi_config_num;
+ if (roi_config_num < MVX_ROI_QP_NUMS) {
+ MVX_SESSION_INFO(session, "fw is not ready!!!, pending roi num:%d",roi_config_num);
+ session->port[MVX_DIR_INPUT].roi_config_queue[roi_config_num] = *roi;
+ session->port[MVX_DIR_INPUT].roi_config_num++;
+ } else {
+ MVX_SESSION_ERR(session, "fw is not ready for long time, too many roi pending:%d",roi_config_num);
+ }
+ return 0;
+ }
+ ret = queue_roi_regions(session, roi);
+ /* Fix: propagate the queueing status instead of discarding it
+ * (matches set_osd_config / set_chr_cfg / set_stats_mode).
+ */
+ return ret;
+}
+
+int mvx_session_set_qp_epr(struct mvx_session *session,
+ struct mvx_buffer_param_qp *qp)
+{
+ int ret = 0;
+ int qp_num = 0;
+ if (is_fw_loaded(session) == false ||
+ session->port[MVX_DIR_INPUT].is_flushing != false) {
+ qp_num = session->port[MVX_DIR_INPUT].qp_num;
+ if (qp_num < MVX_ROI_QP_NUMS) {
+ MVX_SESSION_WARN(session, "fw is not ready!!!, pending qp num:%d",qp_num);
+ session->port[MVX_DIR_INPUT].qp_queue[qp_num] = *qp;
+ session->port[MVX_DIR_INPUT].qp_num++;
+ } else {
+ MVX_SESSION_ERR(session, "fw is not ready for long time, too many qp pending:%d",qp_num);
+ }
+ return 0;
+ }
+ ret = queue_qp_epr(session, qp);
+ /* Fix: propagate the queueing status instead of discarding it. */
+ return ret;
+}
+
+int mvx_session_set_sei_userdata(struct mvx_session *session,
+ struct mvx_sei_userdata *userdata)
+{
+ int ret = 0;
+ if (session->error != 0)
+ return session->error;
+
+ session->sei_userdata = *userdata;
+ if (is_fw_loaded(session) != false) {
+ struct mvx_fw_set_option option;
+
+ option.code = MVX_FW_SET_SEI_USERDATA;
+ option.userdata = *userdata;
+ ret = fw_set_option(session, &option);
+ if (ret != 0)
+ return ret;
+ }
+ return ret;
+}
+
+int mvx_session_set_hrd_buffer_size(struct mvx_session *session,
+ int size)
+{
+ int ret = 0;
+
+ if (session->error != 0)
+ return session->error;
+
+ if (session->port[MVX_DIR_OUTPUT].format == MVX_FORMAT_JPEG) {
+ MVX_SESSION_WARN(session, "format %d does not support set HRD Buffer Size",
+ session->port[MVX_DIR_OUTPUT].format);
+ return ret;
+ }
+
+ session->nHRDBufsize = size;
+
+ if
(is_fw_loaded(session) != false) { + struct mvx_fw_set_option option; + + option.code = MVX_FW_SET_HRD_BUF_SIZE; + option.nHRDBufsize = size; + ret = fw_set_option(session, &option); + if (ret != 0) + return ret; + } + return 0; +} + +int mvx_session_set_dsl_frame(struct mvx_session *session, + struct mvx_dsl_frame *dsl) +{ + struct mvx_session_port *p = &session->port[MVX_DIR_OUTPUT]; + + if (session->error != 0) + return session->error; + + if (is_fw_loaded(session) != false) { + return -EBUSY; + } + + session->dsl_frame.width = dsl->width; + session->dsl_frame.height = dsl->height; + p->width = session->orig_width; + p->height = session->orig_height; + try_format(session, MVX_DIR_OUTPUT, p->format, + &p->width, &p->height, &p->nplanes, + p->stride, p->size, + &p->interlaced); + + return 0; +} + +int mvx_session_set_dsl_ratio(struct mvx_session *session, + struct mvx_dsl_ratio *dsl) +{ + if (session->error != 0) + return session->error; + + session->dsl_ratio.hor = dsl->hor; + session->dsl_ratio.ver = dsl->ver; + return 0; +} + +int mvx_session_set_long_term_ref(struct mvx_session *session, + struct mvx_long_term_ref *ltr) +{ + if (session->error != 0) + return session->error; + + session->mvx_ltr.mode = ltr->mode; + session->mvx_ltr.period = ltr->period; + if (is_fw_loaded(session) != false) { + return -EBUSY; + } + return 0; +} + +int mvx_session_set_dsl_mode(struct mvx_session *session, + int *mode) +{ + if (session->error != 0) + return session->error; + + if (is_fw_loaded(session) != false) + return -EBUSY; + + session->dsl_pos_mode = *mode; + + return 0; +} + +int mvx_session_set_mini_frame_cnt(struct mvx_session *session, + int *cnt) +{ + if (session->error != 0) + return session->error; + + if (is_fw_loaded(session) != false) + return -EBUSY; + + session->mini_frame_cnt = *cnt; + return 0; +} + +int mvx_session_set_stats_mode(struct mvx_session *session, + struct mvx_enc_stats *stats) +{ + int ret = 0; + int enc_stats_num = 0; + if (is_fw_loaded(session) 
== false ||
+ session->port[MVX_DIR_INPUT].is_flushing != false) {
+ enc_stats_num = session->port[MVX_DIR_INPUT].enc_stats_num;
+ if (enc_stats_num < MVX_ROI_QP_NUMS) {
+ MVX_SESSION_INFO(session, "fw is not ready!!!, pending enc stats num:%d",enc_stats_num);
+ session->port[MVX_DIR_INPUT].enc_stats_queue[enc_stats_num] = *stats;
+ session->port[MVX_DIR_INPUT].enc_stats_num++;
+ } else {
+ MVX_SESSION_ERR(session, "fw is not ready for long time, too many enc stats pending:%d",enc_stats_num);
+ }
+ return 0;
+ }
+ ret = queue_enc_stats(session, stats);
+ return ret;
+}
+
+int mvx_session_set_chr_cfg(struct mvx_session *session,
+ struct mvx_chr_cfg *chr_cfg)
+{
+ int ret = 0;
+ int chr_cfg_num = 0;
+ if (is_fw_loaded(session) == false ||
+ session->port[MVX_DIR_INPUT].is_flushing != false) {
+ chr_cfg_num = session->port[MVX_DIR_INPUT].chr_cfg_num;
+ if (chr_cfg_num < MVX_ROI_QP_NUMS) {
+ MVX_SESSION_INFO(session, "fw is not ready!!!, pending chr cfg num:%d",chr_cfg_num);
+ session->port[MVX_DIR_INPUT].chr_cfg_queue[chr_cfg_num] = *chr_cfg;
+ session->port[MVX_DIR_INPUT].chr_cfg_num++;
+ } else {
+ MVX_SESSION_ERR(session, "fw is not ready for long time, too many chr cfg pending:%d",chr_cfg_num);
+ }
+ return 0;
+ }
+ ret = queue_chr_cfg(session, chr_cfg);
+ return ret;
+}
+
+/* Copy the caller-supplied Huffman table into the session.
+ * Consistency fix: fail fast on a pending session error before touching
+ * state, as every other setter in this file does.
+ */
+int mvx_session_set_huff_table (struct mvx_session *session,
+ struct mvx_huff_table *table)
+{
+ if (session->error != 0)
+ return session->error;
+
+ if (is_fw_loaded(session) != false)
+ return -EBUSY;
+ memcpy(&session->huff_table, table, sizeof(struct mvx_huff_table));
+ return 0;
+}
+
+/* Copy the seamless-target configuration into the session.
+ * Consistency fix: check the pending session error first, like all other
+ * setters.
+ */
+int mvx_session_set_seamless_target (struct mvx_session *session,
+ struct mvx_seamless_target *seamless)
+{
+ if (session->error != 0)
+ return session->error;
+
+ if (is_fw_loaded(session) != false)
+ return -EBUSY;
+ memcpy(&session->seamless_target, seamless, sizeof(struct mvx_seamless_target));
+ return 0;
+}
+
+int mvx_session_set_init_qp_i(struct mvx_session *session,
+ int val)
+{
+ if (session->error != 0)
+ return session->error;
+
+ if (is_fw_loaded(session) != false)
+ return -EBUSY;
+
+
session->init_qpi = val; + + return 0; +} + +int mvx_session_set_init_qp_p(struct mvx_session *session, + int val) +{ + if (session->error != 0) + return session->error; + + if (is_fw_loaded(session) != false) + return -EBUSY; + + session->init_qpp = val; + + return 0; +} + +int mvx_session_set_sao_luma(struct mvx_session *session, + int val) +{ + if (session->error != 0) + return session->error; + + if (is_fw_loaded(session) != false) + return -EBUSY; + + session->sao_luma = val; + + return 0; +} + +int mvx_session_set_sao_chroma(struct mvx_session *session, + int val) +{ + if (session->error != 0) + return session->error; + + if (is_fw_loaded(session) != false) + return -EBUSY; + + session->sao_chroma = val; + + return 0; +} + +int mvx_session_set_delta_I_P(struct mvx_session *session, + int val) +{ + if (session->error != 0) + return session->error; + + if (is_fw_loaded(session) != false) + return -EBUSY; + + session->qp_delta_i_p = val; + + return 0; +} + +int mvx_session_set_ref_rb_eb(struct mvx_session *session, + int val) +{ + if (session->error != 0) + return session->error; + + if (is_fw_loaded(session) != false) + return -EBUSY; + + session->ref_rb_en = val; + + return 0; +} + +int mvx_session_set_rc_clip_top(struct mvx_session *session, + int val) +{ + if (session->error != 0) + return session->error; + + if (is_fw_loaded(session) != false) + return -EBUSY; + + session->rc_qp_clip_top = val; + + return 0; +} + +int mvx_session_set_rc_clip_bot(struct mvx_session *session, + int val) +{ + if (session->error != 0) + return session->error; + + if (is_fw_loaded(session) != false) + return -EBUSY; + + session->rc_qp_clip_bot = val; + + return 0; +} + +int mvx_session_set_qpmap_clip_top(struct mvx_session *session, + int val) +{ + if (session->error != 0) + return session->error; + + if (is_fw_loaded(session) != false) + return -EBUSY; + + session->qpmap_qp_clip_top = val; + + return 0; +} + +int mvx_session_set_qpmap_clip_bot(struct mvx_session *session, + int 
val) +{ + if (session->error != 0) + return session->error; + + if (is_fw_loaded(session) != false) + return -EBUSY; + + session->qpmap_qp_clip_bot = val; + + return 0; +} +int mvx_session_set_max_qp_i(struct mvx_session *session, + int val) +{ + if (session->error != 0) + return session->error; + + if (is_fw_loaded(session) != false) + { + int ret; + struct mvx_fw_set_option option; + enum mvx_direction dir = get_bitstream_port(session); + int codec = session->port[dir].format; + int _qp_min, _qp_max; + + mvx_session_get_fmt_qp_range(codec, &_qp_min, &_qp_max); + val = min(val, _qp_max); + if (val < session->min_qp_i) + session->min_qp_i = _qp_min; + + option.code = MVX_FW_SET_QP_RANGE_I; + option.qp_range.min = session->min_qp_i; + option.qp_range.max = val; + ret = fw_set_option(session, &option); + if (ret != 0) + return ret; + } + session->max_qp_i = val; + + return 0; + +} + +int mvx_session_set_min_qp_i(struct mvx_session *session, + int val) +{ + if (session->error != 0) + return session->error; + + if (is_fw_loaded(session) != false) + { + int ret; + struct mvx_fw_set_option option; + enum mvx_direction dir = get_bitstream_port(session); + int codec = session->port[dir].format; + int _qp_min, _qp_max; + + mvx_session_get_fmt_qp_range(codec, &_qp_min, &_qp_max); + val = max(val, _qp_min); + if (val > session->max_qp_i) + session->max_qp_i = _qp_max; + + option.code = MVX_FW_SET_QP_RANGE_I; + option.qp_range.min = val; + option.qp_range.max = session->max_qp_i; + ret = fw_set_option(session, &option); + if (ret != 0) + return ret; + } + + session->min_qp_i = val; + + return 0; +} + +int mvx_session_set_fixedqp(struct mvx_session *session, + int val) +{ + if (session->error != 0) + return session->error; + + if (is_fw_loaded(session) != false) + return -EBUSY; + + session->fixedqp = val; + + return 0; + +} +int mvx_session_set_visible_width(struct mvx_session *session, + int val) +{ + if (session->error != 0) + return session->error; + + 
session->visible_width = val; + + return 0; + +} + +int mvx_session_set_visible_height(struct mvx_session *session, + int val) +{ + if (session->error != 0) + return session->error; + + session->visible_height = val; + + return 0; + +} + +int mvx_session_set_gop_reset_pframes(struct mvx_session *session, + int val) +{ + if (session->error != 0) + return session->error; + + if (is_fw_loaded(session) != false) + { + int ret; + struct mvx_fw_set_option option; + option.code = MVX_FW_SET_GOP_PFRAMES; + option.reset_gop_pframes = val; + ret = fw_set_option(session, &option); + if (ret != 0) + return ret; + } + + session->reset_gop_pframes = val; + return 0; +} + +int mvx_session_set_ltr_reset_period(struct mvx_session *session, + int val) +{ + if (session->error != 0) + return session->error; + + if (is_fw_loaded(session) != false) + { + int ret; + struct mvx_fw_set_option option; + option.code = MVX_FW_SET_LTR_PERIOD; + option.reset_ltr_period = val; + ret = fw_set_option(session, &option); + if (ret != 0) + return ret; + } + + session->reset_ltr_period = val; + return 0; +} + +int mvx_session_set_gdr_number(struct mvx_session *session, + int val) +{ + if (session->error != 0) + return session->error; + + if (is_fw_loaded(session) != false) + return -EBUSY; + + session->gdr_number = val; + + return 0; +} + +int mvx_session_set_gdr_period(struct mvx_session *session, + int val) +{ + if (session->error != 0) + return session->error; + + if (is_fw_loaded(session) != false) + return -EBUSY; + + session->gdr_period = val; + + return 0; +} + +int mvx_session_set_mulit_sps_pps(struct mvx_session *session, + int val) +{ + if (session->error != 0) + return session->error; + + if (is_fw_loaded(session) != false) + return -EBUSY; + + session->mulit_sps_pps = val; + + return 0; +} + +int mvx_session_set_enable_visual(struct mvx_session *session, + int val) +{ + if (session->error != 0) + return session->error; + + if (is_fw_loaded(session) != false) + return -EBUSY; + + 
session->enable_visual = val; + + return 0; +} + +int mvx_session_set_adaptive_intra_block(struct mvx_session *session, + int val) +{ + if (session->error != 0) + return session->error; + + if (is_fw_loaded(session) != false) + return -EBUSY; + + session->adaptive_intra_block = val; + + return 0; + +} + +int mvx_session_set_scd_enable(struct mvx_session *session, + int val) +{ + if (session->error != 0) + return session->error; + + if (is_fw_loaded(session) != false) + return -EBUSY; + + session->scd_enable = val; + + return 0; +} + +int mvx_session_set_scd_percent(struct mvx_session *session, + int val) +{ + if (session->error != 0) + return session->error; + + if (is_fw_loaded(session) != false) + return -EBUSY; + + session->scd_percent = val; + + return 0; +} + +int mvx_session_set_scd_threshold(struct mvx_session *session, + int val) +{ + if (session->error != 0) + return session->error; + + if (is_fw_loaded(session) != false) + return -EBUSY; + + session->scd_threshold = val; + + return 0; +} + +int mvx_session_set_aq_ssim_en(struct mvx_session *session, + int val) +{ + if (session->error != 0) + return session->error; + + if (is_fw_loaded(session) != false) + return -EBUSY; + + session->aq_ssim_en = val; + + return 0; +} + +int mvx_session_set_aq_neg_ratio(struct mvx_session *session, + int val) +{ + if (session->error != 0) + return session->error; + + if (is_fw_loaded(session) != false) + return -EBUSY; + + session->aq_neg_ratio = val; + + return 0; +} + +int mvx_session_set_aq_pos_ratio(struct mvx_session *session, + int val) +{ + if (session->error != 0) + return session->error; + + if (is_fw_loaded(session) != false) + return -EBUSY; + + session->aq_pos_ratio = val; + + return 0; +} + +int mvx_session_set_aq_qpdelta_lmt(struct mvx_session *session, + int val) +{ + if (session->error != 0) + return session->error; + + if (is_fw_loaded(session) != false) + return -EBUSY; + + session->aq_qpdelta_lmt = val; + + return 0; +} + +int 
mvx_session_set_aq_init_frm_avg_svar(struct mvx_session *session, + int val) +{ + if (session->error != 0) + return session->error; + + if (is_fw_loaded(session) != false) + return -EBUSY; + + session->aq_init_frm_avg_svar = val; + + return 0; +} + +int mvx_session_set_color_conversion(struct mvx_session *session, + enum mvx_yuv_to_rgb_mode mode) +{ + if (session->error != 0) + return session->error; + + if (is_fw_loaded(session) != false) + return -EBUSY; + session->color_conv_mode = mode; + session->use_cust_color_conv_coef=false; + + return 0; +} + +int mvx_session_set_color_conversion_ceof(struct mvx_session *session, + struct mvx_color_conv_coef *conv_coef) +{ + if (session->error != 0) + return session->error; + + if (is_fw_loaded(session) != false) + return -EBUSY; + + memcpy(&session->color_conv_coef, conv_coef, sizeof(struct mvx_color_conv_coef)); + session->use_cust_color_conv_coef=true; + + return 0; +} + +int mvx_session_set_rgb_conv_yuv_coef(struct mvx_session *session, + struct mvx_rgb2yuv_color_conv_coef *conv_coef) +{ + if (session->error != 0) + return session->error; + if (is_fw_loaded(session) != false) + return -EBUSY; + + memcpy(&session->rgb2yuv_color_conv_coef, conv_coef, sizeof(struct mvx_rgb2yuv_color_conv_coef)); + session->use_cust_color_conv_coef=true; + session->use_cust_rgb_to_yuv_mode = MVX_CUST_YUV2RGB_MODE_CUSTOMIZED; + + return 0; +} + +int mvx_session_set_forced_uv_value(struct mvx_session *session, + int val) +{ + if (session->error != 0) + return session->error; + + if (is_fw_loaded(session) != false) + return -EBUSY; + + session->forced_uv_value = val; + + return 0; +} + +int mvx_session_set_dsl_interpolation_mode(struct mvx_session *session, + int mode) +{ + if (session->error != 0) + return session->error; + + if (is_fw_loaded(session) != false) + return -EBUSY; + + session->dsl_interp_mode = mode; + + return 0; +} + +int mvx_session_set_disabled_features(struct mvx_session *session, + int val) +{ + if (session->error != 0) + 
return session->error; + + if (is_fw_loaded(session) != false) + return -EBUSY; + + session->disabled_features = val; + + return 0; +} + +int mvx_session_set_crop(struct mvx_session *session, + struct mvx_crop_cfg *crop) +{ + enum mvx_direction dir; + + if (session->error != 0) + return session->error; + + if (is_fw_loaded(session) != false) + return -EBUSY; + + memcpy(&session->crop, crop, sizeof(struct mvx_crop_cfg)); + for (dir = 0; dir < MVX_DIR_MAX; dir++) { + struct mvx_session_port *p = &session->port[dir]; + if (mvx_is_frame(p->format)) { + p->width = session->orig_width; + p->height = session->orig_height; + try_format(session, dir, p->format, + &p->width, &p->height, &p->nplanes, + p->stride, p->size, + &p->interlaced); + } else if (dir == MVX_DIR_OUTPUT) { + p->width = crop->width; + p->height = crop->height; + } + } + + return 0; +} + +int mvx_session_set_dual_afbc_downscaled(struct mvx_session *session, + int val) +{ + struct mvx_session_port *p = &session->port[MVX_DIR_OUTPUT]; + if (session->error != 0) + return session->error; + + if (is_fw_loaded(session) != false) + return -EBUSY; + + session->dual_afbc_downscaled = !!val; + p->width = session->orig_width; + p->height = session->orig_height; + try_format(session, MVX_DIR_OUTPUT, p->format, + &p->width, &p->height, &p->nplanes, + p->stride, p->size, + &p->interlaced); + + return 0; +} + +int mvx_session_set_job_frames(struct mvx_session *session, + int val) +{ + if (session->error != 0) + return session->error; + + if (is_fw_loaded(session) != false) + return -EBUSY; + + session->job_frames = val; + + return 0; +} + +int mvx_session_set_force_key_frame(struct mvx_session *session, + uint32_t val) +{ + if (session->error != 0) + return session->error; + + session->force_key_frame = val; + + return 0; +} + +int mvx_session_update_input_buffer_min(struct mvx_session *session) +{ + if (session->error != 0) + return session->error; + + if (is_fw_loaded(session) != false) + return -EBUSY; + + 
session->port[MVX_DIR_INPUT].buffer_min = session->b_frames + 1; + if (session->port[MVX_DIR_INPUT].width * session->port[MVX_DIR_INPUT].height <= 8192 * 8192) + session->port[MVX_DIR_INPUT].buffer_min += MVX_ENCODE_EXTRA_BUFFER_NUM; + + return 0; +} + +int mvx_session_update_p_frames(struct mvx_session *session) +{ + if (session->error != 0) + return session->error; + + if (is_fw_loaded(session) != false) + return -EBUSY; + + // GOP = P-frames * (B-frames + 1) + 1 + session->p_frames = (session->gop_size - 1) / (session->b_frames + 1); + + return 0; +} + +int mvx_session_set_fsf_mode(struct mvx_session *session, int val) +{ + if (session->error != 0) + return session->error; + + if (is_fw_loaded(session) != false) + return -EBUSY; + + session->fsf_mode = val; + + return 0; +} + +int mvx_session_set_priority(struct mvx_session *session, int val) +{ + if (session->error != 0) + return session->error; + + if (is_fw_loaded(session) != false) + return -EBUSY; + + if (val >= 0) + session->priority = val; + + return 0; +} + +struct mvx_session_format_map *mvx_session_find_format(uint32_t pixelformat) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(mvx_compressed_fmts); i++) + if (mvx_compressed_fmts[i].pixelformat == pixelformat) + return &mvx_compressed_fmts[i]; + + for (i = 0; i < ARRAY_SIZE(mvx_raw_fmts); i++) + if (mvx_raw_fmts[i].pixelformat == pixelformat) + return &mvx_raw_fmts[i]; + + return ERR_PTR(-EINVAL); +} + +struct mvx_session_format_map *mvx_session_get_compressed_format(struct mvx_session *session) +{ + enum mvx_direction dir = get_bitstream_port(session); + if (dir < MVX_DIR_MAX) + return mvx_session_find_format(session->port[dir].pixelformat); + else + return ERR_PTR(-EINVAL); +} + +uint32_t mvx_get_format_bpp(enum mvx_format format) +{ + int i; + for (i = 0; i < ARRAY_SIZE(mvx_raw_fmts); i++) + if (mvx_raw_fmts[i].format == format) + return mvx_raw_fmts[i].bpp; + + return 0; +} + +void mvx_session_enum_framesizes(struct mvx_session *session, + bool 
is_encoder, enum mvx_format format, + uint32_t *min_width, uint32_t *min_height, + uint32_t *max_width, uint32_t *max_height, + uint32_t *step_width, uint32_t *step_height) +{ + struct mvx_hw_ver hw_ver; + + session->client_ops->get_hw_ver(session->client_ops, &hw_ver); + switch(format) { + case MVX_FORMAT_AVS: + *max_width = 1920; + *max_height = 1080; + break; + case MVX_FORMAT_AVS2: + case MVX_FORMAT_H264: + case MVX_FORMAT_HEVC: + case MVX_FORMAT_VP9: + case MVX_FORMAT_AV1: + if (hw_ver.svn_revision == MVE_SVN_4K) { + *max_width = 4096; + *max_height = 4096; + } else { + *max_width = 8192; + *max_height = 8192; + } + break; + case MVX_FORMAT_H263: + case MVX_FORMAT_MPEG4: + case MVX_FORMAT_VP8: + *max_width = 2048; + *max_height = 2048; + break; + case MVX_FORMAT_JPEG: + if (is_encoder) { + *max_width = 16384; + *max_height = 16384; + } else { + *max_width = 32768; + *max_height = 32768; + } + break; + case MVX_FORMAT_MPEG2: + case MVX_FORMAT_RV: + *max_width = 4096; + *max_height = 4096; + break; + case MVX_FORMAT_VC1: + *max_width = 2048; + *max_height = 4096; + break; + default: + if (is_encoder) { + *max_width = 16384; + *max_height = 16384; + } else { + *max_width = 8192; + *max_height = 8192; + } + break; + } + *min_width = 144; + *min_height = 144; + *step_width = 2; + *step_height = 2; +} + +void mvx_session_cancel_work(struct mvx_session *session) +{ + cancel_work_sync(&session->flush_and_queue_work); + cancel_work_sync(&session->watchdog_work); + watchdog_stop(session); +} + +static void mvx_session_construct_realtime_fps_msg(struct mvx_session *session, + int avgfps, int rtfps, uint64_t frame_count, time64_t start_sec, time64_t end_sec) +{ + if (mvx_log_perf.rtfps && mvx_log_perf.rtfps_num < MVX_LOG_FPS_MSG_UNITS) { + struct rtc_time start, end; + enum mvx_direction dir = session->is_encoder ? 
MVX_DIR_OUTPUT : MVX_DIR_INPUT; + struct mvx_session_port *p = &session->port[dir]; + struct mvx_session_format_map *map = mvx_session_find_format(p->pixelformat); + + rtc_time64_to_tm(start_sec, &start); + rtc_time64_to_tm(end_sec, &end); + + mutex_lock(&mvx_log_perf.mutex); + + snprintf(mvx_log_perf.rtfps + MVX_LOG_FPS_MSG_UNIT_SIZE * mvx_log_perf.rtfps_num, + MVX_LOG_FPS_MSG_UNIT_SIZE, + "%02d:%02d:%02d ~ %02d:%02d:%02d [%px] P%d %s %s %dx%d %lld frames, current fps %d.%02d, average fps %d.%02d\n", + start.tm_hour, start.tm_min, start.tm_sec, + end.tm_hour, end.tm_min, end.tm_sec, + session, session->priority, map->description, session->is_encoder ? "encoder" : "decoder", + p->width, p->height, frame_count, rtfps / 100, rtfps % 100, avgfps / 100, avgfps % 100); + + mvx_log_perf.rtfps_num++; + + mutex_unlock(&mvx_log_perf.mutex); + } +} + +static int mvx_session_calculate_realtime_fps(struct mvx_session *session, + int ts_index, uint64_t frame_count) +{ + int i, curr; + int last = ts_index; + struct timespec64 *last_ts = session->ts + last; + struct timespec64 first_ts = {last_ts->tv_sec - 1, last_ts->tv_nsec}; + struct timespec64 delta; + uint64_t delta_us; + + frame_count = min(frame_count, (uint64_t)MAX_RT_FPS_FRAMES); + curr = last; + for (i = 0; i < frame_count - 1; i++) { + curr--; + if (curr < 0) + curr = MAX_RT_FPS_FRAMES - 1; + if (timespec64_compare(&session->ts[curr], &first_ts) <= 0) + break; + } + + delta = timespec64_sub(*last_ts, session->ts[curr]); + delta_us = timespec64_to_ns(&delta) / 1000; + frame_count = i + 1; + return (int)((frame_count * 100 * 1000 * 1000) / delta_us); +} + +void mvx_session_update_realtime_fps(struct mvx_session *session) +{ + int ts_index, avgfps, rtfps; + uint64_t frame_count; + + if (!(mvx_log_perf.enabled & MVX_LOG_PERF_FPS) || !session->ts) + return; + + mutex_lock(&session->fps_mutex); + ts_index = session->ts_index; + frame_count = session->frame_count; + mutex_unlock(&session->fps_mutex); + + if (frame_count <= 
FPS_SKIP_FRAMES) + return; + + ts_index = ts_index == 0 ? MAX_RT_FPS_FRAMES - 1 : ts_index - 1; + avgfps = mvx_session_calculate_average_fps(session, ts_index, frame_count); + rtfps = mvx_session_calculate_realtime_fps(session, ts_index, frame_count); + mvx_session_construct_realtime_fps_msg(session, + avgfps, rtfps, frame_count, session->start.tv_sec, session->ts[ts_index].tv_sec); +} + +void mvx_session_update_buffer_count(struct mvx_session *session, + enum mvx_direction dir) +{ + struct mvx_session_port *port = &session->port[dir]; + int i; + uint32_t frame_size = 0; + for (i = 0; i < port->nplanes; ++i) + frame_size += port->size[i]; + port->buffer_max = clamp_t(uint32_t, port->rest_frame_map_size / frame_size, + 1, VIDEO_MAX_FRAME); + port->buffer_min = min(port->buffer_min, port->buffer_max); +} + +int mvx_session_switch_out(struct mvx_session *session) +{ + return fw_switch_out(session); +} + +int mvx_session_set_enc_lambda_scale(struct mvx_session *session, + struct mvx_lambda_scale *lambda_scale) +{ + if (session->error != 0) + return session->error; + + if (is_fw_loaded(session) != false) + return -EBUSY; + + memcpy(&session->lambda_scale, lambda_scale, sizeof(struct mvx_lambda_scale)); + + return 0; +} + +int mvx_session_set_enc_intra_ipenalty_angular(struct mvx_session *session, int val) +{ + if (session->error != 0) + return session->error; + + if (is_fw_loaded(session) != false) + return -EBUSY; + + session->intra_ipenalty_angular = val; + + return 0; +} + +int mvx_session_set_enc_intra_ipenalty_planar(struct mvx_session *session, int val) +{ + if (session->error != 0) + return session->error; + + if (is_fw_loaded(session) != false) + return -EBUSY; + + session->intra_ipenalty_planar = val; + + return 0; +} + +int mvx_session_set_enc_intra_ipenalty_dc(struct mvx_session *session, int val) +{ + if (session->error != 0) + return session->error; + + if (is_fw_loaded(session) != false) + return -EBUSY; + + session->intra_ipenalty_dc = val; + + return 0; 
+} + +int mvx_session_set_enc_inter_ipenalty_angular(struct mvx_session *session, int val) +{ + if (session->error != 0) + return session->error; + + if (is_fw_loaded(session) != false) + return -EBUSY; + + session->inter_ipenalty_angular = val; + + return 0; +} + +int mvx_session_set_enc_inter_ipenalty_planar(struct mvx_session *session, int val) +{ + if (session->error != 0) + return session->error; + + if (is_fw_loaded(session) != false) + return -EBUSY; + + session->inter_ipenalty_planar = val; + + return 0; +} + +int mvx_session_set_enc_inter_ipenalty_dc(struct mvx_session *session, int val) +{ + if (session->error != 0) + return session->error; + + if (is_fw_loaded(session) != false) + return -EBUSY; + + session->inter_ipenalty_dc = val; + + return 0; +} diff --git a/drivers/media/platform/cix/cix_vpu/if/mvx_session.h b/drivers/media/platform/cix/cix_vpu/if/mvx_session.h new file mode 100755 index 000000000000..ad02369a17d6 --- /dev/null +++ b/drivers/media/platform/cix/cix_vpu/if/mvx_session.h @@ -0,0 +1,1496 @@ +/* + * The confidential and proprietary information contained in this file may + * only be used by a person authorised under and to the extent permitted + * by a subsisting licensing agreement from Arm Technology (China) Co., Ltd. + * + * (C) COPYRIGHT 2021-2021 Arm Technology (China) Co., Ltd. + * ALL RIGHTS RESERVED + * + * This entire notice must be reproduced on all copies of this file + * and copies of this file may only be made by a person if such person is + * permitted to do so under the terms of a subsisting license agreement + * from Arm Technology (China) Co., Ltd. + * + * SPDX-License-Identifier: GPL-2.0-only + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + * + */ + +#ifndef _MVX_SESSION_H_ +#define _MVX_SESSION_H_ + +/**************************************************************************** + * Includes + ****************************************************************************/ + +#include +#include +#include +#include +#include +#include "mvx_buffer.h" +#include "mvx_firmware.h" +#include "mvx_firmware_cache.h" +#include "mvx_if.h" +#include "mvx_log_group.h" + +/**************************************************************************** + * Defines + ****************************************************************************/ + +#define MVX_SESSION_LOG(severity, session, msg, ...) \ + MVX_LOG_PRINT_SESSION(&mvx_log_session_if, severity, session, \ + msg, ## __VA_ARGS__) + +#define MVX_SESSION_VERBOSE(session, msg, ...) \ + MVX_SESSION_LOG(MVX_LOG_VERBOSE, session, msg, ## __VA_ARGS__) + +#define MVX_SESSION_DEBUG(session, msg, ...) \ + MVX_SESSION_LOG(MVX_LOG_DEBUG, session, msg, ## __VA_ARGS__) + +#define MVX_SESSION_INFO(session, msg, ...) \ + MVX_SESSION_LOG(MVX_LOG_INFO, session, msg, ## __VA_ARGS__) + +#define MVX_SESSION_WARN(session, msg, ...) \ + MVX_SESSION_LOG(MVX_LOG_WARNING, session, msg, ## __VA_ARGS__) + +#define MVX_SESSION_ERR(session, msg, ...) \ + MVX_SESSION_LOG(MVX_LOG_ERROR, session, msg, ## __VA_ARGS__) + +#define AFBC_SUPERBLOCK_SHIFT 4 +#define AFBC_MIN_WIDTH_IN_SUPERBLOCKS 9 + +/* Exta input buffer number for optimum encode performance. 
*/ +#define MVX_ENCODE_EXTRA_BUFFER_NUM 1 + +/* Planar buffer number for optimum decode performance of non-afbc output. */ +#define MVX_DECODE_AV1_PLANNAR_BUFFER_NUM 7 +#define MVX_DECODE_PLANNAR_BUFFER_NUM 4 + +#define MVX_DECODE_MAX_REJECTED_BUFFER_INTERVAL 15 + +/**************************************************************************** + * Types + ****************************************************************************/ + +struct device; +struct file; +struct mvx_csched; +struct mvx_fw_cache; +struct poll_table_struct; + +/** + * enum mvx_session_event - Session events. + * @MVX_SESSION_EVENT_BUFFER: struct mvx_buffer. + * @MVX_SESSION_EVENT_PORT_CHANGED: enum mvx_direction. + * @MVX_SESSION_EVENT_COLOR_DESC: struct mvx_fw_color_desc. + * @MVX_SESSION_EVENT_ERROR: void + */ +enum mvx_session_event { + MVX_SESSION_EVENT_BUFFER, + MVX_SESSION_EVENT_PORT_CHANGED, + MVX_SESSION_EVENT_COLOR_DESC, + MVX_SESSION_EVENT_ERROR +}; + +/** + * struct mvx_session_port - Session input and output port settings. + * @format: Port format. + * @pixelformat: V4L2 pixelformat. + * @width: Width in pixels. + * @height: Height in pixels. + * @nplanes: Number for planes for current format. + * @stride: Stride per line in bytes for each plane. + * @size: Size in bytes for each plane. + * @afbc_alloc_bytes: Minimum number of bytes required for AFBC. + * @afbc_width: AFBC width in superblocks. + * @stream_on: Boolean if the port has been enabled. + * @buffer_min: Minimum number of buffers required. + * @buffer_count: Number of buffers currently queued to firmware. + * @buffer_allocated: Number of buffers allocated. + * @buffers_in_window: Number of buffers cumulative queued in current work load calculation window. + * @buffer_queue: Buffers waiting to be queued to the firmware. + * @is_flushing: Set true when port is waiting for a fw flush confirm. + * @flushed: Port has been flushed an no buffers have been queued. + * @interlaced: True if frames are interlaced. 
+ */ +struct mvx_session_port { + enum mvx_format format; + unsigned int pixelformat; + unsigned int width; + unsigned int height; + unsigned int new_width; + unsigned int new_height; + uint8_t nplanes; + unsigned int stride[MVX_BUFFER_NPLANES]; + unsigned int size[MVX_BUFFER_NPLANES]; + unsigned int afbc_alloc_bytes; + unsigned int afbc_width; + unsigned int afbc_alloc_bytes_downscaled; + unsigned int afbc_width_in_superblocks_downscaled; + bool stream_on; + unsigned int buffer_min; + unsigned int buffer_max; + unsigned int buffer_count; + unsigned int buffer_allocated; + unsigned int buffers_in_window; + unsigned int rest_frame_map_size; + struct list_head buffer_queue; + struct list_head buffer_done_queue; + bool is_flushing; + bool flushed; + bool interlaced; + bool last_interlaced_from_sps; + unsigned int scaling_shift; + struct mvx_roi_config roi_config_queue[MVX_ROI_QP_NUMS]; + struct mvx_chr_cfg chr_cfg_queue[MVX_ROI_QP_NUMS]; + struct mvx_enc_stats enc_stats_queue[MVX_ROI_QP_NUMS]; + struct mvx_buffer_param_qp qp_queue[MVX_ROI_QP_NUMS]; + struct mvx_osd_config osd_cfg_queue[MVX_ROI_QP_NUMS]; + unsigned int roi_config_num; + unsigned int qp_num; + unsigned int chr_cfg_num; + unsigned int enc_stats_num; + unsigned int osd_cfg_num; + unsigned int display_size[MVX_BUFFER_NPLANES]; + enum mvx_format display_size_format; + unsigned int afbc_crop_left; + unsigned int afbc_crop_top; + bool in_port_changing; + bool hold_off_buffer_done; + bool received_seq_param; + bool pending_source_change_event; + unsigned int dump_count; + enum mvx_format old_format; + unsigned int old_pixelformat; + bool buffer_rejected_flag; + int frames_since_last_buffer_rejected; + int last_buffer_width; + int last_buffer_height; +}; + +/** + * struct mvx_session_qp - QP settings. + * @i_frame: QP for I frame. + * @p_frame: QP for P frame. + * @b_frame: QP for B frame. + * @min: Minimum QP value. + * @max: Maximum QP value. 
+ */ +struct mvx_session_qp { + int i_frame; + int p_frame; + int b_frame; + int min; + int max; +}; + +#pragma pack(push, 1) +/** + * struct mvx_ivf_header - IVF header data. + * @signature: DKIF signature. + * @version: IVF version. + * @length: Length of IVF header. + * @fourcc: Format fourcc. + * @width: Width. + * @height: Height. + * @frameRate: Framerate. + * @timeScale: Timescale. + * @frameCount: Total frame count. + * @padding: Padding value. + */ +struct mvx_ivf_header { + uint32_t signature; + uint16_t version; + uint16_t length; + uint32_t fourcc; + uint16_t width; + uint16_t height; + uint32_t frameRate; + uint32_t timeScale; + uint32_t frameCount; + uint32_t padding; +}; + +/** + * struct mvx_ivf_frame - IVF frame header data. + * @size: Byte size for current frame without header. + * @timestamp: Frame timestamp. + */ +struct mvx_ivf_frame { + uint32_t size; + uint64_t timestamp; +}; +#pragma pack(pop) + +struct mvx_session_format_map { + enum mvx_format format; + uint32_t flags; + uint32_t pixelformat; + const char *description; + uint32_t bitdepth; + uint32_t bpp; + enum mvx_format to10_format; + uint32_t to10_pixelformat; + enum mvx_format to8_format; + uint32_t to8_pixelformat; +}; + +/** + * struct mvx_session - Session instance. + * @dev: Pointer to device. + * @cache: Pointer to firmware cache. + * @isession: This instance is used to register the session to the + * client. + * @client_ops: Client operations. + * @csession: Client session. + * @destructor: When the isession.kref reaches zero and after the + * session + * object has been destructed, this callback routine is + * invoked + * to allow the owner of the session object to clean up any + * allocated resources. + * @event: Event callback routine. + * @mutex: Mutex protecting the session objects. + * @port: Input and output port settings. + * @mmu: MMU instance. + * @fw: Firmware instance. + * @fw_bin: Pointer to firmware binary. 
+ * @fw_event: Event handler for loading a firmware binary. + * @fw_state: Current firmware state. + * @waitq: Wait queue to signal changes to the session. + * @dentry: Debugfs directory entry for the session. + * @frame_rate: Frame rate in Q16 format. + * @target_bitrate: Bitrate. + * @rc_enabled: Defines if rate control is enabled for the session. + * @profile: Profile for encoder. + * @level: Level for encoder. + * @nalu_format: NALU format. + * @stream_escaping: Defines if stream escaping is enabled. + * @ignore_stream_headers:Defines if decoder should ignore stream headers. + * @frame_reordering: Defines if decoder should reorder frames. + * @intbuf_size: Suggested internal buffer size. + * @p_frames: Number of P-frames for encoder. + * @b_frames: Number of B-frames for encoder. + * @gop_size: GOP size. + * @gop_type: GOP type. + * @cyclic_intra_refresh_mb:Intra MB refresh. + * @constr_ipred: Constrained intra prediction. + * @entropy_sync: Enabled entropy synchronization. + * @temporal_mvp: Enable temporal motion vector prediction. + * @tile_rows: Tile size. + * @tile_cols: Tile size. + * @min_luma_cb_size: Minimum luma coding block size. + * @mb_mask: MB mask. + * @entropy_mode: Entropy mode. + * @multi_slice_mode: Multi slice mode. + * @multi_slice_max_mb: Maximum number of macroblocks in a slice. + * @vp9_prob_update: Probability update method. + * @mv_h_search_range: Horizontal search range. + * @mv_v_search_range: Vertical search range. + * @bitdepth_chroma: Bitdepth for chroma. + * @bitdepth_luma: Bitdepth for luma. + * @force_chroma_format:Chroma format. + * @rgb_to_yuv: RGB to YUV conversion mode. + * @band_limit: Maximum bandwidth limit. + * @cabac_init_idc: CABAC initialization table. + * @qp: QP settings per codec. + * @resync_interval: JPEG resync interval. + * @jpeg_quality: JPEG quality level. + * @color_desc: HDR color description. + * + * There is one session for each file handle that has been opened from the + * video device. 
+ * + * There is a separate set of QP controls for every codec. There is no + * information on which codec will be used when controls are initialized with + * their default values. That's why a set of QP-settings is maintained for + * every codec. + */ +struct mvx_session { + struct device *dev; + struct mvx_fw_cache *cache; + struct mvx_if_session isession; + struct mvx_client_ops *client_ops; + struct mvx_client_session *csession; + void (*destructor)(struct mvx_session *session); + void (*event)(struct mvx_session *session, + enum mvx_session_event event, + void *arg); + struct mvx_session_port port[MVX_DIR_MAX]; + struct mvx_mmu mmu; + struct mvx_fw fw; + struct mvx_fw_bin *fw_bin; + struct mvx_fw_event fw_event; + struct completion fw_loaded; + enum mvx_fw_state fw_state; + wait_queue_head_t waitq; + struct timer_list watchdog_timer; + struct work_struct watchdog_work; + struct work_struct flush_and_queue_work; + unsigned int watchdog_count; + bool switched_in; + unsigned int setting_stride[MVX_BUFFER_NPLANES];//upstream setting stride + unsigned int idle_count; + long error; + struct dentry *dentry; + + uint32_t fps_n; + uint32_t fps_d; + uint32_t last_fps; + struct timespec64 last_timespec; + unsigned int target_bitrate; + unsigned int maximum_bitrate; + bool rc_enabled; + int rc_type; + enum mvx_profile profile[MVX_FORMAT_BITSTREAM_LAST + 1]; + enum mvx_level level[MVX_FORMAT_BITSTREAM_LAST + 1]; + enum mvx_tier tier[MVX_FORMAT_BITSTREAM_LAST + 1]; + enum mvx_nalu_format nalu_format; + enum mvx_tristate stream_escaping; + enum mvx_tristate ignore_stream_headers; + enum mvx_tristate frame_reordering; + int64_t intbuf_size; + int p_frames; + int b_frames; + int gop_size; + enum mvx_gop_type gop_type; + int cyclic_intra_refresh_mb; + enum mvx_tristate constr_ipred; + enum mvx_tristate entropy_sync; + enum mvx_tristate temporal_mvp; + int tile_rows; + int tile_cols; + int min_luma_cb_size; + int mb_mask; + enum mvx_entropy_mode entropy_mode; + enum 
mvx_multi_slice_mode multi_slice_mode; + int multi_slice_max_mb; + enum mvx_vp9_prob_update vp9_prob_update; + int mv_h_search_range; + int mv_v_search_range; + int bitdepth_chroma; + int bitdepth_luma; + int coded_chroma_format; + int force_chroma_format; + enum mvx_rgb_to_yuv_mode rgb_to_yuv; + int band_limit; + int cabac_init_idc; + struct mvx_session_qp qp[MVX_FORMAT_BITSTREAM_LAST + 1]; + int resync_interval; + int jpeg_quality; + int jpeg_quality_luma; + int jpeg_quality_chroma; + struct mvx_fw_color_desc color_desc; + unsigned int orig_width; + unsigned int orig_height; + unsigned int crop_left; + unsigned int crop_right; + unsigned int crop_top; + unsigned int crop_bottom; + struct mvx_sei_userdata sei_userdata; + unsigned int nHRDBufsize; + struct mvx_dsl_frame dsl_frame; + struct mvx_dsl_ratio dsl_ratio; + struct mvx_long_term_ref mvx_ltr; + int dsl_pos_mode; + uint32_t mini_frame_cnt; + uint32_t init_qpi; + uint32_t init_qpp; + uint32_t sao_luma; + uint32_t sao_chroma; + uint32_t qp_delta_i_p; + uint32_t ref_rb_en; + uint32_t qpmap_qp_clip_top; + uint32_t qpmap_qp_clip_bot; + uint32_t rc_qp_clip_top; + uint32_t rc_qp_clip_bot; + uint32_t max_qp_i; + uint32_t min_qp_i; + uint32_t visible_width; + uint32_t visible_height; + struct mvx_huff_table huff_table; + uint32_t rc_bit_i_mode; + uint32_t rc_bit_i_ratio; + uint32_t inter_med_buf_size; + uint32_t svct3_level1_period; + uint32_t reset_gop_pframes; + uint32_t reset_ltr_period; + uint32_t fixedqp; + uint32_t gdr_number; + uint32_t gdr_period; + uint32_t mulit_sps_pps; + uint32_t enable_visual; + uint32_t scd_enable; + uint32_t scd_percent; + uint32_t scd_threshold; + uint32_t aq_ssim_en; + uint32_t aq_neg_ratio; + uint32_t aq_pos_ratio; + uint32_t aq_qpdelta_lmt; + uint32_t aq_init_frm_avg_svar; + uint32_t adaptive_intra_block; + struct mvx_seamless_target seamless_target; + enum mvx_yuv_to_rgb_mode color_conv_mode; + enum use_cust_yuv_to_rgb_mode use_cust_rgb_to_yuv_mode; + bool use_cust_color_conv_coef; 
+ struct mvx_color_conv_coef color_conv_coef; + struct mvx_rgb2yuv_color_conv_coef rgb2yuv_color_conv_coef; + uint32_t forced_uv_value; + uint32_t dsl_interp_mode; + uint32_t disabled_features; + uint32_t dual_afbc_downscaled; + struct mvx_crop_cfg crop; + struct mvx_osd_info osd_info; + uint32_t job_frames; + uint32_t force_key_frame; + bool pending_switch_out; + bool is_encoder; + struct timespec64 *ts; + struct timespec64 start; + uint64_t frame_count; + uint32_t ts_index; + struct mutex fps_mutex; + bool enable_buffer_dump; + struct file *data_fp; + int fsf_mode; + uint32_t priority; + uint32_t intra_ipenalty_angular; + uint32_t intra_ipenalty_planar; + uint32_t intra_ipenalty_dc; + uint32_t inter_ipenalty_angular; + uint32_t inter_ipenalty_planar; + uint32_t inter_ipenalty_dc; + struct mvx_lambda_scale lambda_scale; +}; + +/**************************************************************************** + * Exported functions + ****************************************************************************/ + +/** + * mvx_session_construct - Construct the session object. + * @session: Pointer to session. + * @dev: Pointer to device. + * @client_ops: Pointer to client ops. + * @cache: Pointer to firmware cache. + * @mutex: Pointer to mutex protecting the session object. + * @destructor: Destructor that will be invoked after the session referece count + * has reached zero. The destructor may be NULL if the owner of the + * session object does not need to be notified. + * @event: Event notification from the session to the client. This function + * must not call session API which could take mvx_session mutex. + * @dsession: Debugfs directory entry for the session. + * + * Return: 0 on success, else error code. 
+ */ +int mvx_session_construct(struct mvx_session *session, + struct device *dev, + struct mvx_client_ops *client_ops, + struct mvx_fw_cache *cache, + struct mutex *mutex, + void (*destructor)(struct mvx_session *session), + void (*event)(struct mvx_session *session, + enum mvx_session_event event, + void *arg), + struct dentry *dsession, + bool is_encoder); + +/** + * mvx_session_destruct - Destruct the session object. + * @session: Pointer to session. + */ +void mvx_session_destruct(struct mvx_session *session); + +/** + * mvx_session_get - Increment the session reference count. + * @session: Pointer to session. + */ +void mvx_session_get(struct mvx_session *session); + +/** + * mvx_session_put - Decrement the session reference count. + * @session: Pointer to session. + * + * If the reference count reaches 0 the session object will be destructed. + * + * Return: 1 if session was removed, else 0. + */ +int mvx_session_put(struct mvx_session *session); + +/** + * mvx_session_ref_read - Get the session reference count. + * @session: Pointer to session. + * + * Return: session reference count. + */ +unsigned int mvx_session_ref_read(struct mvx_session *session); + +/** + * mvx_session_enum_format() - Enumerate format at offset index. + * @session: Pointer to session. + * @dir: Which direction to get formats for. + * @index: offset index of supported formats. + * + * Return: pointer to mvx_session_format_map object or NULL. + */ +struct mvx_session_format_map *mvx_session_enum_format(struct mvx_session *session, + enum mvx_direction dir, + int index); + +/** + * mvx_session_get_formats() - Get bitmask of supported formats. + * @session: Pointer to session. + * @dir: Which direction to get formats for. + * @formats: Pointer to bitmask listing supported formats. + */ +void mvx_session_get_formats(struct mvx_session *session, + enum mvx_direction dir, + uint64_t *formats); + +/** + * mvx_session_try_format() - Validate port format. + * @session: Pointer to session. 
+ * @dir: Which direction to get formats for. + * @format: MVX format. + * @width: Width. Only valid for frame formats. + * @height: Height. Only valid for frame formats. + * @nplanes: Number of planes. + * @stride: Horizontal stride in bytes for each plane. + * @size: Size in bytes for each plane. + * @interlaced: True if frames are interlaced. + * + * Return: 0 on success, else error code. + */ +int mvx_session_try_format(struct mvx_session *session, + enum mvx_direction dir, + enum mvx_format format, + unsigned int *width, + unsigned int *height, + uint8_t *nplanes, + unsigned int *stride, + unsigned int *size, + bool *interlaced); + +/** + * mvx_session_set_format() - Validate and set port format. + * @session: Pointer to session. + * @dir: Which direction to get formats for. + * @format: MVX format. + * @pixelformat: V4L2 pixelformat. + * @width: Width. Only valid for frame formats. + * @height: Height. Only valid for frame formats. + * @nplanes: Number of planes. + * @stride: Horizontal stride in bytes for each plane. + * @size: Size in bytes for each plane. + * @interlaced: True if frames are interlaced. + * + * If *nplanes is 0, then the values of stride and size should be ignored, else + * size and stride should be used when setting the format. + * + * Return: 0 on success, else error code. + */ +int mvx_session_set_format(struct mvx_session *session, + enum mvx_direction dir, + enum mvx_format format, + unsigned int pixelformat, + unsigned int *width, + unsigned int *height, + uint8_t *nplanes, + unsigned int *stride, + unsigned int *size, + bool *interlaced); + +/** + * mvx_session_qbuf() - Queue a buffer. + * @session: Pointer to session. + * @buf: Pointer to buffer. + * + * Return: 0 on success, else error code. + */ +int mvx_session_qbuf(struct mvx_session *session, + enum mvx_direction dir, + struct mvx_buffer *buf); + +/** + * mvx_session_send_eos() - Queue an empty buffer with EOS flag. + * @session: Pointer to session. 
+ * + * If firmware is loaded an empty input buffer will be queued with the EOS flag + * set. EOS will be propagated by the firmware to the output queue. + * + * If the firmware is not loaded a buffer will be dequeued from the output + * queue, cleared and returned with the EOS flag set. + * + * Return: 0 on success, else error code. + */ +int mvx_session_send_eos(struct mvx_session *session); + +/** + * mvx_session_streamon() - Enable stream on input or output port. + * @session: Pointer to session. + * @dir: Port direction. + * + * Both input and output ports must be enabled for streaming to begin. + * + * Return: 0 on success, else error code. + */ +int mvx_session_streamon(struct mvx_session *session, + enum mvx_direction dir); + +/** + * mvx_session_streamoff() - Disable stream on input or output port. + * @session: Pointer to session. + * @dir: Port direction. + * + * Return: 0 on success, else error code. + */ +int mvx_session_streamoff(struct mvx_session *session, + enum mvx_direction dir); + +/** + * mvx_session_start() - Start or resume mvx session. + * @session: Pointer to session. + * + * Return: 0 on success, else error code. + */ +int mvx_session_start(struct mvx_session *session); + +/** + * mvx_session_irq() - Handle IRQ event from the client. + * @isession: Pointer to if-session. + */ +void mvx_session_irq(struct mvx_if_session *isession); + +/** + * mvx_if_session_to_session() - Convert mvx_is_session to mvx_session. + * @session: Pointer to mvx_if_session object. + * + * Return: Pointer to mvx_session object. + */ +static inline struct mvx_session *mvx_if_session_to_session( + struct mvx_if_session *session) +{ + return container_of(session, struct mvx_session, isession); +} + +/** + * mvx_session_port_show() - Print debug information into seq-file. + * @port: Pointer to port. + * @s: Seq-file to print to. 
+ */ +void mvx_session_port_show(struct mvx_session_port *port, + struct seq_file *s); + +/* + * Functions below implement different settings for a session. + * + * Most options could be set only when the FW is in STOPPED state or not + * loaded. In this case the value will be stored in mvx_session structure + * and applied later in fw_initial_setup(). + * + * Some options support runtime modification. For them we issue a command + * to mvx_fw module if the FW is loaded. For others we return -EBUSY if the + * FW is loaded. + * + * ATTENTION. Currently there is no way to query from mvx_fw API or from + * mvx_session API if the option supports runtime configuration. + */ + +/** + * mvx_session_set_securevideo() - Enable or disable secure video. + * @session: Session. + * @securevideo: Enable or disable secure video. + * + * Return: 0 on success, else error code. + */ +int mvx_session_set_securevideo(struct mvx_session *session, + bool securevideo); + +/** + * mvx_session_set_frame_rate() - Set frame rate. + * @session: Session. + * @frame_rate_n: Frame rate numerator. + * @frame_rate_d: Frame rate denominator. + * + * This option could be set in runtime. + * + * Return: 0 in case of success, error code otherwise. + */ +int mvx_session_set_frame_rate(struct mvx_session *session, + uint32_t frame_rate_n, uint32_t frame_rate_d); + +/** + * mvx_session_set_rate_control() - Enable/disable rate controller. + * @session: Session. + * @enabled: Rate controller status. + * + * This option could be set in runtime. + * + * Return: 0 in case of success, error code otherwise. + */ +int mvx_session_set_rate_control(struct mvx_session *session, + bool enabled); + +/** + * mvx_session_set_bitrate() - Set bitrate. + * @session: Session. + * @bitrate: Bitrate in bits per second. + * + * This option could be set in runtime. + * + * Return: 0 in case of success, error code otherwise. 
+ */
+int mvx_session_set_bitrate(struct mvx_session *session,
+			    int bitrate);
+
+/**
+ * mvx_session_set_crop_left() - Set crop left.
+ * @session: Session.
+ * @left: encoder SPS crop param, left offset.
+ *
+ * Return: 0 in case of success, error code otherwise.
+ */
+
+int mvx_session_set_crop_left(struct mvx_session *session,
+			      int32_t left);
+
+/**
+ * mvx_session_set_crop_right() - Set crop right.
+ * @session: Session.
+ * @right: encoder SPS crop param, right offset.
+ *
+ * Return: 0 in case of success, error code otherwise.
+ */
+
+int mvx_session_set_crop_right(struct mvx_session *session,
+			       int32_t right);
+
+/**
+ * mvx_session_set_crop_top() - Set crop top.
+ * @session: Session.
+ * @top: encoder SPS crop param, top offset.
+ *
+ * Return: 0 in case of success, error code otherwise.
+ */
+
+int mvx_session_set_crop_top(struct mvx_session *session,
+			     int32_t top);
+
+/**
+ * mvx_session_set_crop_bottom() - Set crop bottom.
+ * @session: Session.
+ * @bottom: encoder SPS crop param, bottom offset.
+ *
+ * Return: 0 in case of success, error code otherwise.
+ */
+
+int mvx_session_set_crop_bottom(struct mvx_session *session,
+				int32_t bottom);
+/**
+ * mvx_session_set_nalu_format() - Set NALU format.
+ * @session: Session.
+ * @fmt: NALU format.
+ *
+ * Return: 0 in case of success, error code otherwise.
+ */
+int mvx_session_set_nalu_format(struct mvx_session *session,
+				enum mvx_nalu_format fmt);
+
+/**
+ * mvx_session_set_stream_escaping() - Enable/disable stream escaping
+ * @session: Session.
+ * @status: Status
+ *
+ * Return: 0 in case of success, error code otherwise.
+ */
+int mvx_session_set_stream_escaping(struct mvx_session *session,
+				    enum mvx_tristate status);
+
+/**
+ * mvx_session_set_profile() - Set profile for encoder.
+ * @session: Session.
+ * @format: Format.
+ * @profile: Encoder profile.
+ *
+ * Return: 0 in case of success, error code otherwise.
+ */ +int mvx_session_set_profile(struct mvx_session *session, + enum mvx_format format, + enum mvx_profile profile); + +/** + * mvx_session_set_level() - Set level for encoder. + * + * @session: Session. + * @format: Format. + * @level: Encoder level. + * + * Return: 0 in case of success, error code otherwise. + */ +int mvx_session_set_level(struct mvx_session *session, + enum mvx_format format, + enum mvx_level level); + +/** + * mvx_session_set_tier() - Set tier for encoder. + * + * @session: Session. + * @format: Format. + * @level: Encoder tier. + * + * Return: 0 in case of success, error code otherwise. + */ +int mvx_session_set_tier(struct mvx_session *session, + enum mvx_format format, + enum mvx_tier tier); + +/** + * mvx_session_set_ignore_stream_headers() - Enable/disable stream headers + * ignore. + * @session: Session. + * @status: Status. + * + * Return: 0 in case of success, error code otherwise. + */ +int mvx_session_set_ignore_stream_headers(struct mvx_session *session, + enum mvx_tristate status); + +/** + * mvx_session_set_frame_reordering() - Enable/disable frames reordering. + * @session: Session. + * @status: Status. + * + * Return: 0 in case of success, error code otherwise. + */ +int mvx_session_set_frame_reordering(struct mvx_session *session, + enum mvx_tristate status); + +/** + * mvx_session_set_intbuf_size() - Set internal buffer size. + * @session: Session. + * @size: Size. + * + * Return: 0 in case of success, error code otherwise. + */ +int mvx_session_set_intbuf_size(struct mvx_session *session, + int size); + +/** + * mvx_session_set_b_frame() - Set number of B-frames. + * @session: Session. + * @val: Number of B-frames. + * + * Return: 0 in case of success, error code otherwise. + */ +int mvx_session_set_b_frames(struct mvx_session *session, + int val); + +/** + * mvx_session_set_gop_size() - Set GOP size. + * @session: Session. + * @val: GOP size. + * + * Return: 0 in case of success, error code otherwise. 
+ */ +int mvx_session_set_gop_size(struct mvx_session *session, + int val); + +/** + * mvx_session_set_gop_type() - Set GOP type. + * @session: Session. + * @gop_type: GOP type. + * + * Return: 0 in case of success, error code otherwise. + */ +int mvx_session_set_gop_type(struct mvx_session *session, + enum mvx_gop_type gop_type); + +/** + * mvx_session_set_cyclic_intra_refresh_mb() - Set intra MB refresh. + * @session: Session. + * @val: Value. + * + * Return: 0 in case of success, error code otherwise. + */ +int mvx_session_set_cyclic_intra_refresh_mb(struct mvx_session *session, + int val); + +/** + * mvx_session_set_constr_ipred() - Enabled/disable constrained intra + * prediction. + * @session: Session. + * @status: Status. + * + * Return: 0 in case of success, error code otherwise. + */ +int mvx_session_set_constr_ipred(struct mvx_session *session, + enum mvx_tristate status); + +/** + * mvx_session_set_entropy_sync() - Enable/disable entropy synchronization. + * @session: Session. + * @status: Status. + * + * Return: 0 in case of success, error code otherwise. + */ +int mvx_session_set_entropy_sync(struct mvx_session *session, + enum mvx_tristate status); + +/** + * mvx_session_set_temporal_mvp() - Enable/disable temporal MVP. + * @session: Session. + * @status: Status. + * + * Return: 0 in case of success, error code otherwise. + */ +int mvx_session_set_temporal_mvp(struct mvx_session *session, + enum mvx_tristate status); + +/** + * mvx_session_set_tile_rows() - Set tile size. + * @session: Session. + * @val: Value. + * + * Return: 0 in case of success, error code otherwise. + */ +int mvx_session_set_tile_rows(struct mvx_session *session, + int val); + +/** + * mvx_session_set_tile_cols() - Set tile size. + * @session: Session. + * @val: Value. + * + * Return: 0 in case of success, error code otherwise. 
+ */ +int mvx_session_set_tile_cols(struct mvx_session *session, + int val); + +/** + * mvx_session_set_min_luma_cb_size() - Set minimum luma coding block size. + * @session: Session. + * @val: Value. + * + * Return: 0 in case of success, error code otherwise. + */ +int mvx_session_set_min_luma_cb_size(struct mvx_session *session, + int val); + +/** + * mvx_session_set_mb_mask() - Set MB mask. + * @session: Session. + * @val: Value. + * + * Return: 0 in case of success, error code otherwise. + */ +int mvx_session_set_mb_mask(struct mvx_session *session, + int val); + +/** + * mvx_session_set_entropy_mode() - Set entropy mode. + * @session: Session. + * @mode: Entropy mode. + * + * Return: 0 in case of success, error code otherwise. + */ +int mvx_session_set_entropy_mode(struct mvx_session *session, + enum mvx_entropy_mode mode); + +/** + * mvx_session_set_multi_slice_mode() - Set multi slice mode. + * @session: Session. + * @mode: Mode. + * + * Return: 0 in case of success, error code otherwise. + */ +int mvx_session_set_multi_slice_mode(struct mvx_session *session, + enum mvx_multi_slice_mode mode); + +/** + * mvx_session_set_multi_slice_max_mb() - Set suggested number of CTUs in a + * slice. + * @session: Session. + * @val: Value. + * + * Return: 0 in case of success, error code otherwise. + */ +int mvx_session_set_multi_slice_max_mb(struct mvx_session *session, + int val); + +/** + * mvx_session_set_vp9_prob_update() - Set probability update mode. + * @session: Session. + * @mode: Mode. + * + * Return: 0 in case of success, error code otherwise. + */ +int mvx_session_set_vp9_prob_update(struct mvx_session *session, + enum mvx_vp9_prob_update mode); + +/** + * mvx_session_set_mv_h_search_range() - Set horizontal search range for motion + * vectors. + * @session: Session. + * @val: Value. + * + * Return: 0 in case of success, error code otherwise. 
+ */ +int mvx_session_set_mv_h_search_range(struct mvx_session *session, + int val); + +/** + * mvx_session_set_mv_v_search_range() - Set vertical search range for motion + * vectors. + * @session: Session. + * @val: Value. + * + * Return: 0 in case of success, error code otherwise. + */ +int mvx_session_set_mv_v_search_range(struct mvx_session *session, + int val); + +/** + * mvx_session_set_bitdepth_chroma() - Set bitdepth. + * @session: Session. + * @val: Value. + * + * Return: 0 in case of success, error code otherwise. + */ +int mvx_session_set_bitdepth_chroma(struct mvx_session *session, + int val); + +/** + * mvx_session_set_bitdepth_luma() - Set bitdepth. + * @session: Session. + * @val: Value. + * + * Return: 0 in case of success, error code otherwise. + */ +int mvx_session_set_bitdepth_luma(struct mvx_session *session, + int val); + +/** + * mvx_session_set_force_chroma_format() - Set chroma format. + * @session: Session. + * @fmt: chroma format. + * + * Return: 0 in case of success, error code otherwise. + */ +int mvx_session_set_force_chroma_format(struct mvx_session *session, + int fmt); + +/** + * mvx_session_set_rgb_to_yuv_mode() - Set RGB to YUV conversion mode. + * @session: Session. + * @mode: Mode. + * + * Return: 0 in case of success, error code otherwise. + */ +int mvx_session_set_rgb_to_yuv_mode(struct mvx_session *session, + enum mvx_rgb_to_yuv_mode mode); + +/** + * mvx_session_set_band_limit() - Set maximum bandwidth limit. + * @session: Session. + * @val: Value. + * + * Return: 0 in case of success, error code otherwise. + */ +int mvx_session_set_band_limit(struct mvx_session *session, + int val); + +/** + * mvx_session_set_cabac_init_idc() - Set CABAC initialization table. + * @session: Session. + * @val: Value. + * + * Return: 0 in case of success, error code otherwise. + */ +int mvx_session_set_cabac_init_idc(struct mvx_session *session, + int val); + +/** + * mvx_session_set_i_frame_qp() - Set QP for I frames. + * @session: Session. 
+ * @format: Format. + * @qp: Quantization parameter. + * + * This option could be set in runtime. + * + * Return: 0 in case of success, error code otherwise. + */ +int mvx_session_set_i_frame_qp(struct mvx_session *session, + enum mvx_format format, + int qp); + +/** + * mvx_session_set_p_frame_qp() - Set QP for P frames. + * @session: Session. + * @format: Format. + * @qp: Quantization parameter. + * + * This option could be set in runtime. + * + * Return: 0 in case of success, error code otherwise. + */ +int mvx_session_set_p_frame_qp(struct mvx_session *session, + enum mvx_format format, + int qp); + +/** + * mvx_session_set_b_frame_qp() - Set QP for B frames. + * @session: Session. + * @format: Format. + * @qp: Quantization parameter. + * + * This option could be set in runtime. + * + * Return: 0 in case of success, error code otherwise. + */ +int mvx_session_set_b_frame_qp(struct mvx_session *session, + enum mvx_format format, + int qp); + +/** + * mvx_session_set_min_qp() - Set minimum value of QP range. + * @session: Session. + * @format: Format. + * @qp: Quantization parameter. + * + * This option could be set in runtime. + * + * Return: 0 in case of success, error code otherwise. + */ +int mvx_session_set_min_qp(struct mvx_session *session, + enum mvx_format format, + int qp); + +/** + * mvx_session_set_max_qp() - Set maximum value of QP range. + * @session: Session. + * @format: Format. + * @qp: Quantization parameter. + * + * This option could be set in runtime. + * + * Return: 0 in case of success, error code otherwise. + */ +int mvx_session_set_max_qp(struct mvx_session *session, + enum mvx_format format, + int qp); + +/** + * mvx_session_set_resync_interval() - Set resync interval for JPEG encoder. + * @session: Session. + * @val: Resync interval. + * + * Return: 0 in case of success, error code otherwise. + */ +int mvx_session_set_resync_interval(struct mvx_session *session, + int val); + +/** + * mvx_session_set_jpeg_quality() - Set JPEG quality. 
+ * @session: Session. + * @val: Quality level (1-100). + * + * Return: 0 in case of success, error otherwise. + */ +int mvx_session_set_jpeg_quality(struct mvx_session *session, + int val); + +/** + * mvx_session_set_jpeg_quality_luma() - Set JPEG quality. + * @session: Session. + * @val: Quality level (1-100). + * + * Return: 0 in case of success, error otherwise. + */ + +int mvx_session_set_jpeg_quality_luma(struct mvx_session *session, + int val); +/** + * mvx_session_set_jpeg_quality_chroma() - Set JPEG quality. + * @session: Session. + * @val: Quality level (1-100). + * + * Return: 0 in case of success, error otherwise. + */ + +int mvx_session_set_jpeg_quality_chroma(struct mvx_session *session, + int val); + +/** + * mvx_session_get_color_desc() - Get color description. + * @session: Pointer to session. + * @color_desc: Color description. + * + * Return: 0 on success, else error code. + */ +int mvx_session_get_color_desc(struct mvx_session *session, + struct mvx_fw_color_desc *color_desc); + +/** + * mvx_session_set_color_desc() - Set color description. + * @session: Pointer to session. + * @color_desc: Color description. + * + * Return: 0 on success, else error code. + */ +int mvx_session_set_color_desc(struct mvx_session *session, + struct mvx_fw_color_desc *color_desc); + +/** + * mvx_session_set_roi_regions() - Set ROI regions. + * @session: Pointer to session. + * @roi: ROI regions. + * + * Return: 0 on success, else error code. + */ +int mvx_session_set_roi_regions(struct mvx_session *session, + struct mvx_roi_config *roi); + +/** + * mvx_session_set_qp_epr() - Set qp for epr config. + * @session: Pointer to session. + * @qp: qp. + * + * Return: 0 on success, else error code. + */ + +int mvx_session_set_qp_epr(struct mvx_session *session, + struct mvx_buffer_param_qp *qp); + +/** + * mvx_session_set_sei_userdata() - Set SEI userdata. + * @session: Pointer to session. + * @userdata: SEI userdata. + * + * Return: 0 on success, else error code. 
+ */
+
+int mvx_session_set_sei_userdata(struct mvx_session *session,
+				 struct mvx_sei_userdata *userdata);
+
+/**
+ * mvx_session_set_hrd_buffer_size() - Set hrd buffer size.
+ * @session: Pointer to session.
+ * @size: hrd buffer size.
+ *
+ * Return: 0 on success, else error code.
+ */
+
+int mvx_session_set_hrd_buffer_size(struct mvx_session *session,
+				    int size);
+
+/**
+ * mvx_session_set_dsl_frame() - Set DownScale dst frame.
+ * @session: Pointer to session.
+ * @dsl: DownScale dst frame.
+ *
+ * Return: 0 on success, else error code.
+ */
+
+int mvx_session_set_dsl_frame(struct mvx_session *session,
+			      struct mvx_dsl_frame *dsl);
+
+/**
+ * mvx_session_set_dsl_ratio() - Set DownScale ratio.
+ * @session: Pointer to session.
+ * @dsl: DownScale ratio.
+ *
+ * Return: 0 on success, else error code.
+ */
+
+int mvx_session_set_dsl_ratio(struct mvx_session *session,
+			      struct mvx_dsl_ratio *dsl);
+
+/**
+ * mvx_session_set_long_term_ref() - Set long term ref.
+ * @session: Pointer to session.
+ * @ltr: long term ref.
+ *
+ * Return: 0 on success, else error code.
+ */
+
+int mvx_session_set_long_term_ref(struct mvx_session *session,
+				  struct mvx_long_term_ref *ltr);
+
+/**
+ * mvx_session_set_dsl_mode() - Set DownScale mode.
+ * @session: Pointer to session.
+ * @mode: DownScale mode, only enable on high precision mode.
+ *
+ * Return: 0 on success, else error code.
+ */
+
+int mvx_session_set_dsl_mode(struct mvx_session *session,
+			     int *mode);
+/**
+ * mvx_session_set_mini_frame_cnt() - Set mini frame buffer count.
+ * @session: Pointer to session.
+ * @cnt: Mini frame buffer count.
+ *
+ * Return: 0 on success, else error code.
+ */
+
+int mvx_session_set_mini_frame_cnt(struct mvx_session *session,
+				   int *cnt);
+
+/**
+ * mvx_session_set_stats_mode() - Set Stats mode.
+ * @session: Pointer to session.
+ * @mode: Stats mode.
+ *
+ * Return: 0 on success, else error code.
+ */ + +int mvx_session_set_stats_mode(struct mvx_session *session, + struct mvx_enc_stats *stats); +int mvx_session_set_chr_cfg(struct mvx_session *session, + struct mvx_chr_cfg *chr_cfg); +int mvx_session_set_init_qp_i(struct mvx_session *session, + int val); +int mvx_session_set_init_qp_p(struct mvx_session *session, + int val); +int mvx_session_set_sao_luma(struct mvx_session *session, + int val); +int mvx_session_set_sao_chroma(struct mvx_session *session, + int val); +int mvx_session_set_delta_I_P(struct mvx_session *session, + int val); +int mvx_session_set_ref_rb_eb(struct mvx_session *session, + int val); +int mvx_session_set_rc_clip_top(struct mvx_session *session, + int val); +int mvx_session_set_rc_clip_bot(struct mvx_session *session, + int val); +int mvx_session_set_qpmap_clip_top(struct mvx_session *session, + int val); +int mvx_session_set_qpmap_clip_bot(struct mvx_session *session, + int val); +int mvx_session_set_max_qp_i(struct mvx_session *session, + int val); +int mvx_session_set_min_qp_i(struct mvx_session *session, + int val); +int mvx_session_set_fixedqp(struct mvx_session *session, + int val); +int mvx_session_set_visible_width(struct mvx_session *session, + int val); +int mvx_session_set_visible_height(struct mvx_session *session, + int val); +int mvx_session_set_huff_table (struct mvx_session *session, + struct mvx_huff_table *table); +int mvx_session_set_rc_bit_i_mode(struct mvx_session *session, + int val); +int mvx_session_set_rc_bit_i_ratio(struct mvx_session *session, + int val); +int mvx_session_set_inter_med_buf_size(struct mvx_session *session, + int val); +int mvx_session_set_svct3_level1_period(struct mvx_session *session, + int val); +int mvx_session_set_gop_reset_pframes(struct mvx_session *session, + int val); +int mvx_session_set_ltr_reset_period(struct mvx_session *session, + int val); +int mvx_session_set_gdr_number(struct mvx_session *session, + int val); +int mvx_session_set_gdr_period(struct mvx_session *session, + int 
val); +int mvx_session_set_mulit_sps_pps(struct mvx_session *session, + int val); +int mvx_session_set_enable_visual(struct mvx_session *session, + int val); +int mvx_session_set_scd_enable(struct mvx_session *session, + int val); +int mvx_session_set_scd_percent(struct mvx_session *session, + int val); +int mvx_session_set_scd_threshold(struct mvx_session *session, + int val); +int mvx_session_set_aq_ssim_en(struct mvx_session *session, + int val); +int mvx_session_set_aq_neg_ratio(struct mvx_session *session, + int val); +int mvx_session_set_aq_pos_ratio(struct mvx_session *session, + int val); +int mvx_session_set_aq_qpdelta_lmt(struct mvx_session *session, + int val); +int mvx_session_set_aq_init_frm_avg_svar(struct mvx_session *session, + int val); +int mvx_session_set_adaptive_intra_block(struct mvx_session *session, + int val); +int mvx_session_set_seamless_target(struct mvx_session *session, + struct mvx_seamless_target * seamless); +int mvx_session_set_color_conversion(struct mvx_session *session, + enum mvx_yuv_to_rgb_mode mode); +int mvx_session_set_color_conversion_ceof(struct mvx_session *session, + struct mvx_color_conv_coef *conv_coef); +int mvx_session_set_rgb_conv_yuv_coef(struct mvx_session *session, + struct mvx_rgb2yuv_color_conv_coef *conv_coef); +int mvx_session_set_forced_uv_value(struct mvx_session *session, + int val); +int mvx_session_set_dsl_interpolation_mode(struct mvx_session *session, + int mode); +int mvx_session_set_disabled_features(struct mvx_session *session, + int val); +int mvx_session_set_crop(struct mvx_session *session, + struct mvx_crop_cfg *crop); +int mvx_session_set_osd_config(struct mvx_session *session, + struct mvx_osd_config *osd); +int mvx_session_set_osd_info(struct mvx_session *session, + struct mvx_osd_info *osd_info); +int mvx_session_set_dual_afbc_downscaled(struct mvx_session *session, + int val); +int mvx_session_set_job_frames(struct mvx_session *session, + int val); +int 
mvx_session_set_force_key_frame(struct mvx_session *session, + uint32_t val); +int mvx_session_update_input_buffer_min(struct mvx_session *session); +int mvx_session_set_bitrate_mode(struct mvx_session *session, + int mode); +int mvx_session_set_max_bitrate(struct mvx_session *session, + int bitrate); +int mvx_session_set_fsf_mode(struct mvx_session *session, int val); +int mvx_session_set_priority(struct mvx_session *session, int val); +int mvx_session_update_p_frames(struct mvx_session *session); +struct mvx_session_format_map *mvx_session_find_format(uint32_t pixelformat); +struct mvx_session_format_map *mvx_session_get_compressed_format(struct mvx_session *session); +uint32_t mvx_get_format_bpp(enum mvx_format format); +void mvx_session_enum_framesizes(struct mvx_session *session, + bool is_encoder, enum mvx_format format, + uint32_t *min_width, uint32_t *min_height, + uint32_t *max_width, uint32_t *max_height, + uint32_t *step_width, uint32_t *step_height); +void mvx_session_cancel_work(struct mvx_session *session); +void mvx_session_update_realtime_fps(struct mvx_session *session); +int mvx_session_set_enc_lambda_scale(struct mvx_session *session, + struct mvx_lambda_scale *lambda_scale); +int mvx_session_set_enc_intra_ipenalty_angular(struct mvx_session *session, int val); +int mvx_session_set_enc_intra_ipenalty_planar(struct mvx_session *session, int val); +int mvx_session_set_enc_intra_ipenalty_dc(struct mvx_session *session, int val); +int mvx_session_set_enc_inter_ipenalty_angular(struct mvx_session *session, int val); +int mvx_session_set_enc_inter_ipenalty_planar(struct mvx_session *session, int val); +int mvx_session_set_enc_inter_ipenalty_dc(struct mvx_session *session, int val); +/** + * mvx_session_update_buffer_count() - Update session port buffer max and min. + * @session: Pointer to session. + * @dir: Session port direction. 
+ */ +void mvx_session_update_buffer_count(struct mvx_session *session, + enum mvx_direction dir); +int mvx_session_switch_out(struct mvx_session *session); +#endif /* _MVX_SESSION_H_ */ diff --git a/drivers/media/platform/cix/cix_vpu/if/v4l2/mvx_ext_if.h b/drivers/media/platform/cix/cix_vpu/if/v4l2/mvx_ext_if.h new file mode 100755 index 000000000000..7966a17d1a6e --- /dev/null +++ b/drivers/media/platform/cix/cix_vpu/if/v4l2/mvx_ext_if.h @@ -0,0 +1,93 @@ +/* + * The confidential and proprietary information contained in this file may + * only be used by a person authorised under and to the extent permitted + * by a subsisting licensing agreement from Arm Technology (China) Co., Ltd. + * + * (C) COPYRIGHT 2021-2021 Arm Technology (China) Co., Ltd. + * ALL RIGHTS RESERVED + * + * This entire notice must be reproduced on all copies of this file + * and copies of this file may only be made by a person if such person is + * permitted to do so under the terms of a subsisting license agreement + * from Arm Technology (China) Co., Ltd. + * + * SPDX-License-Identifier: GPL-2.0-only + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ * + */ + +#ifndef _MVX_EXT_H_ +#define _MVX_EXT_H_ + +/**************************************************************************** + * Includes + ****************************************************************************/ + +#include +#include +#include + +/**************************************************************************** + * Types + ****************************************************************************/ + +struct device; +struct mvx_csched; +struct mvx_fw_cache; + +enum mvx_ext_if_type { + MVX_EXT_IF_DECODER = 0, + MVX_EXT_IF_ENCODER, + MVX_EXT_IF_COUNT +}; + +struct mvx_ext_if { + struct device *dev; + struct mvx_fw_cache *cache; + struct mvx_client_ops *client_ops; + struct video_device vdev; + struct v4l2_device *v4l2_dev; + struct dentry *dsessions; + bool is_encoder; +}; + +/**************************************************************************** + * Exported functions + ****************************************************************************/ + +/** + * mvx_ext_if_construct() - Construct the external interface object. + * @ext: Pointer to interface object array. + * @dev: Pointer to device struct. + * @cache: Pointer to firmware cache. + * @client_ops: Pointer to client client_ops. + * @parent: Parent debugfs directory entry. + * + * Return: 0 on success, else error code. + */ +int mvx_ext_if_construct(struct mvx_ext_if ext[MVX_EXT_IF_COUNT], + struct device *dev, + struct mvx_fw_cache *cache, + struct mvx_client_ops *client_ops, + struct dentry *parent); + +/** + * mvx_ext_if_destruct() - Destroy external interface instance. + * @ext: Pointer to interface object array. 
+ */ +void mvx_ext_if_destruct(struct mvx_ext_if ext[MVX_EXT_IF_COUNT]); + +#endif /* _MVX_EXT_H_ */ diff --git a/drivers/media/platform/cix/cix_vpu/if/v4l2/mvx_ext_v4l2.c b/drivers/media/platform/cix/cix_vpu/if/v4l2/mvx_ext_v4l2.c new file mode 100755 index 000000000000..26ece1126955 --- /dev/null +++ b/drivers/media/platform/cix/cix_vpu/if/v4l2/mvx_ext_v4l2.c @@ -0,0 +1,237 @@ +/* + * The confidential and proprietary information contained in this file may + * only be used by a person authorised under and to the extent permitted + * by a subsisting licensing agreement from Arm Technology (China) Co., Ltd. + * + * (C) COPYRIGHT 2021-2021 Arm Technology (China) Co., Ltd. + * ALL RIGHTS RESERVED + * + * This entire notice must be reproduced on all copies of this file + * and copies of this file may only be made by a person if such person is + * permitted to do so under the terms of a subsisting license agreement + * from Arm Technology (China) Co., Ltd. + * + * SPDX-License-Identifier: GPL-2.0-only + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ * + */ + +/**************************************************************************** + * Includes + ****************************************************************************/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "mvx_bitops.h" +#include "mvx_buffer.h" +#include "mvx_ext_if.h" +#include "mvx_firmware.h" +#include "mvx_if.h" +#include "mvx_mmu.h" +#include "mvx_session.h" + +#include "mvx_v4l2_buffer.h" +#include "mvx_v4l2_session.h" +#include "mvx_v4l2_vidioc.h" +#include "mvx_v4l2_fops.h" +#include "mvx_log_group.h" + +static const struct v4l2_file_operations mvx_v4l2_fops = { + .owner = THIS_MODULE, + .open = mvx_v4l2_open, + .release = mvx_v4l2_release, + .poll = mvx_v4l2_poll, + .unlocked_ioctl = video_ioctl2, + .mmap = mvx_v4l2_mmap +}; + +static const struct v4l2_ioctl_ops mvx_v4l2_ioctl_ops = { + .vidioc_querycap = mvx_v4l2_vidioc_querycap, + .vidioc_enum_fmt_vid_cap = mvx_v4l2_vidioc_enum_fmt_vid_cap, + .vidioc_enum_fmt_vid_out = mvx_v4l2_vidioc_enum_fmt_vid_out, + .vidioc_enum_framesizes = mvx_v4l2_vidioc_enum_framesizes, + .vidioc_g_fmt_vid_cap = mvx_v4l2_vidioc_g_fmt_vid_cap, + .vidioc_g_fmt_vid_cap_mplane = mvx_v4l2_vidioc_g_fmt_vid_cap, + .vidioc_g_fmt_vid_out = mvx_v4l2_vidioc_g_fmt_vid_out, + .vidioc_g_fmt_vid_out_mplane = mvx_v4l2_vidioc_g_fmt_vid_out, + .vidioc_s_fmt_vid_cap = mvx_v4l2_vidioc_s_fmt_vid_cap, + .vidioc_s_fmt_vid_cap_mplane = mvx_v4l2_vidioc_s_fmt_vid_cap, + .vidioc_s_fmt_vid_out = mvx_v4l2_vidioc_s_fmt_vid_out, + .vidioc_s_fmt_vid_out_mplane = mvx_v4l2_vidioc_s_fmt_vid_out, + .vidioc_try_fmt_vid_cap = mvx_v4l2_vidioc_try_fmt_vid_cap, + .vidioc_try_fmt_vid_cap_mplane = mvx_v4l2_vidioc_try_fmt_vid_cap, + .vidioc_try_fmt_vid_out = mvx_v4l2_vidioc_try_fmt_vid_out, + .vidioc_try_fmt_vid_out_mplane = mvx_v4l2_vidioc_try_fmt_vid_out, + .vidioc_g_selection = mvx_v4l2_vidioc_g_selection, + .vidioc_s_selection = mvx_v4l2_vidioc_s_selection, + .vidioc_g_parm = 
mvx_v4l2_vidioc_g_parm, + .vidioc_s_parm = mvx_v4l2_vidioc_s_parm, + .vidioc_streamon = mvx_v4l2_vidioc_streamon, + .vidioc_streamoff = mvx_v4l2_vidioc_streamoff, + .vidioc_encoder_cmd = mvx_v4l2_vidioc_encoder_cmd, + .vidioc_try_encoder_cmd = mvx_v4l2_vidioc_try_encoder_cmd, + .vidioc_decoder_cmd = mvx_v4l2_vidioc_decoder_cmd, + .vidioc_try_decoder_cmd = mvx_v4l2_vidioc_try_decoder_cmd, + .vidioc_reqbufs = mvx_v4l2_vidioc_reqbufs, + .vidioc_create_bufs = mvx_v4l2_vidioc_create_bufs, + .vidioc_querybuf = mvx_v4l2_vidioc_querybuf, + .vidioc_qbuf = mvx_v4l2_vidioc_qbuf, + .vidioc_dqbuf = mvx_v4l2_vidioc_dqbuf, + .vidioc_expbuf = mvx_v4l2_vidioc_expbuf, + .vidioc_subscribe_event = mvx_v4l2_vidioc_subscribe_event, + .vidioc_unsubscribe_event = v4l2_event_unsubscribe, + .vidioc_default = mvx_v4l2_vidioc_default +}; + +int mvx_ext_if_register_device(struct mvx_ext_if *ext, + const char *name, + bool is_encoder) +{ + ext->is_encoder = is_encoder; + + /* Video device. */ + ext->vdev.fops = &mvx_v4l2_fops; + ext->vdev.ioctl_ops = &mvx_v4l2_ioctl_ops; + ext->vdev.release = video_device_release_empty; + ext->vdev.vfl_dir = VFL_DIR_M2M; + ext->vdev.v4l2_dev = ext->v4l2_dev; + ext->vdev.device_caps = + V4L2_CAP_VIDEO_M2M | + V4L2_CAP_VIDEO_M2M_MPLANE | + V4L2_CAP_EXT_PIX_FORMAT | + V4L2_CAP_STREAMING; + strncpy(ext->vdev.name, name, sizeof(ext->vdev.name)); + + video_set_drvdata(&ext->vdev, ext); + + return video_register_device(&ext->vdev, VFL_TYPE_VIDEO, -1); +} +/**************************************************************************** + * Exported functions and variables + ****************************************************************************/ + +int mvx_ext_if_construct(struct mvx_ext_if ext[MVX_EXT_IF_COUNT], + struct device *dev, + struct mvx_fw_cache *cache, + struct mvx_client_ops *client_ops, + struct dentry *parent) +{ + int ret; + struct v4l2_device *v4l2_dev = NULL; + struct dentry *dsessions = NULL; + + ext[MVX_EXT_IF_DECODER].dev = dev; + 
ext[MVX_EXT_IF_DECODER].cache = cache; + ext[MVX_EXT_IF_DECODER].client_ops = client_ops; + ext[MVX_EXT_IF_ENCODER].dev = dev; + ext[MVX_EXT_IF_ENCODER].cache = cache; + ext[MVX_EXT_IF_ENCODER].client_ops = client_ops; + + if (IS_ENABLED(CONFIG_DEBUG_FS)) { + dsessions = debugfs_create_dir("session", parent); + if (IS_ERR_OR_NULL(dsessions)) + return -ENOMEM; + } + + ext[MVX_EXT_IF_DECODER].dsessions = dsessions; + ext[MVX_EXT_IF_ENCODER].dsessions = dsessions; + + v4l2_dev = devm_kzalloc(dev, sizeof(*v4l2_dev), GFP_KERNEL); + if (v4l2_dev == NULL) { + ret = -ENOMEM; + goto delete_dentry; + } + + ret = v4l2_device_register(dev, v4l2_dev); + if (ret != 0) { + MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_ERROR, + "Failed to register V4L2 device. ret=%d.", ret); + goto unregister_device; + } + + ext[MVX_EXT_IF_DECODER].v4l2_dev = v4l2_dev; + ext[MVX_EXT_IF_ENCODER].v4l2_dev = v4l2_dev; + + ret = mvx_ext_if_register_device(&ext[MVX_EXT_IF_DECODER], + "mvxdec", 0); + if (ret != 0) { + MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_ERROR, + "Failed to register video decoder device. ret=%d.", + ret); + goto unregister_device; + } + MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_INFO, + "Decoder registered as /dev/video%d\n", + ext[MVX_EXT_IF_DECODER].vdev.num); + + ret = mvx_ext_if_register_device(&ext[MVX_EXT_IF_ENCODER], + "mvxenc", 1); + if (ret != 0) { + MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_ERROR, + "Failed to register video encoder device. 
ret=%d.",
+			      ret);
+		goto unregister_dec_device;
+	}
+	MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_INFO,
+		      "Encoder registered as /dev/video%d\n",
+		      ext[MVX_EXT_IF_ENCODER].vdev.num);
+
+	return 0;
+
+unregister_dec_device:
+	video_unregister_device(&ext[MVX_EXT_IF_DECODER].vdev);
+
+unregister_device:
+	if (!IS_ERR_OR_NULL(v4l2_dev)) {
+		v4l2_device_unregister(v4l2_dev);
+		devm_kfree(dev, v4l2_dev);
+	}
+
+delete_dentry:
+	if (IS_ENABLED(CONFIG_DEBUG_FS))
+		debugfs_remove_recursive(dsessions);
+
+	return ret;
+}
+
+void mvx_ext_if_destruct(struct mvx_ext_if ext[MVX_EXT_IF_COUNT])
+{
+	struct v4l2_device *v4l2_dev = ext[MVX_EXT_IF_DECODER].v4l2_dev;
+	struct dentry *dsessions = ext[MVX_EXT_IF_DECODER].dsessions;
+	struct device *dev = ext[MVX_EXT_IF_DECODER].dev;
+
+	video_unregister_device(&ext[MVX_EXT_IF_ENCODER].vdev);
+	video_unregister_device(&ext[MVX_EXT_IF_DECODER].vdev);
+	if (!IS_ERR_OR_NULL(v4l2_dev)) {
+		v4l2_device_unregister(v4l2_dev);
+		devm_kfree(dev, v4l2_dev);
+		ext[MVX_EXT_IF_ENCODER].v4l2_dev = NULL;
+		ext[MVX_EXT_IF_DECODER].v4l2_dev = NULL;
+	}
+
+	if (IS_ENABLED(CONFIG_DEBUG_FS) && !IS_ERR_OR_NULL(dsessions)) {
+		debugfs_remove_recursive(dsessions);
+		ext[MVX_EXT_IF_ENCODER].dsessions = NULL;
+		ext[MVX_EXT_IF_DECODER].dsessions = NULL;
+	}
+}
diff --git a/drivers/media/platform/cix/cix_vpu/if/v4l2/mvx_v4l2_buffer.c b/drivers/media/platform/cix/cix_vpu/if/v4l2/mvx_v4l2_buffer.c
new file mode 100755
index 000000000000..6ede824a31af
--- /dev/null
+++ b/drivers/media/platform/cix/cix_vpu/if/v4l2/mvx_v4l2_buffer.c
@@ -0,0 +1,563 @@
+/*
+ * The confidential and proprietary information contained in this file may
+ * only be used by a person authorised under and to the extent permitted
+ * by a subsisting licensing agreement from Arm Technology (China) Co., Ltd.
+ *
+ * (C) COPYRIGHT 2021-2021 Arm Technology (China) Co., Ltd.
+ * ALL RIGHTS RESERVED + * + * This entire notice must be reproduced on all copies of this file + * and copies of this file may only be made by a person if such person is + * permitted to do so under the terms of a subsisting license agreement + * from Arm Technology (China) Co., Ltd. + * + * SPDX-License-Identifier: GPL-2.0-only + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + * + */ + +#include +#include +#include +#include +#include "mvx_ext_if.h" +#include "mvx_log_group.h" +#include "mvx_seq.h" +#include "mvx_v4l2_buffer.h" + +/**************************************************************************** + * Static functions and variables + ****************************************************************************/ + +static void v4l2_buffer_show(struct mvx_v4l2_buffer *buffer, + struct seq_file *s) +{ + struct vb2_v4l2_buffer *v4l2 = &buffer->vb2_v4l2_buffer; + struct vb2_buffer *vb2 = &v4l2->vb2_buf; + int is_multi = V4L2_TYPE_IS_MULTIPLANAR(vb2->type); + int i; + int ind = 0; + + mvx_seq_printf(s, "mvx_v4l2_buffer", ind, "%px\n", buffer); + + ind++; + mvx_seq_printf(s, "vb2", ind, "%px\n", vb2); + + ind++; + mvx_seq_printf(s, "index", ind, "%u\n", vb2->index); + mvx_seq_printf(s, "type", ind, "%u (multi: %s)\n", + vb2->type, is_multi ? 
"yes" : "no"); + mvx_seq_printf(s, "flags", ind, "0x%08x\n", v4l2->flags); + mvx_seq_printf(s, "field", ind, "%u\n", v4l2->field); + +#if KERNEL_VERSION(4, 5, 0) <= LINUX_VERSION_CODE + mvx_seq_printf(s, "timestamp", ind, "%llu\n", vb2->timestamp); +#else + mvx_seq_printf(s, "timestamp", ind, "\n"); + ind++; + mvx_seq_printf(s, "tv_sec", ind, "%lu\n", v4l2->timestamp.tv_sec); + mvx_seq_printf(s, "tv_usec", ind, "%lu\n", v4l2->timestamp.tv_usec); + ind--; +#endif + mvx_seq_printf(s, "timecode", ind, "\n"); + ind++; + mvx_seq_printf(s, "type", ind, "%u\n", v4l2->timecode.type); + mvx_seq_printf(s, "flags", ind, "%u\n", v4l2->timecode.flags); + mvx_seq_printf(s, "frames", ind, "%u\n", v4l2->timecode.frames); + mvx_seq_printf(s, "seconds", ind, "%u\n", v4l2->timecode.seconds); + mvx_seq_printf(s, "minutes", ind, "%u\n", v4l2->timecode.minutes); + mvx_seq_printf(s, "hours", ind, "%u\n", v4l2->timecode.hours); + ind--; + + mvx_seq_printf(s, "sequence", ind, "%u\n", v4l2->sequence); + mvx_seq_printf(s, "memory", ind, "%u\n", vb2->memory); + + mvx_seq_printf(s, "num_planes", ind, "%u\n", vb2->num_planes); + + mvx_seq_printf(s, "planes", ind, "\n"); + ind++; + for (i = 0; i < vb2->num_planes; ++i) { + char tag[10]; + struct vb2_plane *plane = &vb2->planes[i]; + + scnprintf(tag, sizeof(tag), "#%d", i); + mvx_seq_printf(s, tag, ind, + "bytesused: %10u, length: %10u, m.offset: %10u, m.userptr: %10lu, m.fd: %10d, data_offset: %10u\n", + plane->bytesused, + plane->length, + plane->m.offset, + plane->m.userptr, + plane->m.fd, + plane->data_offset); + } + + ind--; +} + +static int buffer_stat_show(struct seq_file *s, + void *v) +{ + struct mvx_v4l2_buffer *vbuf = s->private; + + v4l2_buffer_show(vbuf, s); + seq_puts(s, "\n"); + mvx_buffer_show(&vbuf->buf, s); + + return 0; +} + +static int buffer_stat_open(struct inode *inode, + struct file *file) +{ + return single_open(file, buffer_stat_show, inode->i_private); +} + +static const struct file_operations buffer_stat_fops = { + 
.open = buffer_stat_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release +}; + +static int buffer_debugfs_init(struct dentry *parent, + struct mvx_v4l2_buffer *vbuf) +{ + char name[20]; + struct dentry *dentry; + + scnprintf(name, sizeof(name), "buffer%u", to_vb2_buf(vbuf)->index); + vbuf->dentry = debugfs_create_dir(name, parent); + if (IS_ERR_OR_NULL(vbuf->dentry)) + return -ENOMEM; + + dentry = debugfs_create_file("stat", 0400, vbuf->dentry, vbuf, + &buffer_stat_fops); + if (IS_ERR_OR_NULL(dentry)) + return -ENOMEM; + + return 0; +} + +/** + * get_bytesused() - Get total number of bytes used for Vb2 buffer. + */ +static size_t get_bytesused(struct vb2_buffer *b) +{ + size_t size; + uint32_t i; + + for (i = 0, size = 0; i < b->num_planes; i++) + size += b->planes[i].bytesused; + + return size; +} + +static int clear_bytesused(struct vb2_buffer *b) +{ + uint32_t i; + + for (i = 0; i < b->num_planes; i++) + b->planes[i].bytesused = 0; + + return 0; +} + +/* Update mvx_buffer flags from vb2_buffer flags */ +static int update_mvx_flags(struct mvx_buffer *buf, + struct vb2_buffer *b) +{ + struct mvx_v4l2_port *vport = vb2_get_drv_priv(b->vb2_queue); + struct mvx_session *session = &vport->vsession->session; + enum mvx_nalu_format nalu_format = session->nalu_format; + struct vb2_v4l2_buffer *vb2_v4l2 = to_vb2_v4l2_buffer(b); + __u32 flags = vb2_v4l2->flags; + __u32 osd_flags = (buf->flags & MVX_BUFFER_FRAME_FLAG_OSD_MASK); + buf->flags = osd_flags; + + if (V4L2_TYPE_IS_OUTPUT(b->type) != false && get_bytesused(b) == 0) + flags |= V4L2_BUF_FLAG_LAST; + + if (flags & V4L2_BUF_FLAG_LAST) + buf->flags |= MVX_BUFFER_EOS; + + if (mvx_is_frame(buf->format) && buf->dir == MVX_DIR_INPUT) { + if (flags & V4L2_BUF_FLAG_KEYFRAME) { + //idr flag has conflict with B frames, disable idr when B frames has positive value + if (session->b_frames == 0) + //encode frame port for idr flag + buf->flags |= MVX_BUFFER_FRAME_FLAG_FORCE_IDR; + } + } + + if 
(mvx_is_afbc(buf->format)) { + if ((flags & V4L2_BUF_FLAG_MVX_AFBC_TILED_HEADERS) == V4L2_BUF_FLAG_MVX_AFBC_TILED_HEADERS) + buf->flags |= MVX_BUFFER_AFBC_TILED_HEADERS; + + if ((flags & V4L2_BUF_FLAG_MVX_AFBC_TILED_BODY) == V4L2_BUF_FLAG_MVX_AFBC_TILED_BODY) + buf->flags |= MVX_BUFFER_AFBC_TILED_BODY; + + if ((flags & V4L2_BUF_FLAG_MVX_AFBC_32X8_SUPERBLOCK) == V4L2_BUF_FLAG_MVX_AFBC_32X8_SUPERBLOCK) + buf->flags |= MVX_BUFFER_AFBC_32X8_SUPERBLOCK; + if (buf->dir == MVX_DIR_INPUT) //encode frame port + { + if ((flags & V4L2_BUF_FLAG_MVX_BUFFER_EPR) == V4L2_BUF_FLAG_MVX_BUFFER_EPR) { + buf->flags |= MVX_BUFFER_FRAME_FLAG_GENERAL; + } + if ((flags & V4L2_BUF_FLAG_MVX_BUFFER_ROI) == V4L2_BUF_FLAG_MVX_BUFFER_ROI) { + buf->flags |= MVX_BUFFER_FRAME_FLAG_ROI; + } + if ((flags & V4L2_BUF_FLAG_MVX_BUFFER_CHR) == V4L2_BUF_FLAG_MVX_BUFFER_CHR) { + buf->flags |= MVX_BUFFER_FRAME_FLAG_CHR; + } + if ((flags & V4L2_BUF_FLAG_MVX_BUFFER_RESET_RC) == V4L2_BUF_FLAG_MVX_BUFFER_RESET_RC) + { + buf->flags |= MVX_BUFFER_FRAME_FLAG_RESET_RC; + } + } + } else if (mvx_is_bitstream(buf->format)) { + if (buf->dir == MVX_DIR_INPUT) { + //decode bitstream port + if ((flags & V4L2_BUF_FLAG_END_OF_SUB_FRAME) == V4L2_BUF_FLAG_END_OF_SUB_FRAME){ + buf->flags |= MVX_BUFFER_END_OF_SUB_FRAME; + } + if (flags & V4L2_BUF_FLAG_KEYFRAME) + buf->flags |= MVX_BUFFER_EOF; + if ((flags & V4L2_BUF_FLAG_MVX_CODEC_CONFIG) == V4L2_BUF_FLAG_MVX_CODEC_CONFIG) + buf->flags |= MVX_BUFFER_CODEC_CONFIG; + + /* + * MMF does not set nalu_format and usually one frame per buffer. + * Hence, MVX_BUFFER_EOF flag must be set, otherwise the timestamp + * may incorrect. If one buffer contains multiple frames, nalu_format + * must be set. 
+ */ + if (nalu_format == MVX_NALU_FORMAT_UNDEFINED) + buf->flags |= MVX_BUFFER_EOF; + } + } else if (mvx_is_frame(buf->format)) { + if (buf->dir == MVX_DIR_OUTPUT) { + //decode frame port + if (flags & V4L2_BUF_FRAME_FLAG_ROTATION_MASK) { + if ((flags & V4L2_BUF_FRAME_FLAG_ROTATION_MASK) == V4L2_BUF_FRAME_FLAG_ROTATION_90) { + buf->flags |= MVX_BUFFER_FRAME_FLAG_ROTATION_90; + } else if ((flags & V4L2_BUF_FRAME_FLAG_ROTATION_MASK) == V4L2_BUF_FRAME_FLAG_ROTATION_180) { + buf->flags |= MVX_BUFFER_FRAME_FLAG_ROTATION_180; + } else if ((flags & V4L2_BUF_FRAME_FLAG_ROTATION_MASK) == V4L2_BUF_FRAME_FLAG_ROTATION_270) { + buf->flags |= MVX_BUFFER_FRAME_FLAG_ROTATION_270; + } + } + if (flags & V4L2_BUF_FRAME_FLAG_SCALING_MASK) { + if ((flags & V4L2_BUF_FRAME_FLAG_SCALING_MASK) == V4L2_BUF_FRAME_FLAG_SCALING_2) { + buf->flags |= MVX_BUFFER_FRAME_FLAG_SCALING_2; + } else if ((flags & V4L2_BUF_FRAME_FLAG_SCALING_MASK) == V4L2_BUF_FRAME_FLAG_SCALING_4) { + buf->flags |= MVX_BUFFER_FRAME_FLAG_SCALING_4; + } + } + } else if (buf->dir == MVX_DIR_INPUT) { + //encode frame port + if (flags & V4L2_BUF_FRAME_FLAG_MIRROR_MASK) { + if ((flags & V4L2_BUF_FRAME_FLAG_MIRROR_MASK) == V4L2_BUF_FRAME_FLAG_MIRROR_HORI) { + buf->flags |= MVX_BUFFER_FRAME_FLAG_MIRROR_HORI; + } else if ((flags & V4L2_BUF_FRAME_FLAG_MIRROR_MASK) == V4L2_BUF_FRAME_FLAG_MIRROR_VERT) { + buf->flags |= MVX_BUFFER_FRAME_FLAG_MIRROR_VERT; + } + } + if (flags & V4L2_BUF_ENCODE_FLAG_ROTATION_MASK) { + if ((flags & V4L2_BUF_ENCODE_FLAG_ROTATION_MASK) == V4L2_BUF_ENCODE_FLAG_ROTATION_90) { + buf->flags |= MVX_BUFFER_FRAME_FLAG_ROTATION_90; + } else if ((flags & V4L2_BUF_ENCODE_FLAG_ROTATION_MASK) == V4L2_BUF_ENCODE_FLAG_ROTATION_180) { + buf->flags |= MVX_BUFFER_FRAME_FLAG_ROTATION_180; + } else if ((flags & V4L2_BUF_ENCODE_FLAG_ROTATION_MASK) == V4L2_BUF_ENCODE_FLAG_ROTATION_270) { + buf->flags |= MVX_BUFFER_FRAME_FLAG_ROTATION_270; + } + } + if ((flags & V4L2_BUF_FLAG_MVX_BUFFER_EPR) == V4L2_BUF_FLAG_MVX_BUFFER_EPR) { 
+ buf->flags |= MVX_BUFFER_FRAME_FLAG_GENERAL; + } + if ((flags & V4L2_BUF_FLAG_MVX_BUFFER_ROI) == V4L2_BUF_FLAG_MVX_BUFFER_ROI) { + buf->flags |= MVX_BUFFER_FRAME_FLAG_ROI; + } + if ((flags & V4L2_BUF_FLAG_MVX_BUFFER_CHR) == V4L2_BUF_FLAG_MVX_BUFFER_CHR) { + buf->flags |= MVX_BUFFER_FRAME_FLAG_CHR; + } + if ((flags & V4L2_BUF_FLAG_MVX_BUFFER_RESET_RC) == V4L2_BUF_FLAG_MVX_BUFFER_RESET_RC) + { + buf->flags |= MVX_BUFFER_FRAME_FLAG_RESET_RC; + } + } + + } else { + MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_WARNING, + "unrecognized buffer format!."); + + } + + return 0; +} + +static bool is_contiguous_planes(unsigned int pixelformat, int memory) +{ + return (memory != V4L2_MEMORY_MMAP) && + (pixelformat == V4L2_PIX_FMT_YUV420 || + pixelformat == V4L2_PIX_FMT_NV12 || + pixelformat == V4L2_PIX_FMT_NV21 || + pixelformat == V4L2_PIX_FMT_P010); +} + +/* Update mvx_buffer from mvx_v4l2_buffer */ +static int update_mvx_buffer(struct mvx_v4l2_buffer *vbuf) +{ + struct vb2_buffer *vb2 = to_vb2_buf(vbuf); + struct mvx_buffer *mvx_buf = &vbuf->buf; + struct mvx_v4l2_port *vport = vb2_get_drv_priv(vb2->vb2_queue); + struct mvx_session *session = &vport->vsession->session; + int i; + int ret; + + if ((!session->dual_afbc_downscaled && vb2->num_planes != mvx_buf->nplanes) || + (session->dual_afbc_downscaled && vb2->num_planes > mvx_buf->nplanes)) { + MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_WARNING, + "VB2 and MVX buffers have different number of planes. vb2_planes=%u, mvx_planes=%u.", + vb2->num_planes, mvx_buf->nplanes); + return -EINVAL; + } + +#if KERNEL_VERSION(4, 5, 0) <= LINUX_VERSION_CODE + mvx_buf->user_data = vb2->timestamp; +#else + { + struct timeval *ts = &vbuf->vb2_v4l2_buffer.timestamp; + + mvx_buf->user_data = ((uint64_t)ts->tv_sec << 32) | + (ts->tv_usec & 0xffffffff); + } +#endif + + for (i = 0; i < vb2->num_planes; i++) { + unsigned int offset = vb2->planes[i].data_offset; + + /* + * For single planar mmap buffers the offset is carried by + * the lower part of the offset. 
+ */ + if (vb2->memory == V4L2_MEMORY_MMAP) + offset += vb2->planes[i].m.offset & ~PAGE_MASK; + + /* MVX filled is the number of bytes excluding the offset. */ + ret = mvx_buffer_filled_set(mvx_buf, i, + vb2->planes[i].bytesused - offset, offset); + if (ret != 0) + return ret; + mvx_buf->planes[i].length = vb2->planes[i].length; + } + + mvx_buf->is_contiguous = + is_contiguous_planes(vport->port->pixelformat, vb2->memory); + + ret = update_mvx_flags(mvx_buf, to_vb2_buf(vbuf)); + if (ret != 0) + return ret; + + return 0; +} + +static int update_v4l2_bytesused(struct mvx_v4l2_buffer *vbuf) +{ + struct vb2_buffer *b = to_vb2_buf(vbuf); + struct mvx_buffer *buf = &vbuf->buf; + struct mvx_v4l2_port *vport = vb2_get_drv_priv(b->vb2_queue); + struct mvx_session *session = &vport->vsession->session; + int i; + + if ((!session->dual_afbc_downscaled && b->num_planes != buf->nplanes) || + (session->dual_afbc_downscaled && b->num_planes > buf->nplanes)) { + MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_WARNING, + "VB2 and MVX buffers have different number of planes. vb2_planes=%u, mvx_planes=%u.", + b->num_planes, buf->nplanes); + return -EINVAL; + } + + /* + * MVX filled is the number of bytes excluding the offset. The total + * length is calculated as 'filled + offset' and should be <= length. + * + * V4L2 bytesused is the total length including the offset. + * bytesused should be <= length and bytesused >= offset. 
+ */ + + for (i = 0; i < b->num_planes; i++) { + b->planes[i].bytesused = + buf->planes[i].filled + buf->planes[i].offset; + b->planes[i].data_offset = buf->planes[i].offset; + } + + return 0; +} + +static int update_vb2_flags(struct mvx_v4l2_buffer *vbuf) +{ + struct vb2_v4l2_buffer *b = &vbuf->vb2_v4l2_buffer; + struct mvx_buffer *buf = &vbuf->buf; + + b->flags &= ~(V4L2_BUF_FLAG_ERROR | + V4L2_BUF_FLAG_KEYFRAME | + V4L2_BUF_FLAG_LAST | + V4L2_BUF_FLAG_END_OF_SUB_FRAME); + + if (buf->flags & MVX_BUFFER_EOS) + b->flags |= V4L2_BUF_FLAG_LAST; + + if (buf->flags & MVX_BUFFER_SYNCFRAME) + b->flags |= V4L2_BUF_FLAG_KEYFRAME; + + if (buf->flags & MVX_BUFFER_CORRUPT) + b->flags |= V4L2_BUF_FLAG_ERROR; + + if (buf->flags & MVX_BUFFER_REJECTED) + clear_bytesused(&b->vb2_buf); + + if (buf->flags & MVX_BUFFER_DECODE_ONLY) + b->flags |= V4L2_BUF_FLAG_MVX_DECODE_ONLY; + + if (buf->flags & MVX_BUFFER_CODEC_CONFIG) + b->flags |= V4L2_BUF_FLAG_MVX_CODEC_CONFIG; + + if (buf->flags & MVX_BUFFER_AFBC_TILED_HEADERS) + b->flags |= V4L2_BUF_FLAG_MVX_AFBC_TILED_HEADERS; + + if (buf->flags & MVX_BUFFER_AFBC_TILED_BODY) + b->flags |= V4L2_BUF_FLAG_MVX_AFBC_TILED_BODY; + + if (buf->flags & MVX_BUFFER_AFBC_32X8_SUPERBLOCK) + b->flags |= V4L2_BUF_FLAG_MVX_AFBC_32X8_SUPERBLOCK; + + if (buf->flags & MVX_BUFFER_FRAME_PRESENT && buf->dir == MVX_DIR_OUTPUT) + b->flags |= V4L2_BUF_FLAG_MVX_BUFFER_FRAME_PRESENT; + + if (buf->flags & MVX_BUFFER_FRAME_NEED_REALLOC) + b->flags |= V4L2_BUF_FLAG_MVX_BUFFER_NEED_REALLOC; + + if (buf->flags & MVX_BUFFER_ENC_STATS) + b->flags |= V4L2_BUF_FLAG_MVX_BUFFER_ENC_STATS; + + /* + * For encoder(especially VP8/VP9/AV1), one encoded frame may split into + * several buffers, and set MVX_BUFFER_EOF flag with the last subframe. + * Hence, add V4L2_BUF_FLAG_END_OF_SUB_FRAME flag to other subframes to + * notify user. 
+ */ + if (!(buf->flags & MVX_BUFFER_EOF) && mvx_is_bitstream(buf->format)) + b->flags |= V4L2_BUF_FLAG_END_OF_SUB_FRAME; + + return 0; +} + +/**************************************************************************** + * Exported functions and variables + ****************************************************************************/ + +int mvx_v4l2_buffer_construct(struct mvx_v4l2_buffer *vbuf, + struct mvx_v4l2_session *vsession, + enum mvx_direction dir, + unsigned int nplanes, + struct sg_table **sgt) +{ + int ret; + + ret = mvx_buffer_construct(&vbuf->buf, vsession->ext->dev, + &vsession->session.mmu, dir, + nplanes, sgt); + if (ret != 0) + return ret; + + if (IS_ENABLED(CONFIG_DEBUG_FS)) { + struct mvx_v4l2_port *vport = &vsession->port[dir]; + + ret = buffer_debugfs_init(vport->dentry, vbuf); + if (ret != 0) { + MVX_SESSION_WARN(&vsession->session, + "Failed to create buffer debugfs entry."); + goto destruct_buffer; + } + } + + return 0; + +destruct_buffer: + mvx_buffer_destruct(&vbuf->buf); + + return ret; +} + +void mvx_v4l2_buffer_destruct(struct mvx_v4l2_buffer *vbuf) +{ + mvx_buffer_destruct(&vbuf->buf); + + if (IS_ENABLED(CONFIG_DEBUG_FS)) + debugfs_remove_recursive(vbuf->dentry); +} + +struct mvx_v4l2_buffer *mvx_buffer_to_v4l2_buffer(struct mvx_buffer *buffer) +{ + return container_of(buffer, struct mvx_v4l2_buffer, buf); +} + +/* Update mvx_v4l2_buffer from vb2_buffer */ +int mvx_v4l2_buffer_set(struct mvx_v4l2_buffer *vbuf, + struct vb2_buffer *b) +{ + int ret; + + ret = update_mvx_buffer(vbuf); + if (ret != 0) + return ret; + + return 0; +} + +enum vb2_buffer_state mvx_v4l2_buffer_update(struct mvx_v4l2_buffer *vbuf) +{ + struct vb2_buffer *vb2 = to_vb2_buf(vbuf); + struct mvx_buffer *mvx_buf = &vbuf->buf; + int ret = 0; + + if (!V4L2_TYPE_IS_OUTPUT(vb2->type)) + ret = update_v4l2_bytesused(vbuf); + + if (ret != 0) + goto error; + + ret = update_vb2_flags(vbuf); + if (ret != 0 || + (vbuf->vb2_v4l2_buffer.flags & V4L2_BUF_FLAG_ERROR) != 0) + goto 
error; + +#if KERNEL_VERSION(4, 5, 0) <= LINUX_VERSION_CODE + vb2->timestamp = mvx_buf->user_data; +#else + { + struct timeval *ts = &vbuf->vb2_v4l2_buffer.timestamp; + + ts->tv_sec = mvx_buf->user_data >> 32; + ts->tv_usec = mvx_buf->user_data & 0xffffffff; + } +#endif + + return VB2_BUF_STATE_DONE; + +error: + return VB2_BUF_STATE_ERROR; +} diff --git a/drivers/media/platform/cix/cix_vpu/if/v4l2/mvx_v4l2_buffer.h b/drivers/media/platform/cix/cix_vpu/if/v4l2/mvx_v4l2_buffer.h new file mode 100755 index 000000000000..d150da515eb0 --- /dev/null +++ b/drivers/media/platform/cix/cix_vpu/if/v4l2/mvx_v4l2_buffer.h @@ -0,0 +1,167 @@ +/* + * The confidential and proprietary information contained in this file may + * only be used by a person authorised under and to the extent permitted + * by a subsisting licensing agreement from Arm Technology (China) Co., Ltd. + * + * (C) COPYRIGHT 2021-2021 Arm Technology (China) Co., Ltd. + * ALL RIGHTS RESERVED + * + * This entire notice must be reproduced on all copies of this file + * and copies of this file may only be made by a person if such person is + * permitted to do so under the terms of a subsisting license agreement + * from Arm Technology (China) Co., Ltd. + * + * SPDX-License-Identifier: GPL-2.0-only + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ * + */ + +#ifndef _MVX_V4L2_BUFFER_H_ +#define _MVX_V4L2_BUFFER_H_ + +/**************************************************************************** + * Includes + ****************************************************************************/ + +#include +#include +#include "mvx_buffer.h" +#include "mvx_if.h" +#include "mvx_v4l2_session.h" + +/**************************************************************************** + * Types + ****************************************************************************/ + +#define vb2_v4l2_to_mvx_v4l2_buffer(v4l2) \ + container_of(v4l2, struct mvx_v4l2_buffer, vb2_v4l2_buffer) + +#define vb2_to_mvx_v4l2_buffer(vb2) \ + vb2_v4l2_to_mvx_v4l2_buffer(to_vb2_v4l2_buffer(vb2)) + +#define to_vb2_buf(vbuf) (&((vbuf)->vb2_v4l2_buffer.vb2_buf)) + +/** + * struct mvx_v4l2_buffer - MVX V4L2 buffer. + * @vb2_v4l2_buffer: VB2 V4L2 buffer. + * @buf: MVX buffer. + * @dentry: Debug file system entry. + */ +struct mvx_v4l2_buffer { + struct vb2_v4l2_buffer vb2_v4l2_buffer; + struct mvx_buffer buf; + struct dentry *dentry; +}; + +/**************************************************************************** + * Exported functions + ****************************************************************************/ + +/** + * mvx_v4l2_buffer_construct() - Construct MVX V4L2 buffer object. + * @vbuf: Pointer to MVX V4L2 buffer. + * @vsession: Pointer to V4L2 session. + * @dir: Direction of the buffer. + * @nplanes: Number of planes. + * @sgt: Array of pointers to scatter-gatter lists. Each SG list + * contains memory pages for a corresponding plane. + * + * Return: 0 on success, else error code. + */ +int mvx_v4l2_buffer_construct(struct mvx_v4l2_buffer *vbuf, + struct mvx_v4l2_session *vsession, + enum mvx_direction dir, + unsigned int nplanes, + struct sg_table **sgt); + +/** + * mvx_v4l2_buffer_destruct() - Destruct v4l2 buffer object. + * @vbuf: Pointer to MVX V4L2 buffer. 
+ */ +void mvx_v4l2_buffer_destruct(struct mvx_v4l2_buffer *vbuf); + +/** + * mvx_buffer_to_v4l2_buffer() - Cast mvx_buffer to mvx_v4l2_buffer. + * @buf: Pointer MVX buffer. + * + * This function casts a pointer to struct mvx_buffer to a pointer to + * a corresponding struct mvx_v4l2_buffer. + * + * Return: Pointer to corresponding mvx_v4l2_buffer object. + */ +struct mvx_v4l2_buffer *mvx_buffer_to_v4l2_buffer(struct mvx_buffer *buf); + +/** + * mvx_v4l2_buffer_set_status() - Set status for a buffer. + * @vbuf: Pointer to MVX V4L2 buffer. + * @status: Status to set. + * + * Status is a combination of the following flags: + * V4L2_BUF_FLAG_QUEUED, + * V4L2_BUF_FLAG_DONE, + * V4L2_BUF_FLAG_PREPARED, + * V4L2_BUF_FLAG_ERROR + */ +void mvx_v4l2_buffer_set_status(struct mvx_v4l2_buffer *vbuf, + uint32_t status); + +/** + * mvx_v4l2_buffer_get_status() - Get the buffer status. + * @vbuf: Pointer to MVX V4L2 buffer. + * + * Return: Buffer status. + */ +uint32_t mvx_v4l2_buffer_get_status(struct mvx_v4l2_buffer *vbuf); + +/** + * mvx_v4l2_buffer_set() - Copy Vb2 buffer to VBUF. + * @vbuf: Destination MVX V4L2 buffer. + * @b: Source Vb2 buffer. + * + * Copies and validates paramters from 'b' to 'vbuf'. + * + * Return: 0 on success, else error code. + */ +int mvx_v4l2_buffer_set(struct mvx_v4l2_buffer *vbuf, + struct vb2_buffer *b); + +/** + * mvx_v4l2_buffer_get() - Copy VBUF to V4L2 buffer. + * @vbuf: Source MVX V4L2 buffer. + * @b: Destination V4L2 buffer. + * + * Copies parameters from 'vbuf' to 'b'. + */ +void mvx_v4l2_buffer_get(struct mvx_v4l2_buffer *vbuf, + struct v4l2_buffer *b); + +/** + * mvx_v4l2_buffer_update() - Update the V4L2 buffer. + * @vbuf: Pointer to MVX V4L2 buffer. + * + * This function copies parameters from the MVX buffer to the V4L2 buffer. + * It also sets the time stamp and validates that the buffer length is correct. + * If an error is detectd the buffer length is cleared and the error flag + * is set. 
+ * + * This function should be called after the MVX buffer has changed, for example + * after it has been returned by the firmware or flushed. + * + * Return: VB2_BUF_STATE_DONE on success, else VB2_BUF_STATE_ERROR. + */ +enum vb2_buffer_state mvx_v4l2_buffer_update(struct mvx_v4l2_buffer *vbuf); + +#endif /* _MVX_V4L2_BUFFER_H_ */ diff --git a/drivers/media/platform/cix/cix_vpu/if/v4l2/mvx_v4l2_ctrls.c b/drivers/media/platform/cix/cix_vpu/if/v4l2/mvx_v4l2_ctrls.c new file mode 100755 index 000000000000..c66390f62cce --- /dev/null +++ b/drivers/media/platform/cix/cix_vpu/if/v4l2/mvx_v4l2_ctrls.c @@ -0,0 +1,2164 @@ +/* + * The confidential and proprietary information contained in this file may + * only be used by a person authorised under and to the extent permitted + * by a subsisting licensing agreement from Arm Technology (China) Co., Ltd. + * + * (C) COPYRIGHT 2021-2021 Arm Technology (China) Co., Ltd. + * ALL RIGHTS RESERVED + * + * This entire notice must be reproduced on all copies of this file + * and copies of this file may only be made by a person if such person is + * permitted to do so under the terms of a subsisting license agreement + * from Arm Technology (China) Co., Ltd. + * + * SPDX-License-Identifier: GPL-2.0-only + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ * + */ + +/**************************************************************************** + * Includes + ****************************************************************************/ +#include +#include + +#include "mvx_bitops.h" +#include "mvx_v4l2_ctrls.h" +#include "mvx_v4l2_session.h" + + +/**************************************************************************** + * Static functions and variables + ****************************************************************************/ + +/* + * V4L2_CID_MVE_VIDEO_NALU_FORMAT control defines. + */ +static const char *const nalu_format_str[] = { + "Default", + "Start codes", + "One nalu per buffer", + "One byte length field", + "Two byte length field", + "Four byte length field" +}; + +static const enum mvx_nalu_format mvx_nalu_format_list[] = { + MVX_NALU_FORMAT_UNDEFINED, + MVX_NALU_FORMAT_START_CODES, + MVX_NALU_FORMAT_ONE_NALU_PER_BUFFER, + MVX_NALU_FORMAT_ONE_BYTE_LENGTH_FIELD, + MVX_NALU_FORMAT_TWO_BYTE_LENGTH_FIELD, + MVX_NALU_FORMAT_FOUR_BYTE_LENGTH_FIELD +}; + +static const uint8_t video_bitrate_mode_list[] = { + V4L2_MPEG_VIDEO_BITRATE_MODE_VBR, + V4L2_MPEG_VIDEO_BITRATE_MODE_CBR, + V4L2_MPEG_VIDEO_BITRATE_MODE_STANDARD, + V4L2_MPEG_VIDEO_BITRATE_MODE_CVBR +}; + +/* + * V4L2_CID_MPEG_VIDEO_HEVC_PROFILE control defines. + */ +static const uint8_t h265_profile_list[] = { + V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN, + V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_STILL_PICTURE, + V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_10 +}; + +static const int mvx_h265_profile_list[] = { + MVX_PROFILE_H265_MAIN, + MVX_PROFILE_H265_MAIN_STILL, + MVX_PROFILE_H265_MAIN_10 +}; + +/* + * V4L2_CID_MVE_VIDEO_AV1_PROFILE control defines. + */ +static const char *const av1_profile_str[] = { + "Main" +}; + +/* + * V4L2_CID_MPEG_VIDEO_VP9_PROFILE control defines. + */ +static const uint8_t vp9_profile_list[] = { + V4L2_MPEG_VIDEO_VP9_PROFILE_0, + V4L2_MPEG_VIDEO_VP9_PROFILE_2 +}; + +/* + * V4L2_CID_MPEG_VIDEO_VP8_PROFILE control defines. 
+ */ +static const uint8_t vp8_profile_list[] = { + V4L2_MPEG_VIDEO_VP8_PROFILE_0 +}; + +/* + * V4L2_CID_MVE_VIDEO_VC1_PROFILE control defines. + */ +static const char *const vc1_profile_str[] = { + "Simple", + "Main", + "Advanced" +}; + +static const int mvx_vc1_profile_list[] = { + MVX_PROFILE_VC1_SIMPLE, + MVX_PROFILE_VC1_MAIN, + MVX_PROFILE_VC1_ADVANCED +}; + +/* + * V4L2_CID_MPEG_VIDEO_H264_PROFILE control defines. + */ +static const uint8_t h264_profile_list[] = { + V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE, + V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_BASELINE, + V4L2_MPEG_VIDEO_H264_PROFILE_MAIN, + V4L2_MPEG_VIDEO_H264_PROFILE_HIGH, + V4L2_MPEG_VIDEO_H264_PROFILE_HIGH_10 +}; + +static const enum mvx_profile mvx_h264_profile_list[] = { + MVX_PROFILE_H264_BASELINE, + MVX_PROFILE_H264_BASELINE, + MVX_PROFILE_H264_MAIN, + MVX_PROFILE_H264_HIGH, + MVX_PROFILE_H264_HIGH_10 +}; + +/* + * V4L2_CID_MPEG_VIDEO_H264_LEVEL control defines. + */ +static uint8_t h264_level_list[] = { + V4L2_MPEG_VIDEO_H264_LEVEL_1_0, + V4L2_MPEG_VIDEO_H264_LEVEL_1B, + V4L2_MPEG_VIDEO_H264_LEVEL_1_1, + V4L2_MPEG_VIDEO_H264_LEVEL_1_2, + V4L2_MPEG_VIDEO_H264_LEVEL_1_3, + V4L2_MPEG_VIDEO_H264_LEVEL_2_0, + V4L2_MPEG_VIDEO_H264_LEVEL_2_1, + V4L2_MPEG_VIDEO_H264_LEVEL_2_2, + V4L2_MPEG_VIDEO_H264_LEVEL_3_0, + V4L2_MPEG_VIDEO_H264_LEVEL_3_1, + V4L2_MPEG_VIDEO_H264_LEVEL_3_2, + V4L2_MPEG_VIDEO_H264_LEVEL_4_0, + V4L2_MPEG_VIDEO_H264_LEVEL_4_1, + V4L2_MPEG_VIDEO_H264_LEVEL_4_2, + V4L2_MPEG_VIDEO_H264_LEVEL_5_0, + V4L2_MPEG_VIDEO_H264_LEVEL_5_1, + V4L2_MPEG_VIDEO_H264_LEVEL_5_2, + V4L2_MPEG_VIDEO_H264_LEVEL_6_0, + V4L2_MPEG_VIDEO_H264_LEVEL_6_1 +}; + +static const int mvx_h264_level_list[] = { + MVX_LEVEL_H264_1, + MVX_LEVEL_H264_1b, + MVX_LEVEL_H264_11, + MVX_LEVEL_H264_12, + MVX_LEVEL_H264_13, + MVX_LEVEL_H264_2, + MVX_LEVEL_H264_21, + MVX_LEVEL_H264_22, + MVX_LEVEL_H264_3, + MVX_LEVEL_H264_31, + MVX_LEVEL_H264_32, + MVX_LEVEL_H264_4, + MVX_LEVEL_H264_41, + MVX_LEVEL_H264_42, + MVX_LEVEL_H264_5, + 
MVX_LEVEL_H264_51, + MVX_LEVEL_H264_52, + MVX_LEVEL_H264_6, + MVX_LEVEL_H264_61 +}; + +/* + * V4L2_CID_MPEG_VIDEO_HEVC_LEVEL control defines. + */ +static uint8_t h265_level_list[] = { + V4L2_MPEG_VIDEO_HEVC_LEVEL_1, + V4L2_MPEG_VIDEO_HEVC_LEVEL_2, + V4L2_MPEG_VIDEO_HEVC_LEVEL_2_1, + V4L2_MPEG_VIDEO_HEVC_LEVEL_3, + V4L2_MPEG_VIDEO_HEVC_LEVEL_3_1, + V4L2_MPEG_VIDEO_HEVC_LEVEL_4, + V4L2_MPEG_VIDEO_HEVC_LEVEL_4_1, + V4L2_MPEG_VIDEO_HEVC_LEVEL_5, + V4L2_MPEG_VIDEO_HEVC_LEVEL_5_1, + V4L2_MPEG_VIDEO_HEVC_LEVEL_5_2, + V4L2_MPEG_VIDEO_HEVC_LEVEL_6, + V4L2_MPEG_VIDEO_HEVC_LEVEL_6_1, + V4L2_MPEG_VIDEO_HEVC_LEVEL_6_2 +}; + +static const int mvx_h265_level_list[] = { + MVX_LEVEL_H265_1, + MVX_LEVEL_H265_2, + MVX_LEVEL_H265_21, + MVX_LEVEL_H265_3, + MVX_LEVEL_H265_31, + MVX_LEVEL_H265_4, + MVX_LEVEL_H265_41, + MVX_LEVEL_H265_5, + MVX_LEVEL_H265_51, + MVX_LEVEL_H265_52, + MVX_LEVEL_H265_6, + MVX_LEVEL_H265_61, + MVX_LEVEL_H265_62 +}; + +/* + * V4L2_CID_MPEG_VIDEO_HEVC_TIER control defines. + */ +static uint8_t h265_tier_list[] = { + V4L2_MPEG_VIDEO_HEVC_TIER_MAIN, + V4L2_MPEG_VIDEO_HEVC_TIER_HIGH +}; + +static const int mvx_h265_tier_list[] = { + MVX_TIER_MAIN, + MVX_TIER_HIGH +}; + +/* + * V4L2_CID_MVE_VIDEO_GOP_TYPE control defines. + */ +static const char *const gop_type_str[] = { + "None", + "Bidirectional", + "Low delay", + "Pyramid", + "svct3", + "gdr" +}; + +static const enum mvx_gop_type mvx_gop_type_list[] = { + MVX_GOP_TYPE_NONE, + MVX_GOP_TYPE_BIDIRECTIONAL, + MVX_GOP_TYPE_LOW_DELAY, + MVX_GOP_TYPE_PYRAMID, + MVX_GOP_TYPE_SVCT3, + MVX_GOP_TYPE_GDR +}; + +/* + * V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE control defines. 
+ */ +#define V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_NONE 2 + +static const uint8_t h264_entropy_mode_list[] = { + V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CAVLC, + V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CABAC, + V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_NONE +}; + +static const enum mvx_entropy_mode mvx_h264_entropy_mode_list[] = { + MVX_ENTROPY_MODE_CAVLC, + MVX_ENTROPY_MODE_CABAC, + MVX_ENTROPY_MODE_NONE +}; + +static const char *const h264_entropy_mode_str[] = { + "CAVLC", + "CABAC", + "None" +}; + +/* + * V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE controls list. + */ +static uint8_t multi_slice_mode_list[] = { + V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_SINGLE, + + /* Misspelling in the header file */ + V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_MAX_MB +}; + +static const enum mvx_multi_slice_mode mvx_multi_slice_mode_list[] = { + MVX_MULTI_SLICE_MODE_SINGLE, + MVX_MULTI_SLICE_MODE_MAX_MB +}; + +/* + * V4L2_CID_MVE_VIDEO_VP9_PROB_UPDATE control defines. + */ +static const char *const vp9_prob_update_str[] = { + "Disabled", + "Implicit", + "Explicit" +}; + +static const enum mvx_vp9_prob_update mvx_vp9_prob_update_list[] = { + MVX_VP9_PROB_UPDATE_DISABLED, + MVX_VP9_PROB_UPDATE_IMPLICIT, + MVX_VP9_PROB_UPDATE_EXPLICIT +}; + +/* + * V4L2_CID_MVE_VIDEO_RGB_TO_YUV_MODE control defines. + */ +static const char *const rgb_to_yuv_mode_str[] = { + "BT601 studio", + "BT601 full", + "BT709 studio", + "BT709 full", + "BT2020 studio", + "BT2020 full", + "rgb to yuv mode max" +}; + +static const enum mvx_rgb_to_yuv_mode mvx_rgb_to_yuv_mode_list[] = { + MVX_RGB_TO_YUV_MODE_BT601_STUDIO, + MVX_RGB_TO_YUV_MODE_BT601_FULL, + MVX_RGB_TO_YUV_MODE_BT709_STUDIO, + MVX_RGB_TO_YUV_MODE_BT709_FULL, + MVX_RGB_TO_YUV_MODE_BT2020_STUDIO, + MVX_RGB_TO_YUV_MODE_BT2020_FULL, + MVX_RGB_TO_YUV_MODE_MAX +}; + +/** + * find_idx() - Find index of a value in an array. + * @list: Pointer to an array. + * @size: Size of an array. + * @val: Value to look for. 
 *
 * Note: the scan runs from the end of the array, so with duplicate
 * entries the highest matching index is returned; the tables used here
 * hold unique values, making this the index of @val.
 *
 * Return: Index of 'val' in 'list', or -EINVAL when not found.
 */
static int find_idx(const uint8_t *list,
		    size_t size,
		    uint8_t val)
{
	while (size--)
		if (list[size] == val)
			return size;

	return -EINVAL;
}

/**
 * set_ctrl() - Callback used by V4L2 framework to set a control.
 * @ctrl:	V4L2 control.
 *
 * Translates the V4L2 control value into the corresponding
 * mvx_session_set_*() call under the session mutex. Menu controls that
 * only support a subset of the standard values are validated with
 * find_idx() against the *_list tables before being forwarded.
 *
 * Return: 0 on success, error code otherwise.
 */
static int set_ctrl(struct v4l2_ctrl *ctrl)
{
	int ret = 0;
	struct mvx_v4l2_session *vsession =
		container_of(ctrl->handler, struct mvx_v4l2_session,
			     v4l2_ctrl);
	struct mvx_session *session = &vsession->session;
	enum mvx_nalu_format nalu_fmt;
	enum mvx_profile mvx_profile;
	enum mvx_level mvx_level;
	enum mvx_tier mvx_tier;
	enum mvx_gop_type gop_type;
	enum mvx_entropy_mode entropy_mode;
	enum mvx_multi_slice_mode multi_slice_mode;
	enum mvx_vp9_prob_update vp9_prob_update;
	enum mvx_rgb_to_yuv_mode rgb_to_yuv_mode;
	int32_t i32_val;
	bool bool_val;
	enum mvx_tristate tri_val;

	ret = mutex_lock_interruptible(&vsession->mutex);
	if (ret != 0)
		return ret;

	switch (ctrl->id) {
	case V4L2_CID_MVE_VIDEO_SECURE_VIDEO:
		bool_val = *ctrl->p_new.p_s32 != 0;
		ret = mvx_session_set_securevideo(session, bool_val);
		break;
	case V4L2_CID_MPEG_VIDEO_FRAME_RC_ENABLE:
		bool_val = *ctrl->p_new.p_s32 != 0;
		ret = mvx_session_set_rate_control(session, bool_val);
		break;
	case V4L2_CID_MPEG_VIDEO_BITRATE_MODE:
		i32_val = *ctrl->p_new.p_s32;
		/* Reject modes not present in the supported subset. */
		ret = find_idx(video_bitrate_mode_list,
			       ARRAY_SIZE(video_bitrate_mode_list), i32_val);
		if (ret == -EINVAL)
			goto unlock_mutex;

		ret = mvx_session_set_bitrate_mode(session, i32_val);
		break;
	case V4L2_CID_MPEG_VIDEO_BITRATE:
		i32_val = *ctrl->p_new.p_s32;
		ret = mvx_session_set_bitrate(session, i32_val);
		break;
	case V4L2_CID_MPEG_VIDEO_BITRATE_PEAK:
		i32_val = *ctrl->p_new.p_s32;
		ret = mvx_session_set_max_bitrate(session, i32_val);
		break;
	case V4L2_CID_MVE_VIDEO_RC_I_MODE:
		i32_val = *ctrl->p_new.p_s32;
		ret = mvx_session_set_rc_bit_i_mode(session, i32_val);
		break;
	case V4L2_CID_MVE_VIDEO_RC_I_RATIO:
		i32_val = *ctrl->p_new.p_s32;
		ret = mvx_session_set_rc_bit_i_ratio(session, i32_val);
		break;
	case V4L2_CID_MVE_VIDEO_INTER_MED_BUF_SIZE:
		i32_val = *ctrl->p_new.p_s32;
		ret = mvx_session_set_inter_med_buf_size(session, i32_val);
		break;
	case V4L2_CID_MVE_VIDEO_SVCT3_LEVEL1_PERIOD:
		i32_val = *ctrl->p_new.p_s32;
		ret = mvx_session_set_svct3_level1_period(session, i32_val);
		break;
	case V4L2_CID_MVE_VIDEO_HRD_BUFFER_SIZE:
		i32_val = *ctrl->p_new.p_s32;
		ret = mvx_session_set_hrd_buffer_size(session, i32_val);
		break;
	case V4L2_CID_MVE_VIDEO_NALU_FORMAT:
		/* Menu index -> firmware value via parallel table. */
		i32_val = *ctrl->p_new.p_s32;
		nalu_fmt = mvx_nalu_format_list[i32_val];
		ret = mvx_session_set_nalu_format(session, nalu_fmt);
		break;
	case V4L2_CID_MVE_VIDEO_STREAM_ESCAPING:
		tri_val = *ctrl->p_new.p_s32;
		ret = mvx_session_set_stream_escaping(session, tri_val);
		break;
	case V4L2_CID_MPEG_VIDEO_H264_PROFILE:
		i32_val = *ctrl->p_new.p_s32;
		ret = find_idx(h264_profile_list,
			       ARRAY_SIZE(h264_profile_list), i32_val);
		if (ret == -EINVAL)
			goto unlock_mutex;

		mvx_profile = mvx_h264_profile_list[ret];
		ret = mvx_session_set_profile(session,
					      MVX_FORMAT_H264,
					      mvx_profile);
		break;
	case V4L2_CID_MPEG_VIDEO_HEVC_PROFILE:
		i32_val = *ctrl->p_new.p_s32;
		ret = find_idx(h265_profile_list,
			       ARRAY_SIZE(h265_profile_list), i32_val);
		if (ret == -EINVAL)
			goto unlock_mutex;

		mvx_profile = mvx_h265_profile_list[ret];
		ret = mvx_session_set_profile(session,
					      MVX_FORMAT_HEVC,
					      mvx_profile);
		break;
	case V4L2_CID_MVE_VIDEO_VC1_PROFILE:
		i32_val = *ctrl->p_new.p_s32;
		mvx_profile = mvx_vc1_profile_list[i32_val];
		ret = mvx_session_set_profile(session,
					      MVX_FORMAT_VC1,
					      mvx_profile);
		break;
	case V4L2_CID_MPEG_VIDEO_H264_LEVEL:
		i32_val = *ctrl->p_new.p_s32;
		ret = find_idx(h264_level_list,
			       ARRAY_SIZE(h264_level_list), i32_val);
		if (ret == -EINVAL)
			goto unlock_mutex;

		mvx_level = mvx_h264_level_list[ret];
		ret = mvx_session_set_level(session,
					    MVX_FORMAT_H264,
					    mvx_level);
		break;
	case V4L2_CID_MPEG_VIDEO_HEVC_LEVEL:
		i32_val = *ctrl->p_new.p_s32;
		ret = find_idx(h265_level_list,
			       ARRAY_SIZE(h265_level_list), i32_val);
		if (ret == -EINVAL)
			goto unlock_mutex;

		mvx_level = mvx_h265_level_list[ret];
		ret = mvx_session_set_level(session,
					    MVX_FORMAT_HEVC,
					    mvx_level);
		break;
	case V4L2_CID_MPEG_VIDEO_HEVC_TIER:
		i32_val = *ctrl->p_new.p_s32;
		ret = find_idx(h265_tier_list,
			       ARRAY_SIZE(h265_tier_list), i32_val);
		if (ret == -EINVAL)
			goto unlock_mutex;

		mvx_tier = mvx_h265_tier_list[ret];
		ret = mvx_session_set_tier(session,
					   MVX_FORMAT_HEVC,
					   mvx_tier);
		break;
	case V4L2_CID_MVE_VIDEO_IGNORE_STREAM_HEADERS:
		tri_val = *ctrl->p_new.p_s32;
		ret = mvx_session_set_ignore_stream_headers(session, tri_val);
		break;
	case V4L2_CID_MVE_VIDEO_FRAME_REORDERING:
		tri_val = *ctrl->p_new.p_s32;
		ret = mvx_session_set_frame_reordering(session, tri_val);
		break;
	case V4L2_CID_MVE_VIDEO_INTBUF_SIZE:
		i32_val = *ctrl->p_new.p_s32;
		ret = mvx_session_set_intbuf_size(session, i32_val);
		break;
	case V4L2_CID_MPEG_VIDEO_B_FRAMES:
		i32_val = *ctrl->p_new.p_s32;
		ret = mvx_session_set_b_frames(session, i32_val);
		if (ret == 0)
			ret = mvx_session_update_input_buffer_min(session);
		/* Update P-frames according to latest B-frames and GOP size. */
		if (ret == 0)
			ret = mvx_session_update_p_frames(session);
		break;
	case V4L2_CID_MPEG_VIDEO_GOP_SIZE:
		i32_val = *ctrl->p_new.p_s32;
		ret = mvx_session_set_gop_size(session, i32_val);
		/*
		 * The VPU does not support setting GOP size directly;
		 * it is applied by recalculating P-frames.
		 */
		if (ret == 0)
			ret = mvx_session_update_p_frames(session);
		break;
	case V4L2_CID_MVE_VIDEO_GOP_TYPE:
		i32_val = *ctrl->p_new.p_s32;
		gop_type = mvx_gop_type_list[i32_val];
		ret = mvx_session_set_gop_type(session, gop_type);
		break;
	case V4L2_CID_MPEG_VIDEO_CYCLIC_INTRA_REFRESH_MB:
		i32_val = *ctrl->p_new.p_s32;
		ret = mvx_session_set_cyclic_intra_refresh_mb(session,
							      i32_val);
		break;
	case V4L2_CID_MVE_VIDEO_CONSTR_IPRED:
		tri_val = *ctrl->p_new.p_s32;
		ret = mvx_session_set_constr_ipred(session, tri_val);
		break;
	case V4L2_CID_MVE_VIDEO_ENTROPY_SYNC:
		tri_val = *ctrl->p_new.p_s32;
		ret = mvx_session_set_entropy_sync(session, tri_val);
		break;
	case V4L2_CID_MVE_VIDEO_TEMPORAL_MVP:
		tri_val = *ctrl->p_new.p_s32;
		ret = mvx_session_set_temporal_mvp(session, tri_val);
		break;
	case V4L2_CID_MVE_VIDEO_TILE_ROWS:
		i32_val = *ctrl->p_new.p_s32;
		ret = mvx_session_set_tile_rows(session, i32_val);
		break;
	case V4L2_CID_MVE_VIDEO_TILE_COLS:
		i32_val = *ctrl->p_new.p_s32;
		ret = mvx_session_set_tile_cols(session, i32_val);
		break;
	case V4L2_CID_MVE_VIDEO_MIN_LUMA_CB_SIZE:
		i32_val = *ctrl->p_new.p_s32;
		ret = mvx_session_set_min_luma_cb_size(session, i32_val);
		break;
	case V4L2_CID_MVE_VIDEO_MB_MASK:
		i32_val = *ctrl->p_new.p_s32;
		ret = mvx_session_set_mb_mask(session, i32_val);
		break;
	case V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE:
		i32_val = *ctrl->p_new.p_s32;
		ret = find_idx(h264_entropy_mode_list,
			       ARRAY_SIZE(h264_entropy_mode_list), i32_val);
		if (ret == -EINVAL)
			goto unlock_mutex;

		entropy_mode = mvx_h264_entropy_mode_list[ret];
		ret = mvx_session_set_entropy_mode(session, entropy_mode);
		break;
	case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE:
		i32_val = *ctrl->p_new.p_s32;
		ret = find_idx(multi_slice_mode_list,
			       ARRAY_SIZE(multi_slice_mode_list), i32_val);
		if (ret == -EINVAL)
			goto unlock_mutex;

		multi_slice_mode = mvx_multi_slice_mode_list[ret];
		ret = mvx_session_set_multi_slice_mode(session,
						       multi_slice_mode);
		break;
	case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_MB:
		i32_val = *ctrl->p_new.p_s32;
		ret = mvx_session_set_multi_slice_max_mb(session, i32_val);
		break;
	case V4L2_CID_MVE_VIDEO_VP9_PROB_UPDATE:
		i32_val = *ctrl->p_new.p_s32;
		vp9_prob_update = mvx_vp9_prob_update_list[i32_val];
		ret = mvx_session_set_vp9_prob_update(session,
						      vp9_prob_update);
		break;
	case V4L2_CID_MPEG_VIDEO_MV_H_SEARCH_RANGE:
		i32_val = *ctrl->p_new.p_s32;
		ret = mvx_session_set_mv_h_search_range(session, i32_val);
		break;
	case V4L2_CID_MPEG_VIDEO_MV_V_SEARCH_RANGE:
		i32_val = *ctrl->p_new.p_s32;
		ret = mvx_session_set_mv_v_search_range(session, i32_val);
		break;
	case V4L2_CID_MVE_VIDEO_BITDEPTH_CHROMA:
		i32_val = *ctrl->p_new.p_s32;
		ret = mvx_session_set_bitdepth_chroma(session, i32_val);
		break;
	case V4L2_CID_MVE_VIDEO_BITDEPTH_LUMA:
		i32_val = *ctrl->p_new.p_s32;
		ret = mvx_session_set_bitdepth_luma(session, i32_val);
		break;
	case V4L2_CID_MVE_VIDEO_GOP_RESET_PFRAMES:
		i32_val = *ctrl->p_new.p_s32;
		ret = mvx_session_set_gop_reset_pframes(session, i32_val);
		break;
	case V4L2_CID_MVE_VIDEO_LTR_RESET_PERIOD:
		i32_val = *ctrl->p_new.p_s32;
		ret = mvx_session_set_ltr_reset_period(session, i32_val);
		break;
	case V4L2_CID_MVE_VIDEO_FORCE_CHROMA_FORMAT:
		i32_val = *ctrl->p_new.p_s32;
		ret = mvx_session_set_force_chroma_format(session, i32_val);
		break;
	case V4L2_CID_MVE_VIDEO_RGB_TO_YUV_MODE:
		i32_val = *ctrl->p_new.p_s32;
		rgb_to_yuv_mode = mvx_rgb_to_yuv_mode_list[i32_val];
		ret = mvx_session_set_rgb_to_yuv_mode(session,
						      rgb_to_yuv_mode);
		break;
	case V4L2_CID_MVE_VIDEO_BANDWIDTH_LIMIT:
		i32_val = *ctrl->p_new.p_s32;
		ret = mvx_session_set_band_limit(session, i32_val);
		break;
	case V4L2_CID_MVE_VIDEO_CABAC_INIT_IDC:
		i32_val = *ctrl->p_new.p_s32;
		ret = mvx_session_set_cabac_init_idc(session, i32_val);
		break;
	/* Per-codec QP controls: same setters, keyed by mvx_format. */
	case V4L2_CID_MPEG_VIDEO_H263_I_FRAME_QP:
		i32_val = *ctrl->p_new.p_s32;
		ret = mvx_session_set_i_frame_qp(session, MVX_FORMAT_H263,
						 i32_val);
		break;
	case V4L2_CID_MPEG_VIDEO_H263_P_FRAME_QP:
		i32_val = *ctrl->p_new.p_s32;
		ret = mvx_session_set_p_frame_qp(session, MVX_FORMAT_H263,
						 i32_val);
		break;
	case V4L2_CID_MPEG_VIDEO_H263_B_FRAME_QP:
		i32_val = *ctrl->p_new.p_s32;
		ret = mvx_session_set_b_frame_qp(session, MVX_FORMAT_H263,
						 i32_val);
		break;
	case V4L2_CID_MPEG_VIDEO_H263_MIN_QP:
		i32_val = *ctrl->p_new.p_s32;
		ret = mvx_session_set_min_qp(session, MVX_FORMAT_H263,
					     i32_val);
		break;
	case V4L2_CID_MPEG_VIDEO_H263_MAX_QP:
		i32_val = *ctrl->p_new.p_s32;
		ret = mvx_session_set_max_qp(session, MVX_FORMAT_H263,
					     i32_val);
		break;
	case V4L2_CID_MPEG_VIDEO_H264_I_FRAME_QP:
		i32_val = *ctrl->p_new.p_s32;
		ret = mvx_session_set_i_frame_qp(session, MVX_FORMAT_H264,
						 i32_val);
		break;
	case V4L2_CID_MPEG_VIDEO_H264_P_FRAME_QP:
		i32_val = *ctrl->p_new.p_s32;
		ret = mvx_session_set_p_frame_qp(session, MVX_FORMAT_H264,
						 i32_val);
		break;
	case V4L2_CID_MPEG_VIDEO_H264_B_FRAME_QP:
		i32_val = *ctrl->p_new.p_s32;
		ret = mvx_session_set_b_frame_qp(session, MVX_FORMAT_H264,
						 i32_val);
		break;
	case V4L2_CID_MPEG_VIDEO_H264_MIN_QP:
		i32_val = *ctrl->p_new.p_s32;
		ret = mvx_session_set_min_qp(session, MVX_FORMAT_H264,
					     i32_val);
		break;
	case V4L2_CID_MPEG_VIDEO_H264_MAX_QP:
		i32_val = *ctrl->p_new.p_s32;
		ret = mvx_session_set_max_qp(session, MVX_FORMAT_H264,
					     i32_val);
		break;
	case V4L2_CID_MPEG_VIDEO_HEVC_I_FRAME_QP:
		i32_val = *ctrl->p_new.p_s32;
		ret = mvx_session_set_i_frame_qp(session, MVX_FORMAT_HEVC,
						 i32_val);
		break;
	case V4L2_CID_MPEG_VIDEO_HEVC_P_FRAME_QP:
		i32_val = *ctrl->p_new.p_s32;
		ret = mvx_session_set_p_frame_qp(session, MVX_FORMAT_HEVC,
						 i32_val);
		break;
	case V4L2_CID_MPEG_VIDEO_HEVC_B_FRAME_QP:
		i32_val = *ctrl->p_new.p_s32;
		ret = mvx_session_set_b_frame_qp(session, MVX_FORMAT_HEVC,
						 i32_val);
		break;
	case V4L2_CID_MPEG_VIDEO_HEVC_MIN_QP:
		i32_val = *ctrl->p_new.p_s32;
		ret = mvx_session_set_min_qp(session, MVX_FORMAT_HEVC,
					     i32_val);
		break;
	case V4L2_CID_MPEG_VIDEO_HEVC_MAX_QP:
		i32_val = *ctrl->p_new.p_s32;
		ret = mvx_session_set_max_qp(session, MVX_FORMAT_HEVC,
					     i32_val);
		break;
	/* VPX QP controls are routed to the VP9 format handler. */
	case V4L2_CID_MPEG_VIDEO_VPX_I_FRAME_QP:
		i32_val = *ctrl->p_new.p_s32;
		ret = mvx_session_set_i_frame_qp(session, MVX_FORMAT_VP9,
						 i32_val);
		break;
	case V4L2_CID_MPEG_VIDEO_VPX_P_FRAME_QP:
		i32_val = *ctrl->p_new.p_s32;
		ret = mvx_session_set_p_frame_qp(session, MVX_FORMAT_VP9,
						 i32_val);
		break;
	case V4L2_CID_MVE_VIDEO_VPX_B_FRAME_QP:
		i32_val = *ctrl->p_new.p_s32;
		ret = mvx_session_set_b_frame_qp(session, MVX_FORMAT_VP9,
						 i32_val);
		break;
	case V4L2_CID_MPEG_VIDEO_VPX_MIN_QP:
		i32_val = *ctrl->p_new.p_s32;
		ret = mvx_session_set_min_qp(session, MVX_FORMAT_VP9,
					     i32_val);
		break;
	case V4L2_CID_MPEG_VIDEO_VPX_MAX_QP:
		i32_val = *ctrl->p_new.p_s32;
		ret = mvx_session_set_max_qp(session, MVX_FORMAT_VP9,
					     i32_val);
		break;
	case V4L2_CID_JPEG_RESTART_INTERVAL:
		i32_val = *ctrl->p_new.p_s32;
		/* -1 means "leave the resync interval unchanged". */
		if (i32_val != -1)
			ret = mvx_session_set_resync_interval(session, i32_val);

		break;
	case V4L2_CID_MVE_VIDEO_INIT_QP_I:
		i32_val = *ctrl->p_new.p_s32;
		ret = mvx_session_set_init_qp_i(session, i32_val);
		break;
	case V4L2_CID_MVE_VIDEO_INIT_QP_P:
		i32_val = *ctrl->p_new.p_s32;
		ret = mvx_session_set_init_qp_p(session, i32_val);
		break;
	case V4L2_CID_MVE_VIDEO_SAO_LUMA:
		i32_val = *ctrl->p_new.p_s32;
		ret = mvx_session_set_sao_luma(session, i32_val);
		break;
	case V4L2_CID_MVE_VIDEO_SAO_CHROMA:
		i32_val = *ctrl->p_new.p_s32;
		ret = mvx_session_set_sao_chroma(session, i32_val);
		break;
	case V4L2_CID_MVE_VIDEO_QP_DELTA_I_P:
		i32_val = *ctrl->p_new.p_s32;
		ret = mvx_session_set_delta_I_P(session, i32_val);
		break;
	case V4L2_CID_MVE_VIDEO_QP_REF_RB_EN:
		i32_val = *ctrl->p_new.p_s32;
		ret = mvx_session_set_ref_rb_eb(session, i32_val);
		break;
	case V4L2_CID_MVE_VIDEO_RC_CLIP_TOP:
		i32_val = *ctrl->p_new.p_s32;
		ret = mvx_session_set_rc_clip_top(session, i32_val);
		break;
	case V4L2_CID_MVE_VIDEO_RC_CLIP_BOT:
		i32_val = *ctrl->p_new.p_s32;
		ret = mvx_session_set_rc_clip_bot(session, i32_val);
		break;
	case V4L2_CID_MVE_VIDEO_QP_MAP_CLIP_TOP:
		i32_val = *ctrl->p_new.p_s32;
		ret = mvx_session_set_qpmap_clip_top(session, i32_val);
		break;
	case V4L2_CID_MVE_VIDEO_QP_MAP_CLIP_BOT:
		i32_val = *ctrl->p_new.p_s32;
		ret = mvx_session_set_qpmap_clip_bot(session, i32_val);
		break;
	case V4L2_CID_MVE_VIDEO_MAX_QP_I:
		i32_val = *ctrl->p_new.p_s32;
		ret = mvx_session_set_max_qp_i(session, i32_val);
		break;
	case V4L2_CID_MVE_VIDEO_MIN_QP_I:
		i32_val = *ctrl->p_new.p_s32;
		ret = mvx_session_set_min_qp_i(session, i32_val);
		break;
	case V4L2_CID_MVE_VIDEO_QP_FIXED:
		i32_val = *ctrl->p_new.p_s32;
		ret = mvx_session_set_fixedqp(session, i32_val);
		break;
	case V4L2_CID_MVE_VIDEO_VISIBLE_WIDTH:
		i32_val = *ctrl->p_new.p_s32;
		ret = mvx_session_set_visible_width(session, i32_val);
		break;
	case V4L2_CID_MVE_VIDEO_VISIBLE_HEIGHT:
		i32_val = *ctrl->p_new.p_s32;
		ret = mvx_session_set_visible_height(session, i32_val);
		break;
	case V4L2_CID_MVE_VIDEO_GDR_NUMBER:
		i32_val = *ctrl->p_new.p_s32;
		ret = mvx_session_set_gdr_number(session, i32_val);
		break;
	case V4L2_CID_MVE_VIDEO_SCD_ENABLE:
		i32_val = *ctrl->p_new.p_s32;
		ret = mvx_session_set_scd_enable(session, i32_val);
		break;
	case V4L2_CID_MVE_VIDEO_SCD_PERCENT:
		i32_val = *ctrl->p_new.p_s32;
		ret = mvx_session_set_scd_percent(session, i32_val);
		break;
	case V4L2_CID_MVE_VIDEO_SCD_THRESHOLD:
		i32_val = *ctrl->p_new.p_s32;
		ret = mvx_session_set_scd_threshold(session, i32_val);
		break;
	case V4L2_CID_MVE_VIDEO_AQ_SSIM_EN:
		i32_val = *ctrl->p_new.p_s32;
		ret = mvx_session_set_aq_ssim_en(session, i32_val);
		break;
	case V4L2_CID_MVE_VIDEO_AQ_NEG_RATIO:
		i32_val = *ctrl->p_new.p_s32;
		ret = mvx_session_set_aq_neg_ratio(session, i32_val);
		break;
	case V4L2_CID_MVE_VIDEO_AQ_POS_RATIO:
		i32_val = *ctrl->p_new.p_s32;
		ret = mvx_session_set_aq_pos_ratio(session, i32_val);
		break;
	case V4L2_CID_MVE_VIDEO_AQ_QPDELTA_LMT:
		i32_val = *ctrl->p_new.p_s32;
		ret = mvx_session_set_aq_qpdelta_lmt(session, i32_val);
		break;
	case V4L2_CID_MVE_VIDEO_AQ_INIT_FRM_AVG_SVAR:
		i32_val = *ctrl->p_new.p_s32;
		ret = mvx_session_set_aq_init_frm_avg_svar(session, i32_val);
		break;
	case V4L2_CID_MVE_VIDEO_GDR_PERIOD:
		i32_val = *ctrl->p_new.p_s32;
		ret = mvx_session_set_gdr_period(session, i32_val);
		break;
	case V4L2_CID_MVE_VIDEO_MULIT_SPS_PPS:
		i32_val = *ctrl->p_new.p_s32;
		ret = mvx_session_set_mulit_sps_pps(session, i32_val);
		break;
	case V4L2_CID_MVE_VIDEO_ENABLE_VISUAL:
		i32_val = *ctrl->p_new.p_s32;
		ret = mvx_session_set_enable_visual(session, i32_val);
		break;
	case V4L2_CID_MVE_VIDEO_ENABLE_ADAPTIVE_INTRA_BLOCK:
		i32_val = *ctrl->p_new.p_s32;
		if (i32_val != 0)
			ret = mvx_session_set_adaptive_intra_block(session, i32_val);
		break;
	case V4L2_CID_JPEG_COMPRESSION_QUALITY:
		i32_val = *ctrl->p_new.p_s32;
		/* 0 means "not set": keep the current quality. */
		if (i32_val != 0)
			ret = mvx_session_set_jpeg_quality(session, i32_val);
		break;
	case V4L2_CID_MVE_VIDEO_JPEG_QUALITY_LUMA:
		i32_val = *ctrl->p_new.p_s32;
		if (i32_val != 0)
			ret = mvx_session_set_jpeg_quality_luma(session, i32_val);
		break;
	case V4L2_CID_MVE_VIDEO_JPEG_QUALITY_CHROMA:
		i32_val = *ctrl->p_new.p_s32;
		if (i32_val != 0)
			ret = mvx_session_set_jpeg_quality_chroma(session, i32_val);
		break;
	case V4L2_CID_MVE_VIDEO_COLOR_CONVERSION:
		i32_val = *ctrl->p_new.p_s32;
		ret = mvx_session_set_color_conversion(session, i32_val);
		break;
	case V4L2_CID_MVE_VIDEO_FORCED_UV_VALUE:
		i32_val = *ctrl->p_new.p_s32;
		ret = mvx_session_set_forced_uv_value(session, i32_val);
		break;
	case V4L2_CID_MVE_VIDEO_DSL_INTERP_MODE:
		i32_val = *ctrl->p_new.p_s32;
		ret = mvx_session_set_dsl_interpolation_mode(session,
							     i32_val);
		break;
	case V4L2_CID_MVE_VIDEO_DISABLED_FEATURES:
		i32_val = *ctrl->p_new.p_s32;
		ret = mvx_session_set_disabled_features(session, i32_val);
		break;
	case V4L2_CID_MVE_VIDEO_JOB_FRAMES:
		i32_val = *ctrl->p_new.p_s32;
		ret = mvx_session_set_job_frames(session, i32_val);
		break;
	case V4L2_CID_MPEG_VIDEO_FORCE_KEY_FRAME:
		/* Button control: the value itself is not used. */
		ret = mvx_session_set_force_key_frame(session, 1);
		break;
	case V4L2_CID_MIN_BUFFERS_FOR_OUTPUT:
		i32_val = *ctrl->p_new.p_s32;
		vsession->session.port[MVX_DIR_INPUT].buffer_min = i32_val;
		break;
	case V4L2_CID_MIN_BUFFERS_FOR_CAPTURE:
		i32_val = *ctrl->p_new.p_s32;
		vsession->session.port[MVX_DIR_OUTPUT].buffer_min = i32_val;
		break;
	case V4L2_CID_MVE_VIDEO_MAX_BUFFERS_FOR_CAPTURE:
		i32_val = *ctrl->p_new.p_s32;
		vsession->session.port[MVX_DIR_OUTPUT].buffer_max = i32_val;
		break;
#if KERNEL_VERSION(5, 15, 0) <= LINUX_VERSION_CODE
	case V4L2_CID_COLORIMETRY_HDR10_CLL_INFO:
		mvx_v4l2_session_set_hdr10_cll_info(vsession,
						    ctrl->p_new.p_hdr10_cll);
		break;
	case V4L2_CID_COLORIMETRY_HDR10_MASTERING_DISPLAY:
		mvx_v4l2_session_set_hdr10_mastering(vsession,
						     ctrl->p_new.p_hdr10_mastering);
		break;
#endif
	case V4L2_CID_MVE_VIDEO_AV1_FSF:
		i32_val = *ctrl->p_new.p_s32;
		ret = mvx_session_set_fsf_mode(session, i32_val);
		break;
	case V4L2_CID_MVE_VIDEO_PRIORITY:
		i32_val = *ctrl->p_new.p_s32;
		ret = mvx_session_set_priority(session, i32_val);
		break;
	case V4L2_CID_MVE_VIDEO_ENC_INTRA_IPENALTY_ANGULAR:
		i32_val = *ctrl->p_new.p_s32;
		ret = mvx_session_set_enc_intra_ipenalty_angular(session, i32_val);
		break;
	case V4L2_CID_MVE_VIDEO_ENC_INTRA_IPENALTY_PLANAR:
		i32_val = *ctrl->p_new.p_s32;
		ret = mvx_session_set_enc_intra_ipenalty_planar(session, i32_val);
		break;
	case V4L2_CID_MVE_VIDEO_ENC_INTRA_IPENALTY_DC:
		i32_val = *ctrl->p_new.p_s32;
		ret = mvx_session_set_enc_intra_ipenalty_dc(session, i32_val);
		break;
	case V4L2_CID_MVE_VIDEO_ENC_INTER_IPENALTY_ANGULAR:
		i32_val = *ctrl->p_new.p_s32;
		ret = mvx_session_set_enc_inter_ipenalty_angular(session, i32_val);
		break;
	case V4L2_CID_MVE_VIDEO_ENC_INTER_IPENALTY_PLANAR:
		i32_val = *ctrl->p_new.p_s32;
		ret = mvx_session_set_enc_inter_ipenalty_planar(session, i32_val);
		break;
	case V4L2_CID_MVE_VIDEO_ENC_INTER_IPENALTY_DC:
		i32_val = *ctrl->p_new.p_s32;
		ret = mvx_session_set_enc_inter_ipenalty_dc(session, i32_val);
		break;
	}
	/* NOTE(review): unknown ids fall through with ret == 0; the control
	 * framework only dispatches registered ids, so this is benign. */
unlock_mutex:
	mutex_unlock(&vsession->mutex);

	return ret;
}

/**
 * get_volatile_ctrl() - Get control value.
 * @ctrl:	V4L2 control.
 *
 * Reads volatile control values (buffer counts, HDR10 metadata) from
 * the session state. The caller (V4L2 control framework) serializes
 * access to the handler.
 *
 * Return: 0 on success, else error code.
 */
static int get_volatile_ctrl(struct v4l2_ctrl *ctrl)
{
	struct mvx_v4l2_session *vsession =
		container_of(ctrl->handler, struct mvx_v4l2_session,
			     v4l2_ctrl);

	switch (ctrl->id) {
	case V4L2_CID_MIN_BUFFERS_FOR_OUTPUT:
		ctrl->val = vsession->session.port[MVX_DIR_INPUT].buffer_min;
		break;
	case V4L2_CID_MIN_BUFFERS_FOR_CAPTURE:
		ctrl->val = vsession->session.port[MVX_DIR_OUTPUT].buffer_min;
		break;
	case V4L2_CID_MVE_VIDEO_MAX_BUFFERS_FOR_CAPTURE:
		ctrl->val = vsession->session.port[MVX_DIR_OUTPUT].buffer_max;
		break;
#if KERNEL_VERSION(5, 15, 0) <= LINUX_VERSION_CODE
	case V4L2_CID_COLORIMETRY_HDR10_CLL_INFO:
		mvx_v4l2_session_get_hdr10_cll_info(vsession,
						    ctrl->p_new.p_hdr10_cll);
		break;
	case V4L2_CID_COLORIMETRY_HDR10_MASTERING_DISPLAY:
		mvx_v4l2_session_get_hdr10_mastering(vsession,
						     ctrl->p_new.p_hdr10_mastering);
		break;
#endif
	default:
		MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_WARNING,
			      "Unsupported get control. id=%u.",
			      ctrl->id);
		return -EINVAL;
	}

	return 0;
}

/*
 * Callbacks required by V4L2 framework to implement controls support.
 */
static const struct v4l2_ctrl_ops ctrl_ops = {
	.g_volatile_ctrl = get_volatile_ctrl,
	.s_ctrl = set_ctrl
};

/**
 * get_skip_mask() - Calculate V4L2 menu skip mask.
 * @list:	Array of menu items.
 * @cnt:	Number of menu items.
 *
 * Return: V4L2 menu skip mask.
+ */ +static uint64_t get_skip_mask(const uint8_t *list, + size_t cnt) +{ + uint64_t mask = 0; + int i; + + for (i = 0; i < cnt; ++i) + mvx_set_bit(list[i], &mask); + + return ~mask; +} + +/** + * mvx_v4l2_ctrl_new_custom_int() - Create custom V4L2 integer control. + * @hnd: V4L2 handler. + * @id: Id of a control. + * @name: Name of a control. + * @min: Minimum allowed value. + * @max: Maximum allowed value. + * @def: Default value. + * @step: Step. + * + * Return: Pointer to v4l2_ctrl structure in case of success, + * or NULL in case of failure. + */ +static struct v4l2_ctrl *mvx_v4l2_ctrl_new_custom_int( + struct v4l2_ctrl_handler *hnd, + int id, + const char *name, + int64_t min, + int64_t max, + int64_t def, + int32_t step) +{ + struct v4l2_ctrl_config cfg; + + memset(&cfg, 0, sizeof(cfg)); + + cfg.id = id; + cfg.ops = &ctrl_ops; + cfg.type = V4L2_CTRL_TYPE_INTEGER; + cfg.name = name; + cfg.min = min; + cfg.max = max; + cfg.def = def; + cfg.step = step; + + return v4l2_ctrl_new_custom(hnd, &cfg, NULL); +} + +/** + * mvx_v4l2_ctrl_new_custom_tristate() - Create custom V4L2 tristate control. + * @hnd: V4L2 handler. + * @id: Id of a control. + * @name: Name of a control. + * @def: Default value. + * + * Return: Pointer to v4l2_ctrl structure in case of success, + * or NULL in case of failure. 
 */
static struct v4l2_ctrl *mvx_v4l2_ctrl_new_custom_tristate(
	struct v4l2_ctrl_handler *hnd,
	int id,
	const char *name,
	enum mvx_tristate def)
{
	struct v4l2_ctrl_config cfg;

	memset(&cfg, 0, sizeof(cfg));

	cfg.id = id;
	cfg.ops = &ctrl_ops;
	cfg.type = V4L2_CTRL_TYPE_INTEGER;
	cfg.name = name;
	/* Tristate encoded as an integer: -1 = unset, 0 = off, 1 = on. */
	cfg.min = -1;
	cfg.max = 1;
	cfg.def = def;
	cfg.step = 1;

	return v4l2_ctrl_new_custom(hnd, &cfg, NULL);
}

/*
 * Register the HDR10 compound controls (CLL info and mastering display)
 * with the given extra flags. No-op (returns 0) on kernels older than
 * 5.15, where the compound HDR10 controls do not exist.
 */
static int mvx_v4l2_ctrls_init_hdr10(struct v4l2_ctrl_handler *hnd,
				     unsigned long flags)
{
#if KERNEL_VERSION(5, 15, 0) <= LINUX_VERSION_CODE
	struct v4l2_ctrl *ctrl;

	ctrl = v4l2_ctrl_new_std_compound(
		hnd, &ctrl_ops, V4L2_CID_COLORIMETRY_HDR10_CLL_INFO,
		v4l2_ctrl_ptr_create((void *)NULL));
	if (ctrl == NULL)
		return -EINVAL;
	ctrl->flags |= flags;

	ctrl = v4l2_ctrl_new_std_compound(
		hnd, &ctrl_ops, V4L2_CID_COLORIMETRY_HDR10_MASTERING_DISPLAY,
		v4l2_ctrl_ptr_create((void *)NULL));
	if (ctrl == NULL)
		return -EINVAL;
	ctrl->flags |= flags;
#endif

	return 0;
}

/****************************************************************************
 * Exported functions
 ****************************************************************************/

/**
 * mvx_v4l2_ctrls_init_dec() - Register the decoder control set on @hnd.
 * @hnd:	V4L2 control handler to populate.
 *
 * On any registration failure the handler is freed and -EINVAL is
 * returned, so the caller does not need to clean up.
 *
 * Return: 0 on success, error code otherwise.
 */
int mvx_v4l2_ctrls_init_dec(struct v4l2_ctrl_handler *hnd)
{
	int ret;
	struct v4l2_ctrl_config cfg;
	struct v4l2_ctrl *ctrl;

	ret = v4l2_ctrl_handler_init(hnd, 128);
	if (ret != 0)
		return ret;

	ctrl = mvx_v4l2_ctrl_new_custom_int(
		hnd, V4L2_CID_MVE_VIDEO_SECURE_VIDEO,
		"secure video", 0, 1, 0, 1);
	if (ctrl == NULL)
		goto handler_free;

	memset(&cfg, 0, sizeof(cfg));
	cfg.id = V4L2_CID_MVE_VIDEO_NALU_FORMAT;
	cfg.ops = &ctrl_ops;
	cfg.type = V4L2_CTRL_TYPE_MENU;
	cfg.name = "nalu format";
	cfg.max = ARRAY_SIZE(nalu_format_str) - 1;
	cfg.def = 0;
	cfg.qmenu = nalu_format_str;
	ctrl = v4l2_ctrl_new_custom(hnd, &cfg, NULL);
	if (ctrl == NULL)
		goto handler_free;

	ctrl = mvx_v4l2_ctrl_new_custom_tristate(
		hnd, V4L2_CID_MVE_VIDEO_STREAM_ESCAPING,
		"stream escaping", MVX_TRI_UNSET);
	if (ctrl == NULL)
		goto handler_free;

	/* Standard codec-profile menus, restricted via skip masks to the
	 * subsets the firmware supports. */
	ctrl = v4l2_ctrl_new_std_menu(
		hnd, &ctrl_ops, V4L2_CID_MPEG_VIDEO_H264_PROFILE,
		V4L2_MPEG_VIDEO_H264_PROFILE_HIGH_10,
		get_skip_mask(h264_profile_list,
			      ARRAY_SIZE(h264_profile_list)),
		V4L2_MPEG_VIDEO_H264_PROFILE_MAIN);
	if (ctrl == NULL)
		goto handler_free;

	ctrl = v4l2_ctrl_new_std_menu(
		hnd, &ctrl_ops, V4L2_CID_MPEG_VIDEO_HEVC_PROFILE,
		V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_10,
		get_skip_mask(h265_profile_list,
			      ARRAY_SIZE(h265_profile_list)),
		V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN);
	if (ctrl == NULL)
		goto handler_free;

	ctrl = v4l2_ctrl_new_std_menu(
		hnd, &ctrl_ops, V4L2_CID_MPEG_VIDEO_VP9_PROFILE,
		V4L2_MPEG_VIDEO_VP9_PROFILE_2,
		get_skip_mask(vp9_profile_list,
			      ARRAY_SIZE(vp9_profile_list)),
		V4L2_MPEG_VIDEO_VP9_PROFILE_0);
	if (ctrl == NULL)
		goto handler_free;

	ctrl = v4l2_ctrl_new_std_menu(
		hnd, &ctrl_ops, V4L2_CID_MPEG_VIDEO_VP8_PROFILE,
		V4L2_MPEG_VIDEO_VP8_PROFILE_0,
		get_skip_mask(vp8_profile_list,
			      ARRAY_SIZE(vp8_profile_list)),
		V4L2_MPEG_VIDEO_VP8_PROFILE_0);
	if (ctrl == NULL)
		goto handler_free;

	memset(&cfg, 0, sizeof(cfg));
	cfg.id = V4L2_CID_MVE_VIDEO_VC1_PROFILE;
	cfg.ops = &ctrl_ops;
	cfg.type = V4L2_CTRL_TYPE_MENU;
	cfg.name = "vc1 profile";
	cfg.max = ARRAY_SIZE(vc1_profile_str) - 1;
	cfg.def = 0;
	cfg.qmenu = vc1_profile_str;
	ctrl = v4l2_ctrl_new_custom(hnd, &cfg, NULL);
	if (ctrl == NULL)
		goto handler_free;

	memset(&cfg, 0, sizeof(cfg));
	cfg.id = V4L2_CID_MVE_VIDEO_AV1_PROFILE;
	cfg.ops = &ctrl_ops;
	cfg.type = V4L2_CTRL_TYPE_MENU;
	cfg.name = "av1 profile";
	cfg.max = ARRAY_SIZE(av1_profile_str) - 1;
	cfg.def = 0;
	cfg.qmenu = av1_profile_str;
	ctrl = v4l2_ctrl_new_custom(hnd, &cfg, NULL);
	if (ctrl == NULL)
		goto handler_free;

	ctrl = mvx_v4l2_ctrl_new_custom_tristate(
		hnd, V4L2_CID_MVE_VIDEO_IGNORE_STREAM_HEADERS,
		"ignore stream headers", MVX_TRI_UNSET);
	if (ctrl == NULL)
		goto handler_free;

	ctrl = mvx_v4l2_ctrl_new_custom_tristate(
		hnd, V4L2_CID_MVE_VIDEO_FRAME_REORDERING,
		"frame reordering", MVX_TRI_UNSET);
	if (ctrl == NULL)
		goto handler_free;

	ctrl = mvx_v4l2_ctrl_new_custom_int(
		hnd, V4L2_CID_MVE_VIDEO_INTBUF_SIZE,
		"internal buffer size", 0, INT_MAX, 0, 1);
	if (ctrl == NULL)
		goto handler_free;

	ctrl = mvx_v4l2_ctrl_new_custom_int(
		hnd, V4L2_CID_MVE_VIDEO_FORCE_CHROMA_FORMAT,
		"force chroma format", -1, INT_MAX, 0, 1);
	if (ctrl == NULL)
		goto handler_free;


	/* Buffer-count controls are read-only and volatile: values are
	 * served from session state by get_volatile_ctrl(). */
	ctrl = v4l2_ctrl_new_std(
		hnd, &ctrl_ops, V4L2_CID_MIN_BUFFERS_FOR_OUTPUT,
		1, 32, 1, 1);
	if (ctrl == NULL)
		goto handler_free;

	ctrl->flags |= V4L2_CTRL_FLAG_READ_ONLY | V4L2_CTRL_FLAG_VOLATILE;

	ctrl = v4l2_ctrl_new_std(
		hnd, &ctrl_ops, V4L2_CID_MIN_BUFFERS_FOR_CAPTURE,
		1, 32, 1, 1);
	if (ctrl == NULL)
		goto handler_free;

	ctrl->flags |= V4L2_CTRL_FLAG_READ_ONLY | V4L2_CTRL_FLAG_VOLATILE;

	ctrl = mvx_v4l2_ctrl_new_custom_int(
		hnd, V4L2_CID_MVE_VIDEO_MAX_BUFFERS_FOR_CAPTURE,
		"max buffer count to allocate",
		1, VIDEO_MAX_FRAME, VIDEO_MAX_FRAME, 1);
	if (ctrl == NULL)
		goto handler_free;

	ctrl->flags |= V4L2_CTRL_FLAG_READ_ONLY | V4L2_CTRL_FLAG_VOLATILE;

	ctrl = mvx_v4l2_ctrl_new_custom_int(
		hnd, V4L2_CID_MVE_VIDEO_COLOR_CONVERSION,
		"yuv2rgb color conversion",
		0, 0x1f, 0, 1);
	if (ctrl == NULL)
		goto handler_free;

	ctrl = mvx_v4l2_ctrl_new_custom_int(
		hnd, V4L2_CID_MVE_VIDEO_DSL_INTERP_MODE,
		"dsl interp mode",
		0, 0xff, 0xff, 1);
	if (ctrl == NULL)
		goto handler_free;
	ctrl = mvx_v4l2_ctrl_new_custom_int(
		hnd, V4L2_CID_MVE_VIDEO_DISABLED_FEATURES,
		"disabled features",
		0, INT_MAX, 0, 1);
	if (ctrl == NULL)
		goto handler_free;

	ctrl = mvx_v4l2_ctrl_new_custom_int(
		hnd, V4L2_CID_MVE_VIDEO_JOB_FRAMES,
		"job frames",
		0, INT_MAX, 1, 1);
	if (ctrl == NULL)
		goto handler_free;

	ret = mvx_v4l2_ctrls_init_hdr10(hnd,
					V4L2_CTRL_FLAG_VOLATILE | V4L2_CTRL_FLAG_READ_ONLY);
	if (ret != 0)
		goto handler_free;

	ctrl = mvx_v4l2_ctrl_new_custom_int(
		hnd, V4L2_CID_MVE_VIDEO_AV1_FSF,
		"av1 fsf mode",
		MVX_INVALID_VAL, 1, MVX_INVALID_VAL, 1);
	if (ctrl == NULL)
		goto handler_free;

	ctrl = mvx_v4l2_ctrl_new_custom_int(
		hnd, V4L2_CID_MVE_VIDEO_PRIORITY,
		"priority",
		0, 3, 2, 1);
	if (ctrl == NULL)
		goto handler_free;

	/* Push the defaults down to the session via s_ctrl. */
	ret = v4l2_ctrl_handler_setup(hnd);
	if (ret != 0)
		goto handler_free;

	return 0;

handler_free:
	v4l2_ctrl_handler_free(hnd);
	return -EINVAL;
}

/**
 * mvx_v4l2_ctrls_init_enc() - Register the encoder control set on @hnd.
 * @hnd:	V4L2 control handler to populate.
 *
 * On any registration failure the handler is freed and -EINVAL is
 * returned, so the caller does not need to clean up.
 *
 * Return: 0 on success, error code otherwise.
 */
int mvx_v4l2_ctrls_init_enc(struct v4l2_ctrl_handler *hnd)
{
	int ret;
	struct v4l2_ctrl_config cfg;
	struct v4l2_ctrl *ctrl;

	ret = v4l2_ctrl_handler_init(hnd, 128);
	if (ret != 0)
		return ret;

	ctrl = mvx_v4l2_ctrl_new_custom_int(
		hnd, V4L2_CID_MVE_VIDEO_SECURE_VIDEO,
		"secure video", 0, 1, 0, 1);
	if (ctrl == NULL)
		goto handler_free;

	ctrl = mvx_v4l2_ctrl_new_custom_int(
		hnd, V4L2_CID_MVE_VIDEO_RC_I_MODE,
		"i frame bit mode of rc", 0, 1, 0, 1);
	if (ctrl == NULL)
		goto handler_free;

	ctrl = mvx_v4l2_ctrl_new_custom_int(
		hnd, V4L2_CID_MVE_VIDEO_RC_I_RATIO,
		"i frame bit ratio of rc", 0, 100, 0, 1);
	if (ctrl == NULL)
		goto handler_free;

	ctrl = mvx_v4l2_ctrl_new_custom_int(
		hnd, V4L2_CID_MVE_VIDEO_INTER_MED_BUF_SIZE,
		"intermediate buffer size", 0, 1073741823, 0, 1);
	if (ctrl == NULL)
		goto handler_free;

	ctrl = mvx_v4l2_ctrl_new_custom_int(
		hnd, V4L2_CID_MVE_VIDEO_SVCT3_LEVEL1_PERIOD,
		"svct3 level1 period", 0, 255, 0, 1);
	if (ctrl == NULL)
		goto handler_free;

	ctrl = mvx_v4l2_ctrl_new_custom_int(
		hnd, V4L2_CID_MVE_VIDEO_HRD_BUFFER_SIZE,
		"HRD buffer size", 0, 1073741823, 0, 1);
	if (ctrl == NULL)
		goto handler_free;

	ctrl = v4l2_ctrl_new_std(
		hnd, &ctrl_ops, V4L2_CID_MPEG_VIDEO_FRAME_RC_ENABLE,
		0, 1, 1, 0);
	if (ctrl == NULL)
		goto handler_free;

	ctrl = v4l2_ctrl_new_std_menu(
		hnd, &ctrl_ops, V4L2_CID_MPEG_VIDEO_BITRATE_MODE,
		V4L2_MPEG_VIDEO_BITRATE_MODE_CBR,
		get_skip_mask(video_bitrate_mode_list,
			      ARRAY_SIZE(video_bitrate_mode_list)),
		V4L2_MPEG_VIDEO_BITRATE_MODE_CBR);
	if (ctrl == NULL)
		goto handler_free;

	ctrl = v4l2_ctrl_new_std(
		hnd, &ctrl_ops, V4L2_CID_MPEG_VIDEO_BITRATE,
		1000, 480000000, 1, 500000);
	if (ctrl == NULL)
		goto handler_free;

	ctrl = v4l2_ctrl_new_std(
		hnd, &ctrl_ops, V4L2_CID_MPEG_VIDEO_BITRATE_PEAK,
		1000, 480000000, 1, 800000);
	if (ctrl == NULL)
		goto handler_free;

	memset(&cfg, 0, sizeof(cfg));
	cfg.id = V4L2_CID_MVE_VIDEO_NALU_FORMAT;
	cfg.ops = &ctrl_ops;
	cfg.type = V4L2_CTRL_TYPE_MENU;
	cfg.name = "nalu format";
	cfg.max = ARRAY_SIZE(nalu_format_str) - 1;
	cfg.def = 0;
	cfg.qmenu = nalu_format_str;
	ctrl = v4l2_ctrl_new_custom(hnd, &cfg, NULL);
	if (ctrl == NULL)
		goto handler_free;

	ctrl = mvx_v4l2_ctrl_new_custom_tristate(
		hnd, V4L2_CID_MVE_VIDEO_STREAM_ESCAPING,
		"stream escaping", MVX_TRI_UNSET);
	if (ctrl == NULL)
		goto handler_free;

	ctrl = v4l2_ctrl_new_std_menu(
		hnd, &ctrl_ops, V4L2_CID_MPEG_VIDEO_H264_PROFILE,
		V4L2_MPEG_VIDEO_H264_PROFILE_HIGH_10,
		get_skip_mask(h264_profile_list,
			      ARRAY_SIZE(h264_profile_list)),
		V4L2_MPEG_VIDEO_H264_PROFILE_HIGH);
	if (ctrl == NULL)
		goto handler_free;

	ctrl = v4l2_ctrl_new_std_menu(
		hnd, &ctrl_ops, V4L2_CID_MPEG_VIDEO_HEVC_PROFILE,
		V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_10,
		get_skip_mask(h265_profile_list,
			      ARRAY_SIZE(h265_profile_list)),
		V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN);
	if (ctrl == NULL)
		goto handler_free;

	ctrl = v4l2_ctrl_new_std_menu(
		hnd, &ctrl_ops, V4L2_CID_MPEG_VIDEO_VP9_PROFILE,
		V4L2_MPEG_VIDEO_VP9_PROFILE_2,
		get_skip_mask(vp9_profile_list,
			      ARRAY_SIZE(vp9_profile_list)),
		V4L2_MPEG_VIDEO_VP9_PROFILE_0);
	if (ctrl == NULL)
		goto handler_free;

	ctrl = v4l2_ctrl_new_std_menu(
		hnd, &ctrl_ops, V4L2_CID_MPEG_VIDEO_VP8_PROFILE,
		V4L2_MPEG_VIDEO_VP8_PROFILE_0,
		get_skip_mask(vp8_profile_list,
			      ARRAY_SIZE(vp8_profile_list)),
		V4L2_MPEG_VIDEO_VP8_PROFILE_0);
	if (ctrl == NULL)
		goto handler_free;

	memset(&cfg, 0, sizeof(cfg));
	cfg.id = V4L2_CID_MVE_VIDEO_VC1_PROFILE;
	cfg.ops = &ctrl_ops;
	cfg.type = V4L2_CTRL_TYPE_MENU;
	cfg.name = "vc1 profile";
	cfg.max = ARRAY_SIZE(vc1_profile_str) - 1;
	cfg.def = 0;
	cfg.qmenu = vc1_profile_str;
	ctrl = v4l2_ctrl_new_custom(hnd, &cfg, NULL);
	if (ctrl == NULL)
		goto handler_free;

	ctrl = v4l2_ctrl_new_std_menu(
		hnd, &ctrl_ops, V4L2_CID_MPEG_VIDEO_H264_LEVEL,
		V4L2_MPEG_VIDEO_H264_LEVEL_6_1,
		get_skip_mask(h264_level_list, ARRAY_SIZE(h264_level_list)),
		V4L2_MPEG_VIDEO_H264_LEVEL_1_0);
	if (ctrl == NULL)
		goto handler_free;

	ctrl = v4l2_ctrl_new_std_menu(
		hnd, &ctrl_ops, V4L2_CID_MPEG_VIDEO_HEVC_LEVEL,
		V4L2_MPEG_VIDEO_HEVC_LEVEL_6_1,
		get_skip_mask(h265_level_list, ARRAY_SIZE(h265_level_list)),
		V4L2_MPEG_VIDEO_HEVC_LEVEL_1);
	if (ctrl == NULL)
		goto handler_free;

	ctrl = v4l2_ctrl_new_std_menu(
		hnd, &ctrl_ops, V4L2_CID_MPEG_VIDEO_HEVC_TIER,
		V4L2_MPEG_VIDEO_HEVC_TIER_HIGH,
		get_skip_mask(h265_tier_list, ARRAY_SIZE(h265_tier_list)),
		V4L2_MPEG_VIDEO_HEVC_TIER_MAIN);
	if (ctrl == NULL)
		goto handler_free;

	ctrl = v4l2_ctrl_new_std(
		hnd, &ctrl_ops, V4L2_CID_MPEG_VIDEO_B_FRAMES,
		0, INT_MAX, 1, 0);
	if (ctrl == NULL)
		goto handler_free;

	ctrl = v4l2_ctrl_new_std(
		hnd, &ctrl_ops, V4L2_CID_MPEG_VIDEO_GOP_SIZE,
		1, INT_MAX, 1, 30);
	if (ctrl == NULL)
		goto handler_free;

	memset(&cfg, 0, sizeof(cfg));
	cfg.id = V4L2_CID_MVE_VIDEO_GOP_TYPE;
	cfg.ops = &ctrl_ops;
	cfg.type = V4L2_CTRL_TYPE_MENU;
	cfg.name = "GOP type";
	cfg.max = ARRAY_SIZE(gop_type_str) - 1;
	cfg.def = 0;
	cfg.qmenu = gop_type_str;
	ctrl = v4l2_ctrl_new_custom(hnd, &cfg, NULL);
	if (ctrl == NULL)
		goto handler_free;

	ctrl = v4l2_ctrl_new_std(
		hnd, &ctrl_ops, V4L2_CID_MPEG_VIDEO_CYCLIC_INTRA_REFRESH_MB,
		0, INT_MAX, 1, 0);
	if (ctrl == NULL)
		goto handler_free;

	ctrl = mvx_v4l2_ctrl_new_custom_tristate(
		hnd, V4L2_CID_MVE_VIDEO_CONSTR_IPRED,
		"constrained intra prediction", MVX_TRI_UNSET);
	if (ctrl == NULL)
		goto handler_free;

	ctrl = mvx_v4l2_ctrl_new_custom_tristate(
		hnd, V4L2_CID_MVE_VIDEO_ENTROPY_SYNC, "entropy sync",
		MVX_TRI_UNSET);
	if (ctrl == NULL)
		goto handler_free;

	ctrl = mvx_v4l2_ctrl_new_custom_tristate(
		hnd, V4L2_CID_MVE_VIDEO_TEMPORAL_MVP,
		"temporal mvp", MVX_TRI_UNSET);
	if (ctrl == NULL)
		goto handler_free;

	ctrl = mvx_v4l2_ctrl_new_custom_int(
		hnd, V4L2_CID_MVE_VIDEO_TILE_ROWS,
		"tile rows", 0, 65536, 0, 1);
	if (ctrl == NULL)
		goto handler_free;

	ctrl = mvx_v4l2_ctrl_new_custom_int(
		hnd, V4L2_CID_MVE_VIDEO_TILE_COLS,
		"tile columns", 0, 65536, 0, 1);
	if (ctrl == NULL)
		goto handler_free;

	ctrl = mvx_v4l2_ctrl_new_custom_int(
		hnd, V4L2_CID_MVE_VIDEO_MIN_LUMA_CB_SIZE,
		"min luma cb size", 0, 16, 0, 8);
	if (ctrl == NULL)
		goto handler_free;

	memset(&cfg, 0, sizeof(cfg));
	cfg.id = V4L2_CID_MVE_VIDEO_MB_MASK;
	cfg.ops = &ctrl_ops;
	cfg.type = V4L2_CTRL_TYPE_BITMASK;
	cfg.name = "macroblocks mask";
	cfg.def = 0x7fff;
	cfg.min = 0;
	cfg.max = 0x7fff;
	cfg.step = 0;
	ctrl = v4l2_ctrl_new_custom(hnd, &cfg, NULL);
	if (ctrl == NULL)
		goto handler_free;

	/* Custom menu instead of the standard one so the extra "None"
	 * entry (driver extension) is selectable. */
	memset(&cfg, 0, sizeof(cfg));
	cfg.id = V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE;
	cfg.ops = &ctrl_ops;
	cfg.type = V4L2_CTRL_TYPE_MENU;
	cfg.name = "H264 Entropy Mode";
	cfg.max = ARRAY_SIZE(h264_entropy_mode_str) - 1;
	cfg.def = V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_NONE;
	cfg.qmenu = h264_entropy_mode_str;
	ctrl = v4l2_ctrl_new_custom(hnd, &cfg, NULL);
	if (ctrl == NULL)
		goto handler_free;

	ctrl = v4l2_ctrl_new_std_menu(
		hnd, &ctrl_ops, V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE,
		V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_MAX_MB,
		get_skip_mask(multi_slice_mode_list,
			      ARRAY_SIZE(multi_slice_mode_list)),
		V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_SINGLE);
	if (ctrl == NULL)
		goto
handler_free; + + ctrl = v4l2_ctrl_new_std( + hnd, &ctrl_ops, V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_MB, + 0, INT_MAX, 1, 0); + if (ctrl == NULL) + goto handler_free; + + memset(&cfg, 0, sizeof(cfg)); + cfg.id = V4L2_CID_MVE_VIDEO_VP9_PROB_UPDATE; + cfg.ops = &ctrl_ops; + cfg.type = V4L2_CTRL_TYPE_MENU; + cfg.name = "VP9 prob update"; + cfg.max = ARRAY_SIZE(vp9_prob_update_str) - 1; + cfg.def = cfg.max; + cfg.qmenu = vp9_prob_update_str; + ctrl = v4l2_ctrl_new_custom(hnd, &cfg, NULL); + if (ctrl == NULL) + goto handler_free; + + ctrl = v4l2_ctrl_new_std( + hnd, &ctrl_ops, V4L2_CID_MPEG_VIDEO_MV_H_SEARCH_RANGE, + 0, INT_MAX, 1, 0); + if (ctrl == NULL) + goto handler_free; + + ctrl = v4l2_ctrl_new_std( + hnd, &ctrl_ops, V4L2_CID_MPEG_VIDEO_MV_V_SEARCH_RANGE, + 0, INT_MAX, 1, 0); + if (ctrl == NULL) + goto handler_free; + + ctrl = mvx_v4l2_ctrl_new_custom_int( + hnd, V4L2_CID_MVE_VIDEO_BITDEPTH_CHROMA, + "bitdepth chroma", 0, 0xff, 0, 1); + if (ctrl == NULL) + goto handler_free; + + ctrl = mvx_v4l2_ctrl_new_custom_int( + hnd, V4L2_CID_MVE_VIDEO_BITDEPTH_LUMA, + "bitdepth luma", 0, 0xff, 0, 1); + if (ctrl == NULL) + goto handler_free; + + ctrl = mvx_v4l2_ctrl_new_custom_int( + hnd, V4L2_CID_MVE_VIDEO_GOP_RESET_PFRAMES, + "gop reset p frames", 0, 0xff, 0, 1); + if (ctrl == NULL) + goto handler_free; + + ctrl = mvx_v4l2_ctrl_new_custom_int( + hnd, V4L2_CID_MVE_VIDEO_LTR_RESET_PERIOD, + "ltr reset period", 0, 0xffff, 0, 1); + if (ctrl == NULL) + goto handler_free; + + ctrl = mvx_v4l2_ctrl_new_custom_int( + hnd, V4L2_CID_MVE_VIDEO_FORCE_CHROMA_FORMAT, + "force chroma format", -1, INT_MAX, 0, 1); + if (ctrl == NULL) + goto handler_free; + + memset(&cfg, 0, sizeof(cfg)); + cfg.id = V4L2_CID_MVE_VIDEO_RGB_TO_YUV_MODE; + cfg.ops = &ctrl_ops; + cfg.type = V4L2_CTRL_TYPE_MENU; + cfg.name = "RGB to YUV conversion mode"; + cfg.max = ARRAY_SIZE(rgb_to_yuv_mode_str) - 1; + cfg.def = MVX_RGB_TO_YUV_MODE_MAX; + cfg.qmenu = rgb_to_yuv_mode_str; + ctrl = v4l2_ctrl_new_custom(hnd, &cfg, 
NULL); + if (ctrl == NULL) + goto handler_free; + + ctrl = mvx_v4l2_ctrl_new_custom_int( + hnd, V4L2_CID_MVE_VIDEO_BANDWIDTH_LIMIT, + "bandwidth limit", 0, INT_MAX, 0, 1); + if (ctrl == NULL) + goto handler_free; + + ctrl = mvx_v4l2_ctrl_new_custom_int( + hnd, V4L2_CID_MVE_VIDEO_CABAC_INIT_IDC, + "CABAC init IDC", 0, 4, 0, 1); + if (ctrl == NULL) + goto handler_free; + + ctrl = v4l2_ctrl_new_std( + hnd, &ctrl_ops, V4L2_CID_MPEG_VIDEO_H263_I_FRAME_QP, + 0, 31, 1, 0); + if (ctrl == NULL) + goto handler_free; + + ctrl = v4l2_ctrl_new_std( + hnd, &ctrl_ops, V4L2_CID_MPEG_VIDEO_H263_P_FRAME_QP, + 0, 31, 1, 0); + if (ctrl == NULL) + goto handler_free; + + ctrl = v4l2_ctrl_new_std( + hnd, &ctrl_ops, V4L2_CID_MPEG_VIDEO_H263_B_FRAME_QP, + 0, 31, 1, 0); + if (ctrl == NULL) + goto handler_free; + + ctrl = v4l2_ctrl_new_std( + hnd, &ctrl_ops, V4L2_CID_MPEG_VIDEO_H263_MIN_QP, + 1, 31, 1, 1); + if (ctrl == NULL) + goto handler_free; + + ctrl = v4l2_ctrl_new_std( + hnd, &ctrl_ops, V4L2_CID_MPEG_VIDEO_H263_MAX_QP, + 1, 31, 1, 31); + if (ctrl == NULL) + goto handler_free; + + ctrl = v4l2_ctrl_new_std( + hnd, &ctrl_ops, V4L2_CID_MPEG_VIDEO_H264_I_FRAME_QP, + 0, 51, 1, 0); + if (ctrl == NULL) + goto handler_free; + + ctrl = v4l2_ctrl_new_std( + hnd, &ctrl_ops, V4L2_CID_MPEG_VIDEO_H264_P_FRAME_QP, + 0, 51, 1, 0); + if (ctrl == NULL) + goto handler_free; + + ctrl = v4l2_ctrl_new_std( + hnd, &ctrl_ops, V4L2_CID_MPEG_VIDEO_H264_B_FRAME_QP, + 0, 51, 1, 0); + if (ctrl == NULL) + goto handler_free; + + ctrl = v4l2_ctrl_new_std( + hnd, &ctrl_ops, V4L2_CID_MPEG_VIDEO_H264_MIN_QP, + 1, 51, 1, 1); + if (ctrl == NULL) + goto handler_free; + + ctrl = v4l2_ctrl_new_std( + hnd, &ctrl_ops, V4L2_CID_MPEG_VIDEO_H264_MAX_QP, + 1, 51, 1, 51); + if (ctrl == NULL) + goto handler_free; +#if (KERNEL_VERSION(4, 17, 0) > LINUX_VERSION_CODE) + + ctrl = mvx_v4l2_ctrl_new_custom_int( + hnd, V4L2_CID_MPEG_VIDEO_HEVC_I_FRAME_QP, + "HEVC I frame QP value", + 0, 51, 0, 1); + if (ctrl == NULL) + goto handler_free; + 
ctrl = mvx_v4l2_ctrl_new_custom_int( + hnd, V4L2_CID_MPEG_VIDEO_HEVC_P_FRAME_QP, + "HEVC P frame QP value", + 0, 51, 0, 1); + if (ctrl == NULL) + goto handler_free; + + ctrl = mvx_v4l2_ctrl_new_custom_int( + hnd, V4L2_CID_MPEG_VIDEO_HEVC_B_FRAME_QP, + "HEVC B frame QP value", + 0, 51, 0, 1); + if (ctrl == NULL) + goto handler_free; + + ctrl = mvx_v4l2_ctrl_new_custom_int( + hnd, V4L2_CID_MPEG_VIDEO_HEVC_MIN_QP, + "HEVC min QP value", + 1, 51, 1, 1); + if (ctrl == NULL) + goto handler_free; + + ctrl = mvx_v4l2_ctrl_new_custom_int( + hnd, V4L2_CID_MPEG_VIDEO_HEVC_MAX_QP, + "HEVC max QP value", + 1, 51, 51, 1); + if (ctrl == NULL) + goto handler_free; +#else + + ctrl = v4l2_ctrl_new_std( + hnd, &ctrl_ops, V4L2_CID_MPEG_VIDEO_HEVC_I_FRAME_QP, + 0, 51, 1, 0); + if (ctrl == NULL) + goto handler_free; + + ctrl = v4l2_ctrl_new_std( + hnd, &ctrl_ops, V4L2_CID_MPEG_VIDEO_HEVC_P_FRAME_QP, + 0, 51, 1, 0); + if (ctrl == NULL) + goto handler_free; + + ctrl = v4l2_ctrl_new_std( + hnd, &ctrl_ops, V4L2_CID_MPEG_VIDEO_HEVC_B_FRAME_QP, + 0, 51, 1, 0); + if (ctrl == NULL) + goto handler_free; + + ctrl = v4l2_ctrl_new_std( + hnd, &ctrl_ops, V4L2_CID_MPEG_VIDEO_HEVC_MIN_QP, + 1, 51, 1, 1); + if (ctrl == NULL) + goto handler_free; + + ctrl = v4l2_ctrl_new_std( + hnd, &ctrl_ops, V4L2_CID_MPEG_VIDEO_HEVC_MAX_QP, + 1, 51, 1, 51); + if (ctrl == NULL) + goto handler_free; +#endif + ctrl = v4l2_ctrl_new_std( + hnd, &ctrl_ops, V4L2_CID_MPEG_VIDEO_VPX_I_FRAME_QP, + 0, 63, 1, 0); + if (ctrl == NULL) + goto handler_free; + + ctrl = v4l2_ctrl_new_std( + hnd, &ctrl_ops, V4L2_CID_MPEG_VIDEO_VPX_P_FRAME_QP, + 0, 63, 1, 0); + if (ctrl == NULL) + goto handler_free; + + v4l2_ctrl_new_std( + hnd, &ctrl_ops, V4L2_CID_MPEG_VIDEO_FORCE_KEY_FRAME, + 0, 0, 0, 0); + + ctrl = mvx_v4l2_ctrl_new_custom_int( + hnd, V4L2_CID_MVE_VIDEO_VPX_B_FRAME_QP, + "VPx B frame QP value", + 0, 63, 0, 1); + if (ctrl == NULL) + goto handler_free; + + ctrl = v4l2_ctrl_new_std( + hnd, &ctrl_ops, V4L2_CID_MPEG_VIDEO_VPX_MIN_QP, + 0, 
63, 1, 0); + if (ctrl == NULL) + goto handler_free; + + ctrl = v4l2_ctrl_new_std( + hnd, &ctrl_ops, V4L2_CID_MPEG_VIDEO_VPX_MAX_QP, + 0, 63, 1, 63); + if (ctrl == NULL) + goto handler_free; + + ctrl = mvx_v4l2_ctrl_new_custom_int( + hnd, V4L2_CID_MVE_VIDEO_QP_FIXED, + "fixed qp", + 0, 63, 0, 1); + if (ctrl == NULL) + goto handler_free; + + + ctrl = v4l2_ctrl_new_std( + hnd, &ctrl_ops, V4L2_CID_MIN_BUFFERS_FOR_OUTPUT, + 1, 32, 1, 1); + if (ctrl == NULL) + goto handler_free; + + ctrl->flags |= V4L2_CTRL_FLAG_READ_ONLY | V4L2_CTRL_FLAG_VOLATILE; + + ctrl = v4l2_ctrl_new_std( + hnd, &ctrl_ops, V4L2_CID_MIN_BUFFERS_FOR_CAPTURE, + 1, 32, 1, 1); + if (ctrl == NULL) + goto handler_free; + + ctrl->flags |= V4L2_CTRL_FLAG_READ_ONLY | V4L2_CTRL_FLAG_VOLATILE; + + ctrl = v4l2_ctrl_new_std( + hnd, &ctrl_ops, V4L2_CID_JPEG_RESTART_INTERVAL, + -1, 0xffff, 1, -1); + if (ctrl == NULL) + goto handler_free; + + ctrl = v4l2_ctrl_new_std( + hnd, &ctrl_ops, V4L2_CID_JPEG_COMPRESSION_QUALITY, + 0, 100, 1, 0); + if (ctrl == NULL) + goto handler_free; + + ctrl = mvx_v4l2_ctrl_new_custom_int( + hnd, V4L2_CID_MVE_VIDEO_JPEG_QUALITY_LUMA, + "JPEG compress luma quality", + 0, 100, 0, 1); + if (ctrl == NULL) + goto handler_free; + + ctrl = mvx_v4l2_ctrl_new_custom_int( + hnd, V4L2_CID_MVE_VIDEO_JPEG_QUALITY_CHROMA, + "JPEG compress chroma quality", + 0, 100, 0, 1); + if (ctrl == NULL) + goto handler_free; + + ctrl = mvx_v4l2_ctrl_new_custom_int( + hnd, V4L2_CID_MVE_VIDEO_INIT_QP_I, + "init qp of I frame", + 0, 51, 0, 1); + if (ctrl == NULL) + goto handler_free; + + ctrl = mvx_v4l2_ctrl_new_custom_int( + hnd, V4L2_CID_MVE_VIDEO_INIT_QP_P, + "init qp of P frame", + 0, 51, 0, 1); + if (ctrl == NULL) + goto handler_free; + + ctrl = mvx_v4l2_ctrl_new_custom_int( + hnd, V4L2_CID_MVE_VIDEO_SAO_LUMA, + "sao luma", + 0, 0xffff, 0, 1); + if (ctrl == NULL) + goto handler_free; + + ctrl = mvx_v4l2_ctrl_new_custom_int( + hnd, V4L2_CID_MVE_VIDEO_SAO_CHROMA, + "sao chroma", + 0, 0xffff, 0, 1); + if (ctrl == 
NULL) + goto handler_free; + + ctrl = mvx_v4l2_ctrl_new_custom_int( + hnd, V4L2_CID_MVE_VIDEO_QP_DELTA_I_P, + "delta qp of I and P frame", + 0, 51, 0, 1); + if (ctrl == NULL) + goto handler_free; + + ctrl = mvx_v4l2_ctrl_new_custom_int( + hnd, V4L2_CID_MVE_VIDEO_QP_REF_RB_EN, + "qp ref rb en", + 0, 51, 0, 1); + if (ctrl == NULL) + goto handler_free; + + ctrl = mvx_v4l2_ctrl_new_custom_int( + hnd, V4L2_CID_MVE_VIDEO_RC_CLIP_TOP, + "rc clip top", + 0, 0xff, 0, 1); + if (ctrl == NULL) + goto handler_free; + + ctrl = mvx_v4l2_ctrl_new_custom_int( + hnd, V4L2_CID_MVE_VIDEO_RC_CLIP_BOT, + "rc clip bot", + 0, 0xff, 0, 1); + if (ctrl == NULL) + goto handler_free; + + ctrl = mvx_v4l2_ctrl_new_custom_int( + hnd, V4L2_CID_MVE_VIDEO_QP_MAP_CLIP_TOP, + "qp map clip top", + 0, 0xff, 0, 1); + if (ctrl == NULL) + goto handler_free; + + ctrl = mvx_v4l2_ctrl_new_custom_int( + hnd, V4L2_CID_MVE_VIDEO_QP_MAP_CLIP_BOT, + "qp map clip bot", + 0, 0xff, 0, 1); + if (ctrl == NULL) + goto handler_free; + + ctrl = mvx_v4l2_ctrl_new_custom_int( + hnd, V4L2_CID_MVE_VIDEO_MAX_QP_I, + "qp max for I frame", + 0, 63, 63, 1); + if (ctrl == NULL) + goto handler_free; + + ctrl = mvx_v4l2_ctrl_new_custom_int( + hnd, V4L2_CID_MVE_VIDEO_MIN_QP_I, + "qp min for I frame", + 0, 63, 0, 1); + if (ctrl == NULL) + goto handler_free; + + ctrl = mvx_v4l2_ctrl_new_custom_int( + hnd, V4L2_CID_MVE_VIDEO_VISIBLE_WIDTH, + "visible width for encode", + 0, 0xffff, 0, 1); + if (ctrl == NULL) + goto handler_free; + + ctrl = mvx_v4l2_ctrl_new_custom_int( + hnd, V4L2_CID_MVE_VIDEO_VISIBLE_HEIGHT, + "visible height for encode", + 0, 0xffff, 0, 1); + if (ctrl == NULL) + goto handler_free; + + ctrl = mvx_v4l2_ctrl_new_custom_int( + hnd, V4L2_CID_MVE_VIDEO_GDR_NUMBER, + "gdr number", + 0, 0xffff, 1, 1); + if (ctrl == NULL) + goto handler_free; + + ctrl = mvx_v4l2_ctrl_new_custom_int( + hnd, V4L2_CID_MVE_VIDEO_GDR_PERIOD, + "gdr period", + 0, 0xffff, 1, 1); + if (ctrl == NULL) + goto handler_free; + + ctrl = 
mvx_v4l2_ctrl_new_custom_int( + hnd, V4L2_CID_MVE_VIDEO_SCD_ENABLE, + "scd enable", + 0, 1, 0, 1); + if (ctrl == NULL) + goto handler_free; + + ctrl = mvx_v4l2_ctrl_new_custom_int( + hnd, V4L2_CID_MVE_VIDEO_SCD_PERCENT, + "scd percent", + 0, 10, 3, 1); + if (ctrl == NULL) + goto handler_free; + + ctrl = mvx_v4l2_ctrl_new_custom_int( + hnd, V4L2_CID_MVE_VIDEO_SCD_THRESHOLD, + "scd threshold", + 0, 2047, 100, 1); + if (ctrl == NULL) + goto handler_free; + + ctrl = mvx_v4l2_ctrl_new_custom_int( + hnd, V4L2_CID_MVE_VIDEO_AQ_SSIM_EN, + "aq ssim enable", + 0, 1, 0, 1); + if (ctrl == NULL) + goto handler_free; + + ctrl = mvx_v4l2_ctrl_new_custom_int( + hnd, V4L2_CID_MVE_VIDEO_AQ_NEG_RATIO, + "aq neg ratio", + 0, 63, 24, 1); + if (ctrl == NULL) + goto handler_free; + + ctrl = mvx_v4l2_ctrl_new_custom_int( + hnd, V4L2_CID_MVE_VIDEO_AQ_POS_RATIO, + "aq pos ratio", + 0, 63, 24, 1); + if (ctrl == NULL) + goto handler_free; + + ctrl = mvx_v4l2_ctrl_new_custom_int( + hnd, V4L2_CID_MVE_VIDEO_AQ_QPDELTA_LMT, + "aq qpdelta lmt", + 0, 7, 7, 1); + if (ctrl == NULL) + goto handler_free; + + ctrl = mvx_v4l2_ctrl_new_custom_int( + hnd, V4L2_CID_MVE_VIDEO_AQ_INIT_FRM_AVG_SVAR, + "aq init frm avg svar", + 0, 15, 0, 1); + if (ctrl == NULL) + goto handler_free; + + ctrl = mvx_v4l2_ctrl_new_custom_int( + hnd, V4L2_CID_MVE_VIDEO_MULIT_SPS_PPS, + "sps pps", + 0, 0xffff, 0, 1); + if (ctrl == NULL) + goto handler_free; + + ctrl = mvx_v4l2_ctrl_new_custom_int( + hnd, V4L2_CID_MVE_VIDEO_ENABLE_VISUAL, + "enable visual", + 0, 0x1f, 0, 1); + if (ctrl == NULL) + goto handler_free; + + ctrl = mvx_v4l2_ctrl_new_custom_int( + hnd, V4L2_CID_MVE_VIDEO_ENABLE_ADAPTIVE_INTRA_BLOCK, + "enable adaptive intra block", + 0, 1, 0, 1); + if (ctrl == NULL) + goto handler_free; + + + ctrl = mvx_v4l2_ctrl_new_custom_int( + hnd, V4L2_CID_MVE_VIDEO_FORCED_UV_VALUE, + "forced uv value", + 0, 0x400, 0x400, 1); + if (ctrl == NULL) + goto handler_free; + + ctrl = mvx_v4l2_ctrl_new_custom_int( + hnd, 
V4L2_CID_MVE_VIDEO_JOB_FRAMES, + "job frames", + 0, INT_MAX, 1, 1); + if (ctrl == NULL) + goto handler_free; + + ret = mvx_v4l2_ctrls_init_hdr10(hnd, 0); + if (ret != 0) + goto handler_free; + + ctrl = mvx_v4l2_ctrl_new_custom_int( + hnd, V4L2_CID_MVE_VIDEO_PRIORITY, + "priority", + 0, 3, 2, 1); + if (ctrl == NULL) + goto handler_free; + + ret = v4l2_ctrl_handler_setup(hnd); + if (ret != 0) + goto handler_free; + + ctrl = mvx_v4l2_ctrl_new_custom_int( + hnd, V4L2_CID_MVE_VIDEO_ENC_INTRA_IPENALTY_ANGULAR, + "intra ipenalty angular", + MVX_INVALID_VAL, 31, MVX_INVALID_VAL, 1); + if (ctrl == NULL) + goto handler_free; + + ctrl = mvx_v4l2_ctrl_new_custom_int( + hnd, V4L2_CID_MVE_VIDEO_ENC_INTRA_IPENALTY_PLANAR, + "intra ipenalty planar", + MVX_INVALID_VAL, 31, MVX_INVALID_VAL, 1); + if (ctrl == NULL) + goto handler_free; + + ctrl = mvx_v4l2_ctrl_new_custom_int( + hnd, V4L2_CID_MVE_VIDEO_ENC_INTRA_IPENALTY_DC, + "intra ipenalty dc", + MVX_INVALID_VAL, 31, MVX_INVALID_VAL, 1); + if (ctrl == NULL) + goto handler_free; + + ctrl = mvx_v4l2_ctrl_new_custom_int( + hnd, V4L2_CID_MVE_VIDEO_ENC_INTER_IPENALTY_ANGULAR, + "inter ipenalty angular", + MVX_INVALID_VAL, 31, MVX_INVALID_VAL, 1); + if (ctrl == NULL) + goto handler_free; + + ctrl = mvx_v4l2_ctrl_new_custom_int( + hnd, V4L2_CID_MVE_VIDEO_ENC_INTER_IPENALTY_PLANAR, + "inter ipenalty planar", + MVX_INVALID_VAL, 31, MVX_INVALID_VAL, 1); + if (ctrl == NULL) + goto handler_free; + + ctrl = mvx_v4l2_ctrl_new_custom_int( + hnd, V4L2_CID_MVE_VIDEO_ENC_INTER_IPENALTY_DC, + "inter ipenalty dc", + MVX_INVALID_VAL, 31, MVX_INVALID_VAL, 1); + if (ctrl == NULL) + goto handler_free; + + return 0; + +handler_free: + v4l2_ctrl_handler_free(hnd); + return -EINVAL; +} + +void mvx_v4l2_ctrls_done(struct v4l2_ctrl_handler *hnd) +{ + v4l2_ctrl_handler_free(hnd); +} diff --git a/drivers/media/platform/cix/cix_vpu/if/v4l2/mvx_v4l2_ctrls.h b/drivers/media/platform/cix/cix_vpu/if/v4l2/mvx_v4l2_ctrls.h new file mode 100755 index 
000000000000..5ecbfbbc84ee --- /dev/null +++ b/drivers/media/platform/cix/cix_vpu/if/v4l2/mvx_v4l2_ctrls.h @@ -0,0 +1,75 @@ +/* + * The confidential and proprietary information contained in this file may + * only be used by a person authorised under and to the extent permitted + * by a subsisting licensing agreement from Arm Technology (China) Co., Ltd. + * + * (C) COPYRIGHT 2021-2021 Arm Technology (China) Co., Ltd. + * ALL RIGHTS RESERVED + * + * This entire notice must be reproduced on all copies of this file + * and copies of this file may only be made by a person if such person is + * permitted to do so under the terms of a subsisting license agreement + * from Arm Technology (China) Co., Ltd. + * + * SPDX-License-Identifier: GPL-2.0-only + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ * + */ + +#ifndef _MVX_V4L2_CTRLS_H_ +#define _MVX_V4L2_CTRLS_H_ + +/**************************************************************************** + * Includes + ****************************************************************************/ + +#include + +/**************************************************************************** + * Exported functions + ****************************************************************************/ + +/** + * mvx_v4l2_ctrls_init_dec() - Initialize V4L2 control handler for decoder. + * @hnd: V4L2 control handler. + * + * This function initializes V4L2 controls for handler @hnd. + * Controls set to their default values. + * + * Return: 0 on success, error code otherwise. + */ +int mvx_v4l2_ctrls_init_dec(struct v4l2_ctrl_handler *hnd); + +/** + * mvx_v4l2_ctrls_init_enc() - Initialize V4L2 control handler for encoder. + * @hnd: V4L2 control handler. + * + * This function initializes V4L2 controls for handler @hnd. + * Controls set to their default values. + * + * Return: 0 on success, error code otherwise. + */ +int mvx_v4l2_ctrls_init_enc(struct v4l2_ctrl_handler *hnd); + +/** + * mvx_v4l2_ctrls_done() - Destroy V4L2 control handler. + * @hnd: V4L2 control handler. + * + * This function destroys V4L2 control handler. + */ +void mvx_v4l2_ctrls_done(struct v4l2_ctrl_handler *hnd); + +#endif /* _MVX_V4L2_CTRLS_H_ */ diff --git a/drivers/media/platform/cix/cix_vpu/if/v4l2/mvx_v4l2_fops.c b/drivers/media/platform/cix/cix_vpu/if/v4l2/mvx_v4l2_fops.c new file mode 100755 index 000000000000..2c31a15d34f3 --- /dev/null +++ b/drivers/media/platform/cix/cix_vpu/if/v4l2/mvx_v4l2_fops.c @@ -0,0 +1,238 @@ +/* + * The confidential and proprietary information contained in this file may + * only be used by a person authorised under and to the extent permitted + * by a subsisting licensing agreement from Arm Technology (China) Co., Ltd. + * + * (C) COPYRIGHT 2021-2021 Arm Technology (China) Co., Ltd. 
+ * ALL RIGHTS RESERVED + * + * This entire notice must be reproduced on all copies of this file + * and copies of this file may only be made by a person if such person is + * permitted to do so under the terms of a subsisting license agreement + * from Arm Technology (China) Co., Ltd. + * + * SPDX-License-Identifier: GPL-2.0-only + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + * + */ + +#include +#include +#include +#include "mvx_ext_if.h" +#include "mvx_v4l2_buffer.h" +#include "mvx_v4l2_ctrls.h" +#include "mvx_v4l2_fops.h" +#include "mvx_v4l2_session.h" +#include "mvx_v4l2_vidioc.h" +#include "mvx_log_group.h" + +/**************************************************************************** + * Exported functions and variables + ****************************************************************************/ + +int mvx_v4l2_open(struct file *file) +{ + struct mvx_ext_if *ctx = video_drvdata(file); + struct mvx_v4l2_session *session; + struct v4l2_format fmt = { 0 }; + int ret; + + session = devm_kzalloc(ctx->dev, sizeof(*session), GFP_KERNEL); + if (session == NULL) { + MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_WARNING, + "Failed to allocate V4L2 session."); + return -ENOMEM; + } + + MVX_SESSION_INFO(&session->session, "v4l2: Open device. 
id=%u.", + ctx->dev->id); + + ret = mvx_v4l2_session_construct(session, ctx); + if (ret != 0) + goto free_session; + + file->private_data = &session->fh; + v4l2_fh_init(&session->fh, &ctx->vdev); + v4l2_fh_add(&session->fh); + + /* Set default port formats. */ + fmt.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE; + fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_NV12M; + fmt.fmt.pix.width = 64; + fmt.fmt.pix.height = 64; + (void)mvx_v4l2_vidioc_s_fmt_vid_out(file, NULL, &fmt); + + fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE; + (void)mvx_v4l2_vidioc_s_fmt_vid_cap(file, NULL, &fmt); + + if (ctx->is_encoder) + ret = mvx_v4l2_ctrls_init_enc(&session->v4l2_ctrl); + else + ret = mvx_v4l2_ctrls_init_dec(&session->v4l2_ctrl); + if (ret != 0) { + MVX_SESSION_WARN(&session->session, + "Failed to register V4L2 controls handler. ret=%x", + ret); + goto put_session; + } + + session->fh.ctrl_handler = &session->v4l2_ctrl; + + return 0; + +put_session: + + /* + * Session was completely constructed, so we have to destroy it + * gracefully using reference counting. 
+ */ + mvx_session_put(&session->session); + if (!wait_for_completion_timeout(&session->cmp, msecs_to_jiffies(3000))) + MVX_SESSION_ERR(&session->session, "Wait destroy session complete timeout."); + v4l2_fh_del(&session->fh); + v4l2_fh_exit(&session->fh); + +free_session: + devm_kfree(ctx->dev, session); + + return ret; +} + +int mvx_v4l2_release(struct file *file) +{ + struct mvx_v4l2_session *vsession = file_to_session(file); + int i; + int ret; + + if (file != NULL && file->private_data == NULL) { + MVX_SESSION_ERR(&vsession->session, "NULL private_data is not expected."); + return -EINVAL; + } + + MVX_SESSION_INFO(&vsession->session, "v4l2: Release."); + + mutex_lock(&vsession->mutex); + + mvx_v4l2_ctrls_done(vsession->fh.ctrl_handler); + + for (i = 0; i < MVX_DIR_MAX; i++) + if (vsession->port[i].q_set != false) { + vb2_queue_release(&vsession->port[i].vb2_queue); + vsession->port[i].q_set = false; + } + + reinit_completion(&vsession->cmp); + ret = mvx_session_put(&vsession->session); + if (ret == 0) + mutex_unlock(&vsession->mutex); + while (!wait_for_completion_timeout(&vsession->cmp, msecs_to_jiffies(15000))) { + MVX_SESSION_ERR(&vsession->session, "Wait destroy session complete timeout."); + if (mvx_session_ref_read(&vsession->session) == 0) { + MVX_SESSION_ERR(&vsession->session, "Session reference count already be 0."); + break; + } + mvx_session_put(&vsession->session); + } + + file->private_data = NULL; + v4l2_fh_del(&vsession->fh); + v4l2_fh_exit(&vsession->fh); + + MVX_SESSION_INFO(&vsession->session, "v4l2: Release exit."); + devm_kfree(vsession->ext->dev, vsession); + return 0; +} + +unsigned int mvx_v4l2_poll(struct file *file, + struct poll_table_struct *wait) +{ + struct mvx_v4l2_session *vsession = file_to_session(file); + unsigned long events = poll_requested_events(wait); + unsigned int revents = 0; + struct v4l2_fh *fh = file->private_data; + int ret; + + ret = mutex_lock_interruptible(&vsession->mutex); + if (ret != 0) { + 
MVX_SESSION_WARN(&vsession->session, "v4l2: Poll. Get lock failed."); + return 0; + } + + if (vsession->session.error != 0) { + revents = POLLERR; + goto unlock_mutex; + } + + poll_wait(file, &fh->wait, wait); + if (v4l2_event_pending(fh)) + revents |= EPOLLPRI; + + /* POLLPRI events are handled by Vb2 */ + if (vb2_is_streaming(&vsession->port[MVX_DIR_INPUT].vb2_queue) && + (events & EPOLLOUT)) + revents |= vb2_poll(&vsession->port[MVX_DIR_INPUT].vb2_queue, + file, wait); + if (vb2_is_streaming(&vsession->port[MVX_DIR_OUTPUT].vb2_queue) && + (events & (EPOLLIN | EPOLLPRI))) + revents |= vb2_poll(&vsession->port[MVX_DIR_OUTPUT].vb2_queue, + file, wait); + + MVX_SESSION_VERBOSE(&vsession->session, + "v4l2: Poll. events=0x%lx, revents=0x%x, nevents=%d.", + events, revents, v4l2_event_pending(&vsession->fh)); + +unlock_mutex: + mutex_unlock(&vsession->mutex); + + return revents; +} + +int mvx_v4l2_mmap(struct file *file, + struct vm_area_struct *vma) +{ + struct mvx_v4l2_session *session = file_to_session(file); + unsigned long offset = vma->vm_pgoff << PAGE_SHIFT; + enum mvx_direction dir; + struct mvx_v4l2_port *vport; + struct vb2_queue *q; + int ret; + + MVX_SESSION_INFO(&session->session, + "v4l2: Memory map. start=0x%08lx, end=0x%08lx, pgoff=0x%08lx, flags=0x%08lx.", + vma->vm_start, vma->vm_end, + vma->vm_pgoff, vma->vm_flags); + + if (offset >= DST_QUEUE_OFF_BASE) { + dir = MVX_DIR_OUTPUT; + vma->vm_pgoff -= (DST_QUEUE_OFF_BASE >> PAGE_SHIFT); + } else { + dir = MVX_DIR_INPUT; + } + + vport = &session->port[dir]; + q = &vport->vb2_queue; + + ret = vb2_mmap(q, vma); + if (ret != 0) { + MVX_SESSION_WARN(&session->session, + "Failed to memory map buffer. 
q=%px, pgoff=0x%08lx, dir=%d, ret=%d", + q, vma->vm_pgoff, dir, ret); + return ret; + } + + return 0; +} diff --git a/drivers/media/platform/cix/cix_vpu/if/v4l2/mvx_v4l2_fops.h b/drivers/media/platform/cix/cix_vpu/if/v4l2/mvx_v4l2_fops.h new file mode 100755 index 000000000000..b9306c3f4702 --- /dev/null +++ b/drivers/media/platform/cix/cix_vpu/if/v4l2/mvx_v4l2_fops.h @@ -0,0 +1,64 @@ +/* + * The confidential and proprietary information contained in this file may + * only be used by a person authorised under and to the extent permitted + * by a subsisting licensing agreement from Arm Technology (China) Co., Ltd. + * + * (C) COPYRIGHT 2021-2021 Arm Technology (China) Co., Ltd. + * ALL RIGHTS RESERVED + * + * This entire notice must be reproduced on all copies of this file + * and copies of this file may only be made by a person if such person is + * permitted to do so under the terms of a subsisting license agreement + * from Arm Technology (China) Co., Ltd. + * + * SPDX-License-Identifier: GPL-2.0-only + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + * + */ + +#ifndef _MVX_V4L2_FOPS_H_ +#define _MVX_V4L2_FOPS_H_ + +/* + * Callbacks for struct v4l2_file_operations. + * + * Prototypes declared bellow represent callbacks required by v4l2 framework. 
+ * They are needed to implement certain syscalls. + */ + +/** + * mvx_v4l2_open() - Callback needed to implement the open() syscall. + */ +int mvx_v4l2_open(struct file *file); + +/** + * mvx_v4l2_release() - Callback needed to implement the release() syscall. + */ +int mvx_v4l2_release(struct file *file); + +/** + * mvx_v4l2_poll() - Callback needed to implement the poll() syscall. + */ +unsigned int mvx_v4l2_poll(struct file *file, + struct poll_table_struct *wait); + +/** + * mvx_v4l2_mmap() - Callback needed to implement the mmap() syscall. + */ +int mvx_v4l2_mmap(struct file *file, + struct vm_area_struct *vma); + +#endif /* _MVX_V4L2_FOPS_H_ */ diff --git a/drivers/media/platform/cix/cix_vpu/if/v4l2/mvx_v4l2_session.c b/drivers/media/platform/cix/cix_vpu/if/v4l2/mvx_v4l2_session.c new file mode 100755 index 000000000000..777a25904ad7 --- /dev/null +++ b/drivers/media/platform/cix/cix_vpu/if/v4l2/mvx_v4l2_session.c @@ -0,0 +1,979 @@ +/* + * The confidential and proprietary information contained in this file may + * only be used by a person authorised under and to the extent permitted + * by a subsisting licensing agreement from Arm Technology (China) Co., Ltd. + * + * (C) COPYRIGHT 2021-2021 Arm Technology (China) Co., Ltd. + * ALL RIGHTS RESERVED + * + * This entire notice must be reproduced on all copies of this file + * and copies of this file may only be made by a person if such person is + * permitted to do so under the terms of a subsisting license agreement + * from Arm Technology (China) Co., Ltd. + * + * SPDX-License-Identifier: GPL-2.0-only + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + * + */ + +/**************************************************************************** + * Includes + ****************************************************************************/ + +#include +#include +#include +#include +#include +#include +#include "mvx_ext_if.h" +#include "mvx_seq.h" +#include "mvx_v4l2_buffer.h" +#include "mvx_v4l2_session.h" +#include "mvx_log_group.h" + +#define V4L2_MVX_COLORIMETRY_UNSUPPORTED (-1) + +static const unsigned int range_map[] = { + V4L2_QUANTIZATION_DEFAULT, + V4L2_QUANTIZATION_LIM_RANGE, + V4L2_QUANTIZATION_FULL_RANGE, +}; + +static const unsigned int primaries_map[] = { + V4L2_MVX_COLORIMETRY_UNSUPPORTED, + V4L2_COLORSPACE_REC709, /*Rec. ITU-R BT.709-6*/ + V4L2_COLORSPACE_DEFAULT, + V4L2_MVX_COLORIMETRY_UNSUPPORTED, + V4L2_COLORSPACE_470_SYSTEM_M, /*Rec. ITU-R BT.470-6 System M*/ + V4L2_COLORSPACE_470_SYSTEM_BG, /*Rec. ITU-R BT.470-6 System B, G*/ + V4L2_COLORSPACE_SMPTE170M, /*SMPTE170M*/ + V4L2_COLORSPACE_SMPTE240M, /*SMPTE240M*/ + V4L2_COLORSPACE_GENERIC_FILM, /*Generic film*/ + V4L2_COLORSPACE_BT2020, /*Rec. ITU-R BT.2020-2*/ + V4L2_COLORSPACE_ST428, /*SMPTE ST 428-1 (2006)*/ + V4L2_COLORSPACE_DCI_P3, /*SMPTE RP 431-2 (2011), SMPTE ST 2113 (2019) "P3DCI"*/ +}; + +static const unsigned int xfer_map[] = { + V4L2_MVX_COLORIMETRY_UNSUPPORTED, + V4L2_XFER_FUNC_709, /*Rec. ITU-R BT.709-6*/ + V4L2_XFER_FUNC_DEFAULT, + V4L2_MVX_COLORIMETRY_UNSUPPORTED, + V4L2_XFER_FUNC_GAMMA22, /*Assumed display gamma 2.2. Rec. 
ITU-R BT.470-6 System M*/ + V4L2_XFER_FUNC_GAMMA28, /*Assumed display gamma 2.8. Rec. Rec. ITU-R BT.470-6 System B, G*/ + V4L2_XFER_FUNC_709, /*SMPTE170M*/ + V4L2_XFER_FUNC_SMPTE240M, /*SMPTE240M*/ + V4L2_XFER_FUNC_NONE, /*Linear transfer characteristics*/ + V4L2_MVX_COLORIMETRY_UNSUPPORTED, + V4L2_MVX_COLORIMETRY_UNSUPPORTED, + V4L2_MVX_COLORIMETRY_UNSUPPORTED, /*IEC 61966-2-4*/ + V4L2_XFER_FUNC_BT1361, /*Rec. ITU-R BT.1361-0 extended colour gamut*/ + V4L2_XFER_FUNC_SRGB, /*IEC 61966-2-1 sRGB or sYCC*/ + V4L2_XFER_FUNC_BT2020_10, /*Rec. ITU-R BT.2020-2 (10 bit system)*/ + V4L2_XFER_FUNC_BT2020_12, /*Rec. ITU-R BT.2020-2 (12 bit system)*/ + V4L2_XFER_FUNC_SMPTE2084, /*SMPTE ST 2084*/ + V4L2_XFER_FUNC_ST428, /*SMPTE ST 428-1*/ + V4L2_XFER_FUNC_HLG, /*STD-B67 and Rec. ITU-R BT.2100-2 hybrid log-gamma (HLG) system*/ +}; + +static const unsigned int matrix_map[] = { + V4L2_MVX_COLORIMETRY_UNSUPPORTED, + V4L2_YCBCR_ENC_709, /*Rec. ITU-R BT.709-6*/ + V4L2_YCBCR_ENC_DEFAULT, + V4L2_MVX_COLORIMETRY_UNSUPPORTED, + V4L2_YCBCR_ENC_BT470_6M, /*Title 47 Code of Federal Regulations*/ + V4L2_YCBCR_ENC_601, /*Rec. ITU-R BT.601-7 625*/ + V4L2_YCBCR_ENC_601, /*Rec. ITU-R BT.601-7 525*/ + V4L2_YCBCR_ENC_SMPTE240M, /*SMPTE240M*/ + V4L2_MVX_COLORIMETRY_UNSUPPORTED, + V4L2_YCBCR_ENC_BT2020, /*Rec. ITU-R BT.2020-2*/ + V4L2_YCBCR_ENC_BT2020_CONST_LUM /*Rec. 
ITU-R BT.2020-2 constant*/ +}; + +/**************************************************************************** + * Exported and static functions + ****************************************************************************/ + +static void set_format(struct v4l2_pix_format_mplane *pix_mp, + unsigned int pixelformat, + unsigned int width, + unsigned int height, + unsigned int num_planes, + unsigned int *sizeimage, + unsigned int *bytesperline) +{ + int i; + + pix_mp->pixelformat = pixelformat; + pix_mp->width = width; + pix_mp->height = height; + pix_mp->num_planes = num_planes; + + + for (i = 0; i < num_planes; ++i) { + pix_mp->plane_fmt[i].sizeimage = sizeimage[i]; + pix_mp->plane_fmt[i].bytesperline = bytesperline[i]; + } +} + +static void set_video_signal_type(struct v4l2_pix_format_mplane *pix_mp, + uint8_t colour_primaries, + uint8_t transfer_characteristics, + uint8_t matrix_coeff, + uint8_t range) +{ + pix_mp->colorspace = colour_primaries < ARRAY_SIZE(primaries_map) ? + primaries_map[colour_primaries] : V4L2_COLORSPACE_DEFAULT; + pix_mp->xfer_func = transfer_characteristics < ARRAY_SIZE(xfer_map) ? + xfer_map[transfer_characteristics] : V4L2_XFER_FUNC_DEFAULT; + pix_mp->ycbcr_enc = matrix_coeff < ARRAY_SIZE(matrix_map) ? + matrix_map[matrix_coeff] : V4L2_YCBCR_ENC_DEFAULT; + pix_mp->quantization = range < ARRAY_SIZE(range_map) ? + range_map[range] : V4L2_QUANTIZATION_DEFAULT; + + // There is two colorspaces using BT709 primaries, use the range to differentiate. 
+ if (pix_mp->colorspace == V4L2_COLORSPACE_REC709 && + pix_mp->quantization == V4L2_QUANTIZATION_FULL_RANGE) + pix_mp->colorspace = V4L2_COLORSPACE_SRGB; +} + +static void set_default_video_signal_type(unsigned int colorspace, + unsigned int *xfer_func, unsigned int *ycbcr_enc, unsigned int *range) +{ + switch (colorspace) { + case V4L2_COLORSPACE_SMPTE170M: + case V4L2_COLORSPACE_470_SYSTEM_M: + case V4L2_COLORSPACE_470_SYSTEM_BG: + *ycbcr_enc = V4L2_YCBCR_ENC_601; + *xfer_func = V4L2_XFER_FUNC_709; + break; + case V4L2_COLORSPACE_REC709: + *ycbcr_enc = V4L2_YCBCR_ENC_709; + *xfer_func = V4L2_XFER_FUNC_709; + break; + case V4L2_COLORSPACE_SRGB: + case V4L2_COLORSPACE_JPEG: + *ycbcr_enc = V4L2_YCBCR_ENC_601; + *xfer_func = V4L2_XFER_FUNC_SRGB; + break; + case V4L2_COLORSPACE_OPRGB: + *ycbcr_enc = V4L2_YCBCR_ENC_601; + *xfer_func = V4L2_XFER_FUNC_OPRGB; + break; + case V4L2_COLORSPACE_BT2020: + *ycbcr_enc = V4L2_YCBCR_ENC_BT2020; + *xfer_func = V4L2_XFER_FUNC_709; + break; + case V4L2_COLORSPACE_SMPTE240M: + *ycbcr_enc = V4L2_YCBCR_ENC_SMPTE240M; + *xfer_func = V4L2_XFER_FUNC_SMPTE240M; + break; + case V4L2_COLORSPACE_RAW: + default: + /* Explicitly unknown */ + *ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT; + *xfer_func = V4L2_XFER_FUNC_DEFAULT; + } + + *range = V4L2_QUANTIZATION_DEFAULT; +} + +static int get_u32_array_index(const unsigned int *array, + unsigned int size, unsigned int data) +{ + int i = 0; + + for (i = 0; i < size; i++) { + if (array[i] == data) + return i; + } + + return -1; +} + +static void v4l2_port_show(struct mvx_v4l2_port *port, + struct seq_file *s) +{ + mvx_seq_printf(s, "mvx_v4l2_port", 0, "%px\n", port); + mvx_seq_printf(s, "pixelformat", 1, "0x%x\n", + port->pix_mp.pixelformat); + mvx_seq_printf(s, "vb2_queue", 1, "\n"); + mvx_seq_printf(s, "memory", 2, "%u\n", + port->vb2_queue.memory); + mvx_seq_printf(s, "min_buffers_needed", 2, "%u\n", + port->vb2_queue.min_buffers_needed); + mvx_seq_printf(s, "num_buffers", 2, "%u\n", + 
port->vb2_queue.num_buffers); + mvx_seq_printf(s, "queued_count", 2, "%u\n", + port->vb2_queue.queued_count); + mvx_seq_printf(s, "streaming", 2, "%u\n", + port->vb2_queue.streaming); + mvx_seq_printf(s, "error", 2, "%u\n", + port->vb2_queue.error); + mvx_seq_printf(s, "last_buffer_dequeued", 2, "%u\n", + port->vb2_queue.last_buffer_dequeued); +} + +static int port_stat_show(struct seq_file *s, + void *v) +{ + struct mvx_v4l2_port *vport = s->private; + struct mvx_session_port *sport = vport->port; + + mvx_session_port_show(sport, s); + seq_puts(s, "\n"); + v4l2_port_show(vport, s); + + return 0; +} + +static int port_stat_open(struct inode *inode, + struct file *file) +{ + return single_open(file, port_stat_show, inode->i_private); +} + +static const struct file_operations port_stat_fops = { + .open = port_stat_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release +}; + +static int port_debugfs_init(struct device *dev, + unsigned int i, + struct mvx_v4l2_port *vport, + struct mvx_session_port *sport, + struct dentry *parent) +{ + char name[20]; + struct dentry *dentry; + + scnprintf(name, sizeof(name), "port%u", i); + vport->dentry = debugfs_create_dir(name, parent); + if (IS_ERR_OR_NULL(vport->dentry)) + return -ENOMEM; + + dentry = debugfs_create_file("stat", 0400, vport->dentry, vport, + &port_stat_fops); + if (IS_ERR_OR_NULL(dentry)) + return -ENOMEM; + + return 0; +} + +static int session_debugfs_init(struct mvx_v4l2_session *session, + struct dentry *parent) +{ + int ret; + char name[20]; + int i; + + scnprintf(name, sizeof(name), "%px", &session->session); + session->dentry = debugfs_create_dir(name, parent); + if (IS_ERR_OR_NULL(session->dentry)) + return -ENOMEM; + + for (i = 0; i < MVX_DIR_MAX; i++) { + struct mvx_v4l2_port *vport = &session->port[i]; + struct mvx_session_port *mport = &session->session.port[i]; + + ret = port_debugfs_init(session->ext->dev, i, vport, mport, + session->dentry); + if (ret != 0) + goto remove_dentry; 
+ } + + return 0; + +remove_dentry: + debugfs_remove_recursive(session->dentry); + return ret; +} + +static struct mvx_v4l2_session *mvx_session_to_v4l2_session( + struct mvx_session *session) +{ + return container_of(session, struct mvx_v4l2_session, session); +} + +static void free_session(struct mvx_session *session) +{ + struct mvx_v4l2_session *s = mvx_session_to_v4l2_session(session); + + MVX_SESSION_INFO(session, "v4l2: Destroy session."); + + mvx_session_destruct(session); + + if (IS_ENABLED(CONFIG_DEBUG_FS)) + debugfs_remove_recursive(s->dentry); + + if (mutex_is_locked(&s->mutex)) { + mutex_unlock(&s->mutex); + } + complete(&s->cmp); +} + +static bool bitstream_need_merge(struct mvx_v4l2_session *vsession, + struct mvx_v4l2_buffer *vbuf) +{ + struct mvx_buffer *buf = &vbuf->buf; + + if(buf->dir == MVX_DIR_OUTPUT && mvx_is_bitstream(buf->format)) { + if (!(buf->flags & MVX_BUFFER_EOF) || (vsession->frame_bits_buf != NULL)) { + if (vsession->frame_bits_buf != NULL) { + struct vb2_buffer *vb_dst = &vsession->frame_bits_buf->vb2_v4l2_buffer.vb2_buf; + struct vb2_buffer *vb_src = &vbuf->vb2_v4l2_buffer.vb2_buf; + unsigned int size = vb_src->planes[0].bytesused - vb_src->planes[0].data_offset; + unsigned int space = vb_dst->planes[0].length - vb_dst->planes[0].bytesused; + if (vb2_plane_vaddr(vb_dst, 0) == NULL || vb2_plane_vaddr(vb_src, 0) == NULL) { + MVX_SESSION_WARN(&vsession->session, + "Unable to obtain kernel virtual address of dst/src plane\n"); + return false; + } + if (space < size) { + MVX_SESSION_WARN(&vsession->session, + "Remaining space in bitstream buffer (%d) is not enough to store extra %d bytes\n", + space, size); + return false; + } + } + + if (buf->planes[0].filled > 0) { + if (buf->planes[0].filled >= vbuf->vb2_v4l2_buffer.vb2_buf.planes[0].length) { + MVX_SESSION_WARN(&vsession->session, "No remaining space in bitstream buffer\n"); + return false; + } + return true; + } + } + } + + return false; +} + +static struct vb2_buffer 
*merge_bitstream(struct mvx_v4l2_session *vsession, + struct mvx_v4l2_buffer *vbuf) +{ + struct vb2_buffer *vb = NULL; + + mvx_buffer_synch(&vbuf->buf, DMA_FROM_DEVICE); + if (vsession->frame_bits_buf == NULL) { + vsession->frame_bits_buf = vbuf; + MVX_SESSION_INFO(&vsession->session, + "Partial bitstream offset %d, used %d\n", + vbuf->vb2_v4l2_buffer.vb2_buf.planes[0].data_offset, + vbuf->vb2_v4l2_buffer.vb2_buf.planes[0].bytesused); + } else { + /* merge bitstream buffers */ + struct vb2_buffer *vb_dst = &vsession->frame_bits_buf->vb2_v4l2_buffer.vb2_buf; + struct vb2_buffer *vb_src = &vbuf->vb2_v4l2_buffer.vb2_buf; + void *dst = vb2_plane_vaddr(vb_dst, 0) + vb_dst->planes[0].bytesused; + void *src = vb2_plane_vaddr(vb_src, 0) + vb_src->planes[0].data_offset; + unsigned int size = vb_src->planes[0].bytesused - vb_src->planes[0].data_offset; + if (size > 0) { + memcpy(dst, src, size); + vb_dst->planes[0].bytesused += size; + vsession->frame_bits_buf->buf.planes[0].filled += size; + MVX_SESSION_INFO(&vsession->session, + "Merged %d bytes of bitstream, total %d bytes\n", + size, vb_dst->planes[0].bytesused); + } + + mvx_session_qbuf(&vsession->session, MVX_DIR_OUTPUT, &vbuf->buf); + + if (vbuf->buf.flags & MVX_BUFFER_EOF) { + vsession->frame_bits_buf->vb2_v4l2_buffer.flags = vbuf->vb2_v4l2_buffer.flags; + vsession->frame_bits_buf = NULL; + vb = vb_dst; + } + } + + return vb; +} + +static void handle_event(struct mvx_session *session, + enum mvx_session_event event, + void *arg) +{ + struct mvx_v4l2_session *vsession = + mvx_session_to_v4l2_session(session); + + MVX_SESSION_INFO(&vsession->session, + "Event. event=%d, arg=%px.", event, arg); + + switch (event) { + case MVX_SESSION_EVENT_BUFFER: { + struct mvx_v4l2_buffer *vbuf = mvx_buffer_to_v4l2_buffer(arg); + struct vb2_buffer *vb = &vbuf->vb2_v4l2_buffer.vb2_buf; + + /* + * When streaming is stopped we don't always receive all + * buffers from FW back. So we just return them all to Vb2. 
+ * If the FW later returns a buffer to us, we could silently + * skip it. + */ + if (vb->state != VB2_BUF_STATE_DEQUEUED) { + enum vb2_buffer_state state = + mvx_v4l2_buffer_update(vbuf); + + if(vbuf->buf.dir == MVX_DIR_OUTPUT && mvx_is_bitstream(vbuf->buf.format)) { + if (bitstream_need_merge(vsession, vbuf)) + vb = merge_bitstream(vsession, vbuf); + else if (vsession->frame_bits_buf != NULL) { + /* Remaining space in bitstream buffer is not enough, just send it to client */ + vb2_buffer_done(&vsession->frame_bits_buf->vb2_v4l2_buffer.vb2_buf, state); + vsession->frame_bits_buf = NULL; + } + } + + if (vb != NULL) + vb2_buffer_done(vb, state); + } + + break; + } + case MVX_SESSION_EVENT_PORT_CHANGED: { + enum mvx_direction dir = (enum mvx_direction)arg; + struct mvx_v4l2_port *vport = &vsession->port[dir]; + struct mvx_session_port *port = &session->port[dir]; + const struct v4l2_event event = { + .type = V4L2_EVENT_SOURCE_CHANGE, + .u.src_change.changes = V4L2_EVENT_SRC_CH_RESOLUTION + }; + struct v4l2_pix_format_mplane *p = &vport->pix_mp; + unsigned int field = mvx_is_afbc(port->format) ? + V4L2_FIELD_SEQ_TB : V4L2_FIELD_INTERLACED; + unsigned int width = port->width; + unsigned int height = port->height; + if (dir == MVX_DIR_OUTPUT) { + port->width = port->new_width; + port->height = port->new_height; + width = port->width; + height = port->height; + port->pending_source_change_event = false; + } + + p->field = port->interlaced ? 
field : V4L2_FIELD_NONE; + if (dir == MVX_DIR_OUTPUT && mvx_is_afbc(port->format)) { + if (session->dual_afbc_downscaled) + width = session->port[dir].afbc_width_in_superblocks_downscaled << AFBC_SUPERBLOCK_SHIFT; + else + width = session->port[dir].afbc_width << AFBC_SUPERBLOCK_SHIFT; + height += session->port[dir].afbc_crop_top >> session->dual_afbc_downscaled; + } + + set_format(&vport->pix_mp, port->pixelformat, width, height, + port->nplanes, port->size, port->stride); + vport->afbc_crop_left = port->afbc_crop_left; + vport->afbc_crop_top = port->afbc_crop_top; + v4l2_event_queue_fh(&vsession->fh, &event); + break; + } + case MVX_SESSION_EVENT_COLOR_DESC: { + struct mvx_fw_color_desc *cd = &session->color_desc; + + set_video_signal_type(&vsession->port[MVX_DIR_OUTPUT].pix_mp, + cd->colour_primaries, cd->transfer_characteristics, + cd->matrix_coeff, cd->range); + vsession->port[MVX_DIR_INPUT].pix_mp.colorspace = vsession->port[MVX_DIR_OUTPUT].pix_mp.colorspace; + vsession->port[MVX_DIR_INPUT].pix_mp.xfer_func = vsession->port[MVX_DIR_OUTPUT].pix_mp.xfer_func; + vsession->port[MVX_DIR_INPUT].pix_mp.ycbcr_enc = vsession->port[MVX_DIR_OUTPUT].pix_mp.ycbcr_enc; + vsession->port[MVX_DIR_INPUT].pix_mp.quantization = vsession->port[MVX_DIR_OUTPUT].pix_mp.quantization; + break; + } + case MVX_SESSION_EVENT_ERROR: { + int i; + + for (i = 0; i < MVX_DIR_MAX; ++i) { + struct vb2_queue *q = &vsession->port[i].vb2_queue; + + if (vsession->port[i].q_set) + vb2_queue_error(q); + else + MVX_SESSION_WARN(&vsession->session, "vb2_queue has been released, dir %d", + vsession->port[i].dir); + } + + break; + } + default: + MVX_SESSION_WARN(&vsession->session, + "Unsupported session event. 
event=%d", event); + } +} + +int mvx_v4l2_session_construct(struct mvx_v4l2_session *vsession, + struct mvx_ext_if *ctx) +{ + int i; + int ret; + + vsession->ext = ctx; + mutex_init(&vsession->mutex); + init_completion(&vsession->cmp); + + for (i = 0; i < MVX_DIR_MAX; i++) { + struct mvx_v4l2_port *vport = &vsession->port[i]; + + vport->port = &vsession->session.port[i]; + vport->vsession = vsession; + vport->dir = i; + vport->q_set = false; + } + + if (IS_ENABLED(CONFIG_DEBUG_FS)) { + ret = session_debugfs_init(vsession, ctx->dsessions); + if (ret != 0) + return ret; + } + + ret = mvx_session_construct(&vsession->session, ctx->dev, + ctx->client_ops, ctx->cache, + &vsession->mutex, + free_session, handle_event, + vsession->dentry, + ctx->is_encoder); + if (ret != 0) + goto remove_dentry; + + return 0; + +remove_dentry: + if (IS_ENABLED(CONFIG_DEBUG_FS)) + debugfs_remove_recursive(vsession->dentry); + + return ret; +} + +struct mvx_v4l2_session *v4l2_fh_to_session(struct v4l2_fh *fh) +{ + return container_of(fh, struct mvx_v4l2_session, fh); +} + +struct mvx_v4l2_session *file_to_session(struct file *file) +{ + return v4l2_fh_to_session(file->private_data); +} + +int mvx_v4l2_session_set_roi_regions(struct mvx_v4l2_session *vsession, + struct v4l2_mvx_roi_regions *roi) +{ + int ret; + struct mvx_roi_config roi_regions; + roi_regions.pic_index = roi->pic_index; + roi_regions.num_roi = roi->num_roi; + roi_regions.qp_present = roi->qp_present; + roi_regions.roi_present = roi->roi_present; + roi_regions.qp = roi->qp; + + if (roi_regions.roi_present && roi_regions.num_roi > 0) { + int i = 0; + for (;i < roi_regions.num_roi; i++) { + roi_regions.roi[i].mbx_left = roi->roi[i].mbx_left; + roi_regions.roi[i].mbx_right = roi->roi[i].mbx_right; + roi_regions.roi[i].mby_top = roi->roi[i].mby_top; + roi_regions.roi[i].mby_bottom = roi->roi[i].mby_bottom; + roi_regions.roi[i].qp_delta = roi->roi[i].qp_delta; + roi_regions.roi[i].prio = roi->roi[i].prio; + 
roi_regions.roi[i].force_intra = roi->roi[i].force_intra; + } + } + ret = mvx_session_set_roi_regions(&vsession->session, &roi_regions); + if (ret != 0) + return ret; + + return 0; +} + +int mvx_v4l2_session_set_qp_epr(struct mvx_v4l2_session *vsession, + struct v4l2_buffer_param_qp *qp) +{ + int ret; + struct mvx_buffer_param_qp epr_qp; + epr_qp.qp = qp->qp; + epr_qp.epr_iframe_enable = qp->epr_iframe_enable; + ret = mvx_session_set_qp_epr(&vsession->session, &epr_qp); + if (ret != 0) + return ret; + + return 0; +} + +int mvx_v4l2_session_set_sei_userdata(struct mvx_v4l2_session *vsession, + struct v4l2_sei_user_data *sei_userdata) +{ + int ret; + struct mvx_sei_userdata userdata; + userdata.flags = sei_userdata->flags; + userdata.user_data_len = sei_userdata->user_data_len; + memcpy(&userdata.user_data, &sei_userdata->user_data, sizeof(userdata.user_data)); + memcpy(&userdata.uuid, &sei_userdata->uuid, sizeof(userdata.uuid)); + ret = mvx_session_set_sei_userdata(&vsession->session, &userdata); + if (ret != 0) + return ret; + + return 0; +} + +int mvx_v4l2_session_set_dsl_ratio(struct mvx_v4l2_session *vsession, + struct v4l2_mvx_dsl_ratio *dsl) +{ + int ret; + struct mvx_dsl_ratio dsl_ratio; + dsl_ratio.hor = dsl->hor; + dsl_ratio.ver = dsl->ver; + + ret = mvx_session_set_dsl_ratio(&vsession->session, &dsl_ratio); + if (ret != 0) + return ret; + + return 0; +} + +int mvx_v4l2_session_set_long_term_ref(struct mvx_v4l2_session *vsession, + struct v4l2_mvx_long_term_ref *ltr) +{ + int ret; + struct mvx_long_term_ref mvx_ltr; + mvx_ltr.mode = ltr->mode; + mvx_ltr.period = ltr->period; + ret = mvx_session_set_long_term_ref(&vsession->session, &mvx_ltr); + if (ret != 0) + return ret; + + return 0; + +} + +int mvx_v4l2_session_set_dsl_mode(struct mvx_v4l2_session *vsession, + int *mode) +{ + int ret; + ret = mvx_session_set_dsl_mode(&vsession->session, mode); + if (ret != 0) + return ret; + + return 0; +} + +int mvx_v4l2_session_set_mini_frame_cnt(struct 
mvx_v4l2_session *vsession, + int *cnt) +{ + int ret; + ret = mvx_session_set_mini_frame_cnt(&vsession->session, cnt); + if (ret != 0) + return ret; + + return 0; +} + +int mvx_v4l2_session_set_stats_mode(struct mvx_v4l2_session *vsession, + struct v4l2_buffer_param_enc_stats *stats) +{ + int ret; + struct mvx_enc_stats enc_stats; + memcpy(&enc_stats, stats, sizeof(struct mvx_enc_stats)); + ret = mvx_session_set_stats_mode(&vsession->session, &enc_stats); + if (ret != 0) + return ret; + + return 0; +} + +int mvx_v4l2_session_set_chr_cfg(struct mvx_v4l2_session *vsession, + struct v4l2_mvx_chr_config *chr) +{ + int ret; + struct mvx_chr_cfg chr_cfg; + chr_cfg.pic_index = chr->pic_index; + chr_cfg.num_chr = chr->num_chr; + + memcpy(chr_cfg.rectangle, chr->rectangle, sizeof(chr->rectangle)); + ret = mvx_session_set_chr_cfg(&vsession->session, &chr_cfg); + if (ret != 0) + return ret; + + return 0; +} + +int mvx_v4l2_session_set_huff_table(struct mvx_v4l2_session *vsession, + struct v4l2_mvx_huff_table *table) +{ + int ret; + struct mvx_huff_table huff_table; + + memcpy(&huff_table, table, sizeof(struct mvx_huff_table)); + ret = mvx_session_set_huff_table(&vsession->session, &huff_table); + if (ret != 0) + return ret; + + return 0; +} +int mvx_v4l2_session_set_seamless_target(struct mvx_v4l2_session *vsession, + struct v4l2_mvx_seamless_target *seamless) +{ + int ret; + struct mvx_seamless_target seamless_target; + + memcpy(&seamless_target, seamless, sizeof(struct v4l2_mvx_seamless_target)); + ret = mvx_session_set_seamless_target(&vsession->session, &seamless_target); + if (ret != 0) + return ret; + + return 0; +} + +int mvx_v4l2_session_set_color_conv_coef(struct mvx_v4l2_session *vsession, + struct v4l2_mvx_color_conv_coef *coef) +{ + int ret; + struct mvx_color_conv_coef conv_coef; + + memcpy(&conv_coef, coef, sizeof(struct mvx_color_conv_coef)); + ret = mvx_session_set_color_conversion_ceof(&vsession->session, &conv_coef); + if (ret != 0) + return ret; + + return 
0; +} + +int mvx_v4l2_session_set_rgb_conv_yuv_coef(struct mvx_v4l2_session *vsession, + struct v4l2_mvx_rgb2yuv_color_conv_coef *coef) +{ + int ret; + struct mvx_rgb2yuv_color_conv_coef conv_coef; + + memcpy(&conv_coef, coef, sizeof(struct mvx_rgb2yuv_color_conv_coef)); + ret = mvx_session_set_rgb_conv_yuv_coef(&vsession->session, &conv_coef); + if (ret != 0) + return ret; + + return 0; +} + +int mvx_v4l2_session_set_osd_config(struct mvx_v4l2_session *vsession, + struct v4l2_osd_config *osd_cfg) +{ + int ret; + struct mvx_osd_config osd; + + memcpy(&osd, osd_cfg, sizeof(struct v4l2_osd_config)); + ret = mvx_session_set_osd_config(&vsession->session, &osd); + if (ret != 0) + return ret; + + return 0; +} + +int mvx_v4l2_session_set_osd_info(struct mvx_v4l2_session *vsession, + struct v4l2_osd_info *info, enum mvx_format* osd_fmt) +{ + int ret; + int i; + struct mvx_osd_info osd_info; + for (i = 0; i < V4L2_MAX_FRAME_OSD_REGION; i++) + { + osd_info.width_osd[i] = info->width_osd[i]; + osd_info.height_osd[i] = info->height_osd[i]; + osd_info.inputFormat_osd[i] = osd_fmt[i]; + } + ret = mvx_session_set_osd_info(&vsession->session, &osd_info); + if (ret != 0) + return ret; + + return 0; +} + +void mvx_v4l2_session_try_color_info(struct mvx_v4l2_session *vsession, + struct v4l2_pix_format_mplane *pix) +{ + unsigned int colorspace = pix->colorspace; + unsigned int xfer_func = V4L2_XFER_FUNC_DEFAULT; + unsigned int ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT; + unsigned int quantization = V4L2_QUANTIZATION_DEFAULT; + + set_default_video_signal_type(pix->colorspace, + &xfer_func, &ycbcr_enc, &quantization); + + if (colorspace != V4L2_COLORSPACE_DEFAULT) { + if (get_u32_array_index(primaries_map, ARRAY_SIZE(primaries_map), colorspace) < 0 && + pix->colorspace != V4L2_COLORSPACE_SRGB) + pix->colorspace = V4L2_COLORSPACE_DEFAULT; + } + + if (pix->xfer_func != V4L2_XFER_FUNC_DEFAULT) { + if (get_u32_array_index(xfer_map, ARRAY_SIZE(xfer_map), pix->xfer_func) < 0) + pix->xfer_func = 
xfer_func; + } + + if (pix->ycbcr_enc != V4L2_YCBCR_ENC_DEFAULT) { + if (get_u32_array_index(matrix_map, ARRAY_SIZE(matrix_map), pix->ycbcr_enc) < 0) + pix->ycbcr_enc = ycbcr_enc; + } + + if (pix->quantization != V4L2_QUANTIZATION_DEFAULT) { + if (get_u32_array_index(range_map, ARRAY_SIZE(range_map), pix->quantization) < 0) + pix->quantization = quantization; + } +} + +int mvx_v4l2_session_set_color_info(struct mvx_v4l2_session *vsession, + struct v4l2_pix_format_mplane *pix) +{ + int ret; + struct mvx_fw_color_desc cd; + unsigned int flex_colorspace; + + ret = mvx_session_get_color_desc(&vsession->session, &cd); + if (ret != 0) + return ret; + + mvx_v4l2_session_try_color_info(vsession, pix); + + flex_colorspace = pix->colorspace == V4L2_COLORSPACE_SRGB ? + V4L2_COLORSPACE_REC709 : pix->colorspace; + cd.colour_primaries = + get_u32_array_index(primaries_map, ARRAY_SIZE(primaries_map), flex_colorspace); + cd.transfer_characteristics = + get_u32_array_index(xfer_map, ARRAY_SIZE(xfer_map), pix->xfer_func); + cd.matrix_coeff = + get_u32_array_index(matrix_map, ARRAY_SIZE(matrix_map), pix->ycbcr_enc); + cd.range = + get_u32_array_index(range_map, ARRAY_SIZE(range_map), pix->quantization); + + return mvx_session_set_color_desc(&vsession->session, &cd); +} + +int mvx_v4l2_session_set_enc_lambda_scale(struct mvx_v4l2_session *vsession, + struct v4l2_mvx_lambda_scale *lambda_scale) +{ + int ret; + struct mvx_lambda_scale mvx_lambda_scale; + + memcpy(&mvx_lambda_scale, lambda_scale, sizeof(struct mvx_lambda_scale)); + ret = mvx_session_set_enc_lambda_scale(&vsession->session, &mvx_lambda_scale); + if (ret != 0) + return ret; + + return 0; +} + +#if KERNEL_VERSION(5, 15, 0) <= LINUX_VERSION_CODE +int mvx_v4l2_session_get_hdr10_cll_info(struct mvx_v4l2_session *vsession, + struct v4l2_ctrl_hdr10_cll_info *hdr) +{ + int ret; + struct mvx_fw_color_desc cd; + + ret = mvx_session_get_color_desc(&vsession->session, &cd); + if (ret != 0) + return ret; + + if (!(cd.flags & 
MVX_FW_COLOR_DESC_CONTENT_VALID)) { + memset(hdr, 0, sizeof(*hdr)); + return 0; + } + + hdr->max_content_light_level = cd.content.luminance_max; + hdr->max_pic_average_light_level = cd.content.luminance_average; + + return 0; +} + +int mvx_v4l2_session_set_hdr10_cll_info(struct mvx_v4l2_session *vsession, + struct v4l2_ctrl_hdr10_cll_info *hdr) +{ + int ret; + struct mvx_fw_color_desc cd; + + if (hdr->max_content_light_level == 0 && hdr->max_pic_average_light_level == 0) + return 0; + + ret = mvx_session_get_color_desc(&vsession->session, &cd); + if (ret != 0) + return ret; + + cd.flags |= MVX_FW_COLOR_DESC_CONTENT_VALID; + cd.content.luminance_max = hdr->max_content_light_level; + cd.content.luminance_average = hdr->max_pic_average_light_level; + + return mvx_session_set_color_desc(&vsession->session, &cd); +} + +int mvx_v4l2_session_get_hdr10_mastering(struct mvx_v4l2_session *vsession, + struct v4l2_ctrl_hdr10_mastering_display *hdr) +{ + int ret; + struct mvx_fw_color_desc cd; + + ret = mvx_session_get_color_desc(&vsession->session, &cd); + if (ret != 0) + return ret; + + if (!(cd.flags & MVX_FW_COLOR_DESC_DISPLAY_VALID)) { + memset(hdr, 0, sizeof(*hdr)); + return 0; + } + + hdr->display_primaries_x[0] = cd.display.r.x; + hdr->display_primaries_y[0] = cd.display.r.y; + hdr->display_primaries_x[1] = cd.display.g.x; + hdr->display_primaries_y[1] = cd.display.g.y; + hdr->display_primaries_x[2] = cd.display.b.x; + hdr->display_primaries_y[2] = cd.display.b.y; + hdr->white_point_x = cd.display.w.x; + hdr->white_point_y = cd.display.w.y; + hdr->min_display_mastering_luminance = cd.display.luminance_min; + hdr->max_display_mastering_luminance = cd.display.luminance_max; + + return 0; +} + +int mvx_v4l2_session_set_hdr10_mastering(struct mvx_v4l2_session *vsession, + struct v4l2_ctrl_hdr10_mastering_display *hdr) +{ + int ret; + struct mvx_fw_color_desc cd; + + if (hdr->display_primaries_x[0] == 0 || hdr->display_primaries_y[0] == 0 || + hdr->display_primaries_x[1] == 
0 || hdr->display_primaries_y[1] == 0 || + hdr->display_primaries_x[2] == 0 || hdr->display_primaries_y[2] == 0 || + hdr->white_point_x == 0 || hdr->white_point_y == 0 || + hdr->min_display_mastering_luminance == 0 || + hdr->max_display_mastering_luminance == 0) + return 0; + + ret = mvx_session_get_color_desc(&vsession->session, &cd); + if (ret != 0) + return ret; + + cd.flags |= MVX_FW_COLOR_DESC_DISPLAY_VALID; + cd.display.r.x = hdr->display_primaries_x[0]; + cd.display.r.y = hdr->display_primaries_y[0]; + cd.display.g.x = hdr->display_primaries_x[1]; + cd.display.g.y = hdr->display_primaries_y[1]; + cd.display.b.x = hdr->display_primaries_x[2]; + cd.display.b.y = hdr->display_primaries_y[2]; + cd.display.w.x = hdr->white_point_x; + cd.display.w.y = hdr->white_point_y; + cd.display.luminance_min = hdr->min_display_mastering_luminance; + cd.display.luminance_max = hdr->max_display_mastering_luminance; + + return mvx_session_set_color_desc(&vsession->session, &cd); +} +#endif diff --git a/drivers/media/platform/cix/cix_vpu/if/v4l2/mvx_v4l2_session.h b/drivers/media/platform/cix/cix_vpu/if/v4l2/mvx_v4l2_session.h new file mode 100755 index 000000000000..ab7201a08ff0 --- /dev/null +++ b/drivers/media/platform/cix/cix_vpu/if/v4l2/mvx_v4l2_session.h @@ -0,0 +1,264 @@ +/* + * The confidential and proprietary information contained in this file may + * only be used by a person authorised under and to the extent permitted + * by a subsisting licensing agreement from Arm Technology (China) Co., Ltd. + * + * (C) COPYRIGHT 2021-2021 Arm Technology (China) Co., Ltd. + * ALL RIGHTS RESERVED + * + * This entire notice must be reproduced on all copies of this file + * and copies of this file may only be made by a person if such person is + * permitted to do so under the terms of a subsisting license agreement + * from Arm Technology (China) Co., Ltd. 
+ * + * SPDX-License-Identifier: GPL-2.0-only + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + * + */ + +#ifndef _MVX_V4L2_SESSION_H_ +#define _MVX_V4L2_SESSION_H_ + +/**************************************************************************** + * Includes + ****************************************************************************/ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mvx_session.h" + +/**************************************************************************** + * Types + ****************************************************************************/ + +/** + * Offset used to distinguish between input and output port. + */ +#define DST_QUEUE_OFF_BASE (1 << 30) + +/** + * Maximum of framerate that VPU can support. + */ +#define MAX_FRAME_RATE 256 + +/** + * struct mvx_v4l2_port - V4L2 port type. + * + * Most of this structure will become redundant when buffer management + * is transferred to Vb2 framework. + * + * @vsession: Pointer to corresponding session. + * @port: Pointer to corresponding mvx port. + * @dir: Direction of a port. + * @type: V4L2 port type. + * @pix_mp: V4L2 multi planar pixel format. + * @afbc_crop_left: AFBC frame buffer left crop of active region. 
+ * @afbc_crop_top: AFBC frame buffer top crop of active region. + * @dentry: Debugfs directory entry for the port. + * @q_set: Indicates of Vb2 queue was setup. + * @vb2_queue: Vb2 queue. + */ +struct mvx_v4l2_port { + struct mvx_v4l2_session *vsession; + struct mvx_session_port *port; + enum mvx_direction dir; + enum v4l2_buf_type type; + struct v4l2_pix_format_mplane pix_mp; + unsigned int afbc_crop_left; + unsigned int afbc_crop_top; + struct dentry *dentry; + bool q_set; + struct vb2_queue vb2_queue; +}; + +/** + * struct mvx_v4l2_session - V4L2 session type. + * @ext: Pointer to external interface object. + * @fh: V4L2 file handler. + * @mutex: Mutex protecting the session object. + * @session: Session object. + * @port: Array of v4l2 ports. + * @dentry: Debugfs directory entry representing a session. + * @v4l2_ctrl: v4l2 controls handler. + */ +struct mvx_v4l2_session { + struct mvx_ext_if *ext; + struct v4l2_fh fh; + struct mutex mutex; + struct mvx_session session; + struct mvx_v4l2_port port[MVX_DIR_MAX]; + struct dentry *dentry; + struct v4l2_ctrl_handler v4l2_ctrl; + struct completion cmp; + struct mvx_v4l2_buffer *frame_bits_buf; + bool first_input_processed; +}; + +/**************************************************************************** + * Exported functions + ****************************************************************************/ + +/** + * mvx_v4l2_session_construct() - Construct v4l2 session object. + * @vsession: Pointer to a session object. + * @ctx: Pointer to an external interface object. + * + * Return: 0 on success, else error code. + */ +int mvx_v4l2_session_construct(struct mvx_v4l2_session *vsession, + struct mvx_ext_if *ctx); + +/** + * v4l2_fh_to_session() - Cast v4l2 file handler to mvx_v4l2_session. + * @fh: v4l2 file handler. + * + * Return: Pointer to a corresponding mvx_v4l2_session object. 
+ */ +struct mvx_v4l2_session *v4l2_fh_to_session(struct v4l2_fh *fh); + +/** + * file_to_session() - Cast file object to mvx_v4l2_session. + * @file: Pointer to a file object. + * + * Return: Pointer to a corresponding mvx_v4l2_session object. + */ +struct mvx_v4l2_session *file_to_session(struct file *file); + +/** + * mvx_v4l2_session_set_sei_userdata() - Set SEI userdata. + * @vsession: Pointer to v4l2 session. + * @sei_userdata: SEI userdata. + * + * Return: 0 on success, else error code. + */ + +int mvx_v4l2_session_set_sei_userdata(struct mvx_v4l2_session *vsession, + struct v4l2_sei_user_data *sei_userdata); + +/** + * mvx_v4l2_session_set_roi_regions() - Set Roi Regions. + * @vsession: Pointer to v4l2 session. + * @roi: ROI regions. + * + * Return: 0 on success, else error code. + */ +int mvx_v4l2_session_set_roi_regions(struct mvx_v4l2_session *vsession, + struct v4l2_mvx_roi_regions *roi); + +/** + * mvx_v4l2_session_set_qp_epr() - Set qp. + * @vsession: Pointer to v4l2 session. + * @qp: qp value. + * + * Return: 0 on success, else error code. + */ + +int mvx_v4l2_session_set_qp_epr(struct mvx_v4l2_session *vsession, + struct v4l2_buffer_param_qp *qp); + +/** + * mvx_v4l2_session_set_dsl_ratio() - Set DownScale ratio. + * @vsession: Pointer to v4l2 session. + * @dsl: DownScale ratio. + * + * Return: 0 on success, else error code. + */ + +int mvx_v4l2_session_set_dsl_ratio(struct mvx_v4l2_session *vsession, + struct v4l2_mvx_dsl_ratio *dsl); + +/** + * mvx_v4l2_session_set_long_term_ref() - Set long term ref. + * @vsession: Pointer to v4l2 session. + * @ltr: long term ref. + * + * Return: 0 on success, else error code. + */ + +int mvx_v4l2_session_set_long_term_ref(struct mvx_v4l2_session *vsession, + struct v4l2_mvx_long_term_ref *ltr); + +/** + * mvx_v4l2_session_set_dsl_mode() - Set DownScale mode. + * @vsession: Pointer to v4l2 session. + * @mode: DownScale mode, oly enable on high precision mode. + * + * Return: 0 on success, else error code. 
+ */
+
+int mvx_v4l2_session_set_dsl_mode(struct mvx_v4l2_session *vsession,
+				  int *mode);
+
+/**
+ * mvx_v4l2_session_set_mini_frame_cnt() - Set mini frame buffer count.
+ * @vsession: Pointer to v4l2 session.
+ * @cnt: Mini Frame buffer cnt.
+ *
+ * Return: 0 on success, else error code.
+ */
+
+int mvx_v4l2_session_set_mini_frame_cnt(struct mvx_v4l2_session *vsession,
+					int *cnt);
+
+/**
+ * mvx_v4l2_session_set_stats_mode() - Set Stats mode.
+ * @vsession: Pointer to v4l2 session.
+ * @stats: Encoder statistics parameters.
+ *
+ * Return: 0 on success, else error code.
+ */
+
+int mvx_v4l2_session_set_stats_mode(struct mvx_v4l2_session *vsession,
+				    struct v4l2_buffer_param_enc_stats *stats);
+int mvx_v4l2_session_set_chr_cfg(struct mvx_v4l2_session *vsession,
+				 struct v4l2_mvx_chr_config *chr);
+int mvx_v4l2_session_set_huff_table(struct mvx_v4l2_session *vsession,
+				    struct v4l2_mvx_huff_table *table);
+int mvx_v4l2_session_set_seamless_target(struct mvx_v4l2_session *vsession,
+					 struct v4l2_mvx_seamless_target *seamless);
+int mvx_v4l2_session_set_color_conv_coef(struct mvx_v4l2_session *vsession,
+					 struct v4l2_mvx_color_conv_coef *coef);
+int mvx_v4l2_session_set_rgb_conv_yuv_coef(struct mvx_v4l2_session *vsession,
+					   struct v4l2_mvx_rgb2yuv_color_conv_coef *coef);
+int mvx_v4l2_session_set_osd_config(struct mvx_v4l2_session *vsession,
+				    struct v4l2_osd_config *osd_cfg);
+int mvx_v4l2_session_set_osd_info(struct mvx_v4l2_session *vsession,
+				  struct v4l2_osd_info *info, enum mvx_format* osd_fmt);
+void mvx_v4l2_session_try_color_info(struct mvx_v4l2_session *vsession,
+				     struct v4l2_pix_format_mplane *pix);
+int mvx_v4l2_session_set_color_info(struct mvx_v4l2_session *vsession,
+				    struct v4l2_pix_format_mplane *pix);
+int mvx_v4l2_session_set_enc_lambda_scale(struct mvx_v4l2_session *vsession,
+					  struct v4l2_mvx_lambda_scale *lambda_scale);
+#if KERNEL_VERSION(5, 15, 0) <= LINUX_VERSION_CODE
+int mvx_v4l2_session_get_hdr10_cll_info(struct mvx_v4l2_session *vsession,
+					struct
v4l2_ctrl_hdr10_cll_info *hdr); +int mvx_v4l2_session_set_hdr10_cll_info(struct mvx_v4l2_session *vsession, + struct v4l2_ctrl_hdr10_cll_info *hdr); +int mvx_v4l2_session_get_hdr10_mastering(struct mvx_v4l2_session *vsession, + struct v4l2_ctrl_hdr10_mastering_display *hdr); +int mvx_v4l2_session_set_hdr10_mastering(struct mvx_v4l2_session *vsession, + struct v4l2_ctrl_hdr10_mastering_display *hdr); +#endif + +#endif /* _MVX_V4L2_SESSION_H_ */ diff --git a/drivers/media/platform/cix/cix_vpu/if/v4l2/mvx_v4l2_vidioc.c b/drivers/media/platform/cix/cix_vpu/if/v4l2/mvx_v4l2_vidioc.c new file mode 100755 index 000000000000..a33fbef380a2 --- /dev/null +++ b/drivers/media/platform/cix/cix_vpu/if/v4l2/mvx_v4l2_vidioc.c @@ -0,0 +1,2078 @@ +/* + * The confidential and proprietary information contained in this file may + * only be used by a person authorised under and to the extent permitted + * by a subsisting licensing agreement from Arm Technology (China) Co., Ltd. + * + * (C) COPYRIGHT 2021-2021 Arm Technology (China) Co., Ltd. + * ALL RIGHTS RESERVED + * + * This entire notice must be reproduced on all copies of this file + * and copies of this file may only be made by a person if such person is + * permitted to do so under the terms of a subsisting license agreement + * from Arm Technology (China) Co., Ltd. + * + * SPDX-License-Identifier: GPL-2.0-only + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + * + */ + +/**************************************************************************** + * Includes + ****************************************************************************/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include "mvx_bitops.h" +#include "mvx_ext_if.h" +#include "mvx_if.h" +#include "mvx_v4l2_buffer.h" +#include "mvx_v4l2_session.h" +#include "mvx_v4l2_vidioc.h" + +/**************************************************************************** + * Types + ****************************************************************************/ + +/**************************************************************************** + * Static functions and variables + ****************************************************************************/ +static int to_v4l2_format(struct v4l2_format *f, + enum v4l2_buf_type type, + struct v4l2_pix_format_mplane *pix, + unsigned int *stride, + unsigned int *size, + bool interlaced) +{ + struct mvx_session_format_map *map; + unsigned int field; + + map = mvx_session_find_format(pix->pixelformat); + if (IS_ERR(map)) + return PTR_ERR(map); + field = mvx_is_afbc(map->format) ? V4L2_FIELD_SEQ_TB : V4L2_FIELD_INTERLACED; + + f->type = type; + + switch (f->type) { + case V4L2_BUF_TYPE_VIDEO_OUTPUT: + case V4L2_BUF_TYPE_VIDEO_CAPTURE: { + struct v4l2_pix_format *p = &f->fmt.pix; + uint32_t i; + + p->width = pix->width; + p->height = pix->height; + p->pixelformat = pix->pixelformat; + p->field = interlaced ? 
field : V4L2_FIELD_NONE; + p->colorspace = pix->colorspace; + p->flags = pix->flags; + p->ycbcr_enc = pix->ycbcr_enc; + p->quantization = pix->quantization; + p->xfer_func = pix->xfer_func; + + p->sizeimage = 0; + p->bytesperline = stride[0]; + for (i = 0; i < pix->num_planes; ++i) + p->sizeimage += size[i]; + + break; + } + case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE: + case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE: { + struct v4l2_pix_format_mplane *p = &f->fmt.pix_mp; + int i; + + memcpy(p, pix, sizeof(*p)); + memset(p->reserved, 0, sizeof(p->reserved)); + p->field = interlaced ? field : V4L2_FIELD_NONE; + + for (i = 0; i < pix->num_planes; i++) { + p->plane_fmt[i].bytesperline = stride[i]; + p->plane_fmt[i].sizeimage = size[i]; + memset(p->plane_fmt[i].reserved, 0, + sizeof(p->plane_fmt[i].reserved)); + } + + break; + } + default: + return -EINVAL; + } + + return 0; +} + +static int from_v4l2_format(struct mvx_v4l2_session *vsession, + struct v4l2_format *f, + struct v4l2_pix_format_mplane *pix, + enum mvx_format *format, + unsigned int *stride, + unsigned int *size, + bool *interlaced) +{ + struct mvx_session_format_map *map; + + switch (f->type) { + case V4L2_BUF_TYPE_VIDEO_OUTPUT: + case V4L2_BUF_TYPE_VIDEO_CAPTURE: { + struct v4l2_pix_format *p = &f->fmt.pix; + + memset(pix, 0, sizeof(*pix)); + + pix->width = p->width; + pix->height = p->height; + pix->pixelformat = p->pixelformat; + pix->field = p->field; + pix->colorspace = p->colorspace; + pix->flags = p->flags; + + if (p->priv != V4L2_PIX_FMT_PRIV_MAGIC) { + pix->ycbcr_enc = V4L2_COLORSPACE_DEFAULT; + pix->quantization = V4L2_QUANTIZATION_DEFAULT; + pix->xfer_func = V4L2_XFER_FUNC_DEFAULT; + } + + pix->num_planes = 1; + pix->plane_fmt[0].sizeimage = p->sizeimage; + pix->plane_fmt[0].bytesperline = p->bytesperline; + + size[0] = p->sizeimage; + stride[0] = p->bytesperline; + + break; + } + case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE: + case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE: { + struct v4l2_pix_format_mplane *p = 
&f->fmt.pix_mp; + unsigned int i; + + if (p->num_planes > MVX_BUFFER_NPLANES) + MVX_SESSION_WARN(&vsession->session, + "Too many planes for format. format=0x%08x, num_planes=%u.", + pix->pixelformat, p->num_planes); + + memcpy(pix, p, sizeof(*pix)); + + for (i = 0; + i < min_t(unsigned int, MVX_BUFFER_NPLANES, p->num_planes); + i++) { + size[i] = p->plane_fmt[i].sizeimage; + stride[i] = p->plane_fmt[i].bytesperline; + vsession->session.setting_stride[i] = stride[i]; + } + + break; + } + default: + return -EINVAL; + } + + /* Adjust default field and color spaces. */ + + if (pix->field == V4L2_FIELD_NONE || pix->field == V4L2_FIELD_ANY) { + *interlaced = false; + } else { + *interlaced = true; + } + + /* Find mapping between pixel format and mvx format. */ + map = mvx_session_find_format(pix->pixelformat); + if (IS_ERR(map)) { + MVX_SESSION_INFO(&vsession->session, + "Unsupported V4L2 pixel format. format=0x%08x.", + pix->pixelformat); + return PTR_ERR(map); + } + + *format = map->format; + + return 0; +} + +/** + * print_format() - Print V4L2 format. + * @session: Pointer to MVX session. + * @f: V4L2 format. + * @prefix: Prefix string. + */ +static void print_format(struct mvx_session *session, + struct v4l2_format *f, + const char *prefix) +{ + if (V4L2_TYPE_IS_MULTIPLANAR(f->type) != false) { + struct v4l2_pix_format_mplane *p = &f->fmt.pix_mp; + + MVX_SESSION_INFO(session, + "v4l2: %s. type=%u, pixelformat=0x%08x, width=%u, height=%u, num_planes=%u", + prefix, + f->type, p->pixelformat, + p->width, p->height, + p->num_planes); + MVX_SESSION_INFO(session, + "v4l2: %s. colorspace=%u, ycbcr_enc=%u, xfer_func=%u, quantization=%u", + prefix, + p->colorspace, p->ycbcr_enc, + p->xfer_func, p->quantization); + } else { + struct v4l2_pix_format *p = &f->fmt.pix; + + MVX_SESSION_INFO(session, + "v4l2: %s. type=%u, pixelformat=0x%08x, width=%u, height=%u.", + prefix, + f->type, p->pixelformat, + p->width, p->height); + MVX_SESSION_INFO(session, + "v4l2: %s. 
colorspace=%u, ycbcr_enc=%u, xfer_func=%u, quantization=%u",
+				 prefix,
+				 p->colorspace, p->ycbcr_enc,
+				 p->xfer_func, p->quantization);
+	}
+}
+
+/**
+ * dump_input_data_to_local() - dump input buffer.
+ * @session: Pointer to MVX session.
+ * @vb: Buffer to dump.
+ */
+static void dump_input_data_to_local(struct mvx_session *session,
+				     struct vb2_buffer *vb)
+{
+	struct mvx_session_port *input = &session->port[MVX_DIR_INPUT];
+	bool is_ivf = (input->format == MVX_FORMAT_AV1 ||
+		       input->format == MVX_FORMAT_VP8 ||
+		       input->format == MVX_FORMAT_VP9);
+	void *buffer = (void *)vb2_plane_vaddr(vb, 0);
+	unsigned int size = vb->planes[0].bytesused;
+
+	if (is_ivf) {
+		struct mvx_ivf_frame ivf_frame = {
+			.size = size,
+			.timestamp = input->dump_count
+		};
+		uint8_t *ivf_frame_head = (uint8_t*)(&ivf_frame);
+		kernel_write(session->data_fp, ivf_frame_head, sizeof(struct mvx_ivf_frame), &(session->data_fp->f_pos));
+	}
+	kernel_write(session->data_fp, buffer, size, &(session->data_fp->f_pos));
+}
+
+/**
+ * queue_setup() - Initialize or verify queue parameters.
+ * @q: Videobuf2 queue.
+ * @buf_cnt: Requested/required buffers count.
+ * @plane_cnt: Required number of planes.
+ * @plane_size: Required size of each plane.
+ * @alloc_devs: Device to allocate memory from.
+ *
+ * This callback is used to query parameters of a queue from the driver.
+ * Vb2 sets buf_cnt to requested amount of buffers, but a driver is free to
+ * choose another value and return it. Vb2 will then call queue_setup() again
+ * to verify that the new value is accepted by a driver.
+ *
+ * Vb2 also uses plane_cnt parameter to signal if queue_setup() was called
+ * from create_bufs() or reqbufs().
+ *
+ * No locking is required in this function. The reason is that it will be called
+ * from within vb2_reqbufs() or vb2_create_bufs() which are executed from our
+ * code with session mutex already taken.
+ *
+ * Return: 0 on success, else error code.
+ */ +#if KERNEL_VERSION(4, 5, 0) <= LINUX_VERSION_CODE +static int queue_setup(struct vb2_queue *q, + unsigned int *buf_cnt, + unsigned int *plane_cnt, + unsigned int plane_size[], + struct device *alloc_devs[]) +#else +static int queue_setup(struct vb2_queue *q, + const void *unused, + unsigned int *buf_cnt, + unsigned int *plane_cnt, + unsigned int plane_size[], + void *alloc_devs[]) +#endif +{ + struct mvx_v4l2_port *vport = vb2_get_drv_priv(q); + struct mvx_session_port *port = vport->port; + struct mvx_v4l2_session *vsession = vport->vsession; + struct mvx_session *session = &vsession->session; + unsigned int i, port_format_bpp, display_size_format_bpp; + + /* + * If the output frame resolution is not known, then there is no need + * to allocate buffers yet. But 1 buffer will be needed to carry + * information about 'resolution change' and 'end of stream'. + */ + if (vport->dir == MVX_DIR_OUTPUT && + mvx_is_frame(port->format) != false && + (port->width == 0 || port->height == 0)) + *buf_cnt = 1; + + memset(plane_size, 0, sizeof(plane_size[0]) * VB2_MAX_PLANES); + *plane_cnt = port->nplanes; + port_format_bpp = mvx_get_format_bpp(port->format); + display_size_format_bpp = mvx_get_format_bpp(port->display_size_format); + for (i = 0; i < port->nplanes; ++i) { + unsigned int tmp_size; + /* Vb2 allocator does not handle well buffers of zero size. 
*/
+		plane_size[i] = max_t(unsigned int, port->size[i], 1);
+		if (vport->dir == MVX_DIR_OUTPUT &&
+		    (port_format_bpp != 0 && display_size_format_bpp != 0) &&
+		    (session->dsl_frame.width < 16 && session->dsl_frame.height < 16) &&
+		    (session->crop.crop_en == 0)) {
+			tmp_size = (unsigned int)((uint64_t)port->display_size[i] * port_format_bpp / display_size_format_bpp);
+			plane_size[i] = max_t(unsigned int, tmp_size, plane_size[i]);
+		}
+		if (session->seamless_target.seamless_mode != 0 &&
+		    vport->dir == MVX_DIR_OUTPUT &&
+		    vsession->port[MVX_DIR_INPUT].port->format <= MVX_FORMAT_BITSTREAM_LAST) {
+			plane_size[i] = plane_size[i] < session->seamless_target.target_size[i] ? session->seamless_target.target_size[i] : plane_size[i];
+		}
+		alloc_devs[i] = session->dev;
+	}
+
+	MVX_SESSION_VERBOSE(session,
+			    "queue_setup. vsession=%px, vport=%px, vb2_queue=%px, dir=%d, format=0x%x, width=%u, height=%u, nplanes=%u, plane_size=[%u, %u, %u]",
+			    vsession, vport, q, vport->dir, port->format,
+			    port->width, port->height, port->nplanes,
+			    plane_size[0], plane_size[1], plane_size[2]);
+
+	return 0;
+}
+
+/**
+ * buf_init() - Perform initialization for Vb2 buffer.
+ * @b: Pointer to Vb2 buffer.
+ *
+ * Vb2 framework calls this function once for every allocated buffer.
+ * A driver fetches a list of memory pages and constructs MVX V4L2 buffers.
+ *
+ * No locking is required in this function. The reason is that it will be called
+ * from within vb2_reqbufs() or vb2_create_bufs() which are executed from our
+ * code with session mutex already taken.
+ *
+ * Return: 0 in case of success, error code otherwise.
+ */ +static int buf_init(struct vb2_buffer *b) +{ + struct mvx_v4l2_buffer *vbuf = vb2_to_mvx_v4l2_buffer(b); + + int ret; + unsigned int i; + struct sg_table *sgt[MVX_BUFFER_NPLANES] = { 0 }; + struct vb2_queue *q = b->vb2_queue; + struct mvx_v4l2_port *vport = vb2_get_drv_priv(q); + struct mvx_v4l2_session *vsession = vport->vsession; + struct mvx_session *session = &vsession->session; + + MVX_SESSION_VERBOSE(session, + "v4l2: Initialize buffer. vb=%px, type=%u, index=%u, num_planes=%u.", + b, b->type, b->index, b->num_planes); + + if (b->num_planes > MVX_BUFFER_NPLANES) { + MVX_SESSION_WARN(session, + "Failed to initialize buffer. Too many planes. vb=%px, num_planes=%u.", + b, b->num_planes); + return -EINVAL; + } + + for (i = 0; i < b->num_planes; ++i) { + sgt[i] = vb2_dma_sg_plane_desc(b, i); + if (sgt[i] == NULL) { + MVX_SESSION_WARN(session, + "Cannot fetch SG descriptor. vb=%px, plane=%u.", + b, i); + return -ENOMEM; + } + } + + ret = mvx_v4l2_buffer_construct(vbuf, vsession, vport->dir, + b->num_planes, sgt); + + return ret; +} + +/** + * buf_cleanup() - Destroy data associated to Vb2 buffer. + * @b: Pointer to Vb2 buffer. + * + * Vb2 framework calls this function while destroying a buffer. + */ +static void buf_cleanup(struct vb2_buffer *b) +{ + struct vb2_queue *q = b->vb2_queue; + struct mvx_v4l2_port *vport = vb2_get_drv_priv(q); + struct mvx_v4l2_session *vsession = vport->vsession; + struct mvx_session *session = &vsession->session; + struct mvx_v4l2_buffer *vbuf = vb2_to_mvx_v4l2_buffer(b); + + MVX_SESSION_VERBOSE(session, + "v4l2: Cleanup buffer. vb=%px, index=%u, vbuf=%px, type=%u.", + b, b->type, b->index, vbuf); + + mvx_v4l2_buffer_destruct(vbuf); +} + +/** + * start_streaming() - Start streaming for queue. + * @q: Pointer to a queue. + * @cnt: Amount of buffers already owned by a driver. + * + * Vb2 calls this function when it is ready to start streaming for a queue. 
+ * Vb2 ensures that minimum required amount of buffers were enqueued to the + * driver before calling this function. + * + * Return: 0 in case of success, error code otherwise. + */ +static int start_streaming(struct vb2_queue *q, + unsigned int cnt) +{ + /* + * Parameter cnt is not used so far. + */ + struct mvx_v4l2_port *vport = vb2_get_drv_priv(q); + struct mvx_v4l2_session *vsession = vport->vsession; + struct mvx_session *session = &vsession->session; + int ret; + + MVX_SESSION_VERBOSE(session, + "v4l2: Start streaming. queue=%px, type=%u, cnt=%u.", + q, q->type, cnt); + + ret = mvx_session_streamon(&vsession->session, vport->dir); + + /* + * If attempt was not successful, we should return all owned buffers + * to Vb2 with vb2_buffer_done() with state VB2_BUF_STATE_QUEUED. + */ + if (ret != 0 && atomic_read(&q->owned_by_drv_count) > 0) { + int i; + + for (i = 0; i < q->num_buffers; ++i) + if (q->bufs[i]->state == VB2_BUF_STATE_ACTIVE) + vb2_buffer_done(q->bufs[i], + VB2_BUF_STATE_QUEUED); + + WARN_ON(atomic_read(&q->owned_by_drv_count)); + } + + return ret; +} + +/** + * stop_streaming() - Stop streaming for a queue. + * @q: Pointer to a queue. + * + * Vb2 calls this function when streaming should be terminated. + * The driver must ensure that no DMA transfers are ongoing and + * return all buffers to Vb2 with vb2_buffer_done(). + */ +static void stop_streaming(struct vb2_queue *q) +{ + struct mvx_v4l2_port *vport = vb2_get_drv_priv(q); + struct mvx_v4l2_session *vsession = vport->vsession; + struct mvx_session *session = &vsession->session; + + MVX_SESSION_VERBOSE(session, + "v4l2: Stop streaming. queue=%px, type=%u.", + q, q->type); + + mvx_session_streamoff(&vsession->session, vport->dir); + + /* + * We have to return all owned buffers to Vb2 before exiting from + * this callback. + * + * Note: there must be no access to buffers after they are returned. 
+ */ + if (atomic_read(&q->owned_by_drv_count) > 0) { + int i; + + for (i = 0; i < q->num_buffers; ++i) + if (q->bufs[i]->state == VB2_BUF_STATE_ACTIVE) + vb2_buffer_done(q->bufs[i], + VB2_BUF_STATE_ERROR); + + WARN_ON(atomic_read(&q->owned_by_drv_count)); + } +} + +/** + * buf_queue() - Enqueue buffer to a driver. + * @b: Pointer to Vb2 buffer structure. + * + * Vb2 calls this function to enqueue a buffer to a driver. + * A driver should later return a buffer to Vb2 with vb2_buffer_done(). + * + * Return: 0 in case of success, error code otherwise. + */ +static void buf_queue(struct vb2_buffer *b) +{ + struct vb2_queue *q = b->vb2_queue; + struct mvx_v4l2_port *vport = vb2_get_drv_priv(q); + enum mvx_direction dir = vport->dir; + struct mvx_v4l2_session *vsession = vport->vsession; + struct mvx_session *session = &vsession->session; + struct mvx_v4l2_buffer *vbuf = vb2_to_mvx_v4l2_buffer(b); + + int ret; + + MVX_SESSION_VERBOSE(session, + "v4l2: Queue buffer. b=%px, type=%u, index=%u.", + b, b->type, b->index); + if (!vsession->first_input_processed && b->planes[0].bytesused >= 4) { + if (dir == MVX_DIR_INPUT && vport->port->format == MVX_FORMAT_JPEG) { + uint32_t *data = vb2_plane_vaddr(b, 0) + b->planes[0].data_offset; + if (*data == v4l2_fourcc('A', 'V', 'I', 'F')) { + /* Not a valid bitstream buffer, return it to client */ + MVX_SESSION_INFO(session, + "v4l2: Skip invalid bitstream buffer, offset = %d, size = %d", + b->planes[0].data_offset, b->planes[0].bytesused); + b->planes[0].data_offset += b->planes[0].bytesused; + b->planes[0].bytesused = 0; + vb2_buffer_done(b, VB2_BUF_STATE_DONE); + return; + } + } + vsession->first_input_processed = true; + } + vbuf->buf.format = vport->port->format; + if (vsession->session.force_key_frame && dir == MVX_DIR_INPUT) { + struct vb2_v4l2_buffer *vb2_v4l2 = to_vb2_v4l2_buffer(b); + + vb2_v4l2->flags |= V4L2_BUF_FLAG_KEYFRAME; + mvx_session_set_force_key_frame(&vsession->session, 0); + } + ret = mvx_v4l2_buffer_set(vbuf, 
b); + if (ret != 0) { + goto failed; + } + if (dir == MVX_DIR_INPUT && session->data_fp != NULL) { + dump_input_data_to_local(session, b); + session->port[dir].dump_count++; + } + ret = mvx_session_qbuf(&vsession->session, dir, &vbuf->buf); + if (ret != 0) { + goto failed; + } + return; + +failed: + if (vbuf->buf.flags & MVX_BUFFER_FRAME_NEED_REALLOC) { + vbuf->vb2_v4l2_buffer.flags |= V4L2_BUF_FLAG_MVX_BUFFER_NEED_REALLOC; + vb2_buffer_done(b, VB2_BUF_STATE_DONE); + return; + } + vb2_buffer_done(b, VB2_BUF_STATE_ERROR); +} + +/** + * buf_finish() - Finish buffer before it is returned to user space. + * @vb: Pointer to Vb2 buffer structure. + */ +static void buf_finish(struct vb2_buffer *vb) +{ + struct mvx_v4l2_port *vport = vb2_get_drv_priv(vb->vb2_queue); + struct mvx_v4l2_buffer *vbuf = vb2_to_mvx_v4l2_buffer(vb); + + if (vbuf->buf.planes[0].filled > 0) { + vport->afbc_crop_left = vbuf->buf.crop_left; + vport->afbc_crop_top = vbuf->buf.crop_top; + } +} + +/** + * wait_prepare() - Prepare driver for waiting + * @q: Pointer to Vb2 queue. + * + * Vb2 calls this function when it is about to wait for more buffers to + * be received. A driver should release any locks taken while calling Vb2 + * functions. + * This is required to avoid a deadlock. + * + * This is unused for now and will be called from Vb2. + */ +static void wait_prepare(struct vb2_queue *q) +{ + struct mvx_v4l2_port *vport = vb2_get_drv_priv(q); + struct mvx_v4l2_session *vsession = vport->vsession; + struct mvx_session *session = &vsession->session; + + MVX_SESSION_VERBOSE(session, "v4l2: Wait prepare. queue=%px.", q); + + mutex_unlock(&vsession->mutex); +} + +/** + * wait_finish() - Wake up after sleep. + * @q: Pointer to Vb2 queue. + * + * Require mutexes release before. + * + * This is unused for now and will be called from Vb2. 
+ */ +static void wait_finish(struct vb2_queue *q) +{ + struct mvx_v4l2_port *vport = vb2_get_drv_priv(q); + struct mvx_v4l2_session *vsession = vport->vsession; + struct mvx_session *session = &vsession->session; + int ignore; + + MVX_SESSION_VERBOSE(session, "v4l2: Wait finish. queue=%px.", q); + + /* + * mutex_lock_interruptible is declared with attribute + * warn_unused_result, but we have no way to return a status + * from wait_finish(). + */ + ignore = mutex_lock_interruptible(&vsession->mutex); +} + +/** + * mvx_vb2_ops - Callbacks for Vb2 framework + * Not all possible callbacks are implemented as some of them are optional. + */ +const struct vb2_ops mvx_vb2_ops = { + .queue_setup = queue_setup, + .buf_init = buf_init, + .buf_finish = buf_finish, + .buf_cleanup = buf_cleanup, + .start_streaming = start_streaming, + .stop_streaming = stop_streaming, + .buf_queue = buf_queue, + .wait_prepare = wait_prepare, + .wait_finish = wait_finish +}; + +/** + * setup_vb2_queue() - Initialize vb2_queue before it can be used by Vb2. + */ +static int setup_vb2_queue(struct mvx_v4l2_port *vport) +{ + struct vb2_queue *q = &vport->vb2_queue; +#if KERNEL_VERSION(4, 5, 0) <= LINUX_VERSION_CODE + struct device *dev = vport->vsession->ext->dev; +#endif + int ret; + + q->drv_priv = vport; + q->type = vport->type; + q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF; +#if KERNEL_VERSION(4, 5, 0) <= LINUX_VERSION_CODE + q->dev = dev; +#endif + q->ops = &mvx_vb2_ops; + q->mem_ops = &vb2_dma_sg_memops; + q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY; + q->allow_zero_bytesused = true; + if (vport->dir == MVX_DIR_OUTPUT && mvx_is_frame(vport->port->format)) + q->min_buffers_needed = 0; + else + q->min_buffers_needed = 1; + + /* Let Vb2 handle mvx_v4l2_buffer allocations. 
*/ + q->buf_struct_size = sizeof(struct mvx_v4l2_buffer); + + ret = vb2_queue_init(q); + + return ret; +} + +/**************************************************************************** + * Exported functions and variables + ****************************************************************************/ + +int mvx_v4l2_vidioc_querycap(struct file *file, + void *fh, + struct v4l2_capability *cap) +{ + struct mvx_v4l2_session *session = file_to_session(file); + + MVX_SESSION_INFO(&session->session, "v4l2: Query capabilities."); + + strlcpy(cap->driver, "mvx", sizeof(cap->driver)); + strlcpy(cap->card, "Linlon Video device", sizeof(cap->card)); + strlcpy(cap->bus_info, "platform:mvx", sizeof(cap->bus_info)); + + cap->capabilities = V4L2_CAP_DEVICE_CAPS | + V4L2_CAP_VIDEO_M2M | + V4L2_CAP_VIDEO_M2M_MPLANE | + V4L2_CAP_EXT_PIX_FORMAT | + V4L2_CAP_STREAMING; + cap->device_caps = cap->capabilities & ~V4L2_CAP_DEVICE_CAPS; + + return 0; +} + +static int mvx_v4l2_vidioc_enum_fmt_vid(struct mvx_v4l2_session *session, + struct v4l2_fmtdesc *f, + enum mvx_direction dir) +{ + struct mvx_session_format_map *mvx_fmt = NULL; + + mvx_fmt = mvx_session_enum_format(&session->session, dir, f->index); + if (!mvx_fmt) + return -EINVAL; + + f->flags = mvx_fmt->flags; + f->pixelformat = mvx_fmt->pixelformat; + strlcpy(f->description, mvx_fmt->description, + sizeof(f->description)); + + return 0; +} + +int mvx_v4l2_vidioc_enum_fmt_vid_cap(struct file *file, + void *fh, + struct v4l2_fmtdesc *f) +{ + struct mvx_v4l2_session *session = file_to_session(file); + int ret; + + ret = mvx_v4l2_vidioc_enum_fmt_vid(session, f, MVX_DIR_OUTPUT); + + return ret; +} + +int mvx_v4l2_vidioc_enum_fmt_vid_out(struct file *file, + void *fh, + struct v4l2_fmtdesc *f) +{ + struct mvx_v4l2_session *session = file_to_session(file); + int ret; + + ret = mvx_v4l2_vidioc_enum_fmt_vid(session, f, MVX_DIR_INPUT); + + return ret; +} + +int mvx_v4l2_vidioc_enum_framesizes(struct file *file, + void *fh, + struct 
v4l2_frmsizeenum *fsize) +{ + struct mvx_session_format_map *format; + struct mvx_ext_if *ctx = video_drvdata(file); + struct mvx_v4l2_session *vsession = file_to_session(file); + + /* Verify that format is supported. */ + format = mvx_session_find_format(fsize->pixel_format); + if (IS_ERR(format)) + return PTR_ERR(format); + + /* + * For uncompressed format, check the corresponding compressed format + * in the other port to get max/min resolution. + */ + if (format->format >= MVX_FORMAT_FRAME_FIRST) { + struct mvx_session_format_map *bits_format; + bits_format = mvx_session_get_compressed_format(&vsession->session); + if (!IS_ERR(bits_format)) + format = bits_format; + } + + /* For stepwise/continuous frame size the index must be 0. */ + if (fsize->index != 0) + return -EINVAL; + + fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE; + mvx_session_enum_framesizes(&vsession->session, ctx->is_encoder, format->format, + &(fsize->stepwise.min_width), &(fsize->stepwise.min_height), + &(fsize->stepwise.max_width), &(fsize->stepwise.max_height), + &(fsize->stepwise.step_width), &(fsize->stepwise.step_height)); + + return 0; +} + +static void mvx_v4l2_copy_color_desc(struct v4l2_pix_format_mplane *dst, + struct v4l2_pix_format_mplane *src) +{ + dst->colorspace = src->colorspace; + dst->xfer_func = src->xfer_func; + dst->ycbcr_enc = src->ycbcr_enc; + dst->quantization = src->quantization; +} + +static int mvx_v4l2_vidioc_g_fmt_vid(struct file *file, + struct v4l2_format *f, + enum mvx_direction dir) +{ + struct mvx_v4l2_session *vsession = file_to_session(file); + struct mvx_v4l2_port *vport = &vsession->port[dir]; + struct mvx_session_port *port = &vsession->session.port[dir]; + int ret; + + ret = mutex_lock_interruptible(&vsession->mutex); + if (ret != 0) + return ret; + + if (dir == MVX_DIR_OUTPUT && vsession->ext->is_encoder) + mvx_v4l2_copy_color_desc(&vport->pix_mp, &vsession->port[MVX_DIR_INPUT].pix_mp); + + to_v4l2_format(f, f->type, &vport->pix_mp, port->stride, port->size, 
+ port->interlaced); + + mutex_unlock(&vsession->mutex); + + print_format(&vsession->session, f, "Get format"); + + return 0; +} + +int mvx_v4l2_vidioc_g_fmt_vid_cap(struct file *file, + void *fh, + struct v4l2_format *f) +{ + return mvx_v4l2_vidioc_g_fmt_vid(file, f, MVX_DIR_OUTPUT); +} + +int mvx_v4l2_vidioc_g_fmt_vid_out(struct file *file, + void *fh, + struct v4l2_format *f) +{ + return mvx_v4l2_vidioc_g_fmt_vid(file, f, MVX_DIR_INPUT); +} + +static int mvx_v4l2_vidioc_s_fmt_vid(struct file *file, + struct v4l2_format *f, + enum mvx_direction dir) +{ + struct mvx_v4l2_session *vsession = file_to_session(file); + struct mvx_session_port *port = &vsession->session.port[dir]; + struct mvx_v4l2_port *vport = &vsession->port[dir]; + struct v4l2_pix_format_mplane pix_mp; + enum mvx_format format; + unsigned int stride[MVX_BUFFER_NPLANES] = {0}; + unsigned int size[MVX_BUFFER_NPLANES]; + bool interlaced = false; + int ret; + + ret = mutex_lock_interruptible(&vsession->mutex); + if (ret != 0) + return ret; + + if (vport->q_set != false && vb2_is_busy(&vport->vb2_queue) != false) { + MVX_SESSION_WARN(&vsession->session, + "Can't set format when there there buffers allocated to the port."); + ret = -EBUSY; + goto unlock_mutex; + } + + /* Convert V4L2 format to V4L2 multi planar pixel format. */ + ret = from_v4l2_format(vsession, f, &pix_mp, &format, stride, size, + &interlaced); + if (ret != 0) + goto unlock_mutex; + + /* Validate and adjust settings. */ + ret = mvx_session_set_format(&vsession->session, dir, format, pix_mp.pixelformat, + &pix_mp.width, &pix_mp.height, + &pix_mp.num_planes, + stride, size, &interlaced); + if (ret != 0) + goto unlock_mutex; + + if (dir == MVX_DIR_INPUT && vsession->ext->is_encoder) + mvx_v4l2_session_set_color_info(vsession, &pix_mp); + + if (dir == MVX_DIR_OUTPUT && vsession->ext->is_encoder) + mvx_v4l2_copy_color_desc(&pix_mp, &vsession->port[MVX_DIR_INPUT].pix_mp); + + /* Convert V4L2 multi planar pixel format to format. 
*/ + ret = to_v4l2_format(f, f->type, &pix_mp, stride, size, interlaced); + if (ret != 0) + goto unlock_mutex; + + vport->type = f->type; + vport->pix_mp = pix_mp; + + if (dir == MVX_DIR_OUTPUT && !vsession->ext->is_encoder && + !mvx_is_afbc(port->format)) { + mvx_session_update_buffer_count(&vsession->session, dir); + } + + /* Update output dimensions to align with input */ + if (dir == MVX_DIR_INPUT) { + vsession->port[MVX_DIR_OUTPUT].pix_mp.width = pix_mp.width; + vsession->port[MVX_DIR_OUTPUT].pix_mp.height = pix_mp.height; + } + +unlock_mutex: + mutex_unlock(&vsession->mutex); + + print_format(&vsession->session, f, "Set format"); + + return ret; +} + +int mvx_v4l2_vidioc_s_fmt_vid_cap(struct file *file, + void *fh, + struct v4l2_format *f) +{ + return mvx_v4l2_vidioc_s_fmt_vid(file, f, MVX_DIR_OUTPUT); +} + +int mvx_v4l2_vidioc_s_fmt_vid_out(struct file *file, + void *fh, + struct v4l2_format *f) +{ + return mvx_v4l2_vidioc_s_fmt_vid(file, f, MVX_DIR_INPUT); +} + +static int mvx_v4l2_vidioc_try_fmt_vid(struct file *file, + struct v4l2_format *f, + enum mvx_direction dir) +{ + struct mvx_v4l2_session *vsession = file_to_session(file); + struct v4l2_pix_format_mplane pix; + enum mvx_format format; + unsigned int stride[MVX_BUFFER_NPLANES] = {0}; + unsigned int size[MVX_BUFFER_NPLANES]; + bool interlaced = false; + int ret; + + ret = mutex_lock_interruptible(&vsession->mutex); + if (ret != 0) + return ret; + + ret = from_v4l2_format(vsession, f, &pix, &format, stride, size, + &interlaced); + if (ret != 0) + goto unlock_mutex; + + ret = mvx_session_try_format(&vsession->session, dir, format, + &pix.width, &pix.height, &pix.num_planes, + stride, size, &interlaced); + if (ret != 0) + goto unlock_mutex; + + if ((dir == MVX_DIR_INPUT && vsession->ext->is_encoder) || !vsession->ext->is_encoder) + mvx_v4l2_session_try_color_info(vsession, &pix); + + if (dir == MVX_DIR_OUTPUT && vsession->ext->is_encoder) + mvx_v4l2_copy_color_desc(&pix, 
&vsession->port[MVX_DIR_INPUT].pix_mp); + + ret = to_v4l2_format(f, f->type, &pix, stride, size, interlaced); + if (ret != 0) + goto unlock_mutex; + +unlock_mutex: + mutex_unlock(&vsession->mutex); + + print_format(&vsession->session, f, "Try format"); + + return ret; +} + +int mvx_v4l2_vidioc_try_fmt_vid_cap(struct file *file, + void *fh, + struct v4l2_format *f) +{ + return mvx_v4l2_vidioc_try_fmt_vid(file, f, MVX_DIR_OUTPUT); +} + +int mvx_v4l2_vidioc_try_fmt_vid_out(struct file *file, + void *fh, + struct v4l2_format *f) +{ + return mvx_v4l2_vidioc_try_fmt_vid(file, f, MVX_DIR_INPUT); +} + +static void mvx_get_composing(struct mvx_v4l2_session *vsession, + enum mvx_direction dir, + struct v4l2_selection *s) +{ + struct mvx_session *session = &vsession->session; + struct mvx_v4l2_port *vport = &vsession->port[dir]; + struct mvx_session_port *port = &session->port[dir]; + enum mvx_format format = port->format; + + if (mvx_is_frame(format)) { + if (mvx_is_afbc(format)) { + s->r.left = vport->afbc_crop_left >> session->dual_afbc_downscaled; + s->r.top = vport->afbc_crop_top >> session->dual_afbc_downscaled; + } else { + s->r.left = 0; + s->r.top = 0; + } + s->r.width = port->width; + s->r.height = port->height; + } +} + +int mvx_v4l2_vidioc_g_selection(struct file *file, + void *fh, + struct v4l2_selection *s) +{ + struct mvx_v4l2_session *vsession = file_to_session(file); + struct mvx_session *session = &vsession->session; + enum mvx_direction dir = V4L2_TYPE_IS_OUTPUT(s->type) ? 
+ MVX_DIR_INPUT : MVX_DIR_OUTPUT; + struct mvx_session_port *port = &session->port[dir]; + enum mvx_format format = port->format; + int ret = 0; + + if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE && + s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT && + s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE && + s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) + return -EINVAL; + + ret = mutex_lock_interruptible(&vsession->mutex); + if (ret != 0) + return ret; + + switch (s->target) { + case V4L2_SEL_TGT_CROP: + case V4L2_SEL_TGT_CROP_DEFAULT: + case V4L2_SEL_TGT_CROP_BOUNDS: + case V4L2_SEL_TGT_COMPOSE: + case V4L2_SEL_TGT_COMPOSE_DEFAULT: + case V4L2_SEL_TGT_COMPOSE_BOUNDS: + case V4L2_SEL_TGT_COMPOSE_PADDED: + s->r.left = 0; + s->r.top = 0; + s->r.width = session->orig_width; + s->r.height = session->orig_height; + break; + default: + mutex_unlock(&vsession->mutex); + return -EINVAL; + } + + if (s->target == V4L2_SEL_TGT_CROP && session->crop.crop_en) { + s->r.left = session->crop.x; + s->r.top = session->crop.y; + s->r.width = session->crop.width; + s->r.height = session->crop.height; + } + + if (s->target == V4L2_SEL_TGT_COMPOSE) { + if (vsession->ext->is_encoder) { + if (format == MVX_FORMAT_H264 || format == MVX_FORMAT_HEVC) { + /* + * Frame cropping offset parameters of H.264 or conformance + * cropping window offset parameters of HEVC + */ + s->r.left = session->crop_left; + s->r.top = session->crop_top; + s->r.width = port->width - session->crop_left - session->crop_right; + s->r.height = port->height - session->crop_top - session->crop_bottom; + } + } else { /* is decoder */ + mvx_get_composing(vsession, dir, s); + } + } else if (s->target == V4L2_SEL_TGT_COMPOSE_DEFAULT) { + if (!vsession->ext->is_encoder) + mvx_get_composing(vsession, dir, s); + } + + if (s->r.width == 0 || s->r.height == 0) + ret = -EINVAL; + + mutex_unlock(&vsession->mutex); + + if (ret == 0) + MVX_SESSION_INFO(session, + "v4l2: Get selection. 
target = %d, dir=%u, crop={left=%u, top=%u, width=%u, height=%u.", + s->target, dir, s->r.left, s->r.top, s->r.width, s->r.height); + + return ret; +} + +static void mvx_validate_enc_crop(unsigned int width, unsigned int height, + struct v4l2_rect *rect) +{ + if (rect->top < 0 || rect->left < 0 || rect->width == 0 || rect->height == 0) { + rect->top = 0; + rect->left = 0; + rect->width = width; + rect->height = height; + + return; + } + + rect->top = ALIGN(rect->top, 2); + rect->left = ALIGN(rect->left, 2); + rect->width = ALIGN(rect->width, 2); + rect->height = ALIGN(rect->height, 2); + + rect->width = clamp_t(uint32_t, rect->width, 16, width); + rect->left = min_t(uint32_t, rect->left, width - rect->width); + rect->height = clamp_t(uint32_t, rect->height, 16, height); + rect->top = min_t(uint32_t, rect->top, height - rect->height); +} + +static int mvx_set_enc_crop(struct mvx_v4l2_session *vsession, + struct v4l2_selection *s, enum mvx_direction dir) +{ + struct mvx_session_port *port = &vsession->session.port[dir]; + struct mvx_session *session = &vsession->session; + int ret; + + if (port->format != MVX_FORMAT_H264 && port->format != MVX_FORMAT_HEVC) { + if (port->width > 0 && port->height > 0 && + port->width != s->r.width && port->height != s->r.height) { + MVX_SESSION_ERR(session, + "v4l2: encode cropping is supported for H.264 and HEVC only."); + return -EINVAL; + } else { + return 0; + } + } + + mvx_validate_enc_crop(port->width, port->height, &s->r); + + ret = mutex_lock_interruptible(&vsession->mutex); + if (ret != 0) + return ret; + do { + ret = mvx_session_set_crop_left(session, s->r.left); + if (ret != 0) + break; + ret = mvx_session_set_crop_top(session, s->r.top); + if (ret != 0) + break; + ret = mvx_session_set_crop_right(session, + port->width - s->r.width - s->r.left); + if (ret != 0) + break; + ret = mvx_session_set_crop_bottom(session, + port->height - s->r.height - s->r.top); + if (ret != 0) + break; + } while (0); + 
mutex_unlock(&vsession->mutex); + + MVX_SESSION_INFO(session, + "v4l2: Set enc crop. type=%u, crop={left=%u, top=%u, right=%u, bottom=%u.", + s->type, s->r.left, s->r.top, + port->width - s->r.width - s->r.left, + port->height - s->r.height - s->r.top); + + return ret; +} + +static void mvx_validate_crop(unsigned int width, unsigned int height, + struct v4l2_selection *s, int alignment) +{ + struct v4l2_rect *rect = &s->r; + + if (rect->top < 0 || rect->left < 0 || rect->width == 0 || rect->height == 0) { + rect->top = 0; + rect->left = 0; + rect->width = width; + rect->height = height; + + return; + } + + if (s->flags == V4L2_SEL_FLAG_GE) { + rect->top = round_up(rect->top, alignment); + rect->left = round_up(rect->left, alignment); + rect->width = round_up(rect->width, alignment); + rect->height = round_up(rect->height, alignment); + + rect->width = max_t(uint32_t, rect->width, 64); + rect->height = max_t(uint32_t, rect->height, 64); + } else { + rect->top = round_down(rect->top, alignment); + rect->left = round_down(rect->left, alignment); + rect->width = round_down(rect->width, alignment); + rect->height = round_down(rect->height, alignment); + + if (width >= 144) + rect->width = min_t(uint32_t, rect->width, width - rect->left); + if (height >= 144) + rect->height = min_t(uint32_t, rect->height, height - rect->top); + } +} + +static int mvx_set_crop(struct mvx_v4l2_session *vsession, + struct v4l2_selection *s, enum mvx_direction dir) +{ + struct mvx_session_port *port = &vsession->session.port[dir]; + struct mvx_session *session = &vsession->session; + struct mvx_crop_cfg mvx_crop; + int ret; + + if (mvx_is_afbc(port->format) || mvx_is_bitstream(port->format)) { + MVX_SESSION_WARN(session, + "v4l2: cropping is not supported for AFBC and bitstream."); + if (session->orig_width > 0 && session->orig_height > 0 && + session->orig_width != s->r.width && session->orig_height != s->r.height) + return -EINVAL; + else + return 0; + } + + 
mvx_validate_crop(session->orig_width, session->orig_height, s, + (vsession->ext->is_encoder) ? 2 : 4); + + if (session->orig_width >= 144 && session->orig_height >= 144) { + if ((session->orig_width < (s->r.left + s->r.width) || + session->orig_height < (s->r.top + s->r.height))) { + MVX_SESSION_WARN(session, "v4l2: crop size is larger than orignal size."); + return -ERANGE; + } + + if (s->r.width < 64 || s->r.height < 64) { + MVX_SESSION_WARN(session, "v4l2: crop size is smaller than 64."); + return -ERANGE; + } + + if (session->orig_width == s->r.width && session->orig_height == s->r.height ) + return 0; + } + + mvx_crop.crop_en = 1; + mvx_crop.x = s->r.left; + mvx_crop.y = s->r.top; + mvx_crop.width = s->r.width; + mvx_crop.height = s->r.height; + + ret = mutex_lock_interruptible(&vsession->mutex); + if (ret != 0) + return ret; + ret = mvx_session_set_crop(session, &mvx_crop); + if (vsession->ext->is_encoder) { + /* For encoder, update the output resolution to cropped one */ + vsession->port[MVX_DIR_OUTPUT].pix_mp.width = s->r.width; + vsession->port[MVX_DIR_OUTPUT].pix_mp.height = s->r.height; + } else { + /* + * Update port resolution for decode only; keep original resolution + * for encoder to hold the whole frame data + */ + vsession->port[dir].pix_mp.width = s->r.width; + vsession->port[dir].pix_mp.height = s->r.height; + } + if (dir == MVX_DIR_OUTPUT && !vsession->ext->is_encoder && + !mvx_is_afbc(port->format)) { + mvx_session_update_buffer_count(session, dir); + } + + mutex_unlock(&vsession->mutex); + + MVX_SESSION_INFO(session, + "v4l2: Set crop. 
type=%u, crop={left=%u, top=%u, width=%u, height=%u.", + s->type, s->r.left, s->r.top, s->r.width, s->r.height); + + return ret; +} + +static void mvx_validate_scale(unsigned int width, unsigned int height, bool is_afbc, + struct v4l2_rect *rect) +{ + if (rect->top < 0 || rect->left < 0 || rect->width < 16 || rect->height < 16) { + rect->top = 0; + rect->left = 0; + rect->width = width; + rect->height = height; + return; + } + + rect->top = 0; + rect->left = 0; + rect->width = ALIGN(rect->width, 2); + rect->height = ALIGN(rect->height, 2); + + if (width >= 144) + rect->width = min_t(uint32_t, rect->width, width); + rect->width = max_t(uint32_t, rect->width, 16); + if (height >= 144) + rect->height = min_t(uint32_t, rect->height, height); + rect->height = max_t(uint32_t, rect->height, 16); + + if (is_afbc) { + /* AFBC supports dual downscaling only */ + rect->width = width >> 1; + rect->height = height >> 1; + } +} + +static int mvx_set_scale(struct mvx_v4l2_session *vsession, + struct v4l2_selection *s, enum mvx_direction dir) +{ + struct mvx_session_port *port = &vsession->session.port[dir]; + struct mvx_session *session = &vsession->session; + int ret; + + if (vsession->ext->is_encoder) + return -EINVAL; + + if ((session->orig_width < s->r.width || session->orig_height < s->r.height) && + (session->orig_width >= 144 && session->orig_height >= 144)) { + MVX_SESSION_WARN(session, "v4l2: Upscaling is not supported."); + return -EINVAL; + } + + if (session->orig_width == s->r.width && session->orig_height == s->r.height) + return 0; + mvx_validate_scale(session->orig_width, session->orig_height, + mvx_is_afbc(port->format), &s->r); + if (session->orig_width == s->r.width && session->orig_height == s->r.height) + return 0; + + ret = mutex_lock_interruptible(&vsession->mutex); + if (ret != 0) + return ret; + + if (mvx_is_afbc(port->format)) { + MVX_SESSION_INFO(session, "v4l2: Force dual AFBC downscaling."); + ret = mvx_session_set_dual_afbc_downscaled(session, 1); + 
s->r.left = port->afbc_crop_left >> 1; + s->r.top = port->afbc_crop_top >> 1; + } else { + struct mvx_dsl_frame dsl_frame; + dsl_frame.width = s->r.width; + dsl_frame.height = s->r.height; + ret = mvx_session_set_dsl_frame(session, &dsl_frame); + } + + vsession->port[dir].pix_mp.width = s->r.width + s->r.left; + vsession->port[dir].pix_mp.height = s->r.height + s->r.top; + + if (dir == MVX_DIR_OUTPUT && !vsession->ext->is_encoder && + !mvx_is_afbc(port->format)) { + mvx_session_update_buffer_count(session, dir); + } + + mutex_unlock(&vsession->mutex); + + MVX_SESSION_INFO(session, + "v4l2: Set compose (scaling). type=%u, dst={left=%u, top=%u, width=%u, height=%u.", + s->type, s->r.left, s->r.top, s->r.width, s->r.height); + + return ret; +} + +int mvx_v4l2_vidioc_s_selection(struct file *file, + void *fh, + struct v4l2_selection *s) +{ + struct mvx_v4l2_session *vsession = file_to_session(file); + enum mvx_direction dir = V4L2_TYPE_IS_OUTPUT(s->type) ? + MVX_DIR_INPUT : MVX_DIR_OUTPUT; + + if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE && + s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT && + s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE && + s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) + return -EINVAL; + + if (vsession->ext->is_encoder) { + if (s->target == V4L2_SEL_TGT_COMPOSE && dir == MVX_DIR_OUTPUT) + return mvx_set_enc_crop(vsession, s, dir); + else if (s->target == V4L2_SEL_TGT_CROP && dir == MVX_DIR_INPUT) + return mvx_set_crop(vsession, s, dir); + } else { + if (s->target == V4L2_SEL_TGT_COMPOSE && dir == MVX_DIR_OUTPUT) + return mvx_set_scale(vsession, s, dir); + else if (s->target == V4L2_SEL_TGT_CROP && dir == MVX_DIR_OUTPUT) + return mvx_set_crop(vsession, s, dir); + } + + return -EINVAL; +} + +int mvx_v4l2_vidioc_g_parm(struct file *file, + void *fh, + struct v4l2_streamparm *a) +{ + struct mvx_v4l2_session *vsession = file_to_session(file); + struct mvx_session *session = &vsession->session; + struct v4l2_fract *frameperiod = &a->parm.capture.timeperframe; + + 
if (!vsession->ext->is_encoder) { + if (!V4L2_TYPE_IS_OUTPUT(a->type)) { + /* + * Decode driver doesn't get frame rate from bitstream. So we + * don't set V4L2_CAP_TIMEPERFRAME capability, but just set + * numerator and denominator to non-zero values here in case + * client uses them and encounters divided-by-zero exception. + */ + a->parm.capture.capability = 0; + } else { + /* Allow client to set output port fps but driver won't send it to VPU */ + a->parm.output.capability = V4L2_CAP_TIMEPERFRAME; + } + + if (frameperiod->denominator == 0) { + frameperiod->numerator = session->fps_d; + frameperiod->denominator = session->fps_n; + } + } else { + frameperiod->numerator = session->fps_d; + frameperiod->denominator = session->fps_n; + a->parm.capture.capability = V4L2_CAP_TIMEPERFRAME; + } + + return 0; +} + +int mvx_v4l2_vidioc_s_parm(struct file *file, + void *fh, + struct v4l2_streamparm *a) +{ + struct mvx_v4l2_session *vsession = file_to_session(file); + struct mvx_session *session = &vsession->session; + struct v4l2_fract *frameperiod; + + if (mutex_lock_interruptible(&vsession->mutex) != 0) + return -EINTR; + + if (a->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE || + a->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) { + struct v4l2_captureparm *cparm = &a->parm.capture; + /* Just do sanity check but not update fps as VPU doesn't support frame rate conversion */ + frameperiod = &a->parm.capture.timeperframe; + if (frameperiod->numerator * session->fps_n != frameperiod->denominator * session->fps_d || + frameperiod->numerator == 0 || frameperiod->denominator == 0) { + MVX_SESSION_WARN(&session, + "v4l2: Invalid frame period from client (%d/%d). 
Return %d/%d", + frameperiod->numerator, frameperiod->denominator, session->fps_n, session->fps_d); + frameperiod->numerator = session->fps_d; + frameperiod->denominator = session->fps_n; + } + if (vsession->ext->is_encoder) + cparm->capability = V4L2_CAP_TIMEPERFRAME; + } else if (a->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE || + a->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) { + struct v4l2_outputparm *oparm = &a->parm.output; + frameperiod = &a->parm.output.timeperframe; + if (frameperiod->numerator == 0 || frameperiod->denominator == 0) { + frameperiod->numerator = session->fps_n; + frameperiod->denominator = session->fps_d; + oparm->capability = V4L2_CAP_TIMEPERFRAME; + mutex_unlock(&vsession->mutex); + return 0; + } + if ((uint64_t)frameperiod->denominator > (uint64_t)frameperiod->numerator * MAX_FRAME_RATE) { + MVX_SESSION_WARN(&session, + "v4l2: Framerate is larger than maximum value of VPU"); + frameperiod->denominator = MAX_FRAME_RATE * frameperiod->numerator; + } + /* Set frame rate if it's valid */ + mvx_session_set_frame_rate(session, frameperiod->denominator, frameperiod->numerator); + oparm->capability = V4L2_CAP_TIMEPERFRAME; + } else { + mutex_unlock(&vsession->mutex); + return -EINVAL; + } + + mutex_unlock(&vsession->mutex); + return 0; +} + +int mvx_v4l2_vidioc_streamon(struct file *file, + void *priv, + enum v4l2_buf_type type) +{ + struct mvx_v4l2_session *vsession = file_to_session(file); + enum mvx_direction dir = V4L2_TYPE_IS_OUTPUT(type) ? + MVX_DIR_INPUT : MVX_DIR_OUTPUT; + int ret; + + MVX_SESSION_INFO(&vsession->session, "v4l2: Stream on. dir=%u.", dir); + + ret = mutex_lock_interruptible(&vsession->mutex); + if (ret != 0) + return ret; + + ret = vb2_streamon(&vsession->port[dir].vb2_queue, type); + if (ret != 0) + MVX_SESSION_WARN(&vsession->session, + "v4l2: Failed to stream on. 
dir=%u.", dir); + + mutex_unlock(&vsession->mutex); + + return ret; +} + +int mvx_v4l2_vidioc_streamoff(struct file *file, + void *priv, + enum v4l2_buf_type type) +{ + struct mvx_v4l2_session *vsession = file_to_session(file); + enum mvx_direction dir = V4L2_TYPE_IS_OUTPUT(type) ? + MVX_DIR_INPUT : MVX_DIR_OUTPUT; + int ret; + + MVX_SESSION_INFO(&vsession->session, "v4l2: Stream off. dir=%u.", dir); + + ret = mutex_lock_interruptible(&vsession->mutex); + if (ret != 0) + return ret; + + ret = vb2_streamoff(&vsession->port[dir].vb2_queue, type); + if (ret != 0) + MVX_SESSION_WARN(&vsession->session, + "v4l2: Failed to stream off. dir=%u.", dir); + + MVX_SESSION_INFO(&vsession->session, + "v4l2: Stream off exit. dir=%u, ret=%d.", + dir, ret); + + mutex_unlock(&vsession->mutex); + + return ret; +} + +int mvx_v4l2_vidioc_encoder_cmd(struct file *file, + void *priv, + struct v4l2_encoder_cmd *cmd) +{ + struct mvx_v4l2_session *vsession = file_to_session(file); + int ret; + + if (!vsession->ext->is_encoder) + return -ENOTTY; + + MVX_SESSION_INFO(&vsession->session, "v4l2: encoder cmd: %u.", + cmd->cmd); + + ret = mutex_lock_interruptible(&vsession->mutex); + if (ret != 0) + return ret; + + switch (cmd->cmd) { + case V4L2_ENC_CMD_STOP: + ret = mvx_session_send_eos(&vsession->session); + break; + default: + MVX_SESSION_WARN(&vsession->session, + "Unsupported command. 
cmd: %u.", cmd->cmd); + ret = -EINVAL; + } + + mutex_unlock(&vsession->mutex); + + return ret; +} + +int mvx_v4l2_vidioc_try_encoder_cmd(struct file *file, + void *priv, + struct v4l2_encoder_cmd *cmd) +{ + struct mvx_v4l2_session *vsession = file_to_session(file); + + if (!vsession->ext->is_encoder) + return -ENOTTY; + + switch (cmd->cmd) { + case V4L2_ENC_CMD_STOP: + return 0; + default: + return -EINVAL; + } +} + +int mvx_v4l2_vidioc_decoder_cmd(struct file *file, + void *priv, + struct v4l2_decoder_cmd *cmd) +{ + struct mvx_v4l2_session *vsession = file_to_session(file); + int ret; + + if (vsession->ext->is_encoder) + return -ENOTTY; + + MVX_SESSION_INFO(&vsession->session, "v4l2: decoder cmd: %u.", + cmd->cmd); + + ret = mutex_lock_interruptible(&vsession->mutex); + if (ret != 0) + return ret; + + switch (cmd->cmd) { + case V4L2_DEC_CMD_STOP: + ret = mvx_session_send_eos(&vsession->session); + break; + case V4L2_DEC_CMD_START: + ret = mvx_session_start(&vsession->session); + break; + default: + MVX_SESSION_WARN(&vsession->session, + "Unsupported command. cmd: %u.", cmd->cmd); + ret = -EINVAL; + } + + mutex_unlock(&vsession->mutex); + + return ret; +} + +int mvx_v4l2_vidioc_try_decoder_cmd(struct file *file, + void *priv, + struct v4l2_decoder_cmd *cmd) +{ + struct mvx_v4l2_session *vsession = file_to_session(file); + + if (vsession->ext->is_encoder) + return -ENOTTY; + + switch (cmd->cmd) { + case V4L2_DEC_CMD_STOP: + cmd->stop.pts = 0; + break; + case V4L2_DEC_CMD_START: + cmd->start.speed = 0; + cmd->start.format = V4L2_DEC_START_FMT_NONE; + break; + default: + return -EINVAL; + } + + cmd->flags = 0; + + return 0; +} + +int mvx_v4l2_vidioc_reqbufs(struct file *file, + void *fh, + struct v4l2_requestbuffers *b) +{ + struct mvx_v4l2_session *vsession = file_to_session(file); + enum mvx_direction dir = V4L2_TYPE_IS_OUTPUT(b->type) ? 
+ MVX_DIR_INPUT : MVX_DIR_OUTPUT; + struct mvx_v4l2_port *vport = &vsession->port[dir]; + int ret; + + MVX_SESSION_INFO(&vsession->session, + "v4l2: Request buffers. dir=%d, type=%u, memory=%u, count=%u.", + dir, b->type, b->memory, b->count); + + ret = mutex_lock_interruptible(&vsession->mutex); + if (ret != 0) + return ret; + + if (b->count == 0) { + if (vport->q_set != false) { + vb2_queue_release(&vport->vb2_queue); + vport->q_set = false; + } + } else { + if (vport->q_set == false) { + /* Set buffer type in case of calling REQBUFS before S_FMT */ + vport->type = b->type; + ret = setup_vb2_queue(vport); + if (ret != 0) + goto unlock_mutex; + + vport->q_set = true; + } + + ret = vb2_reqbufs(&vport->vb2_queue, b); + } + vport->port->buffer_allocated = b->count; +unlock_mutex: + mutex_unlock(&vsession->mutex); + + return ret; +} + +int mvx_v4l2_vidioc_create_bufs(struct file *file, + void *fh, + struct v4l2_create_buffers *b) +{ + struct mvx_v4l2_session *vsession = file_to_session(file); + enum mvx_direction dir = V4L2_TYPE_IS_OUTPUT(b->format.type) ? + MVX_DIR_INPUT : MVX_DIR_OUTPUT; + struct mvx_v4l2_port *vport = &vsession->port[dir]; + int ret; + + MVX_SESSION_INFO(&vsession->session, + "v4l2: Create buffers. 
dir=%d, type=%u, memory=%u, count=%u.", + dir, b->format.type, b->memory, b->count); + + ret = mutex_lock_interruptible(&vsession->mutex); + if (ret != 0) + return ret; + + if (vport->q_set == false) + ret = setup_vb2_queue(vport); + + if (ret != 0) + goto unlock_mutex; + + vport->q_set = true; + + ret = vb2_create_bufs(&vport->vb2_queue, b); + vport->port->buffer_allocated += b->count; + MVX_SESSION_INFO(&vsession->session,"count =%d,buffer_allocated=%d",b->count,vport->port->buffer_allocated); +unlock_mutex: + mutex_unlock(&vsession->mutex); + + return ret; +} + +int mvx_v4l2_vidioc_querybuf(struct file *file, + void *fh, + struct v4l2_buffer *b) +{ + struct mvx_v4l2_session *vsession = file_to_session(file); + enum mvx_direction dir = V4L2_TYPE_IS_OUTPUT(b->type) ? + MVX_DIR_INPUT : MVX_DIR_OUTPUT; + struct mvx_v4l2_port *vport = &vsession->port[dir]; + int ret; + + MVX_SESSION_INFO(&vsession->session, + "v4l2: Query buffer. dir=%d, type=%u, memory=%u, index=%u.", + dir, b->type, b->memory, b->index); + + ret = mutex_lock_interruptible(&vsession->mutex); + if (ret != 0) + return ret; + + ret = vb2_querybuf(&vport->vb2_queue, b); + if (ret != 0) + goto unlock_mutex; + + /* + * When user space wants to mmap() a buffer, we have to be able to + * determine a direction of coresponding port. To make it easier we + * adjust mem_offset on output port by DST_QUEUE_OFF_BASE for all + * buffers. + */ + if (dir == MVX_DIR_OUTPUT) { + if (V4L2_TYPE_IS_MULTIPLANAR(b->type)) { + int i; + + for (i = 0; i < b->length; ++i) + b->m.planes[i].m.mem_offset += + DST_QUEUE_OFF_BASE; + } else { + b->m.offset += DST_QUEUE_OFF_BASE; + } + } + +unlock_mutex: + mutex_unlock(&vsession->mutex); + + return ret; +} + +int mvx_v4l2_vidioc_qbuf(struct file *file, + void *fh, + struct v4l2_buffer *b) +{ + struct mvx_v4l2_session *vsession = file_to_session(file); + enum mvx_direction dir = V4L2_TYPE_IS_OUTPUT(b->type) ? 
+ MVX_DIR_INPUT : MVX_DIR_OUTPUT; + struct mvx_v4l2_port *vport = &vsession->port[dir]; + struct mvx_v4l2_buffer *vbuf; + struct mvx_buffer *buf; + struct vb2_buffer *vb; + struct v4l2_core_buffer_header_general *v4l2_general; + int ret; + + MVX_SESSION_INFO(&vsession->session, + "v4l2: Queue buffer. dir=%d, type=%u, index=%u, flags=0x%x.", + dir, b->type, b->index, b->flags); + + ret = mutex_lock_interruptible(&vsession->mutex); + if (ret != 0) { + MVX_SESSION_WARN(&vsession->session, "v4l2: Queue buffer. Get lock failed."); + return -EAGAIN; + } + + if ((b->flags & V4L2_BUF_FLAG_MVX_BUFFER_GENERAL) == V4L2_BUF_FLAG_MVX_BUFFER_GENERAL ){ + vb = vport->vb2_queue.bufs[b->index]; + vbuf = vb2_to_mvx_v4l2_buffer(vb); + buf = &vbuf->buf; + v4l2_general = (struct v4l2_core_buffer_header_general *)&b->m.planes[0].reserved[0]; + buf->general.header.buffer_size = v4l2_general->buffer_size; + buf->general.header.config_size = v4l2_general->config_size; + buf->general.header.type = v4l2_general->type; + + if (v4l2_general->type == V4L2_BUFFER_GENERAL_TYPE_BLOCK_CONFIGS) { + memcpy(&buf->general.config.block_configs, &v4l2_general->config.config, sizeof(v4l2_general->config.config)); + MVX_SESSION_INFO(&vsession->session, + "v4l2: Queue buffer. type:%d, config size:%d, buffer size:%d, cfg_type:0x%x, cols and rows:%d, %d", + v4l2_general->type ,v4l2_general->config_size, v4l2_general->buffer_size, + v4l2_general->config.config.blk_cfg_type,v4l2_general->config.config.blk_cfgs.rows_uncomp.n_cols_minus1, + v4l2_general->config.config.blk_cfgs.rows_uncomp.n_rows_minus1); + } else { + MVX_SESSION_ERR(&vsession->session, + "v4l2: Queue buffer. Unknow general buffer type:%d", v4l2_general->type); + } + } + if (dir == MVX_DIR_INPUT && V4L2_TYPE_IS_MULTIPLANAR(b->type)) { + vb = vport->vb2_queue.bufs[b->index]; + vbuf = vb2_to_mvx_v4l2_buffer(vb); + buf = &vbuf->buf; + if (b->reserved2 & V4L2_BUF_FLAG_MVX_MINIFRAME) { + //this is miniframe encode mode. 
+ buf->offset = b->m.planes[0].reserved[10]; + } else { + buf->offset = 0; + } + if (b->reserved2 & V4L2_BUF_FLAG_MVX_OSD_MASK) { + buf->flags |= b->reserved2 & V4L2_BUF_FLAG_MVX_OSD_1 ? MVX_BUFFER_FRAME_FLAG_OSD_1 : 0; + buf->flags |= b->reserved2 & V4L2_BUF_FLAG_MVX_OSD_2 ? MVX_BUFFER_FRAME_FLAG_OSD_2 : 0; + } else { + buf->flags &= ~MVX_BUFFER_FRAME_FLAG_OSD_MASK; + } + } + ret = vb2_qbuf(&vport->vb2_queue, NULL, b); + if (ret != 0) + MVX_SESSION_VERBOSE(&vsession->session, "v4l2: Queue buffer. vb2_qbuf() failed, dir=%d, ret=%d", + dir, ret); + mutex_unlock(&vsession->mutex); + + return ret; +} + +int mvx_v4l2_vidioc_dqbuf(struct file *file, + void *fh, + struct v4l2_buffer *b) +{ + struct mvx_v4l2_session *vsession = file_to_session(file); + struct mvx_ext_if *ctx = vsession->ext; + enum mvx_direction dir = V4L2_TYPE_IS_OUTPUT(b->type) ? + MVX_DIR_INPUT : MVX_DIR_OUTPUT; + struct mvx_v4l2_port *vport = &vsession->port[dir]; + struct vb2_buffer *vb; + struct mvx_v4l2_buffer *vbuf; + struct mvx_buffer *buf; + int ret; + uint32_t i; + + MVX_SESSION_INFO(&vsession->session, + "v4l2: Dequeue buffer. dir=%d, type=%u.", + dir, b->type); + + ret = mutex_lock_interruptible(&vsession->mutex); + if (ret != 0) { + MVX_SESSION_WARN(&vsession->session, "v4l2: Dequeue buffer. Get lock failed."); + return -EAGAIN; + } + + ret = vb2_dqbuf(&vport->vb2_queue, b, file->f_flags & O_NONBLOCK); + if (ret != 0) { + MVX_SESSION_VERBOSE(&vsession->session, "v4l2: Dequeue buffer. vb2_dqbuf() failed, dir=%d, ret=%d", + dir, ret); + goto unlock_mutex; + } + + if ((dir == MVX_DIR_OUTPUT) && (b->flags & V4L2_BUF_FLAG_LAST)) { + const struct v4l2_event event = { + .type = V4L2_EVENT_EOS + }; + v4l2_event_queue_fh(&vsession->fh, &event); + } + + /* + * For single planar buffers there is no data offset. Instead the + * offset is added to the memory pointer and subtraced from the + * bytesused. 
+ */ + vb = vport->vb2_queue.bufs[b->index]; + if (V4L2_TYPE_IS_MULTIPLANAR(vb->type) == false) { + b->bytesused -= vb->planes[0].data_offset; + + switch (vb->type) { + case V4L2_MEMORY_MMAP: + b->m.offset += vb->planes[0].data_offset; + break; + case V4L2_MEMORY_USERPTR: + b->m.userptr += vb->planes[0].data_offset; + break; + default: + break; + } + } + + if (dir == MVX_DIR_OUTPUT && !V4L2_TYPE_IS_MULTIPLANAR(b->type)) { + vbuf = vb2_to_mvx_v4l2_buffer(vb); + buf = &vbuf->buf; + b->reserved2 = 0; + b->reserved2 = (buf->frame_type << 24) | (buf->src_transform << 16) | (buf->bitstream_remaining_kb); + } + if (vsession->port[MVX_DIR_INPUT].port->format <= MVX_FORMAT_BITSTREAM_LAST && + dir == MVX_DIR_OUTPUT && V4L2_TYPE_IS_MULTIPLANAR(b->type)) { + vbuf = vb2_to_mvx_v4l2_buffer(vb); + buf = &vbuf->buf; + b->reserved2 = 0; + b->reserved2 = (buf->width << 16) | (buf->height); + for (i = 0; i < b->length; i++) + { + b->m.planes[i].reserved[0] = buf->planes[i].stride; + } + } + +unlock_mutex: + mutex_unlock(&vsession->mutex); + + MVX_SESSION_INFO(&vsession->session, + "v4l2: Dequeued buffer ret=%d. dir=%d, type=%u, index=%u, flags=0x%x, nevents=%u, fh=%px.", + ret, dir, b->type, b->index, b->flags, + v4l2_event_pending(&vsession->fh), fh); + + return ret; +} + +int mvx_v4l2_vidioc_expbuf(struct file *file, + void *fh, + struct v4l2_exportbuffer *b) +{ + struct mvx_v4l2_session *vsession = file_to_session(file); + enum mvx_direction dir = V4L2_TYPE_IS_OUTPUT(b->type) ? + MVX_DIR_INPUT : MVX_DIR_OUTPUT; + struct mvx_v4l2_port *vport = &vsession->port[dir]; + int ret; + + MVX_SESSION_INFO(&vsession->session, + "v4l2: Export buffer. 
dir=%d, type=%u, index=%u, plane=%u.", + dir, b->type, b->index, b->plane); + + ret = mutex_lock_interruptible(&vsession->mutex); + if (ret != 0) + return ret; + + ret = vb2_expbuf(&vport->vb2_queue, b); + + mutex_unlock(&vsession->mutex); + + return ret; +} + +int mvx_v4l2_vidioc_subscribe_event(struct v4l2_fh *fh, + const struct v4l2_event_subscription *sub) +{ + struct mvx_v4l2_session *session = v4l2_fh_to_session(fh); + + MVX_SESSION_INFO(&session->session, + "v4l2: Subscribe event. fh=%px, type=%u.", fh, + sub->type); + + switch (sub->type) { + case V4L2_EVENT_CTRL: + return v4l2_ctrl_subscribe_event(fh, sub); + case V4L2_EVENT_EOS: + case V4L2_EVENT_SOURCE_CHANGE: + return v4l2_event_subscribe(fh, sub, 2, NULL); + default: + MVX_SESSION_WARN(&session->session, + "Can't register for unsupported event. type=%u.", + sub->type); + return -EINVAL; + } + + return 0; +} + +long mvx_v4l2_vidioc_default(struct file *file, + void *fh, + bool valid_prio, + unsigned int cmd, + void *arg) +{ + struct mvx_v4l2_session *vsession = file_to_session(file); + int ret; + MVX_SESSION_INFO(&vsession->session, + "Custom ioctl. 
cmd=0x%x, arg=0x%px.", cmd, arg); + + if (mutex_lock_interruptible(&vsession->mutex) != 0) + return -EINTR; + + switch (cmd) { + case VIDIOC_S_MVX_ROI_REGIONS: { + ret = mvx_v4l2_session_set_roi_regions(vsession, arg); + break; + } + case VIDIOC_S_MVX_QP_EPR: { + ret = mvx_v4l2_session_set_qp_epr(vsession, arg); + break; + } + case VIDIOC_S_MVX_SEI_USERDATA: { + ret = mvx_v4l2_session_set_sei_userdata(vsession, arg); + break; + } + case VIDIOC_S_MVX_DSL_RATIO: { + ret = mvx_v4l2_session_set_dsl_ratio(vsession, arg); + break; + } + case VIDIOC_S_MVX_LONG_TERM_REF: { + ret = mvx_v4l2_session_set_long_term_ref(vsession, arg); + break; + } + case VIDIOC_S_MVX_DSL_MODE: { + ret = mvx_v4l2_session_set_dsl_mode(vsession, arg); + break; + } + case VIDIOC_S_MVX_MINI_FRAME_CNT: { + ret = mvx_v4l2_session_set_mini_frame_cnt(vsession, arg); + break; + } + case VIDIOC_S_MVX_STATS_MODE: { + ret = mvx_v4l2_session_set_stats_mode(vsession, arg); + break; + } + case VIDIOC_S_MVX_CHR_CFG: { + ret = mvx_v4l2_session_set_chr_cfg(vsession, arg); + break; + } + case VIDIOC_S_MVX_HUFF_TABLE: { + ret = mvx_v4l2_session_set_huff_table(vsession, arg); + break; + } + case VIDIOC_S_MVX_SEAMLESS_TARGET: { + ret = mvx_v4l2_session_set_seamless_target(vsession, arg); + break; + } + case VIDIOC_S_MVX_COLOR_CONV_COEF: { + ret = mvx_v4l2_session_set_color_conv_coef(vsession, arg); + break; + } + case VIDIOC_S_MVX_RGB2YUV_COLOR_CONV_COEF: { + ret = mvx_v4l2_session_set_rgb_conv_yuv_coef(vsession, arg); + break; + } + case VIDIOC_S_MVX_OSD_CONFIG: { + ret = mvx_v4l2_session_set_osd_config(vsession, arg); + break; + } + case VIDIOC_S_MVX_OSD_INFO: { + struct v4l2_osd_info* osd_info = (struct v4l2_osd_info*)arg; + enum mvx_format osd_fmt[MVX_MAX_FRAME_OSD_REGION]; + int i; + struct mvx_session_format_map* osd_map; + for (i = 0; i < MVX_MAX_FRAME_OSD_REGION; i++) { + osd_map = mvx_session_find_format(osd_info->inputFormat_osd[i]); + osd_fmt[i] = osd_map->format; + } + ret = 
mvx_v4l2_session_set_osd_info(vsession, arg, osd_fmt); + break; + } + case VIDIOC_S_MVX_ENC_LAMBDA_SCALE: { + ret = mvx_v4l2_session_set_enc_lambda_scale(vsession, arg); + break; + } + default: + MVX_LOG_PRINT(&mvx_log_if, MVX_LOG_WARNING, + "Unsupported IOCTL. cmd=0x%x", cmd); + ret = -ENOTTY; + } + + mutex_unlock(&vsession->mutex); + return ret; +} diff --git a/drivers/media/platform/cix/cix_vpu/if/v4l2/mvx_v4l2_vidioc.h b/drivers/media/platform/cix/cix_vpu/if/v4l2/mvx_v4l2_vidioc.h new file mode 100755 index 000000000000..8580e32a100b --- /dev/null +++ b/drivers/media/platform/cix/cix_vpu/if/v4l2/mvx_v4l2_vidioc.h @@ -0,0 +1,157 @@ +/* + * The confidential and proprietary information contained in this file may + * only be used by a person authorised under and to the extent permitted + * by a subsisting licensing agreement from Arm Technology (China) Co., Ltd. + * + * (C) COPYRIGHT 2021-2021 Arm Technology (China) Co., Ltd. + * ALL RIGHTS RESERVED + * + * This entire notice must be reproduced on all copies of this file + * and copies of this file may only be made by a person if such person is + * permitted to do so under the terms of a subsisting license agreement + * from Arm Technology (China) Co., Ltd. + * + * SPDX-License-Identifier: GPL-2.0-only + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + * + */ + +#ifndef _MVX_V4L2_VIDIOC_H_ +#define _MVX_V4L2_VIDIOC_H_ + +/**************************************************************************** + * Exported functions + * + * Callbacks for struct v4l2_ioctl_ops. + * + * Prototypes declared bellow implement certain v4l2 ioctls and used to + * initialize members of v4l2_ioctl_ops structure. + ****************************************************************************/ + +int mvx_v4l2_vidioc_querycap(struct file *file, + void *fh, + struct v4l2_capability *cap); + +int mvx_v4l2_vidioc_enum_fmt_vid_cap(struct file *file, + void *fh, + struct v4l2_fmtdesc *f); + +int mvx_v4l2_vidioc_enum_fmt_vid_out(struct file *file, + void *fh, + struct v4l2_fmtdesc *f); + +int mvx_v4l2_vidioc_enum_framesizes(struct file *file, + void *fh, + struct v4l2_frmsizeenum *fsize); + +int mvx_v4l2_vidioc_g_fmt_vid_cap(struct file *file, + void *fh, + struct v4l2_format *f); + +int mvx_v4l2_vidioc_g_fmt_vid_out(struct file *file, + void *fh, + struct v4l2_format *f); + +int mvx_v4l2_vidioc_s_fmt_vid_cap(struct file *file, + void *fh, + struct v4l2_format *f); + +int mvx_v4l2_vidioc_s_fmt_vid_out(struct file *file, + void *fh, + struct v4l2_format *f); + +int mvx_v4l2_vidioc_try_fmt_vid_cap(struct file *file, + void *fh, + struct v4l2_format *f); + +int mvx_v4l2_vidioc_try_fmt_vid_out(struct file *file, + void *fh, + struct v4l2_format *f); + +int mvx_v4l2_vidioc_g_selection(struct file *file, + void *fh, + struct v4l2_selection *s); + +int mvx_v4l2_vidioc_s_selection(struct file *file, + void *fh, + struct v4l2_selection *s); + +int mvx_v4l2_vidioc_g_parm(struct file *file, + void *fh, + struct v4l2_streamparm *a); + +int mvx_v4l2_vidioc_s_parm(struct file *file, + void *fh, + struct v4l2_streamparm *a); + +int 
mvx_v4l2_vidioc_streamon(struct file *file, + void *priv, + enum v4l2_buf_type type); + +int mvx_v4l2_vidioc_streamoff(struct file *file, + void *priv, + enum v4l2_buf_type type); + +int mvx_v4l2_vidioc_encoder_cmd(struct file *file, + void *priv, + struct v4l2_encoder_cmd *cmd); + +int mvx_v4l2_vidioc_try_encoder_cmd(struct file *file, + void *priv, + struct v4l2_encoder_cmd *cmd); + +int mvx_v4l2_vidioc_decoder_cmd(struct file *file, + void *priv, + struct v4l2_decoder_cmd *cmd); + +int mvx_v4l2_vidioc_try_decoder_cmd(struct file *file, + void *priv, + struct v4l2_decoder_cmd *cmd); + +int mvx_v4l2_vidioc_reqbufs(struct file *file, + void *fh, + struct v4l2_requestbuffers *b); + +int mvx_v4l2_vidioc_create_bufs(struct file *file, + void *fh, + struct v4l2_create_buffers *b); + +int mvx_v4l2_vidioc_querybuf(struct file *file, + void *fh, + struct v4l2_buffer *b); + +int mvx_v4l2_vidioc_qbuf(struct file *file, + void *fh, + struct v4l2_buffer *b); + +int mvx_v4l2_vidioc_dqbuf(struct file *file, + void *fh, + struct v4l2_buffer *b); + +int mvx_v4l2_vidioc_expbuf(struct file *file, + void *fh, + struct v4l2_exportbuffer *b); + +int mvx_v4l2_vidioc_subscribe_event(struct v4l2_fh *fh, + const struct v4l2_event_subscription *sub); + +long mvx_v4l2_vidioc_default(struct file *file, + void *fh, + bool valid_prio, + unsigned int cmd, + void *arg); + +#endif /* _MVX_V4L2_VIDIOC_H_ */ diff --git a/drivers/media/platform/cix/cix_vpu/linux/mvx-v4l2-controls.h b/drivers/media/platform/cix/cix_vpu/linux/mvx-v4l2-controls.h new file mode 100755 index 000000000000..00701b964e64 --- /dev/null +++ b/drivers/media/platform/cix/cix_vpu/linux/mvx-v4l2-controls.h @@ -0,0 +1,661 @@ +/* + * The confidential and proprietary information contained in this file may + * only be used by a person authorised under and to the extent permitted + * by a subsisting licensing agreement from Arm Technology (China) Co., Ltd. + * + * (C) COPYRIGHT 2021-2021 Arm Technology (China) Co., Ltd. 
+ * ALL RIGHTS RESERVED + * + * This entire notice must be reproduced on all copies of this file + * and copies of this file may only be made by a person if such person is + * permitted to do so under the terms of a subsisting license agreement + * from Arm Technology (China) Co., Ltd. + * + * SPDX-License-Identifier: GPL-2.0-only + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ * + */ + +#ifndef _MVX_V4L2_CONTROLS_H_ +#define _MVX_V4L2_CONTROLS_H_ + +/**************************************************************************** + * Includes + ****************************************************************************/ + +#include +#include + +/**************************************************************************** + * Pixel formats + ****************************************************************************/ + +#define V4L2_PIX_FMT_YUV420_AFBC_8 v4l2_fourcc('Y', '0', 'A', '8') +#define V4L2_PIX_FMT_YUV420_AFBC_10 v4l2_fourcc('Y', '0', 'A', 'A') +#define V4L2_PIX_FMT_YUV422_AFBC_8 v4l2_fourcc('Y', '2', 'A', '8') +#define V4L2_PIX_FMT_YUV422_AFBC_10 v4l2_fourcc('Y', '2', 'A', 'A') +#define V4L2_PIX_FMT_Y_AFBC_8 v4l2_fourcc('Y', 'X', 'A', '8') +#define V4L2_PIX_FMT_Y_AFBC_10 v4l2_fourcc('Y', 'X', 'A', 'A') +#define V4L2_PIX_FMT_Y210 v4l2_fourcc('Y', '2', '1', '0') +#ifndef V4L2_PIX_FMT_P010 +#define V4L2_PIX_FMT_P010 v4l2_fourcc('P', '0', '1', '0') +#endif +#define V4L2_PIX_FMT_P010M v4l2_fourcc('M', '0', '1', '0') +#define V4L2_PIX_FMT_Y0L2 v4l2_fourcc('Y', '0', 'Y', 'L') +#define V4L2_PIX_FMT_RGB_3P v4l2_fourcc('R', 'G', 'B', 'M') + + +#define V4L2_PIX_FMT_Y10_LE v4l2_fourcc('Y', '1', '0', 'L') +#define V4L2_PIX_FMT_YUV444_10 v4l2_fourcc('Y', '4', 'P', '3') +#define V4L2_PIX_FMT_YUV422_1P_10 v4l2_fourcc('Y', '2', 'P', '1') +#define V4L2_PIX_FMT_YUV420_2P_10 v4l2_fourcc('Y', '0', 'P', '2') +#define V4L2_PIX_FMT_YUV420_I420_10 v4l2_fourcc('Y', '0', 'P', '3') + + +#define V4L2_PIX_FMT_RV v4l2_fourcc('R', 'V', '0', '0') + +#ifndef V4L2_PIX_FMT_HEVC +#define V4L2_PIX_FMT_HEVC v4l2_fourcc('H', 'E', 'V', 'C') +#endif + +#ifndef V4L2_PIX_FMT_VP9 +#define V4L2_PIX_FMT_VP9 v4l2_fourcc('V', 'P', '9', '0') +#endif + +#ifndef V4L2_PIX_FMT_AV1 +#define V4L2_PIX_FMT_AV1 v4l2_fourcc('A', 'V', '0', '1') +#endif + +#define V4L2_PIX_FMT_AVS v4l2_fourcc('A', 'V', 'S', '1') +#define V4L2_PIX_FMT_AVS2 v4l2_fourcc('A', 'V', 'S', '2') + +#ifndef 
V4L2_CID_MPEG_VIDEO_HEVC_MIN_QP +#define V4L2_CID_MPEG_VIDEO_HEVC_MIN_QP (V4L2_CID_MPEG_BASE + 600) +#endif +#ifndef V4L2_CID_MPEG_VIDEO_HEVC_MAX_QP +#define V4L2_CID_MPEG_VIDEO_HEVC_MAX_QP (V4L2_CID_MPEG_BASE + 601) +#endif +#ifndef V4L2_CID_MPEG_VIDEO_HEVC_I_FRAME_QP +#define V4L2_CID_MPEG_VIDEO_HEVC_I_FRAME_QP (V4L2_CID_MPEG_BASE + 602) +#endif +#ifndef V4L2_CID_MPEG_VIDEO_HEVC_P_FRAME_QP +#define V4L2_CID_MPEG_VIDEO_HEVC_P_FRAME_QP (V4L2_CID_MPEG_BASE + 603) +#endif +#ifndef V4L2_CID_MPEG_VIDEO_HEVC_B_FRAME_QP +#define V4L2_CID_MPEG_VIDEO_HEVC_B_FRAME_QP (V4L2_CID_MPEG_BASE + 604) +#endif +/**************************************************************************** + * Buffers + * @see v4l2_buffer + ****************************************************************************/ + +/* + * Extended buffer flags. + */ +/* +#define V4L2_BUF_FLAG_MVX_DECODE_ONLY 0x01000000 +#define V4L2_BUF_FLAG_MVX_CODEC_CONFIG 0x02000000 +#define V4L2_BUF_FLAG_MVX_AFBC_TILED_HEADERS 0x10000000 +#define V4L2_BUF_FLAG_MVX_AFBC_TILED_BODY 0x20000000 +#define V4L2_BUF_FLAG_MVX_AFBC_32X8_SUPERBLOCK 0x40000000 +#define V4L2_BUF_FLAG_MVX_MASK 0xff000000 +#define V4L2_BUF_FLAG_END_OF_SUB_FRAME 0x04000000 +#define V4L2_BUF_FLAG_MVX_BUFFER_FRAME_PRESENT 0x08000000 +#define V4L2_BUF_FLAG_MVX_BUFFER_NEED_REALLOC 0x07000000 + + +#define V4L2_BUF_FRAME_FLAG_ROTATION_90 0x81000000 +#define V4L2_BUF_FRAME_FLAG_ROTATION_180 0x82000000 +#define V4L2_BUF_FRAME_FLAG_ROTATION_270 0x83000000 +#define V4L2_BUF_FRAME_FLAG_ROTATION_MASK 0x83000000 +#define V4L2_BUF_FRAME_FLAG_MIRROR_HORI 0x90000000 +#define V4L2_BUF_FRAME_FLAG_MIRROR_VERT 0xA0000000 +#define V4L2_BUF_FRAME_FLAG_MIRROR_MASK 0xB0000000 +#define V4L2_BUF_FRAME_FLAG_SCALING_2 0x84000000 +#define V4L2_BUF_FRAME_FLAG_SCALING_4 0x88000000 +#define V4L2_BUF_FRAME_FLAG_SCALING_MASK 0x8C000000 + +#define V4L2_BUF_FLAG_MVX_BUFFER_EPR 0xC0000000 +#define V4L2_BUF_FLAG_MVX_BUFFER_ROI 0x70000000 +*/ +//redefine these flags +/*use encode/decode 
frame/bitstream to update these flags*/ + +#define V4L2_BUF_FLAG_MVX_MASK 0xfff00000 + +//for decode frame flag +#define V4L2_BUF_FRAME_FLAG_ROTATION_90 0x01000000 /* Frame is rotated 90 degrees */ +#define V4L2_BUF_FRAME_FLAG_ROTATION_180 0x02000000 /* Frame is rotated 180 degrees */ +#define V4L2_BUF_FRAME_FLAG_ROTATION_270 0x03000000 /* Frame is rotated 270 degrees */ +#define V4L2_BUF_FRAME_FLAG_ROTATION_MASK 0x03000000 +#define V4L2_BUF_FRAME_FLAG_SCALING_2 0x04000000 /* Frame is scaled by half */ +#define V4L2_BUF_FRAME_FLAG_SCALING_4 0x08000000 /* Frame is scaled by quarter */ +#define V4L2_BUF_FRAME_FLAG_SCALING_MASK 0x0C000000 +#define V4L2_BUF_FLAG_MVX_BUFFER_FRAME_PRESENT 0x10000000 +#define V4L2_BUF_FLAG_MVX_BUFFER_NEED_REALLOC 0x20000000 +#define V4L2_BUF_FLAG_MVX_DECODE_ONLY 0x40000000 + +//for bitstream flag +#define V4L2_BUF_FLAG_MVX_CODEC_CONFIG 0xC1000000 +#define V4L2_BUF_FLAG_END_OF_SUB_FRAME 0xC2000000 + +//for encode frame flag +#define V4L2_BUF_FRAME_FLAG_MIRROR_HORI 0x01000000 +#define V4L2_BUF_FRAME_FLAG_MIRROR_VERT 0x02000000 +#define V4L2_BUF_FRAME_FLAG_MIRROR_MASK 0x03000000 +#define V4L2_BUF_ENCODE_FLAG_ROTATION_90 0x10000000 /* Frame is rotated 90 degrees */ +#define V4L2_BUF_ENCODE_FLAG_ROTATION_180 0x20000000 /* Frame is rotated 180 degrees */ +#define V4L2_BUF_ENCODE_FLAG_ROTATION_270 0x30000000 /* Frame is rotated 270 degrees */ +#define V4L2_BUF_ENCODE_FLAG_ROTATION_MASK 0x30000000 + +#define V4L2_BUF_FLAG_MVX_BUFFER_ROI 0x80000000 /* this buffer has a roi region */ +#define V4L2_BUF_FLAG_MVX_BUFFER_EPR 0x08000000 /* EPR buffer flag */ +#define V4L2_BUF_FLAG_MVX_BUFFER_GENERAL 0x08000000 +#define V4L2_BUF_FLAG_MVX_BUFFER_CHR 0x40000000 + +#define V4L2_BUF_FLAG_MVX_BUFFER_RESET_RC 0x00200000 +#define V4L2_BUF_FLAG_MVX_BUFFER_ENC_STATS 0x00400000 /* reset LTR */ + +//afbc flag +#define V4L2_BUF_FLAG_MVX_AFBC_TILED_HEADERS 0x01000000 +#define V4L2_BUF_FLAG_MVX_AFBC_TILED_BODY 0x02000000 +#define V4L2_BUF_FLAG_MVX_AFBC_32X8_SUPERBLOCK 
0x04000000 + +//for customeized flag, set to v4l2_buffer.reserved2 +#define V4L2_BUF_FLAG_MVX_MINIFRAME 0x00000001 +#define V4L2_BUF_FLAG_MVX_OSD_1 0x00000002 +#define V4L2_BUF_FLAG_MVX_OSD_2 0x00000004 +#define V4L2_BUF_FLAG_MVX_OSD_MASK 0x00000006 + +#define V4L2_MVX_MAX_FRAME_REGIONS 16 +#define V4L2_MAX_FRAME_OSD_REGION 2 +enum v4l2_mvx_range { + V4L2_MVX_RANGE_UNSPECIFIED, + V4L2_MVX_RANGE_FULL, + V4L2_MVX_RANGE_LIMITED +}; + +enum v4l2_mvx_primaries { + V4L2_MVX_PRIMARIES_UNSPECIFIED, + V4L2_MVX_PRIMARIES_BT709, /* Rec.ITU-R BT.709 */ + V4L2_MVX_PRIMARIES_BT470M, /* Rec.ITU-R BT.470 System M */ + V4L2_MVX_PRIMARIES_BT601_625, /* Rec.ITU-R BT.601 625 */ + V4L2_MVX_PRIMARIES_BT601_525, /* Rec.ITU-R BT.601 525 */ + V4L2_MVX_PRIMARIES_GENERIC_FILM, /* Generic Film */ + V4L2_MVX_PRIMARIES_BT2020 /* Rec.ITU-R BT.2020 */ +}; + +enum v4l2_mvx_transfer { + V4L2_MVX_TRANSFER_UNSPECIFIED, + V4L2_MVX_TRANSFER_LINEAR, /* Linear transfer characteristics */ + V4L2_MVX_TRANSFER_SRGB, /* sRGB */ + V4L2_MVX_TRANSFER_SMPTE170M, /* SMPTE 170M */ + V4L2_MVX_TRANSFER_GAMMA22, /* Assumed display gamma 2.2 */ + V4L2_MVX_TRANSFER_GAMMA28, /* Assumed display gamma 2.8 */ + V4L2_MVX_TRANSFER_ST2084, /* SMPTE ST 2084 */ + V4L2_MVX_TRANSFER_HLG, /* ARIB STD-B67 hybrid-log-gamma */ + V4L2_MVX_TRANSFER_SMPTE240M, /* SMPTE 240M */ + V4L2_MVX_TRANSFER_XVYCC, /* IEC 61966-2-4 */ + V4L2_MVX_TRANSFER_BT1361, /* Rec.ITU-R BT.1361 extended gamut */ + V4L2_MVX_TRANSFER_ST428 /* SMPTE ST 428-1 */ +}; + +enum v4l2_mvx_matrix { + V4L2_MVX_MATRIX_UNSPECIFIED, + V4L2_MVX_MATRIX_BT709, /* Rec.ITU-R BT.709 */ + V4L2_MVX_MATRIX_BT470M, /* KR=0.30, KB=0.11 */ + V4L2_MVX_MATRIX_BT601, /* Rec.ITU-R BT.601 625 */ + V4L2_MVX_MATRIX_SMPTE240M, /* SMPTE 240M or equivalent */ + V4L2_MVX_MATRIX_BT2020, /* Rec.ITU-R BT.2020 non-const lum */ + V4L2_MVX_MATRIX_BT2020Constant /* Rec.ITU-R BT.2020 constant lum */ +}; + +enum v4l2_nalu_format { + V4L2_OPT_NALU_FORMAT_UNDERFINED, + V4L2_OPT_NALU_FORMAT_START_CODES, + 
V4L2_OPT_NALU_FORMAT_ONE_NALU_PER_BUFFER, + V4L2_OPT_NALU_FORMAT_ONE_BYTE_LENGTH_FIELD, + V4L2_OPT_NALU_FORMAT_TWO_BYTE_LENGTH_FIELD, + V4L2_OPT_NALU_FORMAT_FOUR_BYTE_LENGTH_FIELD, + V4L2_OPT_NALU_FORMAT_ONE_FRAME_PER_BUFFER +}; + +enum { + V4L2_MPEG_VIDEO_BITRATE_MODE_STANDARD = V4L2_MPEG_VIDEO_BITRATE_MODE_CBR + 1, + V4L2_MPEG_VIDEO_BITRATE_MODE_CVBR = V4L2_MPEG_VIDEO_BITRATE_MODE_CBR + 2, +}; + +enum v4l2_mvx_gop_type { + V4L2_MVX_GOP_NONE, + V4L2_MVX_GOP_BIDI, + V4L2_MVX_GOP_LOWDELAY, + V4L2_MVX_GOP_PIRAMID, + V4L2_MVX_GOP_SVCT3, + V4L2_MVX_GOP_GDR, +}; + +struct v4l2_mvx_primary { + unsigned short x; + unsigned short y; +}; + +struct v4l2_buffer_param_region +{ + unsigned short mbx_left; /**< X coordinate of the left most macroblock */ + unsigned short mbx_right; /**< X coordinate of the right most macroblock */ + unsigned short mby_top; /**< Y coordinate of the top most macroblock */ + unsigned short mby_bottom; /**< Y coordinate of the bottom most macroblock */ + short qp_delta; /**< QP delta value. This region will be encoded + * with qp = qp_default + qp_delta. 
*/ + unsigned short prio; + unsigned short force_intra; +}; + +struct v4l2_mvx_roi_regions +{ + unsigned int pic_index; + unsigned char qp_present; + unsigned char qp; + unsigned char roi_present; + unsigned char num_roi; + struct v4l2_buffer_param_region roi[V4L2_MVX_MAX_FRAME_REGIONS]; +}; + +struct v4l2_sei_user_data +{ + unsigned char flags; + #define V4L2_BUFFER_PARAM_USER_DATA_UNREGISTERED_VALID (1) + unsigned char uuid[16]; + char user_data[256 - 35]; + unsigned char user_data_len; +}; + +struct v4l2_mvx_dsl_ratio +{ + unsigned int hor; + unsigned int ver; +}; + +struct v4l2_mvx_long_term_ref +{ + unsigned int mode; + unsigned int period; +}; + +struct v4l2_buffer_param_rectangle +{ + unsigned short x_left; /* pixel x left edge (inclusive) */ + unsigned short x_right; /* pixel x right edge (exclusive) */ + unsigned short y_top; /* pixel y top edge (inclusive) */ + unsigned short y_bottom; /* pixel y bottom edge (exclusive) */ +}; + +/* input for encoder, + * indicate which parts of the source picture has changed. + * The encoder can (optionally) use this information to + * reduce memory bandwidth. + * + * n_rectangles=0 indicates the source picture is unchanged. + * + * This parameter only applies to the picture that immediately + * follows (and not to subsequent ones). 
+ */ +struct v4l2_mvx_chr_config +{ + unsigned int pic_index; + unsigned int num_chr; + #define V4L2_MAX_FRAME_CHANGE_RECTANGLES 2 + struct v4l2_buffer_param_rectangle rectangle[V4L2_MAX_FRAME_CHANGE_RECTANGLES]; +}; + +struct v4l2_mvx_osd_cfg +{ + __u8 osd_inside_enable; + __u8 osd_inside_alpha_enable; + __u8 osd_inside_convert_color_enable; + __u8 osd_inside_alpha_value; /* as alpha range [0~16], use u8 */ + __u8 osd_inside_convert_color_threshold;/* threshold range [0~255], if input is 10bit, th * 4 */ + __u8 osd_inside_rgb2yuv_mode;/* 0-601L, 1-601F, 2-709_L, 3-709_F */ + __u16 osd_inside_start_x; /* pixel x left edge (inclusive) */ + __u16 osd_inside_start_y; /* pixel y top edge (inclusive) */ + __u16 reserved[3]; +}; + +struct v4l2_osd_config +{ + unsigned int pic_index; + unsigned int num_osd; + struct v4l2_mvx_osd_cfg osd_single_cfg[V4L2_MAX_FRAME_OSD_REGION];/* include single osd region config and index */ +}; + +struct v4l2_osd_info +{ + __u32 width_osd[V4L2_MAX_FRAME_OSD_REGION]; + __u32 height_osd[V4L2_MAX_FRAME_OSD_REGION]; + __u32 inputFormat_osd[V4L2_MAX_FRAME_OSD_REGION]; +}; + +/**************************************************************************** + * Custom IOCTL + ****************************************************************************/ + +#define VIDIOC_S_MVX_ROI_REGIONS _IOWR('V', BASE_VIDIOC_PRIVATE + 1, \ + struct v4l2_mvx_roi_regions) +#define VIDIOC_S_MVX_QP_EPR _IOWR('V', BASE_VIDIOC_PRIVATE + 2, \ + struct v4l2_buffer_param_qp) +#define VIDIOC_S_MVX_SEI_USERDATA _IOWR('V', BASE_VIDIOC_PRIVATE + 4, \ + struct v4l2_sei_user_data) +#define VIDIOC_S_MVX_DSL_RATIO _IOWR('V', BASE_VIDIOC_PRIVATE + 7, \ + struct v4l2_mvx_dsl_ratio) +#define VIDIOC_S_MVX_LONG_TERM_REF _IOWR('V', BASE_VIDIOC_PRIVATE + 8, \ + struct v4l2_mvx_long_term_ref) +#define VIDIOC_S_MVX_DSL_MODE _IOWR('V', BASE_VIDIOC_PRIVATE + 9, \ + int) +#define VIDIOC_S_MVX_MINI_FRAME_CNT _IOWR('V', BASE_VIDIOC_PRIVATE + 10, \ + int) +#define VIDIOC_S_MVX_STATS_MODE 
_IOWR('V', BASE_VIDIOC_PRIVATE + 11, \ + struct v4l2_buffer_param_enc_stats) +#define VIDIOC_S_MVX_CHR_CFG _IOWR('V', BASE_VIDIOC_PRIVATE + 12, \ + struct v4l2_mvx_chr_config) +#define VIDIOC_S_MVX_HUFF_TABLE _IOWR('V', BASE_VIDIOC_PRIVATE + 13, \ + struct v4l2_mvx_huff_table) +#define VIDIOC_S_MVX_SEAMLESS_TARGET _IOWR('V', BASE_VIDIOC_PRIVATE + 14, \ + struct v4l2_mvx_seamless_target) +#define VIDIOC_S_MVX_COLOR_CONV_COEF _IOWR('V', BASE_VIDIOC_PRIVATE + 15, \ + struct v4l2_mvx_color_conv_coef) +#define VIDIOC_S_MVX_RGB2YUV_COLOR_CONV_COEF _IOWR('V', BASE_VIDIOC_PRIVATE + 18, \ + struct v4l2_mvx_rgb2yuv_color_conv_coef) +#define VIDIOC_S_MVX_OSD_CONFIG _IOWR('V', BASE_VIDIOC_PRIVATE + 19, \ + struct v4l2_osd_config) +#define VIDIOC_S_MVX_OSD_INFO _IOWR('V', BASE_VIDIOC_PRIVATE + 20, \ + struct v4l2_osd_info) +#define VIDIOC_S_MVX_ENC_LAMBDA_SCALE _IOWR('V', BASE_VIDIOC_PRIVATE + 21, \ + struct v4l2_mvx_lambda_scale) + +/**************************************************************************** + * Custom controls + ****************************************************************************/ + +/* + * Video for Linux 2 custom controls. 
+ */ +/* V4L2_CTRL_CLASS_MPEG rename to V4L2_CTRL_CLASS_CODEC from kernel version v5.11 */ +#ifndef V4L2_CTRL_CLASS_MPEG +#define V4L2_CTRL_CLASS_MPEG V4L2_CTRL_CLASS_CODEC +#endif +enum v4l2_cid_mve_video { + V4L2_CID_MVE_VIDEO_AV1_PROFILE = V4L2_CTRL_CLASS_MPEG + 0x2000, + V4L2_CID_MVE_VIDEO_NALU_FORMAT, + V4L2_CID_MVE_VIDEO_STREAM_ESCAPING, + V4L2_CID_MVE_VIDEO_RESERVED, + V4L2_CID_MVE_VIDEO_VC1_PROFILE, + V4L2_CID_MVE_VIDEO_RESERVED2, + V4L2_CID_MVE_VIDEO_IGNORE_STREAM_HEADERS, + V4L2_CID_MVE_VIDEO_FRAME_REORDERING, + V4L2_CID_MVE_VIDEO_INTBUF_SIZE, + V4L2_CID_MVE_VIDEO_RESERVED3, + V4L2_CID_MVE_VIDEO_GOP_TYPE, + V4L2_CID_MVE_VIDEO_CONSTR_IPRED, + V4L2_CID_MVE_VIDEO_ENTROPY_SYNC, + V4L2_CID_MVE_VIDEO_TEMPORAL_MVP, + V4L2_CID_MVE_VIDEO_TILE_ROWS, + V4L2_CID_MVE_VIDEO_TILE_COLS, + V4L2_CID_MVE_VIDEO_MIN_LUMA_CB_SIZE, + V4L2_CID_MVE_VIDEO_MB_MASK, + V4L2_CID_MVE_VIDEO_VP9_PROB_UPDATE, + V4L2_CID_MVE_VIDEO_BITDEPTH_CHROMA, + V4L2_CID_MVE_VIDEO_BITDEPTH_LUMA, + V4L2_CID_MVE_VIDEO_FORCE_CHROMA_FORMAT, + V4L2_CID_MVE_VIDEO_RGB_TO_YUV_MODE, + V4L2_CID_MVE_VIDEO_BANDWIDTH_LIMIT, + V4L2_CID_MVE_VIDEO_CABAC_INIT_IDC, + V4L2_CID_MVE_VIDEO_VPX_B_FRAME_QP, + V4L2_CID_MVE_VIDEO_SECURE_VIDEO, + V4L2_CID_MVE_VIDEO_RESERVED5, + V4L2_CID_MVE_VIDEO_RESERVED6, + V4L2_CID_MVE_VIDEO_RESERVED7, + V4L2_CID_MVE_VIDEO_RESERVED8, + V4L2_CID_MVE_VIDEO_HRD_BUFFER_SIZE, + V4L2_CID_MVE_VIDEO_INIT_QP_I, + V4L2_CID_MVE_VIDEO_INIT_QP_P, + V4L2_CID_MVE_VIDEO_SAO_LUMA, + V4L2_CID_MVE_VIDEO_SAO_CHROMA, + V4L2_CID_MVE_VIDEO_QP_DELTA_I_P, + V4L2_CID_MVE_VIDEO_QP_REF_RB_EN, + V4L2_CID_MVE_VIDEO_RC_CLIP_TOP, + V4L2_CID_MVE_VIDEO_RC_CLIP_BOT, + V4L2_CID_MVE_VIDEO_QP_MAP_CLIP_TOP, + V4L2_CID_MVE_VIDEO_QP_MAP_CLIP_BOT, + V4L2_CID_MVE_VIDEO_MAX_QP_I, + V4L2_CID_MVE_VIDEO_MIN_QP_I, + V4L2_CID_MVE_VIDEO_VISIBLE_WIDTH, + V4L2_CID_MVE_VIDEO_VISIBLE_HEIGHT, + V4L2_CID_MVE_VIDEO_JPEG_QUALITY_LUMA, + V4L2_CID_MVE_VIDEO_JPEG_QUALITY_CHROMA, + V4L2_CID_MVE_VIDEO_RC_I_MODE, + V4L2_CID_MVE_VIDEO_RC_I_RATIO, + 
V4L2_CID_MVE_VIDEO_INTER_MED_BUF_SIZE, + V4L2_CID_MVE_VIDEO_SVCT3_LEVEL1_PERIOD, + V4L2_CID_MVE_VIDEO_GOP_RESET_PFRAMES, + V4L2_CID_MVE_VIDEO_LTR_RESET_PERIOD, + V4L2_CID_MVE_VIDEO_QP_FIXED, + V4L2_CID_MVE_VIDEO_GDR_NUMBER, + V4L2_CID_MVE_VIDEO_GDR_PERIOD, + V4L2_CID_MVE_VIDEO_MULIT_SPS_PPS, + V4L2_CID_MVE_VIDEO_ENABLE_VISUAL, + V4L2_CID_MVE_VIDEO_SCD_ENABLE, + V4L2_CID_MVE_VIDEO_SCD_PERCENT, + V4L2_CID_MVE_VIDEO_SCD_THRESHOLD, + V4L2_CID_MVE_VIDEO_AQ_SSIM_EN, + V4L2_CID_MVE_VIDEO_AQ_NEG_RATIO, + V4L2_CID_MVE_VIDEO_AQ_POS_RATIO, + V4L2_CID_MVE_VIDEO_AQ_QPDELTA_LMT, + V4L2_CID_MVE_VIDEO_AQ_INIT_FRM_AVG_SVAR, + V4L2_CID_MVE_VIDEO_COLOR_CONVERSION, + V4L2_CID_MVE_VIDEO_RGB2YUV_COLOR_CONV_COEF, + V4L2_CID_MVE_VIDEO_FORCED_UV_VALUE, + V4L2_CID_MVE_VIDEO_DSL_INTERP_MODE, + V4L2_CID_MVE_VIDEO_DISABLED_FEATURES, + V4L2_CID_MVE_VIDEO_ENABLE_ADAPTIVE_INTRA_BLOCK, + V4L2_CID_MVE_VIDEO_JOB_FRAMES, + V4L2_CID_MVE_VIDEO_MAX_BUFFERS_FOR_CAPTURE, + V4L2_CID_MVE_VIDEO_AV1_FSF, + V4L2_CID_MVE_VIDEO_PRIORITY, + V4L2_CID_MVE_VIDEO_ENC_INTRA_IPENALTY_ANGULAR, + V4L2_CID_MVE_VIDEO_ENC_INTRA_IPENALTY_PLANAR, + V4L2_CID_MVE_VIDEO_ENC_INTRA_IPENALTY_DC, + V4L2_CID_MVE_VIDEO_ENC_INTER_IPENALTY_ANGULAR, + V4L2_CID_MVE_VIDEO_ENC_INTER_IPENALTY_PLANAR, + V4L2_CID_MVE_VIDEO_ENC_INTER_IPENALTY_DC, +}; + +#define V4L2_SESSION_PRIORITY_PREEMPTION 0 +#define V4L2_SESSION_PRIORITY_HIGH 1 +#define V4L2_SESSION_PRIORITY_NORMAL 2 +#define V4L2_SESSION_PRIORITY_LOW 3 +#define V4L2_SESSION_PRIORITY_DEFAULT V4L2_SESSION_PRIORITY_NORMAL + +struct v4l2_buffer_param_enc_stats +{ + unsigned int mms_buffer_size; + unsigned int bitcost_buffer_size; + unsigned int qp_buffer_size; + unsigned int flags; + //ENC_STATS_FLAGS + #define V4L2_BUFFER_ENC_STATS_FLAG_MMS (1<<0) + #define V4L2_BUFFER_ENC_STATS_FLAG_BITCOST (1<<1) + #define V4L2_BUFFER_ENC_STATS_FLAG_QP (1<<2) + #define V4L2_BUFFER_ENC_STATS_FLAG_DROP (1<<3) + unsigned int pic_index_or_mb_size; +}; + + +/* block configuration uncompressed rows header. 
this configures the size of the + * uncompressed body. */ +struct v4l2_buffer_general_rows_uncomp_hdr +{ + unsigned char n_cols_minus1; /* number of quad cols in picture minus 1 */ + unsigned char n_rows_minus1; /* number of quad rows in picture minus 1 */ + unsigned char reserved[2]; +}; + +struct v4l2_buffer_general_block_configs +{ + unsigned char blk_cfg_type; + #define V4L2_BLOCK_CONFIGS_TYPE_NONE (0x00) + #define V4L2_BLOCK_CONFIGS_TYPE_ROW_UNCOMP (0xff) + unsigned char reserved[3]; + union + { + struct v4l2_buffer_general_rows_uncomp_hdr rows_uncomp; + } blk_cfgs; +}; + +/* input for encoder */ +struct v4l2_buffer_param_qp +{ + /* QP (quantization parameter) for encode. + * + * When used to set fixed QP for encode, with rate control + * disabled, then the valid ranges are: + * H264: 0-51 + * HEVC: 0-51 + * VP8: 0-63 + * VP9: 0-63 + * Note: The QP must be set separately for I, P and B frames. + * + * But when this message is used with the regions-feature, + * then the valid ranges are the internal bitstream ranges: + * H264: 0-51 + * HEVC: 0-51 + * VP8: 0-127 + * VP9: 0-255 + */ + int qp; + int epr_iframe_enable; +}; + +/* the block parameter record specifies the various properties of a quad */ +struct v4l2_block_param_record +{ + unsigned int qp_delta; + /* Bitset of four 4-bit QP delta values for a quad. + * For H.264 and HEVC these are qp delta values in the range -8 to +7. + * For Vp9 these are segment map values in the range 0 to 7. 
+ */ + #define V4L2_BLOCK_PARAM_RECORD_QP_DELTA_TOP_LEFT_16X16 (0) + #define V4L2_BLOCK_PARAM_RECORD_QP_DELTA_TOP_LEFT_16X16_SZ (6) + #define V4L2_BLOCK_PARAM_RECORD_QP_DELTA_TOP_RIGHT_16X16 (6) + #define V4L2_BLOCK_PARAM_RECORD_QP_DELTA_TOP_RIGHT_16X16_SZ (6) + #define V4L2_BLOCK_PARAM_RECORD_QP_DELTA_BOT_LEFT_16X16 (12) + #define V4L2_BLOCK_PARAM_RECORD_QP_DELTA_BOT_LEFT_16X16_SZ (6) + #define V4L2_BLOCK_PARAM_RECORD_QP_DELTA_BOT_RIGHT_16X16 (18) + #define V4L2_BLOCK_PARAM_RECORD_QP_DELTA_BOT_RIGHT_16X16_SZ (6) + + #define V4L2_BLOCK_PARAM_RECORD_QP_FORCE_FIELD (24) + #define V4L2_BLOCK_PARAM_RECORD_QP_FORCE_FIELD_SZ (5) + #define V4L2_BLOCK_PARAM_RECORD_QUAD_FORCE_INTRA (29) + #define V4L2_BLOCK_PARAM_RECORD_QUAD_FORCE_INTRA_SZ (1) + #define V4L2_BLOCK_PARAM_RECORD_QP_ABSOLUTE (30) + #define V4L2_BLOCK_PARAM_RECORD_QP_ABSOLUTE_SZ (1) + #define V4L2_BLOCK_PARAM_RECORD_QP_QUAD_SKIP (31) + #define V4L2_BLOCK_PARAM_RECORD_QP_QUAD_SKIP_SZ (1) + + #define V4L2_BLOCK_PARAM_RECORD_FORCE_NONE (0x00) + #define V4L2_BLOCK_PARAM_RECORD_FORCE_QP (0x01) + unsigned int min_qp; + #define V4L2_BLOCK_PARAM_RECORD_MIN_QP_TOP_LEFT_16X16 (0) + #define V4L2_BLOCK_PARAM_RECORD_MIN_QP_TOP_LEFT_16X16_SZ (6) + #define V4L2_BLOCK_PARAM_RECORD_MIN_QP_TOP_RIGHT_16X16 (6) + #define V4L2_BLOCK_PARAM_RECORD_MIN_QP_TOP_RIGHT_16X16_SZ (6) + #define V4L2_BLOCK_PARAM_RECORD_MIN_QP_BOT_LEFT_16X16 (12) + #define V4L2_BLOCK_PARAM_RECORD_MIN_QP_BOT_LEFT_16X16_SZ (6) + #define V4L2_BLOCK_PARAM_RECORD_MIN_QP_BOT_RIGHT_16X16 (18) + #define V4L2_BLOCK_PARAM_RECORD_MIN_QP_BOT_RIGHT_16X16_SZ (6) + +}; + +struct v4l2_buffer_general_rows_uncomp_body +{ + /* the size of this array is variable and not necessarily equal to 1. 
+ * therefore the sizeof operator should not be used + */ + struct v4l2_block_param_record bpr[1]; +}; + +struct v4l2_core_buffer_header_general +{ + //uint64_t user_data_tag; // User supplied tracking identifier + //uint64_t app_handle; // Host buffer handle number + unsigned short type; // type of config, value is one of V4L2_BUFFER_GENERAL_TYPE_X + #define V4L2_BUFFER_GENERAL_TYPE_BLOCK_CONFIGS (1) /* block_configs */ + #define V4L2_BUFFER_GENERAL_TYPE_ENCODER_STATS (3) /* encoder_stats */ + unsigned short config_size; // size of the configuration + unsigned int buffer_size; + union { + struct v4l2_buffer_general_block_configs config; + } config; +}; + +struct v4l2_mvx_huff_table +{ + unsigned int type; + #define V4L2_OPT_HUFFMAN_TABLE_DC_LUMA (1) + #define V4L2_OPT_HUFFMAN_TABLE_AC_LUMA (2) + #define V4L2_OPT_HUFFMAN_TABLE_DC_CHROMA (4) + #define V4L2_OPT_HUFFMAN_TABLE_AC_CHROMA (8) + unsigned char dc_luma_code_lenght[16]; + unsigned char ac_luma_code_lenght[16]; + unsigned char dc_chroma_code_lenght[16]; + unsigned char ac_chroma_code_lenght[16]; + unsigned char dc_luma_table[162]; + unsigned char ac_luma_table[162]; + unsigned char dc_chroma_table[162]; + unsigned char ac_chroma_table[162]; +}; +struct v4l2_mvx_seamless_target +{ + unsigned int seamless_mode; + unsigned int target_width; + unsigned int target_height; + unsigned int target_stride[3]; + unsigned int target_size[3]; +}; +struct v4l2_mvx_color_conv_coef +{ + short coef[3][3]; + unsigned short offset[3]; +}; + +struct v4l2_mvx_rgb2yuv_color_conv_coef +{ + short coef[3 * 3]; //coef[Y|U|V][R|G|B] + unsigned char luma_range[2]; + unsigned char chroma_range[2]; + unsigned char rgb_range[2]; +}; + +struct v4l2_mvx_lambda_scale{ + unsigned short lambda_scale_i_q8; + unsigned short lambda_scale_sqrt_i_q8; + unsigned short lambda_scale_p_q8; + unsigned short lambda_scale_sqrt_p_q8; + unsigned short lambda_scale_b_ref_q8; + unsigned short lambda_scale_sqrt_b_ref_q8; + unsigned short 
lambda_scale_b_nonref_q8; + unsigned short lambda_scale_sqrt_b_nonref_q8; +}; +#endif /* _MVX_V4L2_CONTROLS_H_ */ diff --git a/drivers/media/platform/cix/cix_vpu/mvx_driver.c b/drivers/media/platform/cix/cix_vpu/mvx_driver.c new file mode 100755 index 000000000000..1efee7f6efdf --- /dev/null +++ b/drivers/media/platform/cix/cix_vpu/mvx_driver.c @@ -0,0 +1,72 @@ +/* + * The confidential and proprietary information contained in this file may + * only be used by a person authorised under and to the extent permitted + * by a subsisting licensing agreement from Arm Technology (China) Co., Ltd. + * + * (C) COPYRIGHT 2021-2021 Arm Technology (China) Co., Ltd. + * ALL RIGHTS RESERVED + * + * This entire notice must be reproduced on all copies of this file + * and copies of this file may only be made by a person if such person is + * permitted to do so under the terms of a subsisting license agreement + * from Arm Technology (China) Co., Ltd. + * + * SPDX-License-Identifier: GPL-2.0-only + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ * + */ + +#include +#include +#include +#include "mvx_if.h" +#include "mvx_dev.h" +#include "mvx_log_group.h" + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("ARMChina"); +MODULE_DESCRIPTION("Tiube VPU Driver."); +MODULE_IMPORT_NS(VFS_internal_I_am_really_a_filesystem_and_am_NOT_a_driver); +MODULE_IMPORT_NS(DMA_BUF); + +static int __init mvx_init(void) +{ + int ret; + + ret = mvx_log_group_init("amvx"); + if (ret != 0) { + pr_err("Failed to create MVx driver logging.\n"); + return ret; + } + + ret = mvx_dev_init(); + if (ret != 0) { + pr_err("Failed to register MVx dev driver.\n"); + mvx_log_group_deinit(); + return ret; + } + + return 0; +} + +static void __exit mvx_exit(void) +{ + mvx_dev_exit(); + mvx_log_group_deinit(); +} + +module_init(mvx_init); +module_exit(mvx_exit); diff --git a/drivers/media/platform/cix/cix_vpu/mvx_log.c b/drivers/media/platform/cix/cix_vpu/mvx_log.c new file mode 100755 index 000000000000..472afc4aee01 --- /dev/null +++ b/drivers/media/platform/cix/cix_vpu/mvx_log.c @@ -0,0 +1,1184 @@ +/* + * The confidential and proprietary information contained in this file may + * only be used by a person authorised under and to the extent permitted + * by a subsisting licensing agreement from Arm Technology (China) Co., Ltd. + * + * (C) COPYRIGHT 2021-2021 Arm Technology (China) Co., Ltd. + * ALL RIGHTS RESERVED + * + * This entire notice must be reproduced on all copies of this file + * and copies of this file may only be made by a person if such person is + * permitted to do so under the terms of a subsisting license agreement + * from Arm Technology (China) Co., Ltd. + * + * SPDX-License-Identifier: GPL-2.0-only + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + * + */ + +/****************************************************************************** + * Includes + ******************************************************************************/ + +#include "mvx_log.h" +#include "mvx_log_ram.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +/****************************************************************************** + * External functions + ******************************************************************************/ +void mvx_sched_get_realtime_fps(struct list_head *sessions); + +/****************************************************************************** + * Defines + ******************************************************************************/ + +#ifndef UNUSED +#define UNUSED(x) (void)(x) +#endif /* UNUSED */ + +/****************************************************************************** + * Types + ******************************************************************************/ + +/****************************************************************************** + * Variables + ******************************************************************************/ + +#ifdef MVX_LOG_FTRACE_ENABLE + +/** + * Map severity to string. + */ +static const char *const severity_to_name[] = { + "Panic", + "Error", + "Warning", + "Info", + "Debug", + "Verbose" +}; +#endif /* MVX_LOG_FTRACE_ENABLE */ + +/** + * Map severity to kernel log level. 
+ */ +static const char *const severity_to_kern_level[] = { + KERN_EMERG, + KERN_ERR, + KERN_WARNING, + KERN_NOTICE, + KERN_INFO, + KERN_DEBUG +}; + +void mvx_log_get_util(struct timer_list *timer); + +/****************************************************************************** + * Static functions + ******************************************************************************/ + +/****************************************************************************** + * Log + * + * Directory i_node->i_private + * -------------------------------------------------------- + * mvx struct mvx_log * + * +-- group + * | +-- struct mvx_log_group * + * | +-- severity + * | +-- drain + * +-- drain + * +-- struct mvx_log_drain * + * + ******************************************************************************/ + +/** + * trim() - Trim of trailing new line. + * @str: Pointer to string. + */ +static void trim(char *str) +{ + size_t len = strlen(str); + + while (len-- > 0) { + if (str[len] != '\n') + break; + + str[len] = '\0'; + } +} + +/** + * lookup() - Search for child dentry with matching name. + * @parent: Pointer to parent dentry. + * @name: Name of dentry to look for. + * + * Return: Pointer to dentry, NULL if not found. + */ +static struct dentry *lookup(struct dentry *parent, + const char *name) +{ + struct dentry *child; + + /* Loop over directory entries in mvx/drain/. */ +#if (KERNEL_VERSION(3, 18, 0) <= LINUX_VERSION_CODE) + list_for_each_entry(child, &parent->d_subdirs, d_child) +#else + list_for_each_entry(child, &parent->d_subdirs, d_u.d_child) +#endif + { + if (strcmp(name, child->d_name.name) == 0) + return child; + } + + return NULL; +} + +/** + * get_inode_private() - Get inode private member of parent directory. + * @file: File pointer. + * @parent: Number of parent directories. + * + * Return: Inode private member, or NULL on error. 
+ */
+static void *get_inode_private(struct file *file,
+ int parent)
+{
+ struct dentry *d = file->f_path.dentry;
+
+ while (d != NULL && parent-- > 0)
+ d = d->d_parent;
+
+ if (d == NULL || d->d_inode == NULL)
+ return NULL;
+
+ return d->d_inode->i_private;
+}
+
+/**
+ * readme_read() - Read handle function for the README file. The
+ * function returns the usage instruction message.
+ * @file: File pointer.
+ * @user_buffer: The user space buffer that is read to.
+ * @count: The maximum number of bytes to read.
+ * @position: The current position in the buffer.
+ */
+static ssize_t readme_read(struct file *file,
+ char __user *user_buffer,
+ size_t count,
+ loff_t *position)
+{
+ static const char msg[] =
+ "LOG GROUPS\n"
+ "\n"
+ "The available log groups can be found under 'group'.\n"
+ "$ ls group\n"
+ "\n"
+ "SEVERITY LEVELS\n"
+ " 0 - Panic\n"
+ " 1 - Error\n"
+ " 2 - Warning\n"
+ " 3 - Info\n"
+ " 4 - Debug\n"
+ " 5 - Verbose\n"
+ "\n"
+ "The severity level for a log group can be read and set at runtime.\n"
+ "$ cat group/general/severity\n"
+ "$ echo 3 > group/general/severity\n";
+
+ return simple_read_from_buffer(user_buffer, count, position, msg,
+ sizeof(msg));
+}
+
+/**
+ * group_util_read() - Read handle function for mvx/group/<group>/utilization. The
+ * function returns current VPU utilization.
+ * @file: File pointer.
+ * @user_buffer: The user space buffer that is read to.
+ * @count: The maximum number of bytes to read.
+ * @position: The current position in the buffer.
+ */
+static ssize_t group_util_read(struct file *file,
+ char __user *user_buffer,
+ size_t count,
+ loff_t *position)
+{
+ /* File path mvx/group/<group>/utilization. 
*/
+ struct mvx_log_group *group = get_inode_private(file, 1);
+ char name[64];
+ size_t len;
+
+#if !MVX_USE_UTILIZATION_TIMER
+ mvx_log_get_util(NULL);
+#endif
+ if (group->enabled & MVX_LOG_PERF_UTILIZATION)
+ len = scnprintf(name, sizeof(name), "VPU Utilization: %d.%02d%%\n",
+ group->utilization / 100, group->utilization % 100);
+ else
+ len = scnprintf(name, sizeof(name), "VPU Performance Monitor is OFF\n");
+
+ return simple_read_from_buffer(user_buffer, count, position, name, len);
+}
+
+/**
+ * group_avgfps_read() - Read handle function for mvx/group/<group>/average_fps. The
+ * function returns average fps of sessions.
+ * @file: File pointer.
+ * @user_buffer: The user space buffer that is read to.
+ * @count: The maximum number of bytes to read.
+ * @position: The current position in the buffer.
+ */
+static ssize_t group_avgfps_read(struct file *file,
+ char __user *user_buffer,
+ size_t count,
+ loff_t *position)
+{
+ /* File path mvx/group/<group>/average_fps. */
+ struct mvx_log_group *group = get_inode_private(file, 1);
+ char *cache = group->avgfps + MVX_LOG_FPS_MSG_BUF_SIZE;
+ size_t len = 0;
+
+ mutex_lock(&group->mutex);
+ if (group->enabled & MVX_LOG_PERF_FPS) {
+ if (group->has_update) {
+ int i;
+ int num = MVX_LOG_FPS_MSG_UNITS;
+ int offset = group->fps_msg_w * MVX_LOG_FPS_MSG_UNIT_SIZE;
+ char *start = group->avgfps + offset;
+ if (start[0] == 0) {
+ num = group->fps_msg_w;
+ offset = 0;
+ }
+ for (i = 0; i < num; i++) {
+ len += scnprintf(cache + len, MVX_LOG_FPS_MSG_UNIT_SIZE,
+ "%s", group->avgfps + offset);
+ offset += MVX_LOG_FPS_MSG_UNIT_SIZE;
+ if (offset == MVX_LOG_FPS_MSG_BUF_SIZE)
+ offset = 0;
+ }
+ group->has_update = false;
+ } else {
+ len = MVX_LOG_FPS_MSG_BUF_SIZE;
+ }
+ } else {
+ len = scnprintf(cache, MVX_LOG_FPS_MSG_UNIT_SIZE, "VPU fps stats is OFF\n");
+ }
+ mutex_unlock(&group->mutex);
+
+ return simple_read_from_buffer(user_buffer, count, position, cache, len);
+}
+
+/**
+ * group_rtfps_read() - Read handle function for 
mvx/group//rtfps. The + * function returns realtime fps of sessions. + * @file: File pointer. + * @user_buffer: The user space buffer that is read to. + * @count: The maximum number of bytes to read. + * @position: The current position in the buffer. + */ +static ssize_t group_rtfps_read(struct file *file, + char __user *user_buffer, + size_t count, + loff_t *position) +{ + /* File path mvx/group//realtime_fps. */ + struct mvx_log_group *group = get_inode_private(file, 1); + char *cache = group->rtfps + MVX_LOG_FPS_MSG_BUF_SIZE; + size_t len = 0; + + if (group->enabled & MVX_LOG_PERF_FPS) { + int i; + int offset = 0; + group->rtfps_num = 0; + mvx_sched_get_realtime_fps(group->sessions); + group->rtfps_num = min(group->rtfps_num, MVX_LOG_FPS_MSG_UNITS); + for (i = 0; i < group->rtfps_num; i++) { + len += scnprintf(cache + len, MVX_LOG_FPS_MSG_UNIT_SIZE, + "%s", group->rtfps + offset); + offset += MVX_LOG_FPS_MSG_UNIT_SIZE; + } + } else { + len = scnprintf(cache, MVX_LOG_FPS_MSG_UNIT_SIZE, "VPU fps stats is OFF\n"); + } + + return simple_read_from_buffer(user_buffer, count, position, cache, len); +} + +/** + * group_status_read() - Read handle function for mvx/group//enable. The + * function returns VPU performance monitor status. + * @file: File pointer. + * @user_buffer: The user space buffer that is read to. + * @count: The maximum number of bytes to read. + * @position: The current position in the buffer. + */ +static ssize_t group_status_read(struct file *file, + char __user *user_buffer, + size_t count, + loff_t *position) +{ + /* File path mvx/group//enable. */ + struct mvx_log_group *group = get_inode_private(file, 1); + char buf[16]; + size_t len; + + len = scnprintf(buf, sizeof(buf), "%d\n", group->enabled); + return simple_read_from_buffer(user_buffer, count, position, buf, len); +} + +/** + * group_status_write() - Write handle function for mvx/group//enable. The + * function returns VPU performance monitor status. + * @file: File pointer. 
+ * @user_buffer: The user space buffer that is written to. + * @count: The maximum number of bytes to write. + * @position: The current position in the buffer. + */ +static ssize_t group_status_write(struct file *file, + const char __user *user_buffer, + size_t count, + loff_t *position) +{ + /* File path mvx/group//enable. */ + struct mvx_log_group *group = get_inode_private(file, 1); + char enable_str[4]; + int enabled; + ssize_t size; + + /* Check that input is not larger that path buffer. */ + if (count < 1) { + pr_err("MVX: Invalid data.\n"); + return -EINVAL; + } + + size = simple_write_to_buffer(enable_str, sizeof(enable_str) - 1, + position, user_buffer, count); + enabled = simple_strtol(enable_str, NULL, 0) & MVX_LOG_PERF_ALL; + if ((enabled & MVX_LOG_PERF_UTILIZATION) && + !(group->enabled & MVX_LOG_PERF_UTILIZATION)) { + if (group->drain->reset) + group->drain->reset(group->drain); + } + + if ((enabled & MVX_LOG_PERF_FPS) && !(group->enabled & MVX_LOG_PERF_FPS)) { + int size = MVX_LOG_FPS_MSG_BUF_SIZE * 2; + if (group->avgfps) + memset(group->avgfps, 0, size); + if (group->rtfps) + memset(group->rtfps, 0, size); + group->fps_msg_w = 0; + } + + group->enabled = enabled; + + return size; +} + +/** + * group_drain_read() - Read handle function for mvx/group//drain. The + * function returns the name of the currently configured + * drain. + * @file: File pointer. + * @user_buffer: The user space buffer that is read to. + * @count: The maximum number of bytes to read. + * @position: The current position in the buffer. + */ +static ssize_t group_drain_read(struct file *file, + char __user *user_buffer, + size_t count, + loff_t *position) +{ + /* File path mvx/group//drain. 
*/ + struct mvx_log_group *group = get_inode_private(file, 1); + struct mvx_log_drain *drain = group->drain; + char name[100]; + size_t len; + + if (drain == NULL || drain->dentry == NULL) { + pr_err("MVX: No drain assigned to log group.\n"); + return -EINVAL; + } + + len = scnprintf(name, sizeof(name), "%s\n", drain->dentry->d_name.name); + + return simple_read_from_buffer(user_buffer, count, position, name, len); +} + +/** + * group_drain_write() - Write handle function for mvx/group//drain. The + * function sets the drain for the group. If the drain + * does not match any registered drain, then error is + * returned to user space. + * @file: File pointer. + * @user_buffer: The user space buffer that is written to. + * @count: The maximum number of bytes to write. + * @position: The current position in the buffer. + */ +static ssize_t group_drain_write(struct file *file, + const char __user *user_buffer, + size_t count, + loff_t *position) +{ + /* File path mvx/group//drain. */ + struct mvx_log_group *group = get_inode_private(file, 1); + struct mvx_log *log = get_inode_private(file, 3); + struct dentry *dentry; + char drain_str[100]; + ssize_t size; + + /* Check that input is not larger that path buffer. */ + if (count > (sizeof(drain_str) - 1)) { + pr_err("MVX: Input overflow.\n"); + + return -EINVAL; + } + + /* Append input to path. */ + size = simple_write_to_buffer(drain_str, sizeof(drain_str) - 1, + position, user_buffer, count); + drain_str[count] = '\0'; + trim(drain_str); + + dentry = lookup(log->drain_dir, drain_str); + + if (IS_ERR_OR_NULL(dentry)) { + pr_warn("MVX: No drain matching '%s'.\n", drain_str); + return -EINVAL; + } + + /* Assign drain to log group. */ + group->drain = dentry->d_inode->i_private; + + return size; +} + +/** + * drain_ram_read() - Read the RAM buffer. + * @drain: The RAM buffer drain. + * @user_buffer: The user space buffer that is read to. + * @count: The maximum number of bytes to read. 
+ * @position: The current position in the buffer. + * @pos: The last used position of the drain buffer + */ +static ssize_t drain_ram_read(struct mvx_log_drain_ram *drain, + char __user *user_buffer, + size_t count, + loff_t *position, + size_t pos) +{ + ssize_t n = 0; + + /* Make sure position is not beyond end of file. */ + if (*position > pos) + return -EINVAL; + + /* If position is more than BUFFER_SIZE bytes behind, then fast forward + * to current position minus BUFFER_SIZE. + */ + if ((pos - *position) > drain->buffer_size) + *position = pos - drain->buffer_size; + + /* Copy data to user space. */ + while ((n < count) && (*position < pos)) { + size_t offset; + size_t length; + + /* Offset in circular buffer. */ + offset = *position & (drain->buffer_size - 1); + + /* Available number of bytes. */ + length = min((size_t)(pos - *position), count - n); + + /* Make sure length does not go beyond end of circular buffer. + */ + length = min(length, drain->buffer_size - offset); + + /* Copy data from kernel- to user space. */ + length -= copy_to_user(&user_buffer[n], &drain->buf[offset], + length); + + /* No bytes were copied. Return error. */ + if (length == 0) + return -EINVAL; + + *position += length; + n += length; + } + + return n; +} + +/** + * drain_ram_read_msg() - Read of the RAM file. + * @file: File pointer. + * @user_buffer: The user space buffer that is read to. + * @count: The maximum number of bytes to read. + * @position: The current position in the buffer. + */ +static ssize_t drain_ram_read_msg(struct file *file, + char __user *user_buffer, + size_t count, + loff_t *position) +{ + struct mvx_log_drain_ram *drain = get_inode_private(file, 1); + + while (*position == drain->write_pos) { + int ret; + + if (file->f_flags & O_NONBLOCK) + return -EAGAIN; + + /* Block until there is data available. 
*/ + ret = wait_event_interruptible(drain->queue, + *position < drain->write_pos); + if (ret != 0) + return -EINTR; + } + + return drain_ram_read(drain, user_buffer, count, position, + drain->write_pos); +} + +/** + * drain_ram_msg_poll() - Handle poll. + * @file: File pointer. + * @wait: The poll table to which the wait queue is added. + */ +static unsigned int drain_ram_msg_poll(struct file *file, + poll_table *wait) +{ + unsigned int mask = 0; + struct mvx_log_drain_ram *drain = get_inode_private(file, 1); + + poll_wait(file, &drain->queue, wait); + + if (file->f_pos < drain->write_pos) + mask |= POLLIN | POLLRDNORM; + else if (file->f_pos > drain->write_pos) + mask |= POLLERR; + + return mask; +} + +/** + * drain_ram_ioctl() - Handle IOCTL. + * @file: File pointer. + * @cmd: The value of the command to be handled. + * @arg: Extra argument. + */ +static long drain_ram_ioctl(struct file *file, + unsigned int cmd, + unsigned long arg) +{ + struct mvx_log_drain_ram *drain_ram = get_inode_private(file, 1); + + switch (cmd) { + case MVX_LOG_IOCTL_CLEAR: + drain_ram->read_pos = drain_ram->write_pos; + break; + default: + return -EINVAL; + } + + return 0; +} + +/** + * drain_ram_open() - Open file handle function. + * @inode: The inode associated with the file. + * @file: Pointer to the opened file. + * + * Return: 0 Always succeeds. 
+ */ +static int drain_ram_open(struct inode *inode, + struct file *file) +{ + struct mvx_log_drain_ram *drain_ram = get_inode_private(file, 1); + + file->f_pos = drain_ram->read_pos; + + return 0; +} + +/****************************************************************************** + * External interface + ******************************************************************************/ + +int mvx_log_construct(struct mvx_log *log, + const char *entry_name) +{ + int ret; + static const struct file_operations readme_fops = { + .read = readme_read + }; + struct dentry *dentry; + + if (!IS_ENABLED(CONFIG_DEBUG_FS)) { + pr_info( + "MVX: Debugfs is not enabled. '%s' dir is not created.\n", + entry_name); + return 0; + } + + log->mvx_dir = debugfs_create_dir(entry_name, NULL); + if (IS_ERR_OR_NULL(log->mvx_dir)) { + pr_err("MVX: Failed to create '%s' dir.\n", entry_name); + return -ENOMEM; + } + + log->log_dir = debugfs_create_dir("log", log->mvx_dir); + if (IS_ERR_OR_NULL(log->log_dir)) { + pr_err("MVX: Failed to create 'log' dir.\n"); + ret = -ENOMEM; + goto error; + } + + log->log_dir->d_inode->i_private = log; + + log->drain_dir = debugfs_create_dir("drain", log->log_dir); + if (IS_ERR_OR_NULL(log->drain_dir)) { + pr_err("MVX: Failed to create 'drain' dir.\n"); + ret = -ENOMEM; + goto error; + } + + log->group_dir = debugfs_create_dir("group", log->log_dir); + if (IS_ERR_OR_NULL(log->group_dir)) { + pr_err("MVX: Failed to create 'group' dir.\n"); + ret = -ENOMEM; + goto error; + } + + /* Create /drain. 
*/ + dentry = debugfs_create_file("README", 0400, log->log_dir, NULL, + &readme_fops); + if (IS_ERR_OR_NULL(dentry)) { + pr_err("MVX: Failed to create 'README'.\n"); + ret = -ENOMEM; + goto error; + } + + return 0; + +error: + debugfs_remove_recursive(log->mvx_dir); + return ret; +} + +void mvx_log_destruct(struct mvx_log *log) +{ + if (IS_ENABLED(CONFIG_DEBUG_FS)) + debugfs_remove_recursive(log->mvx_dir); +} + +/****************************************************************************** + * Log Drain + ******************************************************************************/ + +static int drain_construct(struct mvx_log_drain *drain, + mvx_print_fptr print, + mvx_data_fptr data, + mvx_reset_fptr reset) +{ + drain->print = print; + drain->data = data; + drain->reset = reset; + + return 0; +} + +static void drain_destruct(struct mvx_log_drain *drain) +{ + UNUSED(drain); +} + +static void drain_dmesg_print(struct mvx_log_drain *drain, + enum mvx_log_severity severity, + const char *tag, + const char *msg, + const unsigned int n_args, + ...) 
+{ + va_list args; + char fmt[500]; + + severity = min_t(int, severity, MVX_LOG_VERBOSE); + + snprintf(fmt, sizeof(fmt), "%s%s: %s\n", + severity_to_kern_level[severity], tag, msg); + fmt[sizeof(fmt) - 1] = '\0'; + + va_start(args, n_args); + vprintk(fmt, args); + va_end(args); +} + +static void drain_dmesg_data(struct mvx_log_drain *drain, + enum mvx_log_severity severity, + struct iovec *vec, + size_t count) +{ + size_t i; + + pr_info("count=%zu\n", count); + + for (i = 0; i < count; ++i) { + const char *p = vec[i].iov_base; + size_t length = vec[i].iov_len; + + pr_info(" length=%zu\n", length); + + while (length > 0) { + size_t j = min_t(size_t, length, 32); + char buf[3 + 32 * 3 + 1]; + size_t n = 0; + + length -= j; + + n += scnprintf(&buf[n], sizeof(buf) - n, " "); + + while (j-- > 0) + n += scnprintf(&buf[n], sizeof(buf) - n, + " %02x", *p++); + + pr_info("%s\n", buf); + } + } +} + +int mvx_log_drain_dmesg_construct(struct mvx_log_drain *drain) +{ + return drain_construct(drain, drain_dmesg_print, drain_dmesg_data, NULL); +} + +void mvx_log_drain_dmesg_destruct(struct mvx_log_drain *drain) +{ + drain_destruct(drain); +} + +int mvx_log_drain_add(struct mvx_log *log, + const char *name, + struct mvx_log_drain *drain) +{ + if (!IS_ENABLED(CONFIG_DEBUG_FS)) { + pr_info( + "MVX: Debugfs is not enabled. '%s' dir is not created.\n", + name); + return 0; + } + + /* Create directory. */ + drain->dentry = debugfs_create_dir(name, log->drain_dir); + if (IS_ERR_OR_NULL(drain->dentry)) { + pr_err("MVX: Failed to create '%s' dir.\n", name); + return -ENOMEM; + } + + /* Store pointer to drain object in inode private data. 
*/ + drain->dentry->d_inode->i_private = drain; + + return 0; +} + +static void drain_ram_data(struct mvx_log_drain *drain, + enum mvx_log_severity severity, + struct iovec *vec, + size_t count) +{ + struct mvx_log_drain_ram *drain_ram = + (struct mvx_log_drain_ram *)drain; + size_t i; + size_t length; + size_t pos; + int sem_taken; + + if (!IS_ENABLED(CONFIG_DEBUG_FS)) + return; + + /* Calculate the total length of the output. */ + for (i = 0, length = 0; i < count; ++i) + length += vec[i].iov_len; + + /* Round up to next 32-bit boundary. */ + length = (length + 3) & ~3; + + if (length > drain_ram->buffer_size) { + pr_err( + "MVX: Logged data larger than output buffer. length=%zu, buffer_length=%zu.\n", + length, + (size_t)drain_ram->buffer_size); + return; + } + + sem_taken = down_interruptible(&drain_ram->sem); + + pos = drain_ram->write_pos & (drain_ram->buffer_size - 1); + + /* Loop over scatter input. */ + for (i = 0; i < count; ++i) { + const char *buf = vec[i].iov_base; + size_t len = vec[i].iov_len; + + /* Copy log message to output buffer. */ + while (len > 0) { + size_t n = min(len, drain_ram->buffer_size - pos); + + memcpy(&drain_ram->buf[pos], buf, n); + + len -= n; + buf += n; + pos = (pos + n) & (drain_ram->buffer_size - 1); + } + } + + /* Update write_pos. Length has already been 4 byte aligned */ + drain_ram->write_pos += length; + + if (sem_taken == 0) + up(&drain_ram->sem); + + wake_up_interruptible(&drain_ram->queue); +} + +static void drain_ram_print(struct mvx_log_drain *drain, + enum mvx_log_severity severity, + const char *tag, + const char *msg, + const unsigned int n_args, + ...) +{ + char buf[500]; + va_list args; + size_t n = 0; + struct mvx_log_header header; + struct iovec vec[2]; + struct timespec64 timespec; + + if (!IS_ENABLED(CONFIG_DEBUG_FS)) + return; + + /* Write the log message. 
*/
+ va_start(args, n_args);
+ n += vscnprintf(buf, sizeof(buf), msg, args);
+ va_end(args);
+
+ ktime_get_real_ts64(&timespec);
+
+ header.magic = MVX_LOG_MAGIC;
+ header.length = n;
+ header.type = MVX_LOG_TYPE_TEXT;
+ header.severity = severity;
+ header.timestamp.sec = timespec.tv_sec;
+ header.timestamp.nsec = timespec.tv_nsec;
+
+ vec[0].iov_base = &header;
+ vec[0].iov_len = sizeof(header);
+
+ vec[1].iov_base = buf;
+ vec[1].iov_len = n;
+
+ drain_ram_data(drain, severity, vec, 2);
+}
+
+static void drain_ram_reset(struct mvx_log_drain *drain)
+{
+ struct mvx_log_drain_ram *drain_ram =
+ (struct mvx_log_drain_ram *)drain;
+ int sem_taken = down_interruptible(&drain_ram->sem);
+ memset(drain_ram->buf, 0, drain_ram->buffer_size);
+ drain_ram->read_pos = 0;
+ drain_ram->write_pos = 0;
+ if (sem_taken == 0)
+ up(&drain_ram->sem);
+}
+
+int mvx_log_drain_ram_construct(struct mvx_log_drain_ram *drain,
+ size_t buffer_size)
+{
+ int ret;
+
+ ret = drain_construct(&drain->base, drain_ram_print, drain_ram_data,
+ drain_ram_reset);
+ if (ret != 0)
+ return ret;
+
+ if (!IS_ENABLED(CONFIG_DEBUG_FS)) {
+ pr_info("MVX: No Debugfs no RAM drain.\n");
+ return 0;
+ }
+
+ drain->buf = vmalloc(buffer_size);
+ if (drain->buf == NULL)
+ return -ENOMEM;
+
+ *(size_t *) &drain->buffer_size = buffer_size;
+ drain->read_pos = 0;
+ drain->write_pos = 0;
+ init_waitqueue_head(&drain->queue);
+ sema_init(&drain->sem, 1);
+
+ return 0;
+}
+
+void mvx_log_drain_ram_destruct(struct mvx_log_drain_ram *drain)
+{
+ if (IS_ENABLED(CONFIG_DEBUG_FS))
+ vfree(drain->buf);
+
+ drain_destruct(&drain->base);
+}
+
+int mvx_log_drain_ram_add(struct mvx_log *log,
+ const char *name,
+ struct mvx_log_drain_ram *drain)
+{
+ static const struct file_operations drain_ram_msg = {
+ .read = drain_ram_read_msg,
+ .poll = drain_ram_msg_poll,
+ .open = drain_ram_open,
+ .unlocked_ioctl = drain_ram_ioctl
+ };
+ struct dentry *dentry;
+ int ret;
+
+ if (!IS_ENABLED(CONFIG_DEBUG_FS)) {
+ pr_info(
+ "MVX: Debugfs is 
not enabled. RAM drain dirs are not created.\n"); + return 0; + } + + ret = mvx_log_drain_add(log, name, &drain->base); + if (ret != 0) + return ret; + + /* Create dentry. */ + dentry = debugfs_create_file("msg", 0600, drain->base.dentry, NULL, + &drain_ram_msg); + if (IS_ERR_OR_NULL(dentry)) { + pr_err("MVX: Failed to create '%s/msg.\n", name); + ret = -ENOMEM; + goto error; + } + + return 0; + +error: + debugfs_remove_recursive(drain->base.dentry); + + return ret; +} + +#ifdef MVX_LOG_FTRACE_ENABLE +static void drain_ftrace_print(struct mvx_log_drain *drain, + enum mvx_log_severity severity, + const char *tag, + const char *msg, + const unsigned int n_args, + ...) +{ + va_list args; + char fmt[500]; + + severity = min_t(int, severity, MVX_LOG_VERBOSE); + + snprintf(fmt, sizeof(fmt), "%s %s: %s\n", severity_to_name[severity], + tag, msg); + fmt[sizeof(fmt) - 1] = '\0'; + + va_start(args, n_args); + ftrace_vprintk(fmt, args); + va_end(args); +} + +static void drain_ftrace_data(struct mvx_log_drain *drain, + enum mvx_log_severity severity, + struct iovec *vec, + size_t count) +{ + size_t i; + + trace_printk("count=%zu\n", count); + + for (i = 0; i < count; ++i) { + const char *p = vec[i].iov_base; + size_t length = vec[i].iov_len; + + trace_printk(" length=%zu\n", length); + + while (length > 0) { + size_t j = min_t(size_t, length, 32); + char buf[3 + 32 * 3 + 1]; + size_t n = 0; + + length -= j; + + n += scnprintf(&buf[n], sizeof(buf) - n, " "); + + while (j-- > 0) + n += scnprintf(&buf[n], sizeof(buf) - n, + " %02x", *p++); + + trace_printk("%s\n", buf); + } + } +} + +int mvx_log_drain_ftrace_construct(struct mvx_log_drain *drain) +{ + return drain_construct(drain, drain_ftrace_print, drain_ftrace_data, NULL); +} + +void mvx_log_drain_ftrace_destruct(struct mvx_log_drain *drain) +{ + drain_destruct(drain); +} + +#endif /* MVX_LOG_FTRACE_ENABLE */ + +/****************************************************************************** + * Log Group + 
******************************************************************************/ + +void mvx_log_group_construct(struct mvx_log_group *group, + const char *tag, + const enum mvx_log_severity severity, + struct mvx_log_drain *drain) +{ + group->tag = tag; + group->severity = severity; + group->drain = drain; + group->enabled = 0; + group->utilization = -1; + atomic_set(&group->freq, 300000000); + group->ts.tv_sec = 0; + group->ts.tv_nsec = 0; + group->fps_msg_w = 0; + mutex_init(&group->mutex); + group->avgfps = NULL; + group->rtfps = NULL; + if (!strncmp(tag, "MVX perf", strlen("MVX perf"))) { + int size = MVX_LOG_FPS_MSG_BUF_SIZE * 2; // the 2nd half for msg cache + group->avgfps = vmalloc(size); + memset(group->avgfps, 0, size); + group->rtfps = vmalloc(size); + memset(group->rtfps, 0, size); + } +} + +int mvx_log_group_add(struct mvx_log *log, + const char *name, + struct mvx_log_group *group) +{ + //struct dentry *dentry; + int ret; + + if (!IS_ENABLED(CONFIG_DEBUG_FS)) { + pr_info( + "MVX: Debugfs is not enabled. '%s' dir is not created.\n", + name); + return 0; + } + + /* Create directory. */ + group->dentry = debugfs_create_dir(name, log->group_dir); + if (IS_ERR_OR_NULL(group->dentry)) { + pr_err("MVX: Failed to create '%s' dir.\n", name); + ret = -ENOMEM; + goto error; + } + + /* Store reference to group object in inode private data. */ + group->dentry->d_inode->i_private = group; + + if (!strncmp(name, "perf", strlen("perf"))) { + static const struct file_operations group_util_fops = { + .read = group_util_read, + }; + static const struct file_operations group_avgfps_fops = { + .read = group_avgfps_read, + }; + static const struct file_operations group_rtfps_fops = { + .read = group_rtfps_read, + }; + static const struct file_operations group_status_fops = { + .read = group_status_read, + .write = group_status_write, + }; + /* Create /utilization. */ + debugfs_create_file("utilization", 0400, group->dentry, NULL, + &group_util_fops); + + /* Create /avgfps. 
*/ + debugfs_create_file("average_fps", 0400, group->dentry, NULL, + &group_avgfps_fops); + + /* Create /rtfps. */ + debugfs_create_file("realtime_fps", 0400, group->dentry, NULL, + &group_rtfps_fops); + + /* Create /enable. */ + debugfs_create_file("enable", 0600, group->dentry, NULL, + &group_status_fops); + } else { + /* Create /drain. */ + static const struct file_operations group_drain_fops = { + .read = group_drain_read, + .write = group_drain_write + }; + debugfs_create_file("drain", 0600, group->dentry, NULL, + &group_drain_fops); + + /* Create /severity. */ + debugfs_create_u32("severity", 0600, group->dentry, + &group->severity); + } + + return 0; + +error: + mvx_log_group_destruct(group); + return ret; +} + +void mvx_log_group_destruct(struct mvx_log_group *group) +{ + if (group->avgfps) + vfree(group->avgfps); + if (group->rtfps) + vfree(group->rtfps); +} + +const char *mvx_log_strrchr(const char *s) +{ + const char *p = strrchr(s, '/'); + + return (p == NULL) ? s : p + 1; +} diff --git a/drivers/media/platform/cix/cix_vpu/mvx_log.h b/drivers/media/platform/cix/cix_vpu/mvx_log.h new file mode 100755 index 000000000000..1ede55847640 --- /dev/null +++ b/drivers/media/platform/cix/cix_vpu/mvx_log.h @@ -0,0 +1,427 @@ +/* + * The confidential and proprietary information contained in this file may + * only be used by a person authorised under and to the extent permitted + * by a subsisting licensing agreement from Arm Technology (China) Co., Ltd. + * + * (C) COPYRIGHT 2021-2021 Arm Technology (China) Co., Ltd. + * ALL RIGHTS RESERVED + * + * This entire notice must be reproduced on all copies of this file + * and copies of this file may only be made by a person if such person is + * permitted to do so under the terms of a subsisting license agreement + * from Arm Technology (China) Co., Ltd. 
+ * + * SPDX-License-Identifier: GPL-2.0-only + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + * + */ + +#ifndef MVX_LOG_H +#define MVX_LOG_H + +/****************************************************************************** + * Includes + ******************************************************************************/ + +#include +#include +#include +#include +#include + +/****************************************************************************** + * Defines + ******************************************************************************/ + +#define MVX_USE_UTILIZATION_TIMER 0 + +/** + * Print a log message. + * + * @_lg: Pointer to log group. + * @_severity: Severity. + * @_fmt: Format string. + */ +#define MVX_LOG_PRINT(_lg, _severity, _fmt, ...) \ + do { \ + if ((_severity) <= (_lg)->severity) { \ + __MVX_LOG_PRINT(_lg, _severity, _fmt, ## __VA_ARGS__); \ + } \ + } while (0) + +/** + * Print a log message for a session. + * + * @_lg: Pointer to log group. + * @_severity: Severity. + * @_session: Pointer to session. + * @_fmt: Format string. + */ +#define MVX_LOG_PRINT_SESSION(_lg, _severity, _session, _fmt, ...) 
\ + do { \ + if ((_severity) <= (_lg)->severity) { \ + __MVX_LOG_PRINT(_lg, _severity, "%px " _fmt, _session, \ + ## __VA_ARGS__); \ + } \ + } while (0) + +/** + * Print binary data. + * + * @_lg: Pointer to log group. + * @_severity: Severity. + * @_vec: Scatter input vector data. + * @_count: _vec array size. + */ +#define MVX_LOG_DATA(_lg, _severity, _vec, _count) \ + do { \ + if ((_severity) <= (_lg)->severity) { \ + (_lg)->drain->data((_lg)->drain, _severity, _vec, \ + _count); \ + } \ + } while (0) + +/** + * Check if severity level for log group is enabled. + * + * @_lg: Pointer to log group. + * @_severity: Severity. + */ +#define MVX_LOG_ENABLED(_lg, _severity) \ + ((_severity) <= (_lg)->severity) + +/** + * Execute function if log group is enabled. + * + * @_lg: Pointer to log group. + * @_severity: Severity. + * @_exec: The function to be executed. + */ +#define MVX_LOG_EXECUTE(_lg, _severity, _exec) \ + do { \ + if (MVX_LOG_ENABLED(_lg, _severity)) { \ + _exec; \ + } \ + } while (0) + +#ifdef MVX_LOG_PRINT_FILE_ENABLE +#define __MVX_LOG_PRINT(_lg, _severity, _fmt, ...) \ + ((_lg)->drain->print((_lg)->drain, _severity, (_lg)->tag, \ + _fmt " (%s:%d)", \ + __MVX_LOG_N_ARGS(__VA_ARGS__), \ + ## __VA_ARGS__, \ + mvx_log_strrchr(__FILE__), __LINE__)) +#else +#define __MVX_LOG_PRINT(_lg, _severity, _fmt, ...) \ + ((_lg)->drain->print((_lg)->drain, _severity, (_lg)->tag, _fmt, \ + __MVX_LOG_N_ARGS(__VA_ARGS__), \ + ## __VA_ARGS__)) +#endif /* MVX_LOG_PRINT_FILE_ENABLE */ + +#define __MVX_LOG_N_ARGS(...) \ + __MVX_LOG_COUNT(dummy, ## __VA_ARGS__, 8, 7, 6, 5, 4, 3, 2, 1, 0) + +#define __MVX_LOG_COUNT(_0, _1, _2, _3, _4, _5, _6, _7, _8, N, ...) 
N + +#define MVX_LOG_PERF_UTILIZATION 1 +#define MVX_LOG_PERF_FPS 2 +#define MVX_LOG_PERF_ALL (MVX_LOG_PERF_UTILIZATION | MVX_LOG_PERF_FPS) +#define MVX_LOG_FPS_MSG_UNIT_SIZE 128 +#define MVX_LOG_FPS_MSG_UNITS 32 +#define MVX_LOG_FPS_MSG_BUF_SIZE (MVX_LOG_FPS_MSG_UNIT_SIZE * MVX_LOG_FPS_MSG_UNITS) + +#define MVX_WAR_LOG_LEVEL MVX_LOG_INFO + +/****************************************************************************** + * Types + ******************************************************************************/ + +/** + * enum mvx_log_severity - Severity levels. + */ +enum mvx_log_severity { + MVX_LOG_PANIC, + MVX_LOG_ERROR, + MVX_LOG_WARNING, + MVX_LOG_INFO, + MVX_LOG_DEBUG, + MVX_LOG_VERBOSE, + MVX_LOG_MAX +}; + +struct mvx_log_drain; + +/** + * mvx_print_fptr() - Function pointer to output text messages. + * + * @drain: Pointer to drain. + * @severity: Severity level. + * @tag: Log group tag. + * @fmt: Format string. + * @n_args: Number of arguments to format string. + */ +typedef void (*mvx_print_fptr)(struct mvx_log_drain *drain, + enum mvx_log_severity severity, + const char *tag, + const char *fmt, + const unsigned int n_args, + ...); + +/** + * mvx_data_fptr() - Function pointer to output binary data. + * + * @drain: Pointer to drain. + * @severity: Severity level. + * @vec: Pointer to the buffers that are copied. + * @count: The number of vec buffers. + */ +typedef void (*mvx_data_fptr)(struct mvx_log_drain *drain, + enum mvx_log_severity severity, + struct iovec *vec, + size_t count); + +/** + * mvx_reset_fptr() - Function pointer to reset data buffer. + * + * @drain: Pointer to drain. + */ +typedef void (*mvx_reset_fptr)(struct mvx_log_drain *drain); + +/** + * struct mvx_log_drain - Structure with information about the drain. The drain + * handles the formatting and redirection of the log + * messages. + * @print: Print function pointer. + * @data: Data function pointer. + * @dentry: Debugfs dentry. 
+ */ +struct mvx_log_drain { + mvx_print_fptr print; + mvx_data_fptr data; + mvx_reset_fptr reset; + struct dentry *dentry; +}; + +/** + * struct mvx_log_drain_ram - Structure describing a specialized RAM drain. + * @base: Base class. + * @buf: Pointer to output buffer. + * @buffer_size: Size of the buffer. Must be power of 2. + * @read_pos: Read position when a new file handle is opened. Is + * updated when the buffer is cleared. + * @write_pos: Current write position in RAM buffer. + * @queue: Wait queue for blocking IO. + * @sem: Semaphore to prevent concurrent writes. + */ +struct mvx_log_drain_ram { + struct mvx_log_drain base; + char *buf; + const size_t buffer_size; + size_t read_pos; + size_t write_pos; + wait_queue_head_t queue; + struct semaphore sem; +}; + +/** + * struct mvx_log_group - Structure describing log group. The log group filters + * which log messages that shall be forwarded to the + * drain. + * @tag: Name of log group. + * @severity: Severity level. + * @drain: Drain. + * @dentry: Debugfs dentry. + */ +struct mvx_log_group { + const char *tag; + enum mvx_log_severity severity; + struct mvx_log_drain *drain; + int enabled; + int utilization; + atomic_t freq; + struct timespec64 ts; + struct dentry *dentry; + char *rtfps; + int rtfps_num; + char *avgfps; + int fps_msg_w; + bool has_update; + struct list_head *sessions; + struct mutex mutex; +}; + +/** + * struct mvx_log - Log class that keeps track of registered groups and drains. 
+ */ +struct mvx_log { + struct dentry *mvx_dir; + struct dentry *log_dir; + struct dentry *drain_dir; + struct dentry *group_dir; +}; + +struct mvx_duration { + uint32_t start; + uint32_t end; +}; + +struct mvx_time { + struct timespec64 timespec; + struct mvx_duration parse; + struct mvx_duration pipe; +}; + +/**************************************************************************** + * Log + ****************************************************************************/ + +/** + * mvx_log_construct() - Log constructor. + * @log: Pointer to log. + * @entry_name: The name of the directory + * + * Return: 0 on success, else error code. + */ +int mvx_log_construct(struct mvx_log *log, + const char *entry_name); + +/** + * mvx_log_destruct() - Log destructor. + * @log: Pointer to log. + */ +void mvx_log_destruct(struct mvx_log *log); + +/**************************************************************************** + * Drain + ****************************************************************************/ + +/** + * mvx_log_drain_dmesg_construct() - Dmesg drain constructor. + * @drain: Pointer to drain. + * + * Return: 0 on success, else error code. + */ +int mvx_log_drain_dmesg_construct(struct mvx_log_drain *drain); + +/** + * mvx_log_drain_dmesg_destruct() - Dmesg drain destructor. + * @drain: Pointer to drain. + */ +void mvx_log_drain_dmesg_destruct(struct mvx_log_drain *drain); + +/** + * mvx_log_drain_add() - Add drain to log. + * @log: Pointer to log. + * @name: Name of drain. + * @drain: Pointer to drain. + * + * Return: 0 on success, else error code. + */ +int mvx_log_drain_add(struct mvx_log *log, + const char *name, + struct mvx_log_drain *drain); + +/** + * mvx_log_drain_ram_construct() - RAM drain constructor. + * @drain: Pointer to drain. + * @print: Print function pointer. + * @data: Data function pointer. + * @buffer_size: The size of the RAM drain buffer. + * + * Return: 0 on success, else error code. 
+ */ +int mvx_log_drain_ram_construct(struct mvx_log_drain_ram *drain, + size_t buffer_size); + +/** + * mvx_log_drain_ram_destruct() - RAM drain destructor. + * @drain: Pointer to drain. + */ +void mvx_log_drain_ram_destruct(struct mvx_log_drain_ram *drain); + +/** + * mvx_log_drain_ram_add() - Derived function to add RAM drain to log. + * @log: Pointer to log. + * @name: Name of drain. + * @drain: Pointer to drain. + * + * Return: 0 on success, else error code. + */ +int mvx_log_drain_ram_add(struct mvx_log *log, + const char *name, + struct mvx_log_drain_ram *drain); + +#ifdef MVX_LOG_FTRACE_ENABLE + +/** + * mvx_log_drain_ftrace_construct() - Ftrace drain constructor. + * @drain: Pointer to drain. + * + * Return: 0 on success, else error code. + */ +int mvx_log_drain_ftrace_construct(struct mvx_log_drain *drain); + +/** + * mvx_log_drain_ftrace_destruct() - Ftrace drain destructor. + * @drain: Pointer to drain. + */ +void mvx_log_drain_ftrace_destruct(struct mvx_log_drain *drain); + +#endif /* MVX_LOG_FTRACE_ENABLE */ + +/**************************************************************************** + * Group + ****************************************************************************/ + +/** + * mvx_log_group_construct() - Group constructor. + * @group: Pointer to group. + * @tag: Name of the group, to be used in log messages. + * @severity: Minimum severity to output log message. + * @drain: Pointer to drain. + */ +void mvx_log_group_construct(struct mvx_log_group *group, + const char *tag, + const enum mvx_log_severity severity, + struct mvx_log_drain *drain); + +/** + * mvx_log_group_add() - Add a group with given name to log. + * @log: Pointer to log. + * @name: Name of group. + * @group: Pointer to group. + * + * Return: 0 on success, else error code. + */ +int mvx_log_group_add(struct mvx_log *log, + const char *name, + struct mvx_log_group *group); + +/** + * mvx_log_group_destruct() - Group destructor. + * @group: Pointer to group. 
+ */ +void mvx_log_group_destruct(struct mvx_log_group *group); + +/** + * mvx_log_strrchr() - Find last occurrence of '/' in string. + * @s: Pointer to string. + * + * Return: Pointer to '/'+1, or pointer to begin of string. + */ +const char *mvx_log_strrchr(const char *s); + +#endif /* MVX_LOG_H */ diff --git a/drivers/media/platform/cix/cix_vpu/mvx_log_group.c b/drivers/media/platform/cix/cix_vpu/mvx_log_group.c new file mode 100755 index 000000000000..5040bf54ffed --- /dev/null +++ b/drivers/media/platform/cix/cix_vpu/mvx_log_group.c @@ -0,0 +1,383 @@ +/* + * The confidential and proprietary information contained in this file may + * only be used by a person authorised under and to the extent permitted + * by a subsisting licensing agreement from Arm Technology (China) Co., Ltd. + * + * (C) COPYRIGHT 2021-2021 Arm Technology (China) Co., Ltd. + * ALL RIGHTS RESERVED + * + * This entire notice must be reproduced on all copies of this file + * and copies of this file may only be made by a person if such person is + * permitted to do so under the terms of a subsisting license agreement + * from Arm Technology (China) Co., Ltd. + * + * SPDX-License-Identifier: GPL-2.0-only + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ * + */ + +/****************************************************************************** + * Includes + ******************************************************************************/ + +#include +#include +#include "mvx_log.h" + +/****************************************************************************** + * Defines + ******************************************************************************/ + +#define MVX_TIME_NUM (1 << 11) // 2048 +#define MVX_UTIL_INTERVAL_SEC 1 +#define MVX_UTIL_INTERVAL_MSEC (MVX_UTIL_INTERVAL_SEC * MSEC_PER_SEC) +#define MVX_UTIL_INTERVAL_NSEC (MVX_UTIL_INTERVAL_SEC * NSEC_PER_SEC) +#define MVX_MHZ_TO_TICKS(freq) (freq / 1000 / 1000 * 62500) +#define MVX_UTIL_INTERVAL_TICKS(freq) (MVX_MHZ_TO_TICKS(freq) * MVX_UTIL_INTERVAL_SEC) + +/****************************************************************************** + * Private variables + ******************************************************************************/ + +static struct mvx_log log; + +static struct mvx_log_drain drain_dmesg_if; +static struct mvx_log_drain_ram drain_ram0_if; +static struct mvx_log_drain_ram drain_ram1_if; + +#ifdef MVX_LOG_FTRACE_ENABLE +static struct mvx_log_drain drain_ftrace_if; +#endif /* MVX_LOG_FTRACE_ENABLE */ + +struct mvx_log_group mvx_log_if; +struct mvx_log_group mvx_log_fwif_if; +struct mvx_log_group mvx_log_perf; +struct mvx_log_group mvx_log_session_if; +struct mvx_log_group mvx_log_dev; + +struct mvx_duration *dur_buf = NULL; +#if MVX_USE_UTILIZATION_TIMER +void mvx_log_get_util(struct timer_list *timer); +DEFINE_TIMER(util_timer, mvx_log_get_util); +#endif + +/****************************************************************************** + * Static functions + ******************************************************************************/ + +static int mvx_log_get_time_range(struct timespec64 *start, + struct mvx_duration *crange, int *count) +{ + unsigned int i, j; + unsigned int interval_in_dticks; + int sem_taken; + int not_found_count 
= 0; + struct mvx_duration range; + struct mvx_time *tbuf = (struct mvx_time *)drain_ram1_if.buf; + int ofirst = (drain_ram1_if.write_pos / sizeof(struct mvx_time)) & (MVX_TIME_NUM - 1); + int olast = ofirst == 0 ? MVX_TIME_NUM - 1 : ofirst - 1; + + sem_taken = down_interruptible(&drain_ram1_if.sem); + if (tbuf[olast].timespec.tv_sec == 0 && tbuf[olast].timespec.tv_nsec == 0) { + /* VPU hasn't processed any workload */ + if (sem_taken == 0) + up(&drain_ram1_if.sem); + mvx_log_perf.utilization = 0; + return 1; + } + + if (tbuf[ofirst].timespec.tv_sec == 0 && tbuf[ofirst].timespec.tv_nsec == 0) + ofirst = 0; + + /* Make sure the last workload is valid. (parse.start could be 0 for repeat frames) */ + while (tbuf[olast].parse.start == 0 && olast != ofirst) + olast = olast == 0 ? MVX_TIME_NUM - 1 : olast - 1; + + if (timespec64_compare(start, &tbuf[olast].timespec) >= 0 || + (tbuf[olast].parse.start == 0 && olast == ofirst)) { + /* No valid workload in past one second*/ + if (sem_taken == 0) + up(&drain_ram1_if.sem); + mvx_log_perf.utilization = 0; + return 1; + } + + /* Search for the first valid workload in range */ + i = ofirst; + while (timespec64_compare(start, &tbuf[i].timespec) >= 0 || + tbuf[i].parse.start == 0) { + if (i == olast) + break; + i++; + i &= (MVX_TIME_NUM - 1); + } + /* Collect all the time frames in range */ + interval_in_dticks = MVX_UTIL_INTERVAL_TICKS(atomic_read(&mvx_log_perf.freq)) >> 1; + range.end = tbuf[olast].pipe.start; + if (range.end < interval_in_dticks) + range.end += 0x80000000; + range.start = range.end - interval_in_dticks; + j = 0; + i = olast; + do { + i--; + i &= (MVX_TIME_NUM - 1); + if (range.end > 0x80000000 && tbuf[i].pipe.start < interval_in_dticks) + tbuf[i].pipe.start += 0x80000000; + if (range.end > 0x80000000 && tbuf[i].pipe.end < interval_in_dticks) + tbuf[i].pipe.end += 0x80000000; + if (tbuf[i].pipe.end > range.start && tbuf[i].pipe.start < range.end) { + dur_buf[j].start = max(tbuf[i].pipe.start, range.start); + 
dur_buf[j].end = min(tbuf[i].pipe.end, range.end); + j++; + not_found_count = 0; + } else { + not_found_count++; + } + /* + * Data in buffer might not in time order. So if got one data out of range, + * the next one could be still in the range. Just try, but limit retry count + * to 20 which should be a reasonable number, even in 40-session case. + */ + } while (i != ofirst && (not_found_count < 20 || tbuf[i].parse.start == 0)); + + if (sem_taken == 0) + up(&drain_ram1_if.sem); + + *crange = range; + *count = j; + + return 0; +} + +/****************************************************************************** + * External interface + ******************************************************************************/ + +int mvx_log_group_init(const char *entry_name) +{ + int ret; + struct mvx_log_drain *drain_default = &drain_dmesg_if; + struct mvx_log_drain *drain_ram = &drain_ram0_if.base; + struct mvx_log_drain *drain_ram1 = &drain_ram1_if.base; + +#ifdef MVX_LOG_FTRACE_ENABLE + drain_default = &drain_ftrace_if; +#endif /* MVX_LOG_FTRACE_ENABLE */ + + /* Construct log object. */ + ret = mvx_log_construct(&log, entry_name); + if (ret != 0) + return ret; + + /* Construct drain objects and add them to log. */ + mvx_log_drain_dmesg_construct(&drain_dmesg_if); + ret = mvx_log_drain_add(&log, "dmesg", &drain_dmesg_if); + if (ret != 0) + goto delete_log_entry; + + mvx_log_drain_ram_construct(&drain_ram0_if, 64 * 1024); + ret = mvx_log_drain_ram_add(&log, "ram0", &drain_ram0_if); + if (ret != 0) + goto delete_dmesg_drain; + + mvx_log_drain_ram_construct(&drain_ram1_if, + sizeof(struct mvx_time) * MVX_TIME_NUM); + ret = mvx_log_drain_ram_add(&log, "ram1", &drain_ram1_if); + if (ret != 0) + goto delete_ram_drain; + +#ifdef MVX_LOG_FTRACE_ENABLE + mvx_log_drain_ftrace_construct(&drain_ftrace_if); + mvx_log_drain_add(&log, "ftrace", &drain_ftrace_if); + if (ret != 0) + goto delete_ram1_drain; + +#endif /* MVX_LOG_FTRACE_ENABLE */ + + /* Construct group objects. 
*/ + mvx_log_group_construct(&mvx_log_if, "MVX if", MVX_LOG_WARNING, + drain_default); + ret = mvx_log_group_add(&log, "generic", &mvx_log_if); + if (ret != 0) + goto delete_ftrace_drain; + + mvx_log_group_construct(&mvx_log_fwif_if, "MVX fwif", MVX_LOG_INFO, + drain_ram); + ret = mvx_log_group_add(&log, "firmware_interface", + &mvx_log_fwif_if); + if (ret != 0) + goto delete_generic_group; + + mvx_log_group_construct(&mvx_log_session_if, "MVX session", + MVX_LOG_WARNING, + drain_default); + ret = mvx_log_group_add(&log, "session", + &mvx_log_session_if); + if (ret != 0) + goto delete_fwif_group; + + mvx_log_group_construct(&mvx_log_dev, "MVX dev", MVX_LOG_WARNING, + drain_default); + ret = mvx_log_group_add(&log, "dev", &mvx_log_dev); + if (ret != 0) + goto delete_session_group; + + mvx_log_group_construct(&mvx_log_perf, "MVX perf", MVX_LOG_INFO, + drain_ram1); + ret = mvx_log_group_add(&log, "perf", + &mvx_log_perf); + if (ret != 0) + goto delete_generic_group; + +#if MVX_USE_UTILIZATION_TIMER + util_timer.function = mvx_log_get_util; + util_timer.expires = jiffies + msecs_to_jiffies(MVX_UTIL_INTERVAL_MSEC); + add_timer(&util_timer); +#endif + + dur_buf = vmalloc(sizeof(struct mvx_duration) * 2 * MVX_TIME_NUM); + + return 0; + +delete_session_group: + mvx_log_group_destruct(&mvx_log_session_if); + +delete_fwif_group: + mvx_log_group_destruct(&mvx_log_fwif_if); + +delete_generic_group: + mvx_log_group_destruct(&mvx_log_if); + +delete_ftrace_drain: + +#ifdef MVX_LOG_FTRACE_ENABLE + mvx_log_drain_ftrace_destruct(&drain_ftrace_if); + +delete_ram1_drain: +#endif /* MVX_LOG_FTRACE_ENABLE */ + + mvx_log_drain_ram_destruct(&drain_ram1_if); + +delete_ram_drain: + mvx_log_drain_ram_destruct(&drain_ram0_if); + +delete_dmesg_drain: + mvx_log_drain_dmesg_destruct(&drain_dmesg_if); + +delete_log_entry: + mvx_log_destruct(&log); + + if (dur_buf) + vfree(dur_buf); + dur_buf = NULL; + + return ret; +} + +void mvx_log_group_deinit(void) +{ + /* Destroy objects in reverse order. 
*/ + if (dur_buf) + vfree(dur_buf); + dur_buf = NULL; +#if MVX_USE_UTILIZATION_TIMER + del_timer(&util_timer); +#endif + mvx_log_group_destruct(&mvx_log_dev); + mvx_log_group_destruct(&mvx_log_session_if); + mvx_log_group_destruct(&mvx_log_fwif_if); + mvx_log_group_destruct(&mvx_log_if); + +#ifdef MVX_LOG_FTRACE_ENABLE + mvx_log_drain_ftrace_destruct(&drain_ftrace_if); +#endif /* MVX_LOG_FTRACE_ENABLE */ + + mvx_log_drain_ram_destruct(&drain_ram1_if); + mvx_log_drain_ram_destruct(&drain_ram0_if); + mvx_log_drain_dmesg_destruct(&drain_dmesg_if); + + mvx_log_destruct(&log); +} + +void mvx_log_get_util(struct timer_list *timer) +{ + int n, i, j; + int util; + uint32_t min, max, range, range1, range2; + struct mvx_duration crange; + struct mvx_duration *duration; + struct timespec64 now, start; + struct mvx_duration *dbuf = dur_buf; + +#if MVX_USE_UTILIZATION_TIMER + if (timer) + mod_timer(timer, jiffies + msecs_to_jiffies(MVX_UTIL_INTERVAL_MSEC)); +#endif + + if (!(mvx_log_perf.enabled & MVX_LOG_PERF_UTILIZATION) || dur_buf == NULL) { + mvx_log_perf.enabled &= ~MVX_LOG_PERF_UTILIZATION; + mvx_log_perf.utilization = -1; + return; + } + + ktime_get_real_ts64(&now); + start.tv_sec = now.tv_sec > MVX_UTIL_INTERVAL_SEC ? + now.tv_sec - MVX_UTIL_INTERVAL_SEC : 0; + start.tv_nsec = now.tv_nsec; + + /* To avoid too frequent refresh */ + if (timespec64_compare(&start, &mvx_log_perf.ts) < 0) + return; + mvx_log_perf.ts = now; + + if (mvx_log_get_time_range(&start, &crange, &n) != 0) + return; + + /* There should be workload in VPU in past one second, calculate utilization. 
*/ + /* Try to merge time frames */ + for (i = 0; i < n - 1; i++) { + duration = dbuf + i; + if (duration->start == duration->end) + continue; + for (j = i + 1; j < n; j++) { + if (dbuf[j].start == dbuf[j].end) + continue; + min = min(duration->start, dbuf[j].start); + max = max(duration->end, dbuf[j].end); + range = max - min; + range1 = duration->end - duration->start; + range2 = dbuf[j].end - dbuf[j].start; + if (range <= range1 + range2) { + /* the two durations have overlap, so can be merged */ + duration->start = min; + duration->end = max; + dbuf[j].start = 0; + dbuf[j].end = 0; + } + } + } + + util = 0; + for (i = 0; i < n; i++) + util += dbuf[i].end - dbuf[i].start; + /* Calculate utilization in unit of 0.01 percent */ + mvx_log_perf.utilization = + min(10000, + (int)((uint64_t)util * 20000 / MVX_UTIL_INTERVAL_TICKS(atomic_read(&mvx_log_perf.freq)))); + + return; +} diff --git a/drivers/media/platform/cix/cix_vpu/mvx_log_group.h b/drivers/media/platform/cix/cix_vpu/mvx_log_group.h new file mode 100755 index 000000000000..37f5e8e590a2 --- /dev/null +++ b/drivers/media/platform/cix/cix_vpu/mvx_log_group.h @@ -0,0 +1,69 @@ +/* + * The confidential and proprietary information contained in this file may + * only be used by a person authorised under and to the extent permitted + * by a subsisting licensing agreement from Arm Technology (China) Co., Ltd. + * + * (C) COPYRIGHT 2021-2021 Arm Technology (China) Co., Ltd. + * ALL RIGHTS RESERVED + * + * This entire notice must be reproduced on all copies of this file + * and copies of this file may only be made by a person if such person is + * permitted to do so under the terms of a subsisting license agreement + * from Arm Technology (China) Co., Ltd. 
+ * + * SPDX-License-Identifier: GPL-2.0-only + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + * + */ + +#ifndef _MVX_LOG_GROUP_H_ +#define _MVX_LOG_GROUP_H_ + +/**************************************************************************** + * Includes + ****************************************************************************/ + +#include "mvx_log.h" + +/****************************************************************************** + * Prototypes + ******************************************************************************/ + +extern struct mvx_log_group mvx_log_if; +extern struct mvx_log_group mvx_log_fwif_if; +extern struct mvx_log_group mvx_log_perf; +extern struct mvx_log_group mvx_log_session_if; +extern struct mvx_log_group mvx_log_dev; + +/**************************************************************************** + * Exported functions + ****************************************************************************/ + +/** + * mvx_log_group_init() - Initialize log module. This function must be called + * before any of the log groups is used. + * @entry_name: The name of the directory + * + * Return: 0 on success, else error code. + */ +int mvx_log_group_init(const char *entry_name); + +/** + * mvx_log_group_deinit() - Destroy log module. 
+ */ +void mvx_log_group_deinit(void); + +#endif /* _MVX_LOG_GROUP_H_ */ diff --git a/drivers/media/platform/cix/cix_vpu/mvx_log_ram.h b/drivers/media/platform/cix/cix_vpu/mvx_log_ram.h new file mode 100755 index 000000000000..1f0d95d3976c --- /dev/null +++ b/drivers/media/platform/cix/cix_vpu/mvx_log_ram.h @@ -0,0 +1,212 @@ +/* + * The confidential and proprietary information contained in this file may + * only be used by a person authorised under and to the extent permitted + * by a subsisting licensing agreement from Arm Technology (China) Co., Ltd. + * + * (C) COPYRIGHT 2021-2021 Arm Technology (China) Co., Ltd. + * ALL RIGHTS RESERVED + * + * This entire notice must be reproduced on all copies of this file + * and copies of this file may only be made by a person if such person is + * permitted to do so under the terms of a subsisting license agreement + * from Arm Technology (China) Co., Ltd. + * + * SPDX-License-Identifier: GPL-2.0-only + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ * + */ + +#ifndef MVX_LOG_RAM_H +#define MVX_LOG_RAM_H + +/****************************************************************************** + * Includes + ******************************************************************************/ + +#ifndef __KERNEL__ +#include +#include +#include +#else +#include +#include +#endif + +/****************************************************************************** + * Defines + ******************************************************************************/ + +/** + * Magic word "MVXL" that prefix all messages. + * + * Messages are stored in native byte order. The magic word can be used to + * detect if the log has been stored in the same byte order as the application + * unpacking the log is using. + */ +#define MVX_LOG_MAGIC 0x4d56584c + +/** + * The maximum message length. + */ +#define MVX_LOG_MESSAGE_LENGTH_MAX 4096 + +/****************************************************************************** + * Types + ******************************************************************************/ + +/** + * enum mvx_log_ioctl - IOCTL commands. + * @MVX_LOG_IOCTL_CLEAR: Clear the log. + */ +enum mvx_log_ioctl { + MVX_LOG_IOCTL_CLEAR +}; + +/** + * enum mvx_log_type - Message type. The definitions are assigned values that + * are not allowed to change. + */ +enum mvx_log_type { + MVX_LOG_TYPE_TEXT = 0, + MVX_LOG_TYPE_FWIF = 1, + MVX_LOG_TYPE_FW_BINARY = 2, + MVX_LOG_TYPE_MAX +}; + +/** + * struct mvx_log_timeval - Portable time value format. + * @sec: Seconds since 1970-01-01, Unix time epoch. + * @nsec: Nano seconds. + */ +struct mvx_log_timeval { + uint64_t sec; + uint64_t nsec; +} +__attribute__((packed)); + +/** + * struct mvx_log_header - Common header for all messages stored in RAM buffer. + * @magic: Magic word. + * @length: Length of message, excluding this header. + * @type: Message type. + * @severity: Message severity. + * @timestamp: Time stamp. 
+ */ +struct mvx_log_header { + uint32_t magic; + uint16_t length; + uint8_t type; + uint8_t severity; + struct mvx_log_timeval timestamp; +} +__attribute__((packed)); + +/****************************************************************************** + * Text message + ******************************************************************************/ + +/** + * struct mvx_log_text - ASCII text message. + * @message[0]: ASCII text message. + * + * The message shall be header.length long and should end with a standard ASCII + * character. The parser of the log will add new line and null terminate + * the string. + */ +struct mvx_log_text { + char message[0]; +} +__attribute__((packed)); + +/****************************************************************************** + * Firmware interface + ******************************************************************************/ + +/** + * enum mvx_log_fwif_channel - Firmware interface message types. + */ +enum mvx_log_fwif_channel { + MVX_LOG_FWIF_CHANNEL_MESSAGE, + MVX_LOG_FWIF_CHANNEL_INPUT_BUFFER, + MVX_LOG_FWIF_CHANNEL_OUTPUT_BUFFER, + MVX_LOG_FWIF_CHANNEL_RPC +}; + +/** + * enum mvx_log_fwif_direction - Firmware interface message types. + */ +enum mvx_log_fwif_direction { + MVX_LOG_FWIF_DIRECTION_HOST_TO_FIRMWARE, + MVX_LOG_FWIF_DIRECTION_FIRMWARE_TO_HOST +}; + +/** + * enum mvx_log_fwif_code - Special message codes for message types not defined + * by the firmware interface. + */ +enum mvx_log_fwif_code { + MVX_LOG_FWIF_CODE_STAT = 16000 +}; + +/** + * struct mvx_log_fwif - Firmware interface header type. + * @version_minor: Protocol version. + * @version_major: Protocol version. + * @channel: @see enum mvx_log_fwif_channel. + * @direction: @see enum mvx_log_fwif_direction. + * @session: Session id. + * @data[0]: Data following the firmware interface message + * header. 
+ */ +struct mvx_log_fwif { + uint8_t version_minor; + uint8_t version_major; + uint8_t channel; + uint8_t direction; + uint64_t session; + uint8_t data[0]; +} +__attribute__((packed)); + +/** + * struct mvx_log_fwif_stat - Firmware interface statistics. + * @handle: Buffer handle. + * @queued: Number of buffers currently queued to the firmware. + */ +struct mvx_log_fwif_stat { + uint64_t handle; + uint32_t queued; +} +__attribute__((packed)); + +/****************************************************************************** + * Firmware binary header + ******************************************************************************/ + +/** + * struct mvx_log_fw_binary - Firmware binary header. + * @session: Session id. + * @data[0]: Firmware binary, byte 0..length. + * + * The first ~100 bytes of the firmware binary contain information describing + * the codec. + */ +struct mvx_log_fw_binary { + uint64_t session; + uint8_t data[0]; +}; + +#endif /* MVX_LOG_RAM_H */ diff --git a/drivers/media/platform/cix/cix_vpu/mvx_pm_runtime.c b/drivers/media/platform/cix/cix_vpu/mvx_pm_runtime.c new file mode 100755 index 000000000000..16045950c775 --- /dev/null +++ b/drivers/media/platform/cix/cix_vpu/mvx_pm_runtime.c @@ -0,0 +1,65 @@ +/* + * The confidential and proprietary information contained in this file may + * only be used by a person authorised under and to the extent permitted + * by a subsisting licensing agreement from Arm Technology (China) Co., Ltd. + * + * (C) COPYRIGHT 2021-2021 Arm Technology (China) Co., Ltd. + * ALL RIGHTS RESERVED + * + * This entire notice must be reproduced on all copies of this file + * and copies of this file may only be made by a person if such person is + * permitted to do so under the terms of a subsisting license agreement + * from Arm Technology (China) Co., Ltd. 
+ * + * SPDX-License-Identifier: GPL-2.0-only + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + * + */ + +#include +#include "mvx_log_group.h" + +int mvx_pm_runtime_get_sync(struct device *dev) +{ +#ifdef CONFIG_PM + int ret; + + ret = pm_runtime_get_sync(dev); + if (ret < 0) + MVX_LOG_PRINT(&mvx_log_dev, MVX_LOG_WARNING, + "PM runtime get sync failed! ret=%d", ret); + + return ret; +#else /* !CONFIG_PM */ + return 1; +#endif /* CONFIG_PM */ +} + +int mvx_pm_runtime_put_sync(struct device *dev) +{ +#ifdef CONFIG_PM + int ret; + + ret = pm_runtime_put_sync(dev); + if (ret < 0) + MVX_LOG_PRINT(&mvx_log_dev, MVX_LOG_WARNING, + "PM runtime put sync failed! ret=%d", ret); + + return ret; +#else /* !CONFIG_PM */ + return 0; +#endif /* CONFIG_PM */ +} diff --git a/drivers/media/platform/cix/cix_vpu/mvx_pm_runtime.h b/drivers/media/platform/cix/cix_vpu/mvx_pm_runtime.h new file mode 100755 index 000000000000..17da61a1239a --- /dev/null +++ b/drivers/media/platform/cix/cix_vpu/mvx_pm_runtime.h @@ -0,0 +1,67 @@ +/* + * The confidential and proprietary information contained in this file may + * only be used by a person authorised under and to the extent permitted + * by a subsisting licensing agreement from Arm Technology (China) Co., Ltd. + * + * (C) COPYRIGHT 2021-2021 Arm Technology (China) Co., Ltd. 
+ * ALL RIGHTS RESERVED + * + * This entire notice must be reproduced on all copies of this file + * and copies of this file may only be made by a person if such person is + * permitted to do so under the terms of a subsisting license agreement + * from Arm Technology (China) Co., Ltd. + * + * SPDX-License-Identifier: GPL-2.0-only + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + * + */ + +#ifndef _MVX_PM_RUNTIME_H_ +#define _MVX_PM_RUNTIME_H_ + +/**************************************************************************** + * Types + ****************************************************************************/ + +struct device; + +/**************************************************************************** + * Exported functions + ****************************************************************************/ + +/** + * mvx_pm_runtime_get_sync() - The same function as pm_runtime_get_sync(), but + * with the addon that it prints a log line when + * error happens. + * @dev: Pointer to device. + * + * Return: 0 on success, 1 if already 'active', else error code. + */ +int mvx_pm_runtime_get_sync(struct device *dev); + +/** + * mvx_pm_runtime_put_sync() - The same function as pm_runtime_put_sync(), but + * with the addon that it prints a log line when + * error happens. 
+ * It will not return error if CONFIG_PM is + * undefined. + * @dev: Pointer to device. + * + * Return: 0 on success, 1 if already 'suspended', else error code. + */ +int mvx_pm_runtime_put_sync(struct device *dev); + +#endif /* _MVX_PM_RUNTIME_H_ */ diff --git a/drivers/media/platform/cix/cix_vpu/mvx_seq.c b/drivers/media/platform/cix/cix_vpu/mvx_seq.c new file mode 100755 index 000000000000..f7074e7d01a1 --- /dev/null +++ b/drivers/media/platform/cix/cix_vpu/mvx_seq.c @@ -0,0 +1,95 @@ +/* + * The confidential and proprietary information contained in this file may + * only be used by a person authorised under and to the extent permitted + * by a subsisting licensing agreement from Arm Technology (China) Co., Ltd. + * + * (C) COPYRIGHT 2021-2021 Arm Technology (China) Co., Ltd. + * ALL RIGHTS RESERVED + * + * This entire notice must be reproduced on all copies of this file + * and copies of this file may only be made by a person if such person is + * permitted to do so under the terms of a subsisting license agreement + * from Arm Technology (China) Co., Ltd. + * + * SPDX-License-Identifier: GPL-2.0-only + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ *
+ */
+
+/*
+ * NOTE(review): the two system #include directives below lost their targets
+ * (the <linux/...> header names were presumably stripped as markup when this
+ * patch was extracted) -- restore them before applying this patch.
+ */
+#include
+#include
+#include "mvx_seq.h"
+
+/*
+ * Allocate an iterator and position it on the pos'th populated entry of the
+ * hash table 'head' (an array of 'size' hlist buckets).
+ *
+ * Returns the iterator on success, NULL when pos is beyond the last element,
+ * or ERR_PTR(-ENOMEM) on allocation failure.
+ */
+struct mvx_seq_hash_it *mvx_seq_hash_start(struct device *dev,
+					   struct hlist_head *head,
+					   size_t size,
+					   loff_t pos)
+{
+	struct mvx_seq_hash_it *it;
+	size_t i;
+
+	/* Device-managed allocation: freed via devm_kfree() or on unbind. */
+	it = devm_kzalloc(dev, sizeof(*it), GFP_KERNEL);
+	if (it == NULL)
+		return ERR_PTR(-ENOMEM);
+
+	it->dev = dev;
+	for (i = 0; i < size; ++i) {
+		it->i = i;
+		hlist_for_each(it->node, &head[i]) {
+			/* Stop once pos elements have been skipped. */
+			if (pos-- == 0)
+				return it;
+		}
+	}
+
+	/* pos was past the end of the table: nothing to iterate over. */
+	devm_kfree(dev, it);
+	return NULL;
+}
+
+/*
+ * Advance the iterator to the next element, walking into the following
+ * non-empty bucket when the current chain is exhausted.
+ *
+ * Returns the advanced iterator, or NULL (after freeing it) when the whole
+ * table has been visited.
+ */
+struct mvx_seq_hash_it *mvx_seq_hash_next(void *v,
+					  struct hlist_head *head,
+					  size_t size,
+					  loff_t *pos)
+{
+	struct mvx_seq_hash_it *it = v;
+
+	++*pos;
+	it->node = it->node->next;
+
+	/* Still inside the current bucket's chain. */
+	if (it->node != NULL)
+		return it;
+
+	/* Skip empty buckets until one with entries (or the end) is found. */
+	do {
+		++it->i;
+	} while ((it->i < size) && hlist_empty(&head[it->i]));
+
+	if (it->i == size) {
+		/* Table exhausted: release the iterator here, not in stop(). */
+		devm_kfree(it->dev, it);
+		return NULL;
+	}
+
+	it->node = head[it->i].first;
+	return it;
+}
+
+/*
+ * Release an iterator. Safe to call with NULL (e.g. when next() already
+ * freed it at end-of-table and seq_file passes NULL back).
+ */
+void mvx_seq_hash_stop(void *v)
+{
+	struct mvx_seq_hash_it *it = v;
+
+	if (it == NULL)
+		return;
+
+	devm_kfree(it->dev, it);
+}
diff --git a/drivers/media/platform/cix/cix_vpu/mvx_seq.h b/drivers/media/platform/cix/cix_vpu/mvx_seq.h
new file mode 100755
index 000000000000..3713621b6527
--- /dev/null
+++ b/drivers/media/platform/cix/cix_vpu/mvx_seq.h
@@ -0,0 +1,94 @@
+/*
+ * The confidential and proprietary information contained in this file may
+ * only be used by a person authorised under and to the extent permitted
+ * by a subsisting licensing agreement from Arm Technology (China) Co., Ltd.
+ *
+ * (C) COPYRIGHT 2021-2021 Arm Technology (China) Co., Ltd.
+ * ALL RIGHTS RESERVED
+ *
+ * This entire notice must be reproduced on all copies of this file
+ * and copies of this file may only be made by a person if such person is
+ * permitted to do so under the terms of a subsisting license agreement
+ * from Arm Technology (China) Co., Ltd.
+ *
+ * SPDX-License-Identifier: GPL-2.0-only
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ */
+
+#ifndef _MVX_SEQ_H_
+#define _MVX_SEQ_H_
+
+/****************************************************************************
+ * Defines
+ ****************************************************************************/
+
+/*
+ * seq_printf() helper: prints 3*ind spaces of indentation, then 'tag' left
+ * aligned in the remainder of a 30-column field, then ": " and the payload.
+ */
+#define mvx_seq_printf(s, tag, ind, fmt, ...) \
+	seq_printf(s, "%-*s%-*s: " fmt, (3 * (ind)), "", 30 - (3 * (ind)), \
+		   tag, ## __VA_ARGS__)
+
+/****************************************************************************
+ * Types
+ ****************************************************************************/
+
+/**
+ * struct mvx_seq_hash_it - Iterator over hash table.
+ * @node: Current position in the current bucket's chain.
+ * @i:    Index of the current bucket.
+ * @dev:  Device used for devm_* allocation/free of this iterator.
+ */
+struct mvx_seq_hash_it {
+	struct hlist_node *node;
+	size_t i;
+	struct device *dev;
+};
+
+/**
+ * mvx_seq_hash_start() - Initialize iterator.
+ * @dev: Pointer to device.
+ * @head: Pointer to a head of a hash table.
+ * @size: Size of a hash table.
+ * @pos: Position to start.
+ *
+ * Iterator created by this function should be provided to
+ * mvx_seq_hash_next and mvx_seq_hash_stop as the first parameter.
+ *
+ * Return: Pointer to an iterator on success or ERR_PTR().
+ */
+struct mvx_seq_hash_it *mvx_seq_hash_start(struct device *dev,
+					   struct hlist_head *head,
+					   size_t size,
+					   loff_t pos);
+
+/**
+ * mvx_seq_hash_next() - Move iterator to the next element.
+ * @v: Pointer to an iterator.
+ * @head: Pointer to a head of a hash table.
+ * @size: Size of a hash table.
+ * @pos: Position.
+ *
+ * Return: Iterator which points to a new element or NULL when the table
+ *         is over.
+ */
+struct mvx_seq_hash_it *mvx_seq_hash_next(void *v,
+					  struct hlist_head *head,
+					  size_t size,
+					  loff_t *pos);
+
+/**
+ * mvx_seq_hash_stop() - Close an iterator.
+ * @v: Pointer to an iterator.
+ */
+void mvx_seq_hash_stop(void *v);
+
+#endif /* _MVX_SEQ_H_ */
diff --git a/drivers/media/platform/cix/cix_vpu/sconscript b/drivers/media/platform/cix/cix_vpu/sconscript
new file mode 100755
index 000000000000..f55ebf2d53ae
--- /dev/null
+++ b/drivers/media/platform/cix/cix_vpu/sconscript
@@ -0,0 +1,27 @@
+# SCons build script: builds the amvx.ko kernel module by delegating to the
+# out-of-tree Makefile ('mono_v4l2' target) and installs the results.
+import os
+
+Import('env')
+
+# Get source path to current directory.
+path = env.Dir('.').srcnode().path
+
+# Extra trailing arguments for the make command line (currently none).
+prints = ""
+
+targets = [os.path.join('#', path, 'amvx.ko')]
+
+# Optional extra compiler flags forwarded from the environment.
+extra_ccflags=""
+if 'EXTRA_CCFLAGS' in os.environ:
+    extra_ccflags = os.environ['EXTRA_CCFLAGS']
+
+amvx = env.Command(targets, [],
+                   'make -C %s mono_v4l2 KDIR=%s EXTRA_CCFLAGS="%s" %s' %
+                   (path, env['KDIR'], extra_ccflags, prints))
+
+# Flag to always build.
+env.AlwaysBuild(amvx)
+
+# Install kernel module in bin directory.
+env.Install(env['BIN_DIR'], [amvx])
+
+# Install user space header files
+env.Install(env['INCLUDE_DIR'], ['linux/mvx-v4l2-controls.h', 'external/fw_v2/mve_protocol_def.h', 'mvx_log_ram.h'])