Make a guess:

 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "amdgpu_vcn.h"

#include <drm/amdgpu_drm.h>

#include <gfx/convert.h>

#include "amdgpu_vm.h"
#include "amdgpu_gmc.h"

/* max allowed register size */
#define AMD_VCN_REG_MAX					0xffff0000

#define AMD_VCN_CONTEXT_VERSION_MAJOR		6
#define AMD_VCN_CONTEXT_VERSION_MINOR		4
#define AMD_VCN_CONTEXT_VERSION_SUBLEVEL	0
#define AMD_VCN_CONTEXT_VERSION_STEP		16

/*
 * amdgpu_vcn_set_vm_params - publish VM parameters to the VM manager.
 *
 * Builds an AMD_VM_PARAM_TYPE_SIZE parameter list, counts how many
 * entries need to be transferred (currently only the VM offset, present
 * when vm_manager.max_va is set), copies the list into a DMA-coherent
 * buffer and hands it to amdgpu_vm_allocate_pages().
 *
 * NOTE(review): the DMA buffer ownership after amdgpu_vm_allocate_pages()
 * is not visible here — presumably the VM manager frees it; confirm.
 */
static void amdgpu_vcn_set_vm_params(struct amdgpu_device *adev)
{
	struct amdgpu_vm_param vm_param;
	struct amdgpu_vm_param_list vm_params;

	memset(&vm_params, 0, sizeof(vm_params));
	vm_params.type = AMD_VM_PARAM_TYPE_SIZE;
	vm_params.num_entries = 0;

	memset(&vm_param, 0, sizeof(vm_param));
	vm_param.size = &vm_params;

	mutex_lock(&adev->grbm_idx_mutex);
	/* add vm offset */
	if (adev->vm_manager.max_va)
		vm_param.num_entries += 1;
	mutex_unlock(&adev->grbm_idx_mutex);

	/* nothing to transfer — avoid a zero-sized DMA allocation */
	if (!vm_param.num_entries)
		return;

	/* allocate pages to transfer VM params */
	mutex_lock(&adev->grbm_idx_mutex);
	vm_param.pages = dma_alloc_coherent(&adev->pdev->dev,
					      vm_param.num_entries *
					      AMDGPU_GPU_PAGE_SIZE,
					      &adev->vm_manager.large_page_dma,
					      GFP_KERNEL);
	mutex_unlock(&adev->grbm_idx_mutex);
	if (!vm_param.pages)
		return;

	/*
	 * Copy the full parameter list. The previous code used
	 * sizeof(vm_param.pages), which is the size of a pointer (8 bytes),
	 * truncating the copy to a fraction of struct amdgpu_vm_param_list.
	 */
	memcpy(vm_param.pages,
	       &vm_params,
	       sizeof(vm_params));
	amdgpu_vm_allocate_pages(&adev->vm_manager, &vm_param);
}

void amdgpu_vcn_pre_init(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev_to_drm(adev);
	struct amdgpu_ring *ring;
	struct amdgpu_ring *tmp;

	/* VCN context */
	WREG32_SOC15(VCN, 0, mmUVD_CONTEXT0, adev->vcn.inst->context_id);
	adev->vcn.save_regs[AMDGPU_UVD_REG_OFFSET(VCN)] = RREG32_SOC15(VCN, 0, mmUVD_CONTEXT0);
	WREG32_SOC15(VCN, 0, mmUVD_GPCOM_VCPU_CMD,
		     AMDGPU_GPCOM_VCPU_CMD_GET_CURRENT_GPCOM_CMD |
		     AMDGPU_GPCOM_VCPU_CMD_GET_INDIRECT_GPCOM_CMD |
		     AMDGPU_GPCOM_VCPU_CMD_GET_GP_INDIRECT_DATA |
		     AMDGPU_GPCOM_VCPU_CMD_GET_GP_REG_DATA |
		     AMDGPU_GPCOM_VCPU_CMD_GET_GRBM_REGS_OUTSTANDING |
		     AMDGPU_GPCOM_VCPU_CMD_GET_SYSMEM_DATA_OUTSTANDING |
		     AMDGPU_GPCOM_VCPU_CMD_GET_GRBM_REGS_IN_RESET |
		     AMDGPU_GPCOM_VCPU_CMD_GET_SYSMEM_DATA_IN_RESET);

	/* reset gpu before posting VCN context */
	WREG32_SOC15(VCN, 0, mmUVD_SOFT_RESET, 1);
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET), 0,
		  ~UVD_SOFT_RESET__SOFT_RESET_UVD_MASK);

	/* unhalt VCN */
	WREG32_SOC