*/
#include "adreno_gpu.h"
+#include "a2xx_gpu.h"
static const struct adreno_info a2xx_gpus[] = {
{
},
.gmem = SZ_256K,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
- .init = a2xx_gpu_init,
+ .funcs = &a2xx_gpu_funcs,
}, { /* a200 on i.mx51 has only 128kib gmem */
.chip_ids = ADRENO_CHIP_IDS(0x02000001),
.family = ADRENO_2XX_GEN1,
},
.gmem = SZ_128K,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
- .init = a2xx_gpu_init,
+ .funcs = &a2xx_gpu_funcs,
}, {
.chip_ids = ADRENO_CHIP_IDS(0x02020000),
.family = ADRENO_2XX_GEN2,
},
.gmem = SZ_512K,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
- .init = a2xx_gpu_init,
+ .funcs = &a2xx_gpu_funcs,
}
};
DECLARE_ADRENO_GPULIST(a2xx);
return ring->memptrs->rptr;
}
-static const struct adreno_gpu_funcs funcs = {
- .base = {
- .get_param = adreno_get_param,
- .set_param = adreno_set_param,
- .hw_init = a2xx_hw_init,
- .pm_suspend = msm_gpu_pm_suspend,
- .pm_resume = msm_gpu_pm_resume,
- .recover = a2xx_recover,
- .submit = a2xx_submit,
- .active_ring = adreno_active_ring,
- .irq = a2xx_irq,
- .destroy = a2xx_destroy,
-#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
- .show = adreno_show,
-#endif
- .gpu_state_get = a2xx_gpu_state_get,
- .gpu_state_put = adreno_gpu_state_put,
- .create_vm = a2xx_create_vm,
- .get_rptr = a2xx_get_rptr,
- },
-};
-
static const struct msm_gpu_perfcntr perfcntrs[] = {
/* TODO */
};
-struct msm_gpu *a2xx_gpu_init(struct drm_device *dev)
+static struct msm_gpu *a2xx_gpu_init(struct drm_device *dev)
{
struct a2xx_gpu *a2xx_gpu = NULL;
struct adreno_gpu *adreno_gpu;
struct msm_gpu *gpu;
struct msm_drm_private *priv = dev->dev_private;
struct platform_device *pdev = priv->gpu_pdev;
+ struct adreno_platform_config *config = pdev->dev.platform_data;
int ret;
if (!pdev) {
gpu->perfcntrs = perfcntrs;
gpu->num_perfcntrs = ARRAY_SIZE(perfcntrs);
- ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1);
+ ret = adreno_gpu_init(dev, pdev, adreno_gpu, config->info->funcs, 1);
if (ret)
goto fail;
return ERR_PTR(ret);
}
+
+const struct adreno_gpu_funcs a2xx_gpu_funcs = {
+ .base = {
+ .get_param = adreno_get_param,
+ .set_param = adreno_set_param,
+ .hw_init = a2xx_hw_init,
+ .pm_suspend = msm_gpu_pm_suspend,
+ .pm_resume = msm_gpu_pm_resume,
+ .recover = a2xx_recover,
+ .submit = a2xx_submit,
+ .active_ring = adreno_active_ring,
+ .irq = a2xx_irq,
+ .destroy = a2xx_destroy,
+#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
+ .show = adreno_show,
+#endif
+ .gpu_state_get = a2xx_gpu_state_get,
+ .gpu_state_put = adreno_gpu_state_put,
+ .create_vm = a2xx_create_vm,
+ .get_rptr = a2xx_get_rptr,
+ },
+ .init = a2xx_gpu_init,
+};
};
#define to_a2xx_gpu(x) container_of(x, struct a2xx_gpu, base)
+extern const struct adreno_gpu_funcs a2xx_gpu_funcs;
+
struct msm_mmu *a2xx_gpummu_new(struct device *dev, struct msm_gpu *gpu);
void a2xx_gpummu_params(struct msm_mmu *mmu, dma_addr_t *pt_base,
dma_addr_t *tran_error);
*/
#include "adreno_gpu.h"
+#include "a3xx_gpu.h"
static const struct adreno_info a3xx_gpus[] = {
{
},
.gmem = SZ_128K,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
- .init = a3xx_gpu_init,
+ .funcs = &a3xx_gpu_funcs,
}, {
.chip_ids = ADRENO_CHIP_IDS(0x03000520),
.family = ADRENO_3XX,
},
.gmem = SZ_256K,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
- .init = a3xx_gpu_init,
+ .funcs = &a3xx_gpu_funcs,
}, {
.chip_ids = ADRENO_CHIP_IDS(0x03000600),
.family = ADRENO_3XX,
},
.gmem = SZ_128K,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
- .init = a3xx_gpu_init,
+ .funcs = &a3xx_gpu_funcs,
}, {
.chip_ids = ADRENO_CHIP_IDS(0x03000620),
.family = ADRENO_3XX,
},
.gmem = SZ_128K,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
- .init = a3xx_gpu_init,
+ .funcs = &a3xx_gpu_funcs,
}, {
.chip_ids = ADRENO_CHIP_IDS(
0x03020000,
},
.gmem = SZ_512K,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
- .init = a3xx_gpu_init,
+ .funcs = &a3xx_gpu_funcs,
}, {
.chip_ids = ADRENO_CHIP_IDS(
0x03030000,
},
.gmem = SZ_1M,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
- .init = a3xx_gpu_init,
+ .funcs = &a3xx_gpu_funcs,
}
};
DECLARE_ADRENO_GPULIST(a3xx);
return ring->memptrs->rptr;
}
-static const struct adreno_gpu_funcs funcs = {
- .base = {
- .get_param = adreno_get_param,
- .set_param = adreno_set_param,
- .hw_init = a3xx_hw_init,
- .pm_suspend = msm_gpu_pm_suspend,
- .pm_resume = msm_gpu_pm_resume,
- .recover = a3xx_recover,
- .submit = a3xx_submit,
- .active_ring = adreno_active_ring,
- .irq = a3xx_irq,
- .destroy = a3xx_destroy,
-#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
- .show = adreno_show,
-#endif
- .gpu_busy = a3xx_gpu_busy,
- .gpu_state_get = a3xx_gpu_state_get,
- .gpu_state_put = adreno_gpu_state_put,
- .create_vm = adreno_create_vm,
- .get_rptr = a3xx_get_rptr,
- },
-};
-
static const struct msm_gpu_perfcntr perfcntrs[] = {
{ REG_A3XX_SP_PERFCOUNTER6_SELECT, REG_A3XX_RBBM_PERFCTR_SP_6_LO,
SP_ALU_ACTIVE_CYCLES, "ALUACTIVE" },
SP_FS_FULL_ALU_INSTRUCTIONS, "ALUFULL" },
};
-struct msm_gpu *a3xx_gpu_init(struct drm_device *dev)
+static struct msm_gpu *a3xx_gpu_init(struct drm_device *dev)
{
struct a3xx_gpu *a3xx_gpu = NULL;
struct adreno_gpu *adreno_gpu;
struct msm_gpu *gpu;
struct msm_drm_private *priv = dev->dev_private;
struct platform_device *pdev = priv->gpu_pdev;
+ struct adreno_platform_config *config = pdev->dev.platform_data;
struct icc_path *ocmem_icc_path;
struct icc_path *icc_path;
int ret;
adreno_gpu->registers = a3xx_registers;
- ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1);
+ ret = adreno_gpu_init(dev, pdev, adreno_gpu, config->info->funcs, 1);
if (ret)
goto fail;
return ERR_PTR(ret);
}
+
+const struct adreno_gpu_funcs a3xx_gpu_funcs = {
+ .base = {
+ .get_param = adreno_get_param,
+ .set_param = adreno_set_param,
+ .hw_init = a3xx_hw_init,
+ .pm_suspend = msm_gpu_pm_suspend,
+ .pm_resume = msm_gpu_pm_resume,
+ .recover = a3xx_recover,
+ .submit = a3xx_submit,
+ .active_ring = adreno_active_ring,
+ .irq = a3xx_irq,
+ .destroy = a3xx_destroy,
+#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
+ .show = adreno_show,
+#endif
+ .gpu_busy = a3xx_gpu_busy,
+ .gpu_state_get = a3xx_gpu_state_get,
+ .gpu_state_put = adreno_gpu_state_put,
+ .create_vm = adreno_create_vm,
+ .get_rptr = a3xx_get_rptr,
+ },
+ .init = a3xx_gpu_init,
+};
};
#define to_a3xx_gpu(x) container_of(x, struct a3xx_gpu, base)
+extern const struct adreno_gpu_funcs a3xx_gpu_funcs;
+
#endif /* __A3XX_GPU_H__ */
*/
#include "adreno_gpu.h"
+#include "a4xx_gpu.h"
static const struct adreno_info a4xx_gpus[] = {
{
},
.gmem = SZ_256K,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
- .init = a4xx_gpu_init,
+ .funcs = &a4xx_gpu_funcs,
}, {
.chip_ids = ADRENO_CHIP_IDS(0x04020000),
.family = ADRENO_4XX,
},
.gmem = (SZ_1M + SZ_512K),
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
- .init = a4xx_gpu_init,
+ .funcs = &a4xx_gpu_funcs,
}, {
.chip_ids = ADRENO_CHIP_IDS(0x04030002),
.family = ADRENO_4XX,
},
.gmem = (SZ_1M + SZ_512K),
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
- .init = a4xx_gpu_init,
+ .funcs = &a4xx_gpu_funcs,
}
};
DECLARE_ADRENO_GPULIST(a4xx);
return ring->memptrs->rptr;
}
-static const struct adreno_gpu_funcs funcs = {
- .base = {
- .get_param = adreno_get_param,
- .set_param = adreno_set_param,
- .hw_init = a4xx_hw_init,
- .pm_suspend = a4xx_pm_suspend,
- .pm_resume = a4xx_pm_resume,
- .recover = a4xx_recover,
- .submit = a4xx_submit,
- .active_ring = adreno_active_ring,
- .irq = a4xx_irq,
- .destroy = a4xx_destroy,
-#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
- .show = adreno_show,
-#endif
- .gpu_busy = a4xx_gpu_busy,
- .gpu_state_get = a4xx_gpu_state_get,
- .gpu_state_put = adreno_gpu_state_put,
- .create_vm = adreno_create_vm,
- .get_rptr = a4xx_get_rptr,
- },
- .get_timestamp = a4xx_get_timestamp,
-};
-
-struct msm_gpu *a4xx_gpu_init(struct drm_device *dev)
+static struct msm_gpu *a4xx_gpu_init(struct drm_device *dev)
{
struct a4xx_gpu *a4xx_gpu = NULL;
struct adreno_gpu *adreno_gpu;
struct msm_gpu *gpu;
struct msm_drm_private *priv = dev->dev_private;
struct platform_device *pdev = priv->gpu_pdev;
+ struct adreno_platform_config *config = pdev->dev.platform_data;
struct icc_path *ocmem_icc_path;
struct icc_path *icc_path;
int ret;
gpu->perfcntrs = NULL;
gpu->num_perfcntrs = 0;
- ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1);
+ ret = adreno_gpu_init(dev, pdev, adreno_gpu, config->info->funcs, 1);
if (ret)
goto fail;
return ERR_PTR(ret);
}
+
+const struct adreno_gpu_funcs a4xx_gpu_funcs = {
+ .base = {
+ .get_param = adreno_get_param,
+ .set_param = adreno_set_param,
+ .hw_init = a4xx_hw_init,
+ .pm_suspend = a4xx_pm_suspend,
+ .pm_resume = a4xx_pm_resume,
+ .recover = a4xx_recover,
+ .submit = a4xx_submit,
+ .active_ring = adreno_active_ring,
+ .irq = a4xx_irq,
+ .destroy = a4xx_destroy,
+#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
+ .show = adreno_show,
+#endif
+ .gpu_busy = a4xx_gpu_busy,
+ .gpu_state_get = a4xx_gpu_state_get,
+ .gpu_state_put = adreno_gpu_state_put,
+ .create_vm = adreno_create_vm,
+ .get_rptr = a4xx_get_rptr,
+ },
+ .init = a4xx_gpu_init,
+ .get_timestamp = a4xx_get_timestamp,
+};
};
#define to_a4xx_gpu(x) container_of(x, struct a4xx_gpu, base)
+extern const struct adreno_gpu_funcs a4xx_gpu_funcs;
+
#endif /* __A4XX_GPU_H__ */
*/
#include "adreno_gpu.h"
+#include "a5xx_gpu.h"
static const struct adreno_info a5xx_gpus[] = {
{
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.quirks = ADRENO_QUIRK_TWO_PASS_USE_WFI |
ADRENO_QUIRK_LMLOADKILL_DISABLE,
- .init = a5xx_gpu_init,
+ .funcs = &a5xx_gpu_funcs,
}, {
.chip_ids = ADRENO_CHIP_IDS(0x05000600),
.family = ADRENO_5XX,
.inactive_period = 250,
.quirks = ADRENO_QUIRK_TWO_PASS_USE_WFI |
ADRENO_QUIRK_LMLOADKILL_DISABLE,
- .init = a5xx_gpu_init,
+ .funcs = &a5xx_gpu_funcs,
.zapfw = "a506_zap.mdt",
}, {
.chip_ids = ADRENO_CHIP_IDS(0x05000800),
*/
.inactive_period = 250,
.quirks = ADRENO_QUIRK_LMLOADKILL_DISABLE,
- .init = a5xx_gpu_init,
+ .funcs = &a5xx_gpu_funcs,
.zapfw = "a508_zap.mdt",
}, {
.chip_ids = ADRENO_CHIP_IDS(0x05000900),
*/
.inactive_period = 250,
.quirks = ADRENO_QUIRK_LMLOADKILL_DISABLE,
- .init = a5xx_gpu_init,
+ .funcs = &a5xx_gpu_funcs,
/* Adreno 509 uses the same ZAP as 512 */
.zapfw = "a512_zap.mdt",
}, {
* the GDSC which appears to make it grumpy
*/
.inactive_period = 250,
- .init = a5xx_gpu_init,
+ .funcs = &a5xx_gpu_funcs,
}, {
.chip_ids = ADRENO_CHIP_IDS(0x05010200),
.family = ADRENO_5XX,
*/
.inactive_period = 250,
.quirks = ADRENO_QUIRK_LMLOADKILL_DISABLE,
- .init = a5xx_gpu_init,
+ .funcs = &a5xx_gpu_funcs,
.zapfw = "a512_zap.mdt",
}, {
.chip_ids = ADRENO_CHIP_IDS(
.inactive_period = 250,
.quirks = ADRENO_QUIRK_TWO_PASS_USE_WFI |
ADRENO_QUIRK_FAULT_DETECT_MASK,
- .init = a5xx_gpu_init,
+ .funcs = &a5xx_gpu_funcs,
.zapfw = "a530_zap.mdt",
}, {
.chip_ids = ADRENO_CHIP_IDS(0x05040001),
*/
.inactive_period = 250,
.quirks = ADRENO_QUIRK_LMLOADKILL_DISABLE,
- .init = a5xx_gpu_init,
+ .funcs = &a5xx_gpu_funcs,
.zapfw = "a540_zap.mdt",
}
};
return ring->memptrs->rptr = gpu_read(gpu, REG_A5XX_CP_RB_RPTR);
}
-static const struct adreno_gpu_funcs funcs = {
- .base = {
- .get_param = adreno_get_param,
- .set_param = adreno_set_param,
- .hw_init = a5xx_hw_init,
- .ucode_load = a5xx_ucode_load,
- .pm_suspend = a5xx_pm_suspend,
- .pm_resume = a5xx_pm_resume,
- .recover = a5xx_recover,
- .submit = a5xx_submit,
- .active_ring = a5xx_active_ring,
- .irq = a5xx_irq,
- .destroy = a5xx_destroy,
-#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
- .show = a5xx_show,
-#endif
-#if defined(CONFIG_DEBUG_FS)
- .debugfs_init = a5xx_debugfs_init,
-#endif
- .gpu_busy = a5xx_gpu_busy,
- .gpu_state_get = a5xx_gpu_state_get,
- .gpu_state_put = a5xx_gpu_state_put,
- .create_vm = adreno_create_vm,
- .get_rptr = a5xx_get_rptr,
- },
- .get_timestamp = a5xx_get_timestamp,
-};
-
static void check_speed_bin(struct device *dev)
{
struct nvmem_cell *cell;
devm_pm_opp_set_supported_hw(dev, &val, 1);
}
-struct msm_gpu *a5xx_gpu_init(struct drm_device *dev)
+static struct msm_gpu *a5xx_gpu_init(struct drm_device *dev)
{
struct msm_drm_private *priv = dev->dev_private;
struct platform_device *pdev = priv->gpu_pdev;
if (config->info->revn == 510)
nr_rings = 1;
- ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, nr_rings);
+ ret = adreno_gpu_init(dev, pdev, adreno_gpu, config->info->funcs, nr_rings);
if (ret) {
a5xx_destroy(&(a5xx_gpu->base.base));
return ERR_PTR(ret);
return gpu;
}
+
+const struct adreno_gpu_funcs a5xx_gpu_funcs = {
+ .base = {
+ .get_param = adreno_get_param,
+ .set_param = adreno_set_param,
+ .hw_init = a5xx_hw_init,
+ .ucode_load = a5xx_ucode_load,
+ .pm_suspend = a5xx_pm_suspend,
+ .pm_resume = a5xx_pm_resume,
+ .recover = a5xx_recover,
+ .submit = a5xx_submit,
+ .active_ring = a5xx_active_ring,
+ .irq = a5xx_irq,
+ .destroy = a5xx_destroy,
+#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
+ .show = a5xx_show,
+#endif
+#if defined(CONFIG_DEBUG_FS)
+ .debugfs_init = a5xx_debugfs_init,
+#endif
+ .gpu_busy = a5xx_gpu_busy,
+ .gpu_state_get = a5xx_gpu_state_get,
+ .gpu_state_put = a5xx_gpu_state_put,
+ .create_vm = adreno_create_vm,
+ .get_rptr = a5xx_get_rptr,
+ },
+ .init = a5xx_gpu_init,
+ .get_timestamp = a5xx_get_timestamp,
+};
*/
#define A5XX_PREEMPT_COUNTER_SIZE (16 * 4)
+extern const struct adreno_gpu_funcs a5xx_gpu_funcs;
int a5xx_power_init(struct msm_gpu *gpu);
void a5xx_gpmu_ucode_init(struct msm_gpu *gpu);
.gmem = (SZ_128K + SZ_4K),
.quirks = ADRENO_QUIRK_4GB_VA,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
- .init = a6xx_gpu_init,
+ .funcs = &a6xx_gmuwrapper_funcs,
.zapfw = "a610_zap.mdt",
.a6xx = &(const struct a6xx_info) {
.hwcg = a612_hwcg,
},
.gmem = (SZ_128K + SZ_4K),
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
- .init = a6xx_gpu_init,
+ .funcs = &a6xx_gmuwrapper_funcs,
.a6xx = &(const struct a6xx_info) {
.hwcg = a612_hwcg,
.protect = &a630_protect,
.gmem = SZ_512K,
.quirks = ADRENO_QUIRK_4GB_VA,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
- .init = a6xx_gpu_init,
+ .funcs = &a6xx_gpu_funcs,
.zapfw = "a615_zap.mdt",
.a6xx = &(const struct a6xx_info) {
.hwcg = a615_hwcg,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT |
ADRENO_QUIRK_4GB_VA,
- .init = a6xx_gpu_init,
+ .funcs = &a6xx_gpu_funcs,
.zapfw = "a615_zap.mbn",
.a6xx = &(const struct a6xx_info) {
.hwcg = a615_hwcg,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT |
ADRENO_QUIRK_4GB_VA,
- .init = a6xx_gpu_init,
+ .funcs = &a6xx_gpu_funcs,
.a6xx = &(const struct a6xx_info) {
.protect = &a630_protect,
.gmu_cgc_mode = 0x00000222,
.gmem = SZ_512K,
.quirks = ADRENO_QUIRK_4GB_VA,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
- .init = a6xx_gpu_init,
+ .funcs = &a6xx_gpu_funcs,
.zapfw = "a615_zap.mdt",
.a6xx = &(const struct a6xx_info) {
.hwcg = a615_hwcg,
.gmem = SZ_512K,
.quirks = ADRENO_QUIRK_4GB_VA,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
- .init = a6xx_gpu_init,
+ .funcs = &a6xx_gpu_funcs,
.zapfw = "a615_zap.mdt",
.a6xx = &(const struct a6xx_info) {
.hwcg = a615_hwcg,
.quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT |
ADRENO_QUIRK_4GB_VA,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
- .init = a6xx_gpu_init,
+ .funcs = &a6xx_gpu_funcs,
.zapfw = "a615_zap.mdt",
.a6xx = &(const struct a6xx_info) {
.hwcg = a615_hwcg,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT |
ADRENO_QUIRK_HAS_HW_APRIV,
- .init = a6xx_gpu_init,
+ .funcs = &a6xx_gpu_funcs,
.zapfw = "a620_zap.mbn",
.a6xx = &(const struct a6xx_info) {
.hwcg = a620_hwcg,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT |
ADRENO_QUIRK_HAS_HW_APRIV,
- .init = a6xx_gpu_init,
+ .funcs = &a6xx_gpu_funcs,
.a6xx = &(const struct a6xx_info) {
.hwcg = a690_hwcg,
.protect = &a650_protect,
.quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT |
ADRENO_QUIRK_4GB_VA,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
- .init = a6xx_gpu_init,
+ .funcs = &a6xx_gpu_funcs,
.zapfw = "a630_zap.mdt",
.a6xx = &(const struct a6xx_info) {
.hwcg = a630_hwcg,
.quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT |
ADRENO_QUIRK_4GB_VA,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
- .init = a6xx_gpu_init,
+ .funcs = &a6xx_gpu_funcs,
.zapfw = "a640_zap.mdt",
.a6xx = &(const struct a6xx_info) {
.hwcg = a640_hwcg,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT |
ADRENO_QUIRK_HAS_HW_APRIV,
- .init = a6xx_gpu_init,
+ .funcs = &a6xx_gpu_funcs,
.zapfw = "a650_zap.mdt",
.a6xx = &(const struct a6xx_info) {
.hwcg = a650_hwcg,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT |
ADRENO_QUIRK_HAS_HW_APRIV,
- .init = a6xx_gpu_init,
+ .funcs = &a6xx_gpu_funcs,
.zapfw = "a660_zap.mdt",
.a6xx = &(const struct a6xx_info) {
.hwcg = a660_hwcg,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT |
ADRENO_QUIRK_HAS_HW_APRIV,
- .init = a6xx_gpu_init,
+ .funcs = &a6xx_gpu_funcs,
.a6xx = &(const struct a6xx_info) {
.hwcg = a690_hwcg,
.protect = &a660_protect,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT |
ADRENO_QUIRK_HAS_HW_APRIV,
- .init = a6xx_gpu_init,
+ .funcs = &a6xx_gpu_funcs,
.zapfw = "a660_zap.mbn",
.a6xx = &(const struct a6xx_info) {
.hwcg = a660_hwcg,
.quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT |
ADRENO_QUIRK_4GB_VA,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
- .init = a6xx_gpu_init,
+ .funcs = &a6xx_gpu_funcs,
.zapfw = "a640_zap.mdt",
.a6xx = &(const struct a6xx_info) {
.hwcg = a640_hwcg,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT |
ADRENO_QUIRK_HAS_HW_APRIV,
- .init = a6xx_gpu_init,
+ .funcs = &a6xx_gpu_funcs,
.zapfw = "a690_zap.mdt",
.a6xx = &(const struct a6xx_info) {
.hwcg = a690_hwcg,
.gmem = SZ_128K,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.quirks = ADRENO_QUIRK_HAS_HW_APRIV,
- .init = a6xx_gpu_init,
+ .funcs = &a6xx_gmuwrapper_funcs,
.zapfw = "a702_zap.mbn",
.a6xx = &(const struct a6xx_info) {
.hwcg = a702_hwcg,
.quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT |
ADRENO_QUIRK_HAS_HW_APRIV |
ADRENO_QUIRK_PREEMPTION,
- .init = a6xx_gpu_init,
+ .funcs = &a7xx_gpu_funcs,
.zapfw = "a730_zap.mdt",
.a6xx = &(const struct a6xx_info) {
.hwcg = a730_hwcg,
.quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT |
ADRENO_QUIRK_HAS_HW_APRIV |
ADRENO_QUIRK_PREEMPTION,
- .init = a6xx_gpu_init,
+ .funcs = &a7xx_gpu_funcs,
.zapfw = "a740_zap.mdt",
.a6xx = &(const struct a6xx_info) {
.hwcg = a740_hwcg,
ADRENO_QUIRK_HAS_HW_APRIV |
ADRENO_QUIRK_PREEMPTION |
ADRENO_QUIRK_IFPC,
- .init = a6xx_gpu_init,
+ .funcs = &a7xx_gpu_funcs,
.a6xx = &(const struct a6xx_info) {
.hwcg = a740_hwcg,
.protect = &a730_protect,
ADRENO_QUIRK_HAS_HW_APRIV |
ADRENO_QUIRK_PREEMPTION |
ADRENO_QUIRK_IFPC,
- .init = a6xx_gpu_init,
+ .funcs = &a7xx_gpu_funcs,
.zapfw = "gen70900_zap.mbn",
.a6xx = &(const struct a6xx_info) {
.protect = &a730_protect,
.quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT |
ADRENO_QUIRK_HAS_HW_APRIV |
ADRENO_QUIRK_PREEMPTION,
- .init = a6xx_gpu_init,
+ .funcs = &a7xx_gpu_funcs,
.a6xx = &(const struct a6xx_info) {
.hwcg = a740_hwcg,
.protect = &a730_protect,
return 0;
}
-static const struct adreno_gpu_funcs funcs = {
- .base = {
- .get_param = adreno_get_param,
- .set_param = adreno_set_param,
- .hw_init = a6xx_hw_init,
- .ucode_load = a6xx_ucode_load,
- .pm_suspend = a6xx_gmu_pm_suspend,
- .pm_resume = a6xx_gmu_pm_resume,
- .recover = a6xx_recover,
- .submit = a6xx_submit,
- .active_ring = a6xx_active_ring,
- .irq = a6xx_irq,
- .destroy = a6xx_destroy,
-#if defined(CONFIG_DRM_MSM_GPU_STATE)
- .show = a6xx_show,
-#endif
- .gpu_busy = a6xx_gpu_busy,
- .gpu_get_freq = a6xx_gmu_get_freq,
- .gpu_set_freq = a6xx_gpu_set_freq,
-#if defined(CONFIG_DRM_MSM_GPU_STATE)
- .gpu_state_get = a6xx_gpu_state_get,
- .gpu_state_put = a6xx_gpu_state_put,
-#endif
- .create_vm = a6xx_create_vm,
- .create_private_vm = a6xx_create_private_vm,
- .get_rptr = a6xx_get_rptr,
- .progress = a6xx_progress,
- .sysprof_setup = a6xx_gmu_sysprof_setup,
- },
- .get_timestamp = a6xx_gmu_get_timestamp,
-};
-
-static const struct adreno_gpu_funcs funcs_gmuwrapper = {
- .base = {
- .get_param = adreno_get_param,
- .set_param = adreno_set_param,
- .hw_init = a6xx_hw_init,
- .ucode_load = a6xx_ucode_load,
- .pm_suspend = a6xx_pm_suspend,
- .pm_resume = a6xx_pm_resume,
- .recover = a6xx_recover,
- .submit = a6xx_submit,
- .active_ring = a6xx_active_ring,
- .irq = a6xx_irq,
- .destroy = a6xx_destroy,
-#if defined(CONFIG_DRM_MSM_GPU_STATE)
- .show = a6xx_show,
-#endif
- .gpu_busy = a6xx_gpu_busy,
-#if defined(CONFIG_DRM_MSM_GPU_STATE)
- .gpu_state_get = a6xx_gpu_state_get,
- .gpu_state_put = a6xx_gpu_state_put,
-#endif
- .create_vm = a6xx_create_vm,
- .create_private_vm = a6xx_create_private_vm,
- .get_rptr = a6xx_get_rptr,
- .progress = a6xx_progress,
- },
- .get_timestamp = a6xx_get_timestamp,
-};
-
-static const struct adreno_gpu_funcs funcs_a7xx = {
- .base = {
- .get_param = adreno_get_param,
- .set_param = adreno_set_param,
- .hw_init = a6xx_hw_init,
- .ucode_load = a6xx_ucode_load,
- .pm_suspend = a6xx_gmu_pm_suspend,
- .pm_resume = a6xx_gmu_pm_resume,
- .recover = a6xx_recover,
- .submit = a7xx_submit,
- .active_ring = a6xx_active_ring,
- .irq = a6xx_irq,
- .destroy = a6xx_destroy,
-#if defined(CONFIG_DRM_MSM_GPU_STATE)
- .show = a6xx_show,
-#endif
- .gpu_busy = a6xx_gpu_busy,
- .gpu_get_freq = a6xx_gmu_get_freq,
- .gpu_set_freq = a6xx_gpu_set_freq,
-#if defined(CONFIG_DRM_MSM_GPU_STATE)
- .gpu_state_get = a6xx_gpu_state_get,
- .gpu_state_put = a6xx_gpu_state_put,
-#endif
- .create_vm = a6xx_create_vm,
- .create_private_vm = a6xx_create_private_vm,
- .get_rptr = a6xx_get_rptr,
- .progress = a6xx_progress,
- .sysprof_setup = a6xx_gmu_sysprof_setup,
- },
- .get_timestamp = a6xx_gmu_get_timestamp,
-};
-
-struct msm_gpu *a6xx_gpu_init(struct drm_device *dev)
+static struct msm_gpu *a6xx_gpu_init(struct drm_device *dev)
{
struct msm_drm_private *priv = dev->dev_private;
struct platform_device *pdev = priv->gpu_pdev;
struct msm_gpu *gpu;
extern int enable_preemption;
bool is_a7xx;
- int ret;
+ int ret, nr_rings = 1;
a6xx_gpu = kzalloc(sizeof(*a6xx_gpu), GFP_KERNEL);
if (!a6xx_gpu)
if ((enable_preemption == 1) || (enable_preemption == -1 &&
(config->info->quirks & ADRENO_QUIRK_PREEMPTION)))
- ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs_a7xx, 4);
- else if (is_a7xx)
- ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs_a7xx, 1);
- else if (adreno_has_gmu_wrapper(adreno_gpu) ||
- of_device_is_compatible(node, "qcom,adreno-rgmu"))
- ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs_gmuwrapper, 1);
- else
- ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1);
+ nr_rings = 4;
+
+ ret = adreno_gpu_init(dev, pdev, adreno_gpu, config->info->funcs, nr_rings);
if (ret) {
a6xx_destroy(&(a6xx_gpu->base.base));
return ERR_PTR(ret);
return gpu;
}
+
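+/* Function table for GMU-equipped a6xx GPUs */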
+const struct adreno_gpu_funcs a6xx_gpu_funcs = {
+ .base = {
+ .get_param = adreno_get_param,
+ .set_param = adreno_set_param,
+ .hw_init = a6xx_hw_init,
+ .ucode_load = a6xx_ucode_load,
+ .pm_suspend = a6xx_gmu_pm_suspend,
+ .pm_resume = a6xx_gmu_pm_resume,
+ .recover = a6xx_recover,
+ .submit = a6xx_submit,
+ .active_ring = a6xx_active_ring,
+ .irq = a6xx_irq,
+ .destroy = a6xx_destroy,
+#if defined(CONFIG_DRM_MSM_GPU_STATE)
+ .show = a6xx_show,
+#endif
+ .gpu_busy = a6xx_gpu_busy,
+ .gpu_get_freq = a6xx_gmu_get_freq,
+ .gpu_set_freq = a6xx_gpu_set_freq,
+#if defined(CONFIG_DRM_MSM_GPU_STATE)
+ .gpu_state_get = a6xx_gpu_state_get,
+ .gpu_state_put = a6xx_gpu_state_put,
+#endif
+ .create_vm = a6xx_create_vm,
+ .create_private_vm = a6xx_create_private_vm,
+ .get_rptr = a6xx_get_rptr,
+ .progress = a6xx_progress,
+ .sysprof_setup = a6xx_gmu_sysprof_setup,
+ },
+ .init = a6xx_gpu_init,
+ .get_timestamp = a6xx_gmu_get_timestamp,
+};
+
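+/* For GPUs driven through the GMU wrapper: non-GMU suspend/resume and timestamp paths, no GMU frequency ops */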
+const struct adreno_gpu_funcs a6xx_gmuwrapper_funcs = {
+ .base = {
+ .get_param = adreno_get_param,
+ .set_param = adreno_set_param,
+ .hw_init = a6xx_hw_init,
+ .ucode_load = a6xx_ucode_load,
+ .pm_suspend = a6xx_pm_suspend,
+ .pm_resume = a6xx_pm_resume,
+ .recover = a6xx_recover,
+ .submit = a6xx_submit,
+ .active_ring = a6xx_active_ring,
+ .irq = a6xx_irq,
+ .destroy = a6xx_destroy,
+#if defined(CONFIG_DRM_MSM_GPU_STATE)
+ .show = a6xx_show,
+#endif
+ .gpu_busy = a6xx_gpu_busy,
+#if defined(CONFIG_DRM_MSM_GPU_STATE)
+ .gpu_state_get = a6xx_gpu_state_get,
+ .gpu_state_put = a6xx_gpu_state_put,
+#endif
+ .create_vm = a6xx_create_vm,
+ .create_private_vm = a6xx_create_private_vm,
+ .get_rptr = a6xx_get_rptr,
+ .progress = a6xx_progress,
+ },
+ .init = a6xx_gpu_init,
+ .get_timestamp = a6xx_get_timestamp,
+};
+
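+/* Identical to a6xx_gpu_funcs except for the a7xx submit path */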
+const struct adreno_gpu_funcs a7xx_gpu_funcs = {
+ .base = {
+ .get_param = adreno_get_param,
+ .set_param = adreno_set_param,
+ .hw_init = a6xx_hw_init,
+ .ucode_load = a6xx_ucode_load,
+ .pm_suspend = a6xx_gmu_pm_suspend,
+ .pm_resume = a6xx_gmu_pm_resume,
+ .recover = a6xx_recover,
+ .submit = a7xx_submit,
+ .active_ring = a6xx_active_ring,
+ .irq = a6xx_irq,
+ .destroy = a6xx_destroy,
+#if defined(CONFIG_DRM_MSM_GPU_STATE)
+ .show = a6xx_show,
+#endif
+ .gpu_busy = a6xx_gpu_busy,
+ .gpu_get_freq = a6xx_gmu_get_freq,
+ .gpu_set_freq = a6xx_gpu_set_freq,
+#if defined(CONFIG_DRM_MSM_GPU_STATE)
+ .gpu_state_get = a6xx_gpu_state_get,
+ .gpu_state_put = a6xx_gpu_state_put,
+#endif
+ .create_vm = a6xx_create_vm,
+ .create_private_vm = a6xx_create_private_vm,
+ .get_rptr = a6xx_get_rptr,
+ .progress = a6xx_progress,
+ .sysprof_setup = a6xx_gmu_sysprof_setup,
+ },
+ .init = a6xx_gpu_init,
+ .get_timestamp = a6xx_gmu_get_timestamp,
+};
#define A6XX_PROTECT_RDONLY(_reg, _len) \
((((_len) & 0x3FFF) << 18) | ((_reg) & 0x3FFFF))
+extern const struct adreno_gpu_funcs a6xx_gpu_funcs;
+extern const struct adreno_gpu_funcs a6xx_gmuwrapper_funcs;
+extern const struct adreno_gpu_funcs a7xx_gpu_funcs;
+
static inline bool a6xx_has_gbif(struct adreno_gpu *gpu)
{
if(adreno_is_a630(gpu))
priv->has_cached_coherent =
!!(info->quirks & ADRENO_QUIRK_HAS_CACHED_COHERENT);
- gpu = info->init(drm);
+ gpu = info->funcs->init(drm);
if (IS_ERR(gpu)) {
dev_warn(drm->dev, "failed to load adreno gpu\n");
return PTR_ERR(gpu);
(((_c) >> 8) & 0xff), \
((_c) & 0xff)
+struct adreno_gpu;
+
struct adreno_gpu_funcs {
struct msm_gpu_funcs base;
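+/* Per-generation constructor; the driver core calls info->funcs->init() to create the GPU */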
+ struct msm_gpu *(*init)(struct drm_device *dev);
int (*get_timestamp)(struct msm_gpu *gpu, uint64_t *value);
};
const char *fw[ADRENO_FW_MAX];
uint32_t gmem;
u64 quirks;
- struct msm_gpu *(*init)(struct drm_device *dev);
+ const struct adreno_gpu_funcs *funcs;
const char *zapfw;
u32 inactive_period;
union {
OUT_RING(ring, PKT7(opcode, cnt));
}
-struct msm_gpu *a2xx_gpu_init(struct drm_device *dev);
-struct msm_gpu *a3xx_gpu_init(struct drm_device *dev);
-struct msm_gpu *a4xx_gpu_init(struct drm_device *dev);
-struct msm_gpu *a5xx_gpu_init(struct drm_device *dev);
-struct msm_gpu *a6xx_gpu_init(struct drm_device *dev);
-
static inline uint32_t get_wptr(struct msm_ringbuffer *ring)
{
return (ring->cur - ring->start) % (MSM_GPU_RINGBUFFER_SZ >> 2);