panthor_sched.c

    // SPDX-License-Identifier: GPL-2.0 or MIT
    /* Copyright 2023 Collabora ltd. */
    
    #include <drm/drm_drv.h>
    #include <drm/drm_exec.h>
    #include <drm/drm_gem_shmem_helper.h>
    #include <drm/drm_managed.h>
    #include <drm/gpu_scheduler.h>
    #include <drm/panthor_drm.h>
    
    #include <linux/build_bug.h>
    #include <linux/clk.h>
    #include <linux/delay.h>
    #include <linux/dma-mapping.h>
    #include <linux/dma-resv.h>
    #include <linux/firmware.h>
    #include <linux/interrupt.h>
    #include <linux/io.h>
    #include <linux/iopoll.h>
    #include <linux/iosys-map.h>
    #include <linux/module.h>
    #include <linux/platform_device.h>
    #include <linux/pm_runtime.h>
    
    #include "panthor_devfreq.h"
    #include "panthor_device.h"
    #include "panthor_fw.h"
    #include "panthor_gem.h"
    #include "panthor_gpu.h"
    #include "panthor_heap.h"
    #include "panthor_mmu.h"
    #include "panthor_regs.h"
    #include "panthor_sched.h"
    
    /**
     * DOC: Scheduler
     *
     * Mali CSF hardware adopts a firmware-assisted scheduling model, where
     * the firmware takes care of scheduling aspects, to some extent.
     *
     * The scheduling happens at the scheduling group level, each group
     * contains 1 to N queues (N is FW/hardware dependent, and exposed
     * through the firmware interface). Each queue is assigned a command
     * stream ring buffer, which serves as a way to get jobs submitted to
     * the GPU, among other things.
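     *
     * A minimal, purely illustrative sketch of that hierarchy (the names
     * below are made up for this example; the driver's real group and
     * queue structures are defined further down in this file)::
     *
     *    struct example_queue {
     *        void *ringbuf;         // kernel-owned command stream ring buffer
     *        u64 ringbuf_gpu_va;    // GPU address the CS iterator walks
     *    };
     *
     *    struct example_group {
     *        u32 queue_count;                  // 1..N, N exposed by the FW
     *        struct example_queue *queues[8];  // 8 is an arbitrary bound here
     *    };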
     *
     * The firmware can schedule a maximum of M groups (M is FW/hardware
     * dependent, and exposed through the firmware interface). Past
     * this maximum number of groups, the kernel must take care of
     * rotating the groups passed to the firmware so every group gets
     * a chance to have its queues scheduled for execution.
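     *
     * A rough sketch of what that rotation boils down to (a hypothetical
     * helper, not the scheduler's actual tick logic)::
     *
     *    static void example_rotate_groups(struct list_head *idle,
     *                                      struct list_head *active,
     *                                      u32 max_csg_slots)
     *    {
     *        u32 i;
     *
     *        // Push everything that just ran to the tail of the idle list...
     *        while (!list_empty(active))
     *            list_move_tail(active->next, idle);
     *
     *        // ...then refill the FW slots from the head of the idle list.
     *        for (i = 0; i < max_csg_slots && !list_empty(idle); i++)
     *            list_move_tail(idle->next, active);
     *    }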
     *
     * The current implementation only supports kernel-mode queues.
     * In other words, userspace doesn't have direct access to the ring-buffer.
     * Instead, userspace passes indirect command stream buffers that the
     * kernel calls from the queue ring-buffer, wrapped in a pre-defined
     * sequence of command stream instructions, so the userspace driver
     * always gets consistent results (cache maintenance,
     * synchronization, ...).
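     *
     * Roughly, the per-job sequence the kernel writes into the ring-buffer
     * looks like the pseudo-code below (names are simplified; the real
     * instruction sequence is built later in this file)::
     *
     *    sync_wait(...);                  // synchronization, as needed
     *    call(job->user_cs_va, size);     // jump into the userspace stream
     *    flush_caches();                  // cache maintenance on return
     *    sync_signal(job->done_fence);    // flag completion back to the kernel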
     *
     * We rely on the drm_gpu_scheduler framework to deal with job
     * dependencies and submission. As with any other driver dealing with a
     * FW scheduler, we use the 1:1 entity:scheduler mode, such that each
     * entity has its own job scheduler. When a job is ready to be executed
     * (all its dependencies are met), it is pushed to the appropriate
     * queue ring-buffer, and the group is scheduled for execution if it
     * wasn't already active.
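     *
     * In code, the 1:1 mode simply means each queue gets its own scheduler
     * instance and a single-entry sched_list for its entity; a hedged
     * sketch (field names are illustrative, arguments elided, and exact
     * signatures vary between kernel versions)::
     *
     *    struct drm_gpu_scheduler *sched = &queue->scheduler;
     *
     *    drm_sched_init(sched, &ops, ...);              // one scheduler per queue
     *    drm_sched_entity_init(&queue->entity, priority,
     *                          &sched, 1, NULL);        // sched_list of size 1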
     *
     * Kernel-side group scheduling is timeslice-based. When we have fewer
     * groups than there are slots, the periodic tick is disabled and we