/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */
#ifndef _INTEL_UC_H_
#define _INTEL_UC_H_

#include "intel_guc_fwif.h"
#include "i915_guc_reg.h"
#include "intel_ringbuffer.h"

#include "i915_vma.h"

struct drm_i915_gem_request;

/*
 * This structure primarily describes the GEM object shared with the GuC.
 * The specs sometimes refer to this object as a "GuC context", but we use
 * the term "client" to avoid confusion with hardware contexts. This
 * GEM object is held for the entire lifetime of our interaction with
 * the GuC, being allocated before the GuC is loaded with its firmware.
 * Because there's no way to update the address used by the GuC after
 * initialisation, the shared object must stay pinned into the GGTT as
 * long as the GuC is in use. We also keep the first page (only) mapped
 * into kernel address space, as it includes shared data that must be
 * updated on every request submission.
 *
 * The single GEM object described here is actually made up of several
 * separate areas, as far as the GuC is concerned. The first page (kept
 * kmap'd) includes the "process descriptor" which holds sequence data for
 * the doorbell, and one cacheline which actually *is* the doorbell; a
 * write to this will "ring the doorbell" (i.e. send an interrupt to the
 * GuC). The subsequent pages of the client object constitute the work
 * queue (a circular array of work items), again described in the process
 * descriptor. Work queue pages are mapped momentarily as required.
 *
 * We also keep a few statistics on failures. Ideally, these should all
 * be zero!
 *   no_wq_space: times that the submission pre-check found no space was
 *                available in the work queue (note, the queue is shared,
 *                not per-engine). It is OK for this to be nonzero, but
 *                it should not be huge!
 *   b_fail: failed to ring the doorbell. This should never happen, unless
 *           somehow the hardware misbehaves, or maybe if the GuC firmware
 *           crashes? We probably need to reset the GPU to recover.
 *   retcode: errno from last guc_submit()
 */
struct i915_guc_client {
68
	struct i915_vma *vma;
69
	void *vaddr;
70
	struct i915_gem_context *owner;
71
	struct intel_guc *guc;
72 73

	uint32_t engines;		/* bitmap of (host) engine ids	*/
74
	uint32_t priority;
75
	u32 stage_id;
76
	uint32_t proc_desc_offset;
77

78 79 80
	u16 doorbell_id;
	unsigned long doorbell_offset;
	u32 doorbell_cookie;
81

82
	spinlock_t wq_lock;
83 84 85
	uint32_t wq_offset;
	uint32_t wq_size;
	uint32_t wq_tail;
86
	uint32_t wq_rsvd;
87
	uint32_t no_wq_space;
88 89
	uint32_t b_fail;
	int retcode;
90 91

	/* Per-engine counts of GuC submissions */
92
	uint64_t submissions[I915_NUM_ENGINES];
93 94
};

/*
 * Lifecycle state of a uC firmware blob; used for both the fetch and the
 * load phases (see struct intel_uc_fw).
 */
enum intel_uc_fw_status {
	INTEL_UC_FIRMWARE_FAIL = -1,	/* fetch or load failed */
	INTEL_UC_FIRMWARE_NONE = 0,	/* no firmware requested/selected */
	INTEL_UC_FIRMWARE_PENDING,	/* operation started, not yet complete */
	INTEL_UC_FIRMWARE_SUCCESS	/* operation completed successfully */
};

/* User-friendly representation of an enum */
static inline
const char *intel_uc_fw_status_repr(enum intel_uc_fw_status status)
{
	switch (status) {
	case INTEL_UC_FIRMWARE_FAIL:
		return "FAIL";
	case INTEL_UC_FIRMWARE_NONE:
		return "NONE";
	case INTEL_UC_FIRMWARE_PENDING:
		return "PENDING";
	case INTEL_UC_FIRMWARE_SUCCESS:
		return "SUCCESS";
	}

	/* Out-of-range value (e.g. corrupted state) */
	return "<invalid>";
}

/* Which microcontroller a struct intel_uc_fw describes */
enum intel_uc_fw_type {
	INTEL_UC_FW_TYPE_GUC,
	INTEL_UC_FW_TYPE_HUC
};

/* User-friendly representation of an enum */
static inline const char *intel_uc_fw_type_repr(enum intel_uc_fw_type type)
{
	switch (type) {
	case INTEL_UC_FW_TYPE_GUC:
		return "GuC";
	case INTEL_UC_FW_TYPE_HUC:
		return "HuC";
	}

	/* Unknown type — fall back to the generic name */
	return "uC";
}

/*
 * This structure encapsulates all the data needed during the process
 * of fetching, caching, and loading the firmware image into the GuC.
 */
140 141 142 143 144 145 146 147 148 149 150
struct intel_uc_fw {
	const char *path;
	size_t size;
	struct drm_i915_gem_object *obj;
	enum intel_uc_fw_status fetch_status;
	enum intel_uc_fw_status load_status;

	uint16_t major_ver_wanted;
	uint16_t minor_ver_wanted;
	uint16_t major_ver_found;
	uint16_t minor_ver_found;
151

152
	enum intel_uc_fw_type type;
153 154 155 156 157 158
	uint32_t header_size;
	uint32_t header_offset;
	uint32_t rsa_size;
	uint32_t rsa_offset;
	uint32_t ucode_size;
	uint32_t ucode_offset;
159 160
};

struct intel_guc_log {
	uint32_t flags;
	struct i915_vma *vma;
164 165 166 167 168 169 170
	/* The runtime stuff gets created only when GuC logging gets enabled */
	struct {
		void *buf_addr;
		struct workqueue_struct *flush_wq;
		struct work_struct flush_work;
		struct rchan *relay_chan;
	} runtime;
171 172 173 174 175 176
	/* logging related stats */
	u32 capture_miss_count;
	u32 flush_interrupt_count;
	u32 prev_overflow_count[GUC_MAX_LOG_BUFFER];
	u32 total_overflow_count[GUC_MAX_LOG_BUFFER];
	u32 flush_count[GUC_MAX_LOG_BUFFER];
177 178
};

struct intel_guc {
180
	struct intel_uc_fw fw;
181
	struct intel_guc_log log;
182

183
	/* intel_guc_recv interrupt related state */
184 185
	bool interrupts_enabled;

186
	struct i915_vma *ads_vma;
187 188 189
	struct i915_vma *stage_desc_pool;
	void *stage_desc_pool_vaddr;
	struct ida stage_ids;
190 191 192

	struct i915_guc_client *execbuf_client;

193
	DECLARE_BITMAP(doorbell_bitmap, GUC_NUM_DOORBELLS);
194 195 196 197 198 199 200 201 202
	uint32_t db_cacheline;		/* Cyclic counter mod pagesize	*/

	/* Action status & statistics */
	uint64_t action_count;		/* Total commands issued	*/
	uint32_t action_cmd;		/* Last command word		*/
	uint32_t action_status;		/* Last return status		*/
	uint32_t action_fail;		/* Total number of failures	*/
	int32_t action_err;		/* Last error code		*/

203 204
	uint64_t submissions[I915_NUM_ENGINES];
	uint32_t last_seqno[I915_NUM_ENGINES];
205

206 207
	/* To serialize the intel_guc_send actions */
	struct mutex send_mutex;
208 209 210

	/* GuC's FW specific send function */
	int (*send)(struct intel_guc *guc, const u32 *data, u32 len);
211 212
};

/* Driver-side state for the HuC microcontroller */
struct intel_huc {
	/* Generic uC firmware management */
	struct intel_uc_fw fw;

	/* HuC-specific additions */
};

/* intel_uc.c */
void intel_uc_sanitize_options(struct drm_i915_private *dev_priv);
void intel_uc_init_early(struct drm_i915_private *dev_priv);
void intel_uc_init_fw(struct drm_i915_private *dev_priv);
void intel_uc_fini_fw(struct drm_i915_private *dev_priv);
int intel_uc_init_hw(struct drm_i915_private *dev_priv);
void intel_uc_fini_hw(struct drm_i915_private *dev_priv);
int intel_guc_sample_forcewake(struct intel_guc *guc);
int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len);

/* Send an action to the GuC via the firmware-specific backend in guc->send */
static inline int intel_guc_send(struct intel_guc *guc, const u32 *action, u32 len)
{
	return guc->send(guc, action, len);
}

/* intel_guc_loader.c */
235
int intel_guc_select_fw(struct intel_guc *guc);
236
int intel_guc_init_hw(struct intel_guc *guc);
237 238
int intel_guc_suspend(struct drm_i915_private *dev_priv);
int intel_guc_resume(struct drm_i915_private *dev_priv);
239
u32 intel_guc_wopcm_size(struct drm_i915_private *dev_priv);
240

/* i915_guc_submission.c */
242 243
int i915_guc_submission_init(struct drm_i915_private *dev_priv);
int i915_guc_submission_enable(struct drm_i915_private *dev_priv);
244
int i915_guc_wq_reserve(struct drm_i915_gem_request *rq);
245
void i915_guc_wq_unreserve(struct drm_i915_gem_request *request);
246 247
void i915_guc_submission_disable(struct drm_i915_private *dev_priv);
void i915_guc_submission_fini(struct drm_i915_private *dev_priv);
248 249 250
struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size);

/* intel_guc_log.c */
int intel_guc_log_create(struct intel_guc *guc);
void intel_guc_log_destroy(struct intel_guc *guc);
int i915_guc_log_control(struct drm_i915_private *dev_priv, u64 control_val);
void i915_guc_log_register(struct drm_i915_private *dev_priv);
void i915_guc_log_unregister(struct drm_i915_private *dev_priv);

static inline u32 guc_ggtt_offset(struct i915_vma *vma)
{
	u32 offset = i915_ggtt_offset(vma);
	GEM_BUG_ON(offset < GUC_WOPCM_TOP);
261
	GEM_BUG_ON(range_overflows_t(u64, offset, vma->size, GUC_GGTT_TOP));
262 263 264
	return offset;
}

/* intel_huc.c */
void intel_huc_select_fw(struct intel_huc *huc);
int intel_huc_init_hw(struct intel_huc *huc);
void intel_guc_auth_huc(struct drm_i915_private *dev_priv);

#endif