logo

oasis-root

Compiled tree of Oasis Linux, built from our own branch at <https://hacktivis.me/git/oasis/>. To fetch this tree: git clone https://anongit.hacktivis.me/git/oasis-root.git

amdgpu_drm.h (40377B)


  1. /* amdgpu_drm.h -- Public header for the amdgpu driver -*- linux-c -*-
  2. *
  3. * Copyright 2000 Precision Insight, Inc., Cedar Park, Texas.
  4. * Copyright 2000 VA Linux Systems, Inc., Fremont, California.
  5. * Copyright 2002 Tungsten Graphics, Inc., Cedar Park, Texas.
  6. * Copyright 2014 Advanced Micro Devices, Inc.
  7. *
  8. * Permission is hereby granted, free of charge, to any person obtaining a
  9. * copy of this software and associated documentation files (the "Software"),
  10. * to deal in the Software without restriction, including without limitation
  11. * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  12. * and/or sell copies of the Software, and to permit persons to whom the
  13. * Software is furnished to do so, subject to the following conditions:
  14. *
  15. * The above copyright notice and this permission notice shall be included in
  16. * all copies or substantial portions of the Software.
  17. *
  18. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  19. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  20. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
  21. * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  22. * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  23. * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  24. * OTHER DEALINGS IN THE SOFTWARE.
  25. *
  26. * Authors:
  27. * Kevin E. Martin <martin@valinux.com>
  28. * Gareth Hughes <gareth@valinux.com>
  29. * Keith Whitwell <keith@tungstengraphics.com>
  30. */
  31. #ifndef __AMDGPU_DRM_H__
  32. #define __AMDGPU_DRM_H__
  33. #include "drm.h"
  34. #if defined(__cplusplus)
  35. extern "C" {
  36. #endif
/* amdgpu ioctl command numbers, offsets from DRM_COMMAND_BASE.
 *
 * NOTE(review): the sequence jumps from 0x09 to 0x10 -- that gap is part of
 * the upstream UAPI numbering and must not be "fixed".
 */
#define DRM_AMDGPU_GEM_CREATE 0x00
#define DRM_AMDGPU_GEM_MMAP 0x01
#define DRM_AMDGPU_CTX 0x02
#define DRM_AMDGPU_BO_LIST 0x03
#define DRM_AMDGPU_CS 0x04
#define DRM_AMDGPU_INFO 0x05
#define DRM_AMDGPU_GEM_METADATA 0x06
#define DRM_AMDGPU_GEM_WAIT_IDLE 0x07
#define DRM_AMDGPU_GEM_VA 0x08
#define DRM_AMDGPU_WAIT_CS 0x09
#define DRM_AMDGPU_GEM_OP 0x10
#define DRM_AMDGPU_GEM_USERPTR 0x11
#define DRM_AMDGPU_WAIT_FENCES 0x12
#define DRM_AMDGPU_VM 0x13
#define DRM_AMDGPU_FENCE_TO_HANDLE 0x14
#define DRM_AMDGPU_SCHED 0x15
/* Full ioctl request codes.  DRM_IOWR requests read and write their argument
 * struct/union; DRM_IOW requests only pass data into the kernel.  The argument
 * types are defined later in this header.
 */
#define DRM_IOCTL_AMDGPU_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_CREATE, union drm_amdgpu_gem_create)
#define DRM_IOCTL_AMDGPU_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_MMAP, union drm_amdgpu_gem_mmap)
#define DRM_IOCTL_AMDGPU_CTX DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_CTX, union drm_amdgpu_ctx)
#define DRM_IOCTL_AMDGPU_BO_LIST DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_BO_LIST, union drm_amdgpu_bo_list)
#define DRM_IOCTL_AMDGPU_CS DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_CS, union drm_amdgpu_cs)
#define DRM_IOCTL_AMDGPU_INFO DRM_IOW(DRM_COMMAND_BASE + DRM_AMDGPU_INFO, struct drm_amdgpu_info)
#define DRM_IOCTL_AMDGPU_GEM_METADATA DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_METADATA, struct drm_amdgpu_gem_metadata)
#define DRM_IOCTL_AMDGPU_GEM_WAIT_IDLE DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_WAIT_IDLE, union drm_amdgpu_gem_wait_idle)
#define DRM_IOCTL_AMDGPU_GEM_VA DRM_IOW(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_VA, struct drm_amdgpu_gem_va)
#define DRM_IOCTL_AMDGPU_WAIT_CS DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_WAIT_CS, union drm_amdgpu_wait_cs)
#define DRM_IOCTL_AMDGPU_GEM_OP DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_OP, struct drm_amdgpu_gem_op)
#define DRM_IOCTL_AMDGPU_GEM_USERPTR DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_USERPTR, struct drm_amdgpu_gem_userptr)
#define DRM_IOCTL_AMDGPU_WAIT_FENCES DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_WAIT_FENCES, union drm_amdgpu_wait_fences)
#define DRM_IOCTL_AMDGPU_VM DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_VM, union drm_amdgpu_vm)
#define DRM_IOCTL_AMDGPU_FENCE_TO_HANDLE DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_FENCE_TO_HANDLE, union drm_amdgpu_fence_to_handle)
#define DRM_IOCTL_AMDGPU_SCHED DRM_IOW(DRM_COMMAND_BASE + DRM_AMDGPU_SCHED, union drm_amdgpu_sched)
/**
 * DOC: memory domains
 *
 * %AMDGPU_GEM_DOMAIN_CPU System memory that is not GPU accessible.
 * Memory in this pool could be swapped out to disk if there is pressure.
 *
 * %AMDGPU_GEM_DOMAIN_GTT GPU accessible system memory, mapped into the
 * GPU's virtual address space via gart. Gart memory linearizes non-contiguous
 * pages of system memory, allows GPU access system memory in a linearized
 * fashion.
 *
 * %AMDGPU_GEM_DOMAIN_VRAM Local video memory. For APUs, it is memory
 * carved out by the BIOS.
 *
 * %AMDGPU_GEM_DOMAIN_GDS Global on-chip data storage used to share data
 * across shader threads.
 *
 * %AMDGPU_GEM_DOMAIN_GWS Global wave sync, used to synchronize the
 * execution of all the waves on a device.
 *
 * %AMDGPU_GEM_DOMAIN_OA Ordered append, used by 3D or Compute engines
 * for appending data.
 *
 * %AMDGPU_GEM_DOMAIN_DOORBELL Doorbell. It is an MMIO region for
 * signalling user mode queues.
 */
#define AMDGPU_GEM_DOMAIN_CPU 0x1
#define AMDGPU_GEM_DOMAIN_GTT 0x2
#define AMDGPU_GEM_DOMAIN_VRAM 0x4
#define AMDGPU_GEM_DOMAIN_GDS 0x8
#define AMDGPU_GEM_DOMAIN_GWS 0x10
#define AMDGPU_GEM_DOMAIN_OA 0x20
#define AMDGPU_GEM_DOMAIN_DOORBELL 0x40
/* OR of all valid domain bits above. */
#define AMDGPU_GEM_DOMAIN_MASK (AMDGPU_GEM_DOMAIN_CPU | \
AMDGPU_GEM_DOMAIN_GTT | \
AMDGPU_GEM_DOMAIN_VRAM | \
AMDGPU_GEM_DOMAIN_GDS | \
AMDGPU_GEM_DOMAIN_GWS | \
AMDGPU_GEM_DOMAIN_OA | \
AMDGPU_GEM_DOMAIN_DOORBELL)
/* GEM create flags, passed in drm_amdgpu_gem_create_in.domain_flags. */
/* Flag that CPU access will be required for the case of VRAM domain */
#define AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED (1 << 0)
/* Flag that CPU access will not work, this VRAM domain is invisible */
#define AMDGPU_GEM_CREATE_NO_CPU_ACCESS (1 << 1)
/* Flag that USWC attributes should be used for GTT */
#define AMDGPU_GEM_CREATE_CPU_GTT_USWC (1 << 2)
/* Flag that the memory should be in VRAM and cleared */
#define AMDGPU_GEM_CREATE_VRAM_CLEARED (1 << 3)
/* NOTE(review): bit 4 is deliberately unused here (retired upstream flag);
 * do not reassign it. */
/* Flag that allocating the BO should use linear VRAM */
#define AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS (1 << 5)
/* Flag that BO is always valid in this VM */
#define AMDGPU_GEM_CREATE_VM_ALWAYS_VALID (1 << 6)
/* Flag that BO sharing will be explicitly synchronized */
#define AMDGPU_GEM_CREATE_EXPLICIT_SYNC (1 << 7)
/* Flag that indicates allocating MQD gart on GFX9, where the mtype
 * for the second page onward should be set to NC. It should never
 * be used by user space applications.
 */
#define AMDGPU_GEM_CREATE_CP_MQD_GFX9 (1 << 8)
/* Flag that BO may contain sensitive data that must be wiped before
 * releasing the memory
 */
#define AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE (1 << 9)
/* Flag that BO will be encrypted and that the TMZ bit should be
 * set in the PTEs when mapping this buffer via GPUVM or
 * accessing it with various hw blocks
 */
#define AMDGPU_GEM_CREATE_ENCRYPTED (1 << 10)
/* Flag that BO will be used only in preemptible context, which does
 * not require GTT memory accounting
 */
#define AMDGPU_GEM_CREATE_PREEMPTIBLE (1 << 11)
/* Flag that BO can be discarded under memory pressure without keeping the
 * content.
 */
#define AMDGPU_GEM_CREATE_DISCARDABLE (1 << 12)
/* Flag that BO is shared coherently between multiple devices or CPU threads.
 * May depend on GPU instructions to flush caches to system scope explicitly.
 *
 * This influences the choice of MTYPE in the PTEs on GFXv9 and later GPUs and
 * may override the MTYPE selected in AMDGPU_VA_OP_MAP.
 */
#define AMDGPU_GEM_CREATE_COHERENT (1 << 13)
/* Flag that BO should not be cached by GPU. Coherent without having to flush
 * GPU caches explicitly
 *
 * This influences the choice of MTYPE in the PTEs on GFXv9 and later GPUs and
 * may override the MTYPE selected in AMDGPU_VA_OP_MAP.
 */
#define AMDGPU_GEM_CREATE_UNCACHED (1 << 14)
/* Flag that BO should be coherent across devices when using device-level
 * atomics. May depend on GPU instructions to flush caches to device scope
 * explicitly, promoting them to system scope automatically.
 *
 * This influences the choice of MTYPE in the PTEs on GFXv9 and later GPUs and
 * may override the MTYPE selected in AMDGPU_VA_OP_MAP.
 */
#define AMDGPU_GEM_CREATE_EXT_COHERENT (1 << 15)
/* Set PTE.D and recompress during GTT->VRAM moves according to TILING flags. */
#define AMDGPU_GEM_CREATE_GFX12_DCC (1 << 16)
/* Input for DRM_IOCTL_AMDGPU_GEM_CREATE: describes the buffer to allocate. */
struct drm_amdgpu_gem_create_in {
/** the requested memory size */
__u64 bo_size;
/** physical start_addr alignment in bytes for some HW requirements */
__u64 alignment;
/** the requested memory domains (AMDGPU_GEM_DOMAIN_*) */
__u64 domains;
/** allocation flags (AMDGPU_GEM_CREATE_*) */
__u64 domain_flags;
};
/* Output for DRM_IOCTL_AMDGPU_GEM_CREATE. */
struct drm_amdgpu_gem_create_out {
/** returned GEM object handle */
__u32 handle;
/* explicit padding; keeps the struct layout identical on 32/64-bit */
__u32 _pad;
};
/* Argument of DRM_IOCTL_AMDGPU_GEM_CREATE: in before the call, out after. */
union drm_amdgpu_gem_create {
struct drm_amdgpu_gem_create_in in;
struct drm_amdgpu_gem_create_out out;
};
/** Opcode to create new residency list. */
#define AMDGPU_BO_LIST_OP_CREATE 0
/** Opcode to destroy previously created residency list */
#define AMDGPU_BO_LIST_OP_DESTROY 1
/** Opcode to update resource information in the list */
#define AMDGPU_BO_LIST_OP_UPDATE 2
/* Input for DRM_IOCTL_AMDGPU_BO_LIST. */
struct drm_amdgpu_bo_list_in {
/** Type of operation (AMDGPU_BO_LIST_OP_*) */
__u32 operation;
/** Handle of list or 0 if we want to create one */
__u32 list_handle;
/** Number of BOs in list */
__u32 bo_number;
/** Size of each element describing BO */
__u32 bo_info_size;
/** Pointer to array describing BOs (userspace address of
 * bo_number entries, each bo_info_size bytes) */
__u64 bo_info_ptr;
};
/* One entry of the array pointed to by bo_info_ptr. */
struct drm_amdgpu_bo_list_entry {
/** Handle of BO */
__u32 bo_handle;
/** New (if specified) BO priority to be used during migration */
__u32 bo_priority;
};
struct drm_amdgpu_bo_list_out {
/** Handle of resource list */
__u32 list_handle;
__u32 _pad;
};
/* Argument of DRM_IOCTL_AMDGPU_BO_LIST: in before the call, out after. */
union drm_amdgpu_bo_list {
struct drm_amdgpu_bo_list_in in;
struct drm_amdgpu_bo_list_out out;
};
/* context related */
#define AMDGPU_CTX_OP_ALLOC_CTX 1
#define AMDGPU_CTX_OP_FREE_CTX 2
#define AMDGPU_CTX_OP_QUERY_STATE 3
#define AMDGPU_CTX_OP_QUERY_STATE2 4
#define AMDGPU_CTX_OP_GET_STABLE_PSTATE 5
#define AMDGPU_CTX_OP_SET_STABLE_PSTATE 6
/* GPU reset status, reported in drm_amdgpu_ctx_out.state.reset_status */
#define AMDGPU_CTX_NO_RESET 0
/* this the context caused it */
#define AMDGPU_CTX_GUILTY_RESET 1
/* some other context caused it */
#define AMDGPU_CTX_INNOCENT_RESET 2
/* unknown cause */
#define AMDGPU_CTX_UNKNOWN_RESET 3
/* QUERY_STATE2 flags, reported in drm_amdgpu_ctx_out.state.flags */
/* indicate gpu reset occurred after ctx created */
#define AMDGPU_CTX_QUERY2_FLAGS_RESET (1<<0)
/* indicate vram lost occurred after ctx created */
#define AMDGPU_CTX_QUERY2_FLAGS_VRAMLOST (1<<1)
/* indicate some job from this context once cause gpu hang */
#define AMDGPU_CTX_QUERY2_FLAGS_GUILTY (1<<2)
/* indicate some errors are detected by RAS */
#define AMDGPU_CTX_QUERY2_FLAGS_RAS_CE (1<<3)
#define AMDGPU_CTX_QUERY2_FLAGS_RAS_UE (1<<4)
/* indicate that the reset hasn't completed yet */
#define AMDGPU_CTX_QUERY2_FLAGS_RESET_IN_PROGRESS (1<<5)
/* Context priority level (signed; more negative = lower priority) */
#define AMDGPU_CTX_PRIORITY_UNSET -2048
#define AMDGPU_CTX_PRIORITY_VERY_LOW -1023
#define AMDGPU_CTX_PRIORITY_LOW -512
#define AMDGPU_CTX_PRIORITY_NORMAL 0
/*
 * When used in struct drm_amdgpu_ctx_in, a priority above NORMAL requires
 * CAP_SYS_NICE or DRM_MASTER
 */
#define AMDGPU_CTX_PRIORITY_HIGH 512
#define AMDGPU_CTX_PRIORITY_VERY_HIGH 1023
/* select a stable profiling pstate for perfmon tools */
#define AMDGPU_CTX_STABLE_PSTATE_FLAGS_MASK 0xf
#define AMDGPU_CTX_STABLE_PSTATE_NONE 0
#define AMDGPU_CTX_STABLE_PSTATE_STANDARD 1
#define AMDGPU_CTX_STABLE_PSTATE_MIN_SCLK 2
#define AMDGPU_CTX_STABLE_PSTATE_MIN_MCLK 3
#define AMDGPU_CTX_STABLE_PSTATE_PEAK 4
/* Input for DRM_IOCTL_AMDGPU_CTX. */
struct drm_amdgpu_ctx_in {
/** AMDGPU_CTX_OP_* */
__u32 op;
/** Flags (AMDGPU_CTX_STABLE_PSTATE_* for the pstate ops) */
__u32 flags;
__u32 ctx_id;
/** AMDGPU_CTX_PRIORITY_* */
__s32 priority;
};
/* Output of DRM_IOCTL_AMDGPU_CTX; which member is valid depends on op. */
union drm_amdgpu_ctx_out {
struct {
__u32 ctx_id;
__u32 _pad;
} alloc;
struct {
/** For future use, no flags defined so far */
__u64 flags;
/** Number of resets caused by this context so far. */
__u32 hangs;
/** Reset status since the last call of the ioctl. */
__u32 reset_status;
} state;
struct {
__u32 flags;
__u32 _pad;
} pstate;
};
union drm_amdgpu_ctx {
struct drm_amdgpu_ctx_in in;
union drm_amdgpu_ctx_out out;
};
/* vm ioctl */
#define AMDGPU_VM_OP_RESERVE_VMID 1
#define AMDGPU_VM_OP_UNRESERVE_VMID 2
/* Input for DRM_IOCTL_AMDGPU_VM. */
struct drm_amdgpu_vm_in {
/** AMDGPU_VM_OP_* */
__u32 op;
__u32 flags;
};
struct drm_amdgpu_vm_out {
/** For future use, no flags defined so far */
__u64 flags;
};
/* Argument of DRM_IOCTL_AMDGPU_VM: in before the call, out after. */
union drm_amdgpu_vm {
struct drm_amdgpu_vm_in in;
struct drm_amdgpu_vm_out out;
};
/* sched ioctl */
#define AMDGPU_SCHED_OP_PROCESS_PRIORITY_OVERRIDE 1
#define AMDGPU_SCHED_OP_CONTEXT_PRIORITY_OVERRIDE 2
/* Input for DRM_IOCTL_AMDGPU_SCHED (no output; note the ioctl is DRM_IOW). */
struct drm_amdgpu_sched_in {
/* AMDGPU_SCHED_OP_* */
__u32 op;
/* DRM fd of the process whose priority is overridden */
__u32 fd;
/** AMDGPU_CTX_PRIORITY_* */
__s32 priority;
/* context to override; used by CONTEXT_PRIORITY_OVERRIDE */
__u32 ctx_id;
};
union drm_amdgpu_sched {
struct drm_amdgpu_sched_in in;
};
/*
 * This is not a reliable API and you should expect it to fail for any
 * number of reasons and have fallback path that do not use userptr to
 * perform any operation.
 */
#define AMDGPU_GEM_USERPTR_READONLY (1 << 0)
#define AMDGPU_GEM_USERPTR_ANONONLY (1 << 1)
#define AMDGPU_GEM_USERPTR_VALIDATE (1 << 2)
#define AMDGPU_GEM_USERPTR_REGISTER (1 << 3)
/* Argument of DRM_IOCTL_AMDGPU_GEM_USERPTR: wraps a user memory range
 * in a GEM object. */
struct drm_amdgpu_gem_userptr {
/* start of the user address range */
__u64 addr;
/* size of the range in bytes */
__u64 size;
/* AMDGPU_GEM_USERPTR_* */
__u32 flags;
/* Resulting GEM handle */
__u32 handle;
};
/* Tiling flags layout depends on the GPU generation; the three groups below
 * overlap because only one layout is used per BO.  Combine fields with the
 * AMDGPU_TILING_SET/GET helpers at the end of this group.
 */
/* SI-CI-VI: */
/* same meaning as the GB_TILE_MODE and GL_MACRO_TILE_MODE fields */
#define AMDGPU_TILING_ARRAY_MODE_SHIFT 0
#define AMDGPU_TILING_ARRAY_MODE_MASK 0xf
#define AMDGPU_TILING_PIPE_CONFIG_SHIFT 4
#define AMDGPU_TILING_PIPE_CONFIG_MASK 0x1f
#define AMDGPU_TILING_TILE_SPLIT_SHIFT 9
#define AMDGPU_TILING_TILE_SPLIT_MASK 0x7
#define AMDGPU_TILING_MICRO_TILE_MODE_SHIFT 12
#define AMDGPU_TILING_MICRO_TILE_MODE_MASK 0x7
#define AMDGPU_TILING_BANK_WIDTH_SHIFT 15
#define AMDGPU_TILING_BANK_WIDTH_MASK 0x3
#define AMDGPU_TILING_BANK_HEIGHT_SHIFT 17
#define AMDGPU_TILING_BANK_HEIGHT_MASK 0x3
#define AMDGPU_TILING_MACRO_TILE_ASPECT_SHIFT 19
#define AMDGPU_TILING_MACRO_TILE_ASPECT_MASK 0x3
#define AMDGPU_TILING_NUM_BANKS_SHIFT 21
#define AMDGPU_TILING_NUM_BANKS_MASK 0x3
/* GFX9 - GFX11: */
#define AMDGPU_TILING_SWIZZLE_MODE_SHIFT 0
#define AMDGPU_TILING_SWIZZLE_MODE_MASK 0x1f
#define AMDGPU_TILING_DCC_OFFSET_256B_SHIFT 5
#define AMDGPU_TILING_DCC_OFFSET_256B_MASK 0xFFFFFF
#define AMDGPU_TILING_DCC_PITCH_MAX_SHIFT 29
#define AMDGPU_TILING_DCC_PITCH_MAX_MASK 0x3FFF
#define AMDGPU_TILING_DCC_INDEPENDENT_64B_SHIFT 43
#define AMDGPU_TILING_DCC_INDEPENDENT_64B_MASK 0x1
#define AMDGPU_TILING_DCC_INDEPENDENT_128B_SHIFT 44
#define AMDGPU_TILING_DCC_INDEPENDENT_128B_MASK 0x1
#define AMDGPU_TILING_SCANOUT_SHIFT 63
#define AMDGPU_TILING_SCANOUT_MASK 0x1
/* GFX12 and later: */
#define AMDGPU_TILING_GFX12_SWIZZLE_MODE_SHIFT 0
#define AMDGPU_TILING_GFX12_SWIZZLE_MODE_MASK 0x7
/* These are DCC recompression setting for memory management: */
#define AMDGPU_TILING_GFX12_DCC_MAX_COMPRESSED_BLOCK_SHIFT 3
#define AMDGPU_TILING_GFX12_DCC_MAX_COMPRESSED_BLOCK_MASK 0x3 /* 0:64B, 1:128B, 2:256B */
#define AMDGPU_TILING_GFX12_DCC_NUMBER_TYPE_SHIFT 5
#define AMDGPU_TILING_GFX12_DCC_NUMBER_TYPE_MASK 0x7 /* CB_COLOR0_INFO.NUMBER_TYPE */
#define AMDGPU_TILING_GFX12_DCC_DATA_FORMAT_SHIFT 8
#define AMDGPU_TILING_GFX12_DCC_DATA_FORMAT_MASK 0x3f /* [0:4]:CB_COLOR0_INFO.FORMAT, [5]:MM */
/* Set/Get helpers for tiling flags.
 * SET masks the value first, then shifts it into place; GET shifts first,
 * then masks.  Both operate in __u64 so fields above bit 31 work.
 */
#define AMDGPU_TILING_SET(field, value) \
(((__u64)(value) & AMDGPU_TILING_##field##_MASK) << AMDGPU_TILING_##field##_SHIFT)
#define AMDGPU_TILING_GET(value, field) \
(((__u64)(value) >> AMDGPU_TILING_##field##_SHIFT) & AMDGPU_TILING_##field##_MASK)
#define AMDGPU_GEM_METADATA_OP_SET_METADATA 1
#define AMDGPU_GEM_METADATA_OP_GET_METADATA 2
/** The same structure is shared for input/output */
struct drm_amdgpu_gem_metadata {
/** GEM Object handle */
__u32 handle;
/** Do we want get or set metadata (AMDGPU_GEM_METADATA_OP_*) */
__u32 op;
struct {
/** For future use, no flags defined so far */
__u64 flags;
/** family specific tiling info (see AMDGPU_TILING_* above) */
__u64 tiling_info;
/* number of valid bytes in data[] */
__u32 data_size_bytes;
/* opaque metadata blob, at most 256 bytes */
__u32 data[64];
} data;
};
/* Input for DRM_IOCTL_AMDGPU_GEM_MMAP. */
struct drm_amdgpu_gem_mmap_in {
/** the GEM object handle */
__u32 handle;
__u32 _pad;
};
struct drm_amdgpu_gem_mmap_out {
/** mmap offset from the vma offset manager
 * (pass as the offset argument to mmap(2) on the DRM fd) */
__u64 addr_ptr;
};
/* Argument of DRM_IOCTL_AMDGPU_GEM_MMAP: in before the call, out after. */
union drm_amdgpu_gem_mmap {
struct drm_amdgpu_gem_mmap_in in;
struct drm_amdgpu_gem_mmap_out out;
};
/* Input for DRM_IOCTL_AMDGPU_GEM_WAIT_IDLE. */
struct drm_amdgpu_gem_wait_idle_in {
/** GEM object handle */
__u32 handle;
/** For future use, no flags defined so far */
__u32 flags;
/** Absolute timeout to wait */
__u64 timeout;
};
struct drm_amdgpu_gem_wait_idle_out {
/** BO status: 0 - BO is idle, 1 - BO is busy */
__u32 status;
/** Returned current memory domain */
__u32 domain;
};
/* Argument of DRM_IOCTL_AMDGPU_GEM_WAIT_IDLE: in before the call, out after. */
union drm_amdgpu_gem_wait_idle {
struct drm_amdgpu_gem_wait_idle_in in;
struct drm_amdgpu_gem_wait_idle_out out;
};
/* Input for DRM_IOCTL_AMDGPU_WAIT_CS. */
struct drm_amdgpu_wait_cs_in {
/* Command submission handle
 * handle equals 0 means none to wait for
 * handle equals ~0ull means wait for the latest sequence number
 */
__u64 handle;
/** Absolute timeout to wait */
__u64 timeout;
/* AMDGPU_HW_IP_* ring identification, same as in the CS ioctl */
__u32 ip_type;
__u32 ip_instance;
__u32 ring;
__u32 ctx_id;
};
struct drm_amdgpu_wait_cs_out {
/** CS status: 0 - CS completed, 1 - CS still busy */
__u64 status;
};
/* Argument of DRM_IOCTL_AMDGPU_WAIT_CS: in before the call, out after. */
union drm_amdgpu_wait_cs {
struct drm_amdgpu_wait_cs_in in;
struct drm_amdgpu_wait_cs_out out;
};
/* Identifies one fence: a sequence number on a specific context/ring. */
struct drm_amdgpu_fence {
__u32 ctx_id;
/* AMDGPU_HW_IP_* */
__u32 ip_type;
__u32 ip_instance;
__u32 ring;
__u64 seq_no;
};
/* Input for DRM_IOCTL_AMDGPU_WAIT_FENCES. */
struct drm_amdgpu_wait_fences_in {
/** This points to uint64_t * which points to fences
 * (userspace array of struct drm_amdgpu_fence) */
__u64 fences;
__u32 fence_count;
/* non-zero: wait for all fences; zero: wait for any one */
__u32 wait_all;
__u64 timeout_ns;
};
struct drm_amdgpu_wait_fences_out {
/* 0 - fence(s) signaled, 1 - timeout */
__u32 status;
/* index of the first signaled fence (meaningful when !wait_all) */
__u32 first_signaled;
};
/* Argument of DRM_IOCTL_AMDGPU_WAIT_FENCES: in before the call, out after. */
union drm_amdgpu_wait_fences {
struct drm_amdgpu_wait_fences_in in;
struct drm_amdgpu_wait_fences_out out;
};
#define AMDGPU_GEM_OP_GET_GEM_CREATE_INFO 0
#define AMDGPU_GEM_OP_SET_PLACEMENT 1
/* Sets or returns a value associated with a buffer. */
struct drm_amdgpu_gem_op {
/** GEM object handle */
__u32 handle;
/** AMDGPU_GEM_OP_* */
__u32 op;
/** Input or return value, depending on op */
__u64 value;
};
/* GPU virtual-address map operations (DRM_IOCTL_AMDGPU_GEM_VA) */
#define AMDGPU_VA_OP_MAP 1
#define AMDGPU_VA_OP_UNMAP 2
#define AMDGPU_VA_OP_CLEAR 3
#define AMDGPU_VA_OP_REPLACE 4
/* Delay the page table update till the next CS */
#define AMDGPU_VM_DELAY_UPDATE (1 << 0)
/* Mapping flags */
/* readable mapping */
#define AMDGPU_VM_PAGE_READABLE (1 << 1)
/* writable mapping */
#define AMDGPU_VM_PAGE_WRITEABLE (1 << 2)
/* executable mapping, new for VI */
#define AMDGPU_VM_PAGE_EXECUTABLE (1 << 3)
/* partially resident texture */
#define AMDGPU_VM_PAGE_PRT (1 << 4)
/* MTYPE flags use bit 5 to 8 */
#define AMDGPU_VM_MTYPE_MASK (0xf << 5)
/* Default MTYPE. Pre-AI must use this. Recommended for newer ASICs. */
#define AMDGPU_VM_MTYPE_DEFAULT (0 << 5)
/* Use Non Coherent MTYPE instead of default MTYPE */
#define AMDGPU_VM_MTYPE_NC (1 << 5)
/* Use Write Combine MTYPE instead of default MTYPE */
#define AMDGPU_VM_MTYPE_WC (2 << 5)
/* Use Cache Coherent MTYPE instead of default MTYPE */
#define AMDGPU_VM_MTYPE_CC (3 << 5)
/* Use UnCached MTYPE instead of default MTYPE */
#define AMDGPU_VM_MTYPE_UC (4 << 5)
/* Use Read Write MTYPE instead of default MTYPE */
#define AMDGPU_VM_MTYPE_RW (5 << 5)
/* don't allocate MALL */
#define AMDGPU_VM_PAGE_NOALLOC (1 << 9)
/* Argument of DRM_IOCTL_AMDGPU_GEM_VA (input only; note DRM_IOW). */
struct drm_amdgpu_gem_va {
/** GEM object handle */
__u32 handle;
__u32 _pad;
/** AMDGPU_VA_OP_* */
__u32 operation;
/** AMDGPU_VM_PAGE_* */
__u32 flags;
/** va address to assign . Must be correctly aligned.*/
__u64 va_address;
/** Specify offset inside of BO to assign. Must be correctly aligned.*/
__u64 offset_in_bo;
/** Specify mapping size. Must be correctly aligned. */
__u64 map_size;
};
/* Hardware IP (engine) types for command submission and queries */
#define AMDGPU_HW_IP_GFX 0
#define AMDGPU_HW_IP_COMPUTE 1
#define AMDGPU_HW_IP_DMA 2
#define AMDGPU_HW_IP_UVD 3
#define AMDGPU_HW_IP_VCE 4
#define AMDGPU_HW_IP_UVD_ENC 5
#define AMDGPU_HW_IP_VCN_DEC 6
/*
 * From VCN4, AMDGPU_HW_IP_VCN_ENC is re-used to support
 * both encoding and decoding jobs.
 */
#define AMDGPU_HW_IP_VCN_ENC 7
#define AMDGPU_HW_IP_VCN_JPEG 8
#define AMDGPU_HW_IP_VPE 9
#define AMDGPU_HW_IP_NUM 10
#define AMDGPU_HW_IP_INSTANCE_MAX_COUNT 1
/* Chunk types for the CS ioctl; each chunk carries a typed payload */
#define AMDGPU_CHUNK_ID_IB 0x01
#define AMDGPU_CHUNK_ID_FENCE 0x02
#define AMDGPU_CHUNK_ID_DEPENDENCIES 0x03
#define AMDGPU_CHUNK_ID_SYNCOBJ_IN 0x04
#define AMDGPU_CHUNK_ID_SYNCOBJ_OUT 0x05
#define AMDGPU_CHUNK_ID_BO_HANDLES 0x06
#define AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES 0x07
#define AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT 0x08
#define AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL 0x09
#define AMDGPU_CHUNK_ID_CP_GFX_SHADOW 0x0a
/* Header of one CS chunk; chunk_data points to the typed payload. */
struct drm_amdgpu_cs_chunk {
/* AMDGPU_CHUNK_ID_* */
__u32 chunk_id;
/* payload length in dwords */
__u32 length_dw;
/* userspace pointer to the payload */
__u64 chunk_data;
};
/* Input for DRM_IOCTL_AMDGPU_CS. */
struct drm_amdgpu_cs_in {
/** Rendering context id */
__u32 ctx_id;
/** Handle of resource list associated with CS */
__u32 bo_list_handle;
__u32 num_chunks;
__u32 flags;
/** this points to __u64 * which point to cs chunks */
__u64 chunks;
};
struct drm_amdgpu_cs_out {
/* sequence number of the submission, usable with WAIT_CS */
__u64 handle;
};
/* Argument of DRM_IOCTL_AMDGPU_CS: in before the call, out after. */
union drm_amdgpu_cs {
struct drm_amdgpu_cs_in in;
struct drm_amdgpu_cs_out out;
};
/* Specify flags to be used for IB */
/* This IB should be submitted to CE */
#define AMDGPU_IB_FLAG_CE (1<<0)
/* Preamble flag, which means the IB could be dropped if no context switch */
#define AMDGPU_IB_FLAG_PREAMBLE (1<<1)
/* Preempt flag, IB should set Pre_enb bit if PREEMPT flag detected */
#define AMDGPU_IB_FLAG_PREEMPT (1<<2)
/* The IB fence should do the L2 writeback but not invalidate any shader
 * caches (L2/vL1/sL1/I$). */
#define AMDGPU_IB_FLAG_TC_WB_NOT_INVALIDATE (1 << 3)
/* Set GDS_COMPUTE_MAX_WAVE_ID = DEFAULT before PACKET3_INDIRECT_BUFFER.
 * This will reset wave ID counters for the IB.
 */
#define AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID (1 << 4)
/* Flag the IB as secure (TMZ)
 */
#define AMDGPU_IB_FLAGS_SECURE (1 << 5)
/* Tell KMD to flush and invalidate caches
 */
#define AMDGPU_IB_FLAG_EMIT_MEM_SYNC (1 << 6)
/* Payload of an AMDGPU_CHUNK_ID_IB chunk: one indirect buffer to execute. */
struct drm_amdgpu_cs_chunk_ib {
__u32 _pad;
/** AMDGPU_IB_FLAG_* */
__u32 flags;
/** Virtual address to begin IB execution */
__u64 va_start;
/** Size of submission */
__u32 ib_bytes;
/** HW IP to submit to (AMDGPU_HW_IP_*) */
__u32 ip_type;
/** HW IP index of the same type to submit to */
__u32 ip_instance;
/** Ring index to submit to */
__u32 ring;
};
/* Payload of an AMDGPU_CHUNK_ID_DEPENDENCIES chunk entry. */
struct drm_amdgpu_cs_chunk_dep {
__u32 ip_type;
__u32 ip_instance;
__u32 ring;
__u32 ctx_id;
/* sequence number to depend on */
__u64 handle;
};
/* Payload of an AMDGPU_CHUNK_ID_FENCE chunk. */
struct drm_amdgpu_cs_chunk_fence {
/* GEM handle of the BO the fence is written to */
__u32 handle;
/* offset within that BO */
__u32 offset;
};
/* Payload of a SYNCOBJ_IN/SYNCOBJ_OUT chunk entry. */
struct drm_amdgpu_cs_chunk_sem {
__u32 handle;
};
/* Payload of a SYNCOBJ_TIMELINE_WAIT/SIGNAL chunk entry. */
struct drm_amdgpu_cs_chunk_syncobj {
__u32 handle;
__u32 flags;
/* timeline point on the syncobj */
__u64 point;
};
/* What DRM_IOCTL_AMDGPU_FENCE_TO_HANDLE should convert the fence into */
#define AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ 0
#define AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ_FD 1
#define AMDGPU_FENCE_TO_HANDLE_GET_SYNC_FILE_FD 2
/* Argument of DRM_IOCTL_AMDGPU_FENCE_TO_HANDLE. */
union drm_amdgpu_fence_to_handle {
struct {
struct drm_amdgpu_fence fence;
/* AMDGPU_FENCE_TO_HANDLE_GET_* */
__u32 what;
__u32 pad;
} in;
struct {
/* syncobj handle or fd, depending on `what` */
__u32 handle;
} out;
};
/* Convenience view of IB/FENCE chunk payloads. */
struct drm_amdgpu_cs_chunk_data {
union {
struct drm_amdgpu_cs_chunk_ib ib_data;
struct drm_amdgpu_cs_chunk_fence fence_data;
};
};
#define AMDGPU_CS_CHUNK_CP_GFX_SHADOW_FLAGS_INIT_SHADOW 0x1
/* Payload of an AMDGPU_CHUNK_ID_CP_GFX_SHADOW chunk. */
struct drm_amdgpu_cs_chunk_cp_gfx_shadow {
__u64 shadow_va;
__u64 csa_va;
__u64 gds_va;
/* AMDGPU_CS_CHUNK_CP_GFX_SHADOW_FLAGS_* */
__u64 flags;
};
  665. /*
  666. * Query h/w info: Flag that this is integrated (a.h.a. fusion) GPU
  667. *
  668. */
  669. #define AMDGPU_IDS_FLAGS_FUSION 0x1
  670. #define AMDGPU_IDS_FLAGS_PREEMPTION 0x2
  671. #define AMDGPU_IDS_FLAGS_TMZ 0x4
  672. #define AMDGPU_IDS_FLAGS_CONFORMANT_TRUNC_COORD 0x8
  673. /* indicate if acceleration can be working */
  674. #define AMDGPU_INFO_ACCEL_WORKING 0x00
  675. /* get the crtc_id from the mode object id? */
  676. #define AMDGPU_INFO_CRTC_FROM_ID 0x01
  677. /* query hw IP info */
  678. #define AMDGPU_INFO_HW_IP_INFO 0x02
  679. /* query hw IP instance count for the specified type */
  680. #define AMDGPU_INFO_HW_IP_COUNT 0x03
  681. /* timestamp for GL_ARB_timer_query */
  682. #define AMDGPU_INFO_TIMESTAMP 0x05
  683. /* Query the firmware version */
  684. #define AMDGPU_INFO_FW_VERSION 0x0e
  685. /* Subquery id: Query VCE firmware version */
  686. #define AMDGPU_INFO_FW_VCE 0x1
  687. /* Subquery id: Query UVD firmware version */
  688. #define AMDGPU_INFO_FW_UVD 0x2
  689. /* Subquery id: Query GMC firmware version */
  690. #define AMDGPU_INFO_FW_GMC 0x03
  691. /* Subquery id: Query GFX ME firmware version */
  692. #define AMDGPU_INFO_FW_GFX_ME 0x04
  693. /* Subquery id: Query GFX PFP firmware version */
  694. #define AMDGPU_INFO_FW_GFX_PFP 0x05
  695. /* Subquery id: Query GFX CE firmware version */
  696. #define AMDGPU_INFO_FW_GFX_CE 0x06
  697. /* Subquery id: Query GFX RLC firmware version */
  698. #define AMDGPU_INFO_FW_GFX_RLC 0x07
  699. /* Subquery id: Query GFX MEC firmware version */
  700. #define AMDGPU_INFO_FW_GFX_MEC 0x08
  701. /* Subquery id: Query SMC firmware version */
  702. #define AMDGPU_INFO_FW_SMC 0x0a
  703. /* Subquery id: Query SDMA firmware version */
  704. #define AMDGPU_INFO_FW_SDMA 0x0b
  705. /* Subquery id: Query PSP SOS firmware version */
  706. #define AMDGPU_INFO_FW_SOS 0x0c
  707. /* Subquery id: Query PSP ASD firmware version */
  708. #define AMDGPU_INFO_FW_ASD 0x0d
  709. /* Subquery id: Query VCN firmware version */
  710. #define AMDGPU_INFO_FW_VCN 0x0e
  711. /* Subquery id: Query GFX RLC SRLC firmware version */
  712. #define AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_CNTL 0x0f
  713. /* Subquery id: Query GFX RLC SRLG firmware version */
  714. #define AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_GPM_MEM 0x10
  715. /* Subquery id: Query GFX RLC SRLS firmware version */
  716. #define AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_SRM_MEM 0x11
  717. /* Subquery id: Query DMCU firmware version */
  718. #define AMDGPU_INFO_FW_DMCU 0x12
  719. #define AMDGPU_INFO_FW_TA 0x13
  720. /* Subquery id: Query DMCUB firmware version */
  721. #define AMDGPU_INFO_FW_DMCUB 0x14
  722. /* Subquery id: Query TOC firmware version */
  723. #define AMDGPU_INFO_FW_TOC 0x15
  724. /* Subquery id: Query CAP firmware version */
  725. #define AMDGPU_INFO_FW_CAP 0x16
  726. /* Subquery id: Query GFX RLCP firmware version */
  727. #define AMDGPU_INFO_FW_GFX_RLCP 0x17
  728. /* Subquery id: Query GFX RLCV firmware version */
  729. #define AMDGPU_INFO_FW_GFX_RLCV 0x18
  730. /* Subquery id: Query MES_KIQ firmware version */
  731. #define AMDGPU_INFO_FW_MES_KIQ 0x19
  732. /* Subquery id: Query MES firmware version */
  733. #define AMDGPU_INFO_FW_MES 0x1a
  734. /* Subquery id: Query IMU firmware version */
  735. #define AMDGPU_INFO_FW_IMU 0x1b
  736. /* Subquery id: Query VPE firmware version */
  737. #define AMDGPU_INFO_FW_VPE 0x1c
/*
 * Top-level query ids for the INFO ioctl, passed in
 * struct drm_amdgpu_info::query (see below).  Some queries take a
 * subquery id in the union member selected by the query.
 */
/* number of bytes moved for TTM migration */
#define AMDGPU_INFO_NUM_BYTES_MOVED 0x0f
/* the used VRAM size */
#define AMDGPU_INFO_VRAM_USAGE 0x10
/* the used GTT size */
#define AMDGPU_INFO_GTT_USAGE 0x11
/* Information about GDS, etc. resource configuration */
/* NOTE(review): 0x12 is not defined in this section — presumably
 * reserved/retired; confirm against the upstream header. */
#define AMDGPU_INFO_GDS_CONFIG 0x13
/* Query information about VRAM and GTT domains */
#define AMDGPU_INFO_VRAM_GTT 0x14
/* Query information about register in MMR address space */
#define AMDGPU_INFO_READ_MMR_REG 0x15
/* Query information about device: rev id, family, etc. */
#define AMDGPU_INFO_DEV_INFO 0x16
/* visible vram usage */
#define AMDGPU_INFO_VIS_VRAM_USAGE 0x17
/* number of TTM buffer evictions */
#define AMDGPU_INFO_NUM_EVICTIONS 0x18
/* Query memory about VRAM and GTT domains */
#define AMDGPU_INFO_MEMORY 0x19
/* Query vce clock table */
#define AMDGPU_INFO_VCE_CLOCK_TABLE 0x1A
/* Query vbios related information */
#define AMDGPU_INFO_VBIOS 0x1B
	/* Subquery id: Query vbios size */
	#define AMDGPU_INFO_VBIOS_SIZE 0x1
	/* Subquery id: Query vbios image */
	#define AMDGPU_INFO_VBIOS_IMAGE 0x2
	/* Subquery id: Query vbios info */
	#define AMDGPU_INFO_VBIOS_INFO 0x3
/* Query UVD handles */
#define AMDGPU_INFO_NUM_HANDLES 0x1C
/* Query sensor related information */
#define AMDGPU_INFO_SENSOR 0x1D
	/* Subquery id: Query GPU shader clock */
	#define AMDGPU_INFO_SENSOR_GFX_SCLK 0x1
	/* Subquery id: Query GPU memory clock */
	#define AMDGPU_INFO_SENSOR_GFX_MCLK 0x2
	/* Subquery id: Query GPU temperature */
	#define AMDGPU_INFO_SENSOR_GPU_TEMP 0x3
	/* Subquery id: Query GPU load */
	#define AMDGPU_INFO_SENSOR_GPU_LOAD 0x4
	/* Subquery id: Query average GPU power */
	#define AMDGPU_INFO_SENSOR_GPU_AVG_POWER 0x5
	/* Subquery id: Query northbridge voltage */
	#define AMDGPU_INFO_SENSOR_VDDNB 0x6
	/* Subquery id: Query graphics voltage */
	#define AMDGPU_INFO_SENSOR_VDDGFX 0x7
	/* Subquery id: Query GPU stable pstate shader clock */
	#define AMDGPU_INFO_SENSOR_STABLE_PSTATE_GFX_SCLK 0x8
	/* Subquery id: Query GPU stable pstate memory clock */
	#define AMDGPU_INFO_SENSOR_STABLE_PSTATE_GFX_MCLK 0x9
	/* Subquery id: Query GPU peak pstate shader clock */
	#define AMDGPU_INFO_SENSOR_PEAK_PSTATE_GFX_SCLK 0xa
	/* Subquery id: Query GPU peak pstate memory clock */
	#define AMDGPU_INFO_SENSOR_PEAK_PSTATE_GFX_MCLK 0xb
	/* Subquery id: Query input GPU power */
	#define AMDGPU_INFO_SENSOR_GPU_INPUT_POWER 0xc
/* Number of VRAM page faults on CPU access. */
#define AMDGPU_INFO_NUM_VRAM_CPU_PAGE_FAULTS 0x1E
#define AMDGPU_INFO_VRAM_LOST_COUNTER 0x1F
/* query ras mask of enabled features */
#define AMDGPU_INFO_RAS_ENABLED_FEATURES 0x20
	/* Bit flags returned by the RAS query above — one bit per IP block. */
	/* RAS MASK: UMC (VRAM) */
	#define AMDGPU_INFO_RAS_ENABLED_UMC (1 << 0)
	/* RAS MASK: SDMA */
	#define AMDGPU_INFO_RAS_ENABLED_SDMA (1 << 1)
	/* RAS MASK: GFX */
	#define AMDGPU_INFO_RAS_ENABLED_GFX (1 << 2)
	/* RAS MASK: MMHUB */
	#define AMDGPU_INFO_RAS_ENABLED_MMHUB (1 << 3)
	/* RAS MASK: ATHUB */
	#define AMDGPU_INFO_RAS_ENABLED_ATHUB (1 << 4)
	/* RAS MASK: PCIE */
	#define AMDGPU_INFO_RAS_ENABLED_PCIE (1 << 5)
	/* RAS MASK: HDP */
	#define AMDGPU_INFO_RAS_ENABLED_HDP (1 << 6)
	/* RAS MASK: XGMI */
	#define AMDGPU_INFO_RAS_ENABLED_XGMI (1 << 7)
	/* RAS MASK: DF */
	#define AMDGPU_INFO_RAS_ENABLED_DF (1 << 8)
	/* RAS MASK: SMN */
	#define AMDGPU_INFO_RAS_ENABLED_SMN (1 << 9)
	/* RAS MASK: SEM */
	#define AMDGPU_INFO_RAS_ENABLED_SEM (1 << 10)
	/* RAS MASK: MP0 */
	#define AMDGPU_INFO_RAS_ENABLED_MP0 (1 << 11)
	/* RAS MASK: MP1 */
	#define AMDGPU_INFO_RAS_ENABLED_MP1 (1 << 12)
	/* RAS MASK: FUSE */
	#define AMDGPU_INFO_RAS_ENABLED_FUSE (1 << 13)
/* query video encode/decode caps */
#define AMDGPU_INFO_VIDEO_CAPS 0x21
	/* Subquery id: Decode */
	#define AMDGPU_INFO_VIDEO_CAPS_DECODE 0
	/* Subquery id: Encode */
	#define AMDGPU_INFO_VIDEO_CAPS_ENCODE 1
/* Query the max number of IBs per gang per submission */
#define AMDGPU_INFO_MAX_IBS 0x22
/* query last page fault info */
#define AMDGPU_INFO_GPUVM_FAULT 0x23

/* Field encoding of drm_amdgpu_info::read_mmr_reg.instance:
 * bits 7:0 = shader-engine index, bits 15:8 = shader-array index. */
#define AMDGPU_INFO_MMR_SE_INDEX_SHIFT 0
#define AMDGPU_INFO_MMR_SE_INDEX_MASK 0xff
#define AMDGPU_INFO_MMR_SH_INDEX_SHIFT 8
#define AMDGPU_INFO_MMR_SH_INDEX_MASK 0xff
/* Selects which firmware's version to report (AMDGPU_INFO_FW_VERSION). */
struct drm_amdgpu_query_fw {
	/** AMDGPU_INFO_FW_* — which firmware to query */
	__u32 fw_type;
	/**
	 * Index of the IP if there are more IPs of
	 * the same type.
	 */
	__u32 ip_instance;
	/**
	 * Index of the engine. Whether this is used depends
	 * on the firmware type. (e.g. MEC, SDMA)
	 */
	__u32 index;
	__u32 _pad;	/* explicit padding: keeps the struct 8-byte sized identically on all ABIs */
};
/* Input structure for the INFO ioctl.
 *
 * The kernel writes at most @return_size bytes of the answer to the
 * user pointer @return_pointer.  @query selects both the question and
 * which member of the union below carries its parameters.
 */
struct drm_amdgpu_info {
	/* Where the return value will be stored */
	__u64 return_pointer;
	/* The size of the return value. Just like "size" in "snprintf",
	 * it limits how many bytes the kernel can write. */
	__u32 return_size;
	/* The query request id (AMDGPU_INFO_*). */
	__u32 query;

	union {
		struct {
			__u32 id;
			__u32 _pad;
		} mode_crtc;

		struct {
			/** AMDGPU_HW_IP_* */
			__u32 type;
			/**
			 * Index of the IP if there are more IPs of the same
			 * type. Ignored by AMDGPU_INFO_HW_IP_COUNT.
			 */
			__u32 ip_instance;
		} query_hw_ip;

		struct {
			__u32 dword_offset;
			/** number of registers to read */
			__u32 count;
			/* SE/SH selector, encoded with AMDGPU_INFO_MMR_*_INDEX_* above */
			__u32 instance;
			/** For future use, no flags defined so far */
			__u32 flags;
		} read_mmr_reg;

		struct drm_amdgpu_query_fw query_fw;

		struct {
			/* AMDGPU_INFO_VBIOS_* subquery id */
			__u32 type;
			__u32 offset;
		} vbios_info;

		struct {
			/* AMDGPU_INFO_SENSOR_* subquery id */
			__u32 type;
		} sensor_info;

		struct {
			/* AMDGPU_INFO_VIDEO_CAPS_* subquery id */
			__u32 type;
		} video_cap;
	};
};
/* Output for AMDGPU_INFO_GDS_CONFIG: GDS/GWS/OA resource partitioning. */
struct drm_amdgpu_info_gds {
	/** GDS GFX partition size */
	__u32 gds_gfx_partition_size;
	/** GDS compute partition size */
	__u32 compute_partition_size;
	/** total GDS memory size */
	__u32 gds_total_size;
	/** GWS size per GFX partition */
	__u32 gws_per_gfx_partition;
	/** GWS size per compute partition */
	__u32 gws_per_compute_partition;
	/** OA size per GFX partition */
	__u32 oa_per_gfx_partition;
	/** OA size per compute partition */
	__u32 oa_per_compute_partition;
	__u32 _pad;	/* explicit padding to an 8-byte multiple */
};
/* Output for AMDGPU_INFO_VRAM_GTT: sizes of the memory domains, in bytes. */
struct drm_amdgpu_info_vram_gtt {
	__u64 vram_size;
	/* portion of VRAM that the CPU can map (the "visible" BAR aperture) */
	__u64 vram_cpu_accessible_size;
	__u64 gtt_size;
};
/* Per-heap statistics, embedded in struct drm_amdgpu_memory_info below. */
struct drm_amdgpu_heap_info {
	/** max. physical memory */
	__u64 total_heap_size;
	/** Theoretical max. available memory in the given heap */
	__u64 usable_heap_size;
	/**
	 * Number of bytes allocated in the heap. This includes all processes
	 * and private allocations in the kernel. It changes when new buffers
	 * are allocated, freed, and moved. It cannot be larger than
	 * heap_size.
	 */
	__u64 heap_usage;
	/**
	 * Theoretical possible max. size of buffer which
	 * could be allocated in the given heap
	 */
	__u64 max_allocation;
};
/* Output for AMDGPU_INFO_MEMORY: statistics for all three memory heaps. */
struct drm_amdgpu_memory_info {
	struct drm_amdgpu_heap_info vram;
	struct drm_amdgpu_heap_info cpu_accessible_vram;
	struct drm_amdgpu_heap_info gtt;
};
/* Output for the firmware-version query: version and feature level. */
struct drm_amdgpu_info_firmware {
	__u32 ver;
	__u32 feature;
};
/* Output for AMDGPU_INFO_VBIOS / AMDGPU_INFO_VBIOS_INFO.
 * All strings are fixed-size byte arrays; NUL termination is not
 * guaranteed by the layout — TODO(review) confirm kernel always
 * terminates them before copy-out. */
struct drm_amdgpu_info_vbios {
	__u8 name[64];
	__u8 vbios_pn[64];	/* part number */
	__u32 version;
	__u32 pad;		/* explicit padding for 8-byte alignment of the tail */
	__u8 vbios_ver_str[32];
	__u8 date[32];
};
/* Memory technology reported in drm_amdgpu_info_device::vram_type. */
#define AMDGPU_VRAM_TYPE_UNKNOWN 0
#define AMDGPU_VRAM_TYPE_GDDR1 1
#define AMDGPU_VRAM_TYPE_DDR2  2
#define AMDGPU_VRAM_TYPE_GDDR3 3
#define AMDGPU_VRAM_TYPE_GDDR4 4
#define AMDGPU_VRAM_TYPE_GDDR5 5
#define AMDGPU_VRAM_TYPE_HBM   6
#define AMDGPU_VRAM_TYPE_DDR3  7
#define AMDGPU_VRAM_TYPE_DDR4  8
#define AMDGPU_VRAM_TYPE_GDDR6 9
#define AMDGPU_VRAM_TYPE_DDR5  10
#define AMDGPU_VRAM_TYPE_LPDDR4 11
#define AMDGPU_VRAM_TYPE_LPDDR5 12
/* Output for AMDGPU_INFO_DEV_INFO: static device/chip capabilities.
 * Fields are append-only; newer kernels may fill fields older userspace
 * does not know about (and vice versa, gated by return_size). */
struct drm_amdgpu_info_device {
	/** PCI Device ID */
	__u32 device_id;
	/** Internal chip revision: A0, A1, etc. */
	__u32 chip_rev;
	__u32 external_rev;
	/** Revision id in PCI Config space */
	__u32 pci_rev;
	__u32 family;			/* AMDGPU_FAMILY_* (defined below) */
	__u32 num_shader_engines;
	__u32 num_shader_arrays_per_engine;
	/* in KHz */
	__u32 gpu_counter_freq;
	__u64 max_engine_clock;
	__u64 max_memory_clock;
	/* cu information */
	__u32 cu_active_number;
	/* NOTE: cu_ao_mask is INVALID, DON'T use it */
	__u32 cu_ao_mask;
	__u32 cu_bitmap[4][4];
	/** Render backend pipe mask. One render backend is CB+DB. */
	__u32 enabled_rb_pipes_mask;
	__u32 num_rb_pipes;
	__u32 num_hw_gfx_contexts;
	/* PCIe version (the smaller of the GPU and the CPU/motherboard) */
	__u32 pcie_gen;
	__u64 ids_flags;
	/** Starting virtual address for UMDs. */
	__u64 virtual_address_offset;
	/** The maximum virtual address */
	__u64 virtual_address_max;
	/** Required alignment of virtual addresses. */
	__u32 virtual_address_alignment;
	/** Page table entry - fragment size */
	__u32 pte_fragment_size;
	__u32 gart_page_size;
	/** constant engine ram size */
	__u32 ce_ram_size;
	/** video memory type info (AMDGPU_VRAM_TYPE_*) */
	__u32 vram_type;
	/** video memory bit width */
	__u32 vram_bit_width;
	/* vce harvesting instance */
	__u32 vce_harvest_config;
	/* gfx double offchip LDS buffers */
	__u32 gc_double_offchip_lds_buf;
	/* NGG Primitive Buffer */
	__u64 prim_buf_gpu_addr;
	/* NGG Position Buffer */
	__u64 pos_buf_gpu_addr;
	/* NGG Control Sideband */
	__u64 cntl_sb_buf_gpu_addr;
	/* NGG Parameter Cache */
	__u64 param_buf_gpu_addr;
	__u32 prim_buf_size;
	__u32 pos_buf_size;
	__u32 cntl_sb_buf_size;
	__u32 param_buf_size;
	/* wavefront size */
	__u32 wave_front_size;
	/* shader visible vgprs */
	__u32 num_shader_visible_vgprs;
	/* CU per shader array */
	__u32 num_cu_per_sh;
	/* number of tcc blocks */
	__u32 num_tcc_blocks;
	/* gs vgt table depth */
	__u32 gs_vgt_table_depth;
	/* gs primitive buffer depth */
	__u32 gs_prim_buffer_depth;
	/* max gs wavefront per vgt */
	__u32 max_gs_waves_per_vgt;
	/* PCIe number of lanes (the smaller of the GPU and the CPU/motherboard) */
	__u32 pcie_num_lanes;
	/* always on cu bitmap */
	__u32 cu_ao_bitmap[4][4];
	/** Starting high virtual address for UMDs. */
	__u64 high_va_offset;
	/** The maximum high virtual address */
	__u64 high_va_max;
	/* gfx10 pa_sc_tile_steering_override */
	__u32 pa_sc_tile_steering_override;
	/* disabled TCCs */
	__u64 tcc_disabled_mask;
	__u64 min_engine_clock;
	__u64 min_memory_clock;
	/* The following fields are only set on gfx11+, older chips set 0. */
	__u32 tcp_cache_size;		/* AKA GL0, VMEM cache */
	__u32 num_sqc_per_wgp;
	__u32 sqc_data_cache_size;	/* AKA SMEM cache */
	__u32 sqc_inst_cache_size;
	__u32 gl1c_cache_size;
	__u32 gl2c_cache_size;
	__u64 mall_size;		/* AKA infinity cache */
	/* high 32 bits of the rb pipes mask */
	__u32 enabled_rb_pipes_mask_hi;
	/* shadow area size for gfx11 */
	__u32 shadow_size;
	/* shadow area base virtual alignment for gfx11 */
	__u32 shadow_alignment;
	/* context save area size for gfx11 */
	__u32 csa_size;
	/* context save area base virtual alignment for gfx11 */
	__u32 csa_alignment;
};
/* Output for the HW-IP query: capabilities of one hardware IP block. */
struct drm_amdgpu_info_hw_ip {
	/** Version of h/w IP */
	__u32 hw_ip_version_major;
	__u32 hw_ip_version_minor;
	/** Capabilities */
	__u64 capabilities_flags;
	/** command buffer address start alignment */
	__u32 ib_start_alignment;
	/** command buffer size alignment */
	__u32 ib_size_alignment;
	/** Bitmask of available rings. Bit 0 means ring 0, etc. */
	__u32 available_rings;
	/** version info: bits 23:16 major, 15:8 minor, 7:0 revision */
	__u32 ip_discovery_version;
};
/* Output for AMDGPU_INFO_NUM_HANDLES: UVD session-handle usage. */
struct drm_amdgpu_info_num_handles {
	/** Max handles as supported by firmware for UVD */
	__u32 uvd_max_handles;
	/** Handles currently in use for UVD */
	__u32 uvd_used_handles;
};
/* Output for AMDGPU_INFO_VCE_CLOCK_TABLE. */
#define AMDGPU_VCE_CLOCK_TABLE_ENTRIES 6

/* One VCE clock level; clocks presumably in kHz like gpu_counter_freq —
 * TODO(review) confirm units against the kernel side. */
struct drm_amdgpu_info_vce_clock_table_entry {
	/** System clock */
	__u32 sclk;
	/** Memory clock */
	__u32 mclk;
	/** VCE clock */
	__u32 eclk;
	__u32 pad;	/* explicit padding to an 8-byte multiple */
};

struct drm_amdgpu_info_vce_clock_table {
	struct drm_amdgpu_info_vce_clock_table_entry entries[AMDGPU_VCE_CLOCK_TABLE_ENTRIES];
	/* how many leading entries[] are valid */
	__u32 num_valid_entries;
	__u32 pad;
};
/* query video encode/decode caps */

/* Indices into drm_amdgpu_info_video_caps::codec_info[], one per codec. */
#define AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2		0
#define AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4		1
#define AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1		2
#define AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC	3
#define AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC		4
#define AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG		5
#define AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9		6
#define AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1		7
#define AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_COUNT		8

/* Per-codec limits; valid == 0 means the codec is unsupported. */
struct drm_amdgpu_info_video_codec_info {
	__u32 valid;
	__u32 max_width;
	__u32 max_height;
	__u32 max_pixels_per_frame;
	__u32 max_level;
	__u32 pad;	/* explicit padding to an 8-byte multiple */
};

/* Output for AMDGPU_INFO_VIDEO_CAPS (decode or encode per subquery id). */
struct drm_amdgpu_info_video_caps {
	struct drm_amdgpu_info_video_codec_info codec_info[AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_COUNT];
};
/* Field encoding of drm_amdgpu_info_gpuvm_fault::vmhub:
 * bits 7:0 = hub type, bits 15:8 = hub index. */
#define AMDGPU_VMHUB_TYPE_MASK	0xff
#define AMDGPU_VMHUB_TYPE_SHIFT	0
#define AMDGPU_VMHUB_TYPE_GFX	0
#define AMDGPU_VMHUB_TYPE_MM0	1
#define AMDGPU_VMHUB_TYPE_MM1	2
#define AMDGPU_VMHUB_IDX_MASK	0xff00
#define AMDGPU_VMHUB_IDX_SHIFT	8

/* Output for AMDGPU_INFO_GPUVM_FAULT: the most recent GPU page fault. */
struct drm_amdgpu_info_gpuvm_fault {
	__u64 addr;	/* faulting GPU virtual address */
	__u32 status;	/* raw fault status register value — TODO(review) confirm */
	__u32 vmhub;	/* encoded with AMDGPU_VMHUB_* above */
};
/*
 * Supported GPU families
 *
 * NOTE: ids are in allocation order, not sorted numerically
 * (GC 10.3.7 = 151 appears between 149 and 150 below).
 */
#define AMDGPU_FAMILY_UNKNOWN			0
#define AMDGPU_FAMILY_SI			110 /* Hainan, Oland, Verde, Pitcairn, Tahiti */
#define AMDGPU_FAMILY_CI			120 /* Bonaire, Hawaii */
#define AMDGPU_FAMILY_KV			125 /* Kaveri, Kabini, Mullins */
#define AMDGPU_FAMILY_VI			130 /* Iceland, Tonga */
#define AMDGPU_FAMILY_CZ			135 /* Carrizo, Stoney */
#define AMDGPU_FAMILY_AI			141 /* Vega10 */
#define AMDGPU_FAMILY_RV			142 /* Raven */
#define AMDGPU_FAMILY_NV			143 /* Navi10 */
#define AMDGPU_FAMILY_VGH			144 /* Van Gogh */
#define AMDGPU_FAMILY_GC_11_0_0			145 /* GC 11.0.0 */
#define AMDGPU_FAMILY_YC			146 /* Yellow Carp */
#define AMDGPU_FAMILY_GC_11_0_1			148 /* GC 11.0.1 */
#define AMDGPU_FAMILY_GC_10_3_6			149 /* GC 10.3.6 */
#define AMDGPU_FAMILY_GC_10_3_7			151 /* GC 10.3.7 */
#define AMDGPU_FAMILY_GC_11_5_0			150 /* GC 11.5.0 */
#define AMDGPU_FAMILY_GC_12_0_0			152 /* GC 12.0.0 */
/* FIXME wrong namespace! (drm_* prefix in an amdgpu header) */
struct drm_color_ctm_3x4 {
	/*
	 * Conversion matrix with 3x4 dimensions in S31.32 sign-magnitude
	 * (not two's complement!) format.  12 entries = 3 rows x 4 columns,
	 * row-major; the 4th column is the additive offset term.
	 */
	__u64 matrix[12];
};
  1174. #if defined(__cplusplus)
  1175. }
  1176. #endif
  1177. #endif