
oasis-root

Compiled tree of Oasis Linux, based on its own branch at <https://hacktivis.me/git/oasis/>. Clone with: git clone https://anongit.hacktivis.me/git/oasis-root.git

panthor_drm.h (28080B)


/* SPDX-License-Identifier: MIT */
/* Copyright (C) 2023 Collabora ltd. */
#ifndef _PANTHOR_DRM_H_
#define _PANTHOR_DRM_H_

#include "drm.h"

#if defined(__cplusplus)
extern "C" {
#endif

/**
 * DOC: Introduction
 *
 * This documentation describes the Panthor IOCTLs.
 *
 * Just a few generic rules about the data passed to the Panthor IOCTLs:
 *
 * - Structures must be aligned on 64-bit/8-byte. If the object is not
 *   naturally aligned, a padding field must be added.
 * - Fields must be explicitly aligned to their natural type alignment with
 *   pad[0..N] fields.
 * - All padding fields will be checked by the driver to make sure they are
 *   zeroed.
 * - Flags can be added, but not removed/replaced.
 * - New fields can be added to the main structures (the structures
 *   directly passed to the ioctl). Those fields can be added at the end of
 *   the structure, or replace existing padding fields. Any new field being
 *   added must preserve the behavior that existed before those fields were
 *   added when a value of zero is passed.
 * - New fields can be added to indirect objects (objects pointed by the
 *   main structure), iff those objects are passed a size to reflect the
 *   size known by the userspace driver (see drm_panthor_obj_array::stride
 *   or drm_panthor_dev_query::size).
 * - If the kernel driver is too old to know some fields, those will be
 *   ignored if zero, and otherwise rejected (and so will be zero on output).
 * - If userspace is too old to know some fields, those will be zeroed
 *   (input) before the structure is parsed by the kernel driver.
 * - Each new flag/field addition must come with a driver version update so
 *   the userspace driver doesn't have to trial and error to know which
 *   flags are supported.
 * - Structures should not contain unions, as this would defeat the
 *   extensibility of such structures.
 * - IOCTLs can't be removed or replaced. New IOCTL IDs should be placed
 *   at the end of the drm_panthor_ioctl_id enum.
 */

/**
 * DOC: MMIO regions exposed to userspace.
 *
 * .. c:macro:: DRM_PANTHOR_USER_MMIO_OFFSET
 *
 * File offset for all MMIO regions being exposed to userspace. Don't use
 * this value directly, use DRM_PANTHOR_USER_<name>_OFFSET values instead.
 * pgoffset passed to mmap2() is an unsigned long, which forces us to use a
 * different offset on 32-bit and 64-bit systems.
 *
 * .. c:macro:: DRM_PANTHOR_USER_FLUSH_ID_MMIO_OFFSET
 *
 * File offset for the LATEST_FLUSH_ID register. The userspace driver controls
 * GPU cache flushing through CS instructions, but the flush reduction
 * mechanism requires a flush_id. This flush_id could be queried with an
 * ioctl, but Arm provides a well-isolated register page containing only this
 * read-only register, so let's expose this page through a static mmap offset
 * and allow direct mapping of this MMIO region so we can avoid the
 * user <-> kernel round-trip.
 */
#define DRM_PANTHOR_USER_MMIO_OFFSET_32BIT (1ull << 43)
#define DRM_PANTHOR_USER_MMIO_OFFSET_64BIT (1ull << 56)
#define DRM_PANTHOR_USER_MMIO_OFFSET (sizeof(unsigned long) < 8 ? \
				      DRM_PANTHOR_USER_MMIO_OFFSET_32BIT : \
				      DRM_PANTHOR_USER_MMIO_OFFSET_64BIT)
#define DRM_PANTHOR_USER_FLUSH_ID_MMIO_OFFSET (DRM_PANTHOR_USER_MMIO_OFFSET | 0)
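
/*
 * Example (editor's illustrative sketch, not part of the UAPI header):
 * mapping the LATEST_FLUSH_ID register page through the static mmap offset
 * defined above. The file descriptor `fd` is assumed to be an open Panthor
 * render node, and the build is assumed to use a 64-bit off_t (e.g.
 * -D_FILE_OFFSET_BITS=64); error handling is abbreviated.
 *
 *	#include <stdint.h>
 *	#include <sys/mman.h>
 *	#include <unistd.h>
 *
 *	static volatile uint32_t *map_flush_id_page(int fd)
 *	{
 *		long page = sysconf(_SC_PAGESIZE);
 *		void *p = mmap(NULL, page, PROT_READ, MAP_SHARED, fd,
 *			       DRM_PANTHOR_USER_FLUSH_ID_MMIO_OFFSET);
 *
 *		return p == MAP_FAILED ? NULL : (volatile uint32_t *)p;
 *	}
 */
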
/**
 * DOC: IOCTL IDs
 *
 * enum drm_panthor_ioctl_id - IOCTL IDs
 *
 * Place new ioctls at the end, don't re-order, don't replace or remove entries.
 *
 * These IDs are not meant to be used directly. Use the DRM_IOCTL_PANTHOR_xxx
 * definitions instead.
 */
enum drm_panthor_ioctl_id {
	/** @DRM_PANTHOR_DEV_QUERY: Query device information. */
	DRM_PANTHOR_DEV_QUERY = 0,
	/** @DRM_PANTHOR_VM_CREATE: Create a VM. */
	DRM_PANTHOR_VM_CREATE,
	/** @DRM_PANTHOR_VM_DESTROY: Destroy a VM. */
	DRM_PANTHOR_VM_DESTROY,
	/** @DRM_PANTHOR_VM_BIND: Bind/unbind memory to a VM. */
	DRM_PANTHOR_VM_BIND,
	/** @DRM_PANTHOR_VM_GET_STATE: Get VM state. */
	DRM_PANTHOR_VM_GET_STATE,
	/** @DRM_PANTHOR_BO_CREATE: Create a buffer object. */
	DRM_PANTHOR_BO_CREATE,
	/**
	 * @DRM_PANTHOR_BO_MMAP_OFFSET: Get the file offset to pass to
	 * mmap to map a GEM object.
	 */
	DRM_PANTHOR_BO_MMAP_OFFSET,
	/** @DRM_PANTHOR_GROUP_CREATE: Create a scheduling group. */
	DRM_PANTHOR_GROUP_CREATE,
	/** @DRM_PANTHOR_GROUP_DESTROY: Destroy a scheduling group. */
	DRM_PANTHOR_GROUP_DESTROY,
	/**
	 * @DRM_PANTHOR_GROUP_SUBMIT: Submit jobs to queues belonging
	 * to a specific scheduling group.
	 */
	DRM_PANTHOR_GROUP_SUBMIT,
	/** @DRM_PANTHOR_GROUP_GET_STATE: Get the state of a scheduling group. */
	DRM_PANTHOR_GROUP_GET_STATE,
	/** @DRM_PANTHOR_TILER_HEAP_CREATE: Create a tiler heap. */
	DRM_PANTHOR_TILER_HEAP_CREATE,
	/** @DRM_PANTHOR_TILER_HEAP_DESTROY: Destroy a tiler heap. */
	DRM_PANTHOR_TILER_HEAP_DESTROY,
};

/**
 * DRM_IOCTL_PANTHOR() - Build a Panthor IOCTL number
 * @__access: Access type. Must be R, W or RW.
 * @__id: One of the DRM_PANTHOR_xxx id.
 * @__type: Suffix of the type being passed to the IOCTL.
 *
 * Don't use this macro directly, use the DRM_IOCTL_PANTHOR_xxx
 * values instead.
 *
 * Return: An IOCTL number to be passed to ioctl() from userspace.
 */
#define DRM_IOCTL_PANTHOR(__access, __id, __type) \
	DRM_IO ## __access(DRM_COMMAND_BASE + DRM_PANTHOR_ ## __id, \
			   struct drm_panthor_ ## __type)

#define DRM_IOCTL_PANTHOR_DEV_QUERY \
	DRM_IOCTL_PANTHOR(WR, DEV_QUERY, dev_query)
#define DRM_IOCTL_PANTHOR_VM_CREATE \
	DRM_IOCTL_PANTHOR(WR, VM_CREATE, vm_create)
#define DRM_IOCTL_PANTHOR_VM_DESTROY \
	DRM_IOCTL_PANTHOR(WR, VM_DESTROY, vm_destroy)
#define DRM_IOCTL_PANTHOR_VM_BIND \
	DRM_IOCTL_PANTHOR(WR, VM_BIND, vm_bind)
#define DRM_IOCTL_PANTHOR_VM_GET_STATE \
	DRM_IOCTL_PANTHOR(WR, VM_GET_STATE, vm_get_state)
#define DRM_IOCTL_PANTHOR_BO_CREATE \
	DRM_IOCTL_PANTHOR(WR, BO_CREATE, bo_create)
#define DRM_IOCTL_PANTHOR_BO_MMAP_OFFSET \
	DRM_IOCTL_PANTHOR(WR, BO_MMAP_OFFSET, bo_mmap_offset)
#define DRM_IOCTL_PANTHOR_GROUP_CREATE \
	DRM_IOCTL_PANTHOR(WR, GROUP_CREATE, group_create)
#define DRM_IOCTL_PANTHOR_GROUP_DESTROY \
	DRM_IOCTL_PANTHOR(WR, GROUP_DESTROY, group_destroy)
#define DRM_IOCTL_PANTHOR_GROUP_SUBMIT \
	DRM_IOCTL_PANTHOR(WR, GROUP_SUBMIT, group_submit)
#define DRM_IOCTL_PANTHOR_GROUP_GET_STATE \
	DRM_IOCTL_PANTHOR(WR, GROUP_GET_STATE, group_get_state)
#define DRM_IOCTL_PANTHOR_TILER_HEAP_CREATE \
	DRM_IOCTL_PANTHOR(WR, TILER_HEAP_CREATE, tiler_heap_create)
#define DRM_IOCTL_PANTHOR_TILER_HEAP_DESTROY \
	DRM_IOCTL_PANTHOR(WR, TILER_HEAP_DESTROY, tiler_heap_destroy)
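
/*
 * Illustrative sketch (editor's addition, not part of the UAPI): the
 * DRM_IOCTL_PANTHOR_xxx numbers above are regular ioctl request codes, used
 * on a file descriptor obtained by opening the Panthor render node. The
 * device path and the retry policy below are assumptions, not part of this
 * header; later examples reuse this hypothetical panthor_ioctl() wrapper.
 *
 *	#include <errno.h>
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *
 *	static int panthor_ioctl(int fd, unsigned long request, void *arg)
 *	{
 *		int ret;
 *
 *		do {
 *			ret = ioctl(fd, request, arg);
 *		} while (ret == -1 && (errno == EINTR || errno == EAGAIN));
 *		return ret;
 *	}
 *
 *	// int fd = open("/dev/dri/renderD128", O_RDWR | O_CLOEXEC);
 */
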
/**
 * DOC: IOCTL arguments
 */

/**
 * struct drm_panthor_obj_array - Object array.
 *
 * This object is used to pass an array of objects whose size is subject to changes in
 * future versions of the driver. In order to support this mutability, we pass a stride
 * describing the size of the object as known by userspace.
 *
 * You shouldn't fill drm_panthor_obj_array fields directly. You should instead use
 * the DRM_PANTHOR_OBJ_ARRAY() macro that takes care of initializing the stride to
 * the object size.
 */
struct drm_panthor_obj_array {
	/** @stride: Stride of object struct. Used for versioning. */
	__u32 stride;
	/** @count: Number of objects in the array. */
	__u32 count;
	/** @array: User pointer to an array of objects. */
	__u64 array;
};

/**
 * DRM_PANTHOR_OBJ_ARRAY() - Initialize a drm_panthor_obj_array field.
 * @cnt: Number of elements in the array.
 * @ptr: Pointer to the array to pass to the kernel.
 *
 * Macro initializing a drm_panthor_obj_array based on the object size as known
 * by userspace.
 */
#define DRM_PANTHOR_OBJ_ARRAY(cnt, ptr) \
	{ .stride = sizeof((ptr)[0]), .count = (cnt), .array = (__u64)(uintptr_t)(ptr) }

/**
 * enum drm_panthor_sync_op_flags - Synchronization operation flags.
 */
enum drm_panthor_sync_op_flags {
	/** @DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_MASK: Synchronization handle type mask. */
	DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_MASK = 0xff,
	/** @DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_SYNCOBJ: Synchronization object type. */
	DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_SYNCOBJ = 0,
	/**
	 * @DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_TIMELINE_SYNCOBJ: Timeline synchronization
	 * object type.
	 */
	DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_TIMELINE_SYNCOBJ = 1,
	/** @DRM_PANTHOR_SYNC_OP_WAIT: Wait operation. */
	DRM_PANTHOR_SYNC_OP_WAIT = 0 << 31,
	/** @DRM_PANTHOR_SYNC_OP_SIGNAL: Signal operation. */
	DRM_PANTHOR_SYNC_OP_SIGNAL = (int)(1u << 31),
};

/**
 * struct drm_panthor_sync_op - Synchronization operation.
 */
struct drm_panthor_sync_op {
	/** @flags: Synchronization operation flags. Combination of DRM_PANTHOR_SYNC_OP values. */
	__u32 flags;
	/** @handle: Sync handle. */
	__u32 handle;
	/**
	 * @timeline_value: MBZ if
	 * (flags & DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_MASK) !=
	 * DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_TIMELINE_SYNCOBJ.
	 */
	__u64 timeline_value;
};
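
/*
 * Illustrative sketch (editor's addition): DRM_PANTHOR_OBJ_ARRAY() derives
 * the stride from the element type, so the kernel can tell which struct
 * version userspace was built against. in_syncobj and out_syncobj are
 * hypothetical handles created through the generic DRM syncobj API; arrays
 * like this one are embedded in later argument structs (e.g.
 * drm_panthor_vm_bind::ops, drm_panthor_queue_submit::syncs).
 *
 *	struct drm_panthor_sync_op syncs[2] = {
 *		{
 *			.flags = DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_SYNCOBJ |
 *				 DRM_PANTHOR_SYNC_OP_WAIT,
 *			.handle = in_syncobj,
 *		},
 *		{
 *			.flags = DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_SYNCOBJ |
 *				 DRM_PANTHOR_SYNC_OP_SIGNAL,
 *			.handle = out_syncobj,
 *		},
 *	};
 *	struct drm_panthor_obj_array arr = DRM_PANTHOR_OBJ_ARRAY(2, syncs);
 *	// arr.stride == sizeof(struct drm_panthor_sync_op), arr.count == 2
 */
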
/**
 * enum drm_panthor_dev_query_type - Query type
 *
 * Place new types at the end, don't re-order, don't remove or replace.
 */
enum drm_panthor_dev_query_type {
	/** @DRM_PANTHOR_DEV_QUERY_GPU_INFO: Query GPU information. */
	DRM_PANTHOR_DEV_QUERY_GPU_INFO = 0,
	/** @DRM_PANTHOR_DEV_QUERY_CSIF_INFO: Query command-stream interface information. */
	DRM_PANTHOR_DEV_QUERY_CSIF_INFO,
};

/**
 * struct drm_panthor_gpu_info - GPU information
 *
 * Structure grouping all queryable information relating to the GPU.
 */
struct drm_panthor_gpu_info {
	/** @gpu_id: GPU ID. */
	__u32 gpu_id;
#define DRM_PANTHOR_ARCH_MAJOR(x) ((x) >> 28)
#define DRM_PANTHOR_ARCH_MINOR(x) (((x) >> 24) & 0xf)
#define DRM_PANTHOR_ARCH_REV(x) (((x) >> 20) & 0xf)
#define DRM_PANTHOR_PRODUCT_MAJOR(x) (((x) >> 16) & 0xf)
#define DRM_PANTHOR_VERSION_MAJOR(x) (((x) >> 12) & 0xf)
#define DRM_PANTHOR_VERSION_MINOR(x) (((x) >> 4) & 0xff)
#define DRM_PANTHOR_VERSION_STATUS(x) ((x) & 0xf)

	/** @gpu_rev: GPU revision. */
	__u32 gpu_rev;
	/** @csf_id: Command stream frontend ID. */
	__u32 csf_id;
#define DRM_PANTHOR_CSHW_MAJOR(x) (((x) >> 26) & 0x3f)
#define DRM_PANTHOR_CSHW_MINOR(x) (((x) >> 20) & 0x3f)
#define DRM_PANTHOR_CSHW_REV(x) (((x) >> 16) & 0xf)
#define DRM_PANTHOR_MCU_MAJOR(x) (((x) >> 10) & 0x3f)
#define DRM_PANTHOR_MCU_MINOR(x) (((x) >> 4) & 0x3f)
#define DRM_PANTHOR_MCU_REV(x) ((x) & 0xf)

	/** @l2_features: L2-cache features. */
	__u32 l2_features;
	/** @tiler_features: Tiler features. */
	__u32 tiler_features;
	/** @mem_features: Memory features. */
	__u32 mem_features;
	/** @mmu_features: MMU features. */
	__u32 mmu_features;
#define DRM_PANTHOR_MMU_VA_BITS(x) ((x) & 0xff)

	/** @thread_features: Thread features. */
	__u32 thread_features;
	/** @max_threads: Maximum number of threads. */
	__u32 max_threads;
	/** @thread_max_workgroup_size: Maximum workgroup size. */
	__u32 thread_max_workgroup_size;
	/**
	 * @thread_max_barrier_size: Maximum number of threads that can wait
	 * simultaneously on a barrier.
	 */
	__u32 thread_max_barrier_size;
	/** @coherency_features: Coherency features. */
	__u32 coherency_features;
	/** @texture_features: Texture features. */
	__u32 texture_features[4];
	/** @as_present: Bitmask encoding the number of address spaces exposed by the MMU. */
	__u32 as_present;
	/** @shader_present: Bitmask encoding the shader cores exposed by the GPU. */
	__u64 shader_present;
	/** @l2_present: Bitmask encoding the L2 caches exposed by the GPU. */
	__u64 l2_present;
	/** @tiler_present: Bitmask encoding the tiler units exposed by the GPU. */
	__u64 tiler_present;
	/** @core_features: Used to discriminate core variants when they exist. */
	__u32 core_features;
	/** @pad: MBZ. */
	__u32 pad;
};

/**
 * struct drm_panthor_csif_info - Command stream interface information
 *
 * Structure grouping all queryable information relating to the command stream interface.
 */
struct drm_panthor_csif_info {
	/** @csg_slot_count: Number of command stream group slots exposed by the firmware. */
	__u32 csg_slot_count;
	/** @cs_slot_count: Number of command stream slots per group. */
	__u32 cs_slot_count;
	/** @cs_reg_count: Number of command stream registers. */
	__u32 cs_reg_count;
	/** @scoreboard_slot_count: Number of scoreboard slots. */
	__u32 scoreboard_slot_count;
	/**
	 * @unpreserved_cs_reg_count: Number of command stream registers reserved by
	 * the kernel driver to call a userspace command stream.
	 *
	 * All registers can be used by a userspace command stream, but the
	 * [cs_reg_count - unpreserved_cs_reg_count .. cs_reg_count] registers are
	 * used by the kernel when DRM_IOCTL_PANTHOR_GROUP_SUBMIT is called.
	 */
	__u32 unpreserved_cs_reg_count;
	/**
	 * @pad: Padding field, set to zero.
	 */
	__u32 pad;
};

/**
 * struct drm_panthor_dev_query - Arguments passed to DRM_IOCTL_PANTHOR_DEV_QUERY
 */
struct drm_panthor_dev_query {
	/** @type: the query type (see drm_panthor_dev_query_type). */
	__u32 type;
	/**
	 * @size: size of the type being queried.
	 *
	 * If pointer is NULL, size is updated by the driver to provide the
	 * output structure size. If pointer is not NULL, the driver will
	 * only copy min(size, actual_structure_size) bytes to the pointer,
	 * and update the size accordingly. This allows us to extend query
	 * types without breaking userspace.
	 */
	__u32 size;
	/**
	 * @pointer: user pointer to a query type struct.
	 *
	 * Pointer can be NULL, in which case, nothing is copied, but the
	 * actual structure size is returned. If not NULL, it must point to
	 * a location that's large enough to hold size bytes.
	 */
	__u64 pointer;
};
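
/*
 * Illustrative sketch (editor's addition): the size/pointer pair supports a
 * two-call pattern, where a first call with @pointer set to NULL only reports
 * the structure size known to the kernel, and a second call copies at most
 * min(size, kernel size) bytes. panthor_ioctl() is the hypothetical wrapper
 * from the earlier example.
 *
 *	struct drm_panthor_gpu_info gpu_info = {0};
 *	struct drm_panthor_dev_query query = {
 *		.type = DRM_PANTHOR_DEV_QUERY_GPU_INFO,
 *		.size = sizeof(gpu_info),
 *		.pointer = (__u64)(uintptr_t)&gpu_info,
 *	};
 *
 *	panthor_ioctl(fd, DRM_IOCTL_PANTHOR_DEV_QUERY, &query);
 *	// query.size now holds the number of bytes actually copied.
 *
 *	unsigned int va_bits = DRM_PANTHOR_MMU_VA_BITS(gpu_info.mmu_features);
 */
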
/**
 * struct drm_panthor_vm_create - Arguments passed to DRM_IOCTL_PANTHOR_VM_CREATE
 */
struct drm_panthor_vm_create {
	/** @flags: VM flags, MBZ. */
	__u32 flags;
	/** @id: Returned VM ID. */
	__u32 id;
	/**
	 * @user_va_range: Size of the VA space reserved for user objects.
	 *
	 * The kernel will pick the remaining space to map kernel-only objects to the
	 * VM (heap chunks, heap context, ring buffers, kernel synchronization objects,
	 * ...). If the space left for kernel objects is too small, kernel object
	 * allocation will fail further down the road. One can use
	 * drm_panthor_gpu_info::mmu_features to extract the total virtual address
	 * range, and choose a user_va_range that leaves some space to the kernel.
	 *
	 * If user_va_range is zero, the kernel will pick a sensible value based on
	 * TASK_SIZE and the virtual range supported by the GPU MMU (the kernel/user
	 * split should leave enough VA space for userspace processes to support SVM,
	 * while still allowing the kernel to map some amount of kernel objects in
	 * the kernel VA range). The value chosen by the driver will be returned in
	 * @user_va_range.
	 *
	 * User VA space always starts at 0x0, kernel VA space is always placed after
	 * the user VA range.
	 */
	__u64 user_va_range;
};
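
/*
 * Illustrative sketch (editor's addition): creating a VM and letting the
 * kernel pick the user/kernel VA split by leaving @user_va_range at zero.
 * Alternatively, derive a range from DRM_PANTHOR_MMU_VA_BITS() as described
 * above. panthor_ioctl() is the hypothetical wrapper from the earlier example.
 *
 *	struct drm_panthor_vm_create vm_args = {
 *		.flags = 0,
 *		.user_va_range = 0,	// 0: let the driver choose; returned on success
 *	};
 *
 *	if (panthor_ioctl(fd, DRM_IOCTL_PANTHOR_VM_CREATE, &vm_args) == 0) {
 *		__u32 vm_id = vm_args.id;
 *		// vm_args.user_va_range now holds the driver-chosen user VA size.
 *	}
 */
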
/**
 * struct drm_panthor_vm_destroy - Arguments passed to DRM_IOCTL_PANTHOR_VM_DESTROY
 */
struct drm_panthor_vm_destroy {
	/** @id: ID of the VM to destroy. */
	__u32 id;
	/** @pad: MBZ. */
	__u32 pad;
};

/**
 * enum drm_panthor_vm_bind_op_flags - VM bind operation flags
 */
enum drm_panthor_vm_bind_op_flags {
	/**
	 * @DRM_PANTHOR_VM_BIND_OP_MAP_READONLY: Map the memory read-only.
	 *
	 * Only valid with DRM_PANTHOR_VM_BIND_OP_TYPE_MAP.
	 */
	DRM_PANTHOR_VM_BIND_OP_MAP_READONLY = 1 << 0,
	/**
	 * @DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC: Map the memory not-executable.
	 *
	 * Only valid with DRM_PANTHOR_VM_BIND_OP_TYPE_MAP.
	 */
	DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC = 1 << 1,
	/**
	 * @DRM_PANTHOR_VM_BIND_OP_MAP_UNCACHED: Map the memory uncached.
	 *
	 * Only valid with DRM_PANTHOR_VM_BIND_OP_TYPE_MAP.
	 */
	DRM_PANTHOR_VM_BIND_OP_MAP_UNCACHED = 1 << 2,
	/**
	 * @DRM_PANTHOR_VM_BIND_OP_TYPE_MASK: Mask used to determine the type of operation.
	 */
	DRM_PANTHOR_VM_BIND_OP_TYPE_MASK = (int)(0xfu << 28),
	/** @DRM_PANTHOR_VM_BIND_OP_TYPE_MAP: Map operation. */
	DRM_PANTHOR_VM_BIND_OP_TYPE_MAP = 0 << 28,
	/** @DRM_PANTHOR_VM_BIND_OP_TYPE_UNMAP: Unmap operation. */
	DRM_PANTHOR_VM_BIND_OP_TYPE_UNMAP = 1 << 28,
	/**
	 * @DRM_PANTHOR_VM_BIND_OP_TYPE_SYNC_ONLY: No VM operation.
	 *
	 * Just serves as a synchronization point on a VM queue.
	 *
	 * Only valid if %DRM_PANTHOR_VM_BIND_ASYNC is set in drm_panthor_vm_bind::flags,
	 * and drm_panthor_vm_bind_op::syncs contains at least one element.
	 */
	DRM_PANTHOR_VM_BIND_OP_TYPE_SYNC_ONLY = 2 << 28,
};

/**
 * struct drm_panthor_vm_bind_op - VM bind operation
 */
struct drm_panthor_vm_bind_op {
	/** @flags: Combination of drm_panthor_vm_bind_op_flags flags. */
	__u32 flags;
	/**
	 * @bo_handle: Handle of the buffer object to map.
	 * MBZ for unmap or sync-only operations.
	 */
	__u32 bo_handle;
	/**
	 * @bo_offset: Buffer object offset.
	 * MBZ for unmap or sync-only operations.
	 */
	__u64 bo_offset;
	/**
	 * @va: Virtual address to map/unmap.
	 * MBZ for sync-only operations.
	 */
	__u64 va;
	/**
	 * @size: Size to map/unmap.
	 * MBZ for sync-only operations.
	 */
	__u64 size;
	/**
	 * @syncs: Array of struct drm_panthor_sync_op synchronization
	 * operations.
	 *
	 * This array must be empty if %DRM_PANTHOR_VM_BIND_ASYNC is not set on
	 * the drm_panthor_vm_bind object containing this VM bind operation.
	 *
	 * This array shall not be empty for sync-only operations.
	 */
	struct drm_panthor_obj_array syncs;
};

/**
 * enum drm_panthor_vm_bind_flags - VM bind flags
 */
enum drm_panthor_vm_bind_flags {
	/**
	 * @DRM_PANTHOR_VM_BIND_ASYNC: VM bind operations are queued to the VM
	 * queue instead of being executed synchronously.
	 */
	DRM_PANTHOR_VM_BIND_ASYNC = 1 << 0,
};

/**
 * struct drm_panthor_vm_bind - Arguments passed to DRM_IOCTL_PANTHOR_VM_BIND
 */
struct drm_panthor_vm_bind {
	/** @vm_id: VM targeted by the bind request. */
	__u32 vm_id;
	/** @flags: Combination of drm_panthor_vm_bind_flags flags. */
	__u32 flags;
	/** @ops: Array of struct drm_panthor_vm_bind_op bind operations. */
	struct drm_panthor_obj_array ops;
};
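
/*
 * Illustrative sketch (editor's addition): synchronously mapping a whole
 * buffer object at a caller-chosen GPU VA. bo_handle, bo_size, gpu_va and
 * vm_id are hypothetical values obtained elsewhere (see drm_panthor_bo_create
 * below and drm_panthor_vm_create above).
 *
 *	struct drm_panthor_vm_bind_op bind_op = {
 *		.flags = DRM_PANTHOR_VM_BIND_OP_TYPE_MAP |
 *			 DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC,
 *		.bo_handle = bo_handle,
 *		.bo_offset = 0,
 *		.va = gpu_va,
 *		.size = bo_size,
 *		// .syncs left zeroed: must be empty when DRM_PANTHOR_VM_BIND_ASYNC is not set
 *	};
 *	struct drm_panthor_vm_bind bind_args = {
 *		.vm_id = vm_id,
 *		.flags = 0,		// synchronous bind
 *		.ops = DRM_PANTHOR_OBJ_ARRAY(1, &bind_op),
 *	};
 *
 *	panthor_ioctl(fd, DRM_IOCTL_PANTHOR_VM_BIND, &bind_args);
 */
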
/**
 * enum drm_panthor_vm_state - VM states.
 */
enum drm_panthor_vm_state {
	/**
	 * @DRM_PANTHOR_VM_STATE_USABLE: VM is usable.
	 *
	 * New VM operations will be accepted on this VM.
	 */
	DRM_PANTHOR_VM_STATE_USABLE,
	/**
	 * @DRM_PANTHOR_VM_STATE_UNUSABLE: VM is unusable.
	 *
	 * Something put the VM in an unusable state (like an asynchronous
	 * VM_BIND request failing for any reason).
	 *
	 * Once the VM is in this state, all new MAP operations will be
	 * rejected, and any GPU job targeting this VM will fail.
	 * UNMAP operations are still accepted.
	 *
	 * The only way to recover from an unusable VM is to create a new
	 * VM, and destroy the old one.
	 */
	DRM_PANTHOR_VM_STATE_UNUSABLE,
};

/**
 * struct drm_panthor_vm_get_state - Get VM state.
 */
struct drm_panthor_vm_get_state {
	/** @vm_id: VM targeted by the get_state request. */
	__u32 vm_id;
	/**
	 * @state: state returned by the driver.
	 *
	 * Must be one of the enum drm_panthor_vm_state values.
	 */
	__u32 state;
};

/**
 * enum drm_panthor_bo_flags - Buffer object flags, passed at creation time.
 */
enum drm_panthor_bo_flags {
	/** @DRM_PANTHOR_BO_NO_MMAP: The buffer object will never be CPU-mapped in userspace. */
	DRM_PANTHOR_BO_NO_MMAP = (1 << 0),
};

/**
 * struct drm_panthor_bo_create - Arguments passed to DRM_IOCTL_PANTHOR_BO_CREATE.
 */
struct drm_panthor_bo_create {
	/**
	 * @size: Requested size for the object
	 *
	 * The (page-aligned) allocated size for the object will be returned.
	 */
	__u64 size;
	/**
	 * @flags: Flags. Must be a combination of drm_panthor_bo_flags flags.
	 */
	__u32 flags;
	/**
	 * @exclusive_vm_id: Exclusive VM this buffer object will be mapped to.
	 *
	 * If not zero, the field must refer to a valid VM ID, and implies that:
	 *  - the buffer object will only ever be bound to that VM
	 *  - cannot be exported as a PRIME fd
	 */
	__u32 exclusive_vm_id;
	/**
	 * @handle: Returned handle for the object.
	 *
	 * Object handles are nonzero.
	 */
	__u32 handle;
	/** @pad: MBZ. */
	__u32 pad;
};

/**
 * struct drm_panthor_bo_mmap_offset - Arguments passed to DRM_IOCTL_PANTHOR_BO_MMAP_OFFSET.
 */
struct drm_panthor_bo_mmap_offset {
	/** @handle: Handle of the object we want an mmap offset for. */
	__u32 handle;
	/** @pad: MBZ. */
	__u32 pad;
	/** @offset: The fake offset to use for subsequent mmap calls. */
	__u64 offset;
};
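
/*
 * Illustrative sketch (editor's addition): allocating a CPU-mappable buffer
 * object and mapping it. The 64 KiB size is an arbitrary example value, vm_id
 * comes from the VM-create example, and panthor_ioctl() is the hypothetical
 * wrapper defined earlier.
 *
 *	struct drm_panthor_bo_create bo_args = {
 *		.size = 65536,
 *		.flags = 0,			// no DRM_PANTHOR_BO_NO_MMAP: CPU mapping allowed
 *		.exclusive_vm_id = vm_id,	// 0 would allow binding to any VM / PRIME export
 *	};
 *	panthor_ioctl(fd, DRM_IOCTL_PANTHOR_BO_CREATE, &bo_args);
 *
 *	struct drm_panthor_bo_mmap_offset off_args = {
 *		.handle = bo_args.handle,
 *	};
 *	panthor_ioctl(fd, DRM_IOCTL_PANTHOR_BO_MMAP_OFFSET, &off_args);
 *
 *	void *cpu_ptr = mmap(NULL, bo_args.size, PROT_READ | PROT_WRITE,
 *			     MAP_SHARED, fd, off_args.offset);
 */
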
/**
 * struct drm_panthor_queue_create - Queue creation arguments.
 */
struct drm_panthor_queue_create {
	/**
	 * @priority: Defines the priority of queues inside a group. Goes from 0 to 15,
	 * 15 being the highest priority.
	 */
	__u8 priority;
	/** @pad: Padding fields, MBZ. */
	__u8 pad[3];
	/** @ringbuf_size: Size of the ring buffer to allocate to this queue. */
	__u32 ringbuf_size;
};

/**
 * enum drm_panthor_group_priority - Scheduling group priority
 */
enum drm_panthor_group_priority {
	/** @PANTHOR_GROUP_PRIORITY_LOW: Low priority group. */
	PANTHOR_GROUP_PRIORITY_LOW = 0,
	/** @PANTHOR_GROUP_PRIORITY_MEDIUM: Medium priority group. */
	PANTHOR_GROUP_PRIORITY_MEDIUM,
	/**
	 * @PANTHOR_GROUP_PRIORITY_HIGH: High priority group.
	 *
	 * Requires CAP_SYS_NICE or DRM_MASTER.
	 */
	PANTHOR_GROUP_PRIORITY_HIGH,
};

/**
 * struct drm_panthor_group_create - Arguments passed to DRM_IOCTL_PANTHOR_GROUP_CREATE
 */
struct drm_panthor_group_create {
	/** @queues: Array of drm_panthor_queue_create elements. */
	struct drm_panthor_obj_array queues;
	/**
	 * @max_compute_cores: Maximum number of cores that can be used by compute
	 * jobs across CS queues bound to this group.
	 *
	 * Must be less than or equal to the number of bits set in @compute_core_mask.
	 */
	__u8 max_compute_cores;
	/**
	 * @max_fragment_cores: Maximum number of cores that can be used by fragment
	 * jobs across CS queues bound to this group.
	 *
	 * Must be less than or equal to the number of bits set in @fragment_core_mask.
	 */
	__u8 max_fragment_cores;
	/**
	 * @max_tiler_cores: Maximum number of tilers that can be used by tiler jobs
	 * across CS queues bound to this group.
	 *
	 * Must be less than or equal to the number of bits set in @tiler_core_mask.
	 */
	__u8 max_tiler_cores;
	/** @priority: Group priority (see enum drm_panthor_group_priority). */
	__u8 priority;
	/** @pad: Padding field, MBZ. */
	__u32 pad;
	/**
	 * @compute_core_mask: Mask encoding cores that can be used for compute jobs.
	 *
	 * This field must have at least @max_compute_cores bits set.
	 *
	 * The bits set here should also be set in drm_panthor_gpu_info::shader_present.
	 */
	__u64 compute_core_mask;
	/**
	 * @fragment_core_mask: Mask encoding cores that can be used for fragment jobs.
	 *
	 * This field must have at least @max_fragment_cores bits set.
	 *
	 * The bits set here should also be set in drm_panthor_gpu_info::shader_present.
	 */
	__u64 fragment_core_mask;
	/**
	 * @tiler_core_mask: Mask encoding cores that can be used for tiler jobs.
	 *
	 * This field must have at least @max_tiler_cores bits set.
	 *
	 * The bits set here should also be set in drm_panthor_gpu_info::tiler_present.
	 */
	__u64 tiler_core_mask;
	/**
	 * @vm_id: VM ID to bind this group to.
	 *
	 * All submissions to queues bound to this group will use this VM.
	 */
	__u32 vm_id;
	/**
	 * @group_handle: Returned group handle. Passed back when submitting jobs or
	 * destroying a group.
	 */
	__u32 group_handle;
};
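
/*
 * Illustrative sketch (editor's addition): creating a group with a single
 * queue, allowing every shader core and tiler reported by the GPU. The
 * 64 KiB ring-buffer size and the use of __builtin_popcountll() are
 * assumptions of this sketch; gpu_info is the structure queried in the
 * dev-query example above.
 *
 *	struct drm_panthor_queue_create qc = {
 *		.priority = 0,
 *		.ringbuf_size = 65536,
 *	};
 *	struct drm_panthor_group_create grp = {
 *		.queues = DRM_PANTHOR_OBJ_ARRAY(1, &qc),
 *		.max_compute_cores = __builtin_popcountll(gpu_info.shader_present),
 *		.max_fragment_cores = __builtin_popcountll(gpu_info.shader_present),
 *		.max_tiler_cores = __builtin_popcountll(gpu_info.tiler_present),
 *		.priority = PANTHOR_GROUP_PRIORITY_MEDIUM,
 *		.compute_core_mask = gpu_info.shader_present,
 *		.fragment_core_mask = gpu_info.shader_present,
 *		.tiler_core_mask = gpu_info.tiler_present,
 *		.vm_id = vm_id,
 *	};
 *
 *	panthor_ioctl(fd, DRM_IOCTL_PANTHOR_GROUP_CREATE, &grp);
 *	// grp.group_handle now identifies the group for submit/destroy.
 */
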
/**
 * struct drm_panthor_group_destroy - Arguments passed to DRM_IOCTL_PANTHOR_GROUP_DESTROY
 */
struct drm_panthor_group_destroy {
	/** @group_handle: Group to destroy. */
	__u32 group_handle;
	/** @pad: Padding field, MBZ. */
	__u32 pad;
};

/**
 * struct drm_panthor_queue_submit - Job submission arguments.
 *
 * This describes the userspace command stream to call from the kernel
 * command stream ring-buffer. Queue submission is always part of a group
 * submission, taking one or more jobs to submit to the underlying queues.
 */
struct drm_panthor_queue_submit {
	/** @queue_index: Index of the queue inside a group. */
	__u32 queue_index;
	/**
	 * @stream_size: Size of the command stream to execute.
	 *
	 * Must be 64-bit/8-byte aligned (the size of a CS instruction).
	 *
	 * Can be zero if stream_addr is zero too.
	 *
	 * When the stream size is zero, the queue submit serves as a
	 * synchronization point.
	 */
	__u32 stream_size;
	/**
	 * @stream_addr: GPU address of the command stream to execute.
	 *
	 * Must be 64-byte aligned.
	 *
	 * Can be zero if stream_size is zero too.
	 */
	__u64 stream_addr;
	/**
	 * @latest_flush: FLUSH_ID read at the time the stream was built.
	 *
	 * This allows cache flush elimination for the automatic
	 * flush+invalidate(all) done at submission time, which is needed to
	 * ensure the GPU doesn't get garbage when reading the indirect command
	 * stream buffers. If you want the cache flush to happen
	 * unconditionally, pass a zero here.
	 *
	 * Ignored when stream_size is zero.
	 */
	__u32 latest_flush;
	/** @pad: MBZ. */
	__u32 pad;
	/** @syncs: Array of struct drm_panthor_sync_op sync operations. */
	struct drm_panthor_obj_array syncs;
};

/**
 * struct drm_panthor_group_submit - Arguments passed to DRM_IOCTL_PANTHOR_GROUP_SUBMIT
 */
struct drm_panthor_group_submit {
	/** @group_handle: Handle of the group to queue jobs to. */
	__u32 group_handle;
	/** @pad: MBZ. */
	__u32 pad;
	/** @queue_submits: Array of drm_panthor_queue_submit objects. */
	struct drm_panthor_obj_array queue_submits;
};
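
/*
 * Illustrative sketch (editor's addition): submitting one job to queue 0 of a
 * group, reusing the wait/signal sync-op array from the obj_array example and
 * the grp handle from the group-create example. stream_gpu_va points at a
 * 64-byte-aligned CS buffer previously written through a mapped BO, and
 * flush_id was read from the LATEST_FLUSH_ID page mapped in the first
 * example; both are assumptions of this sketch.
 *
 *	struct drm_panthor_queue_submit qsubmit = {
 *		.queue_index = 0,
 *		.stream_size = stream_size_in_bytes,	// multiple of 8
 *		.stream_addr = stream_gpu_va,		// 64-byte aligned
 *		.latest_flush = flush_id,		// 0 forces an unconditional flush
 *		.syncs = DRM_PANTHOR_OBJ_ARRAY(2, syncs),
 *	};
 *	struct drm_panthor_group_submit gsubmit = {
 *		.group_handle = grp.group_handle,
 *		.queue_submits = DRM_PANTHOR_OBJ_ARRAY(1, &qsubmit),
 *	};
 *
 *	panthor_ioctl(fd, DRM_IOCTL_PANTHOR_GROUP_SUBMIT, &gsubmit);
 */
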
/**
 * enum drm_panthor_group_state_flags - Group state flags
 */
enum drm_panthor_group_state_flags {
	/**
	 * @DRM_PANTHOR_GROUP_STATE_TIMEDOUT: Group had unfinished jobs.
	 *
	 * When a group ends up with this flag set, no jobs can be submitted to its queues.
	 */
	DRM_PANTHOR_GROUP_STATE_TIMEDOUT = 1 << 0,
	/**
	 * @DRM_PANTHOR_GROUP_STATE_FATAL_FAULT: Group had fatal faults.
	 *
	 * When a group ends up with this flag set, no jobs can be submitted to its queues.
	 */
	DRM_PANTHOR_GROUP_STATE_FATAL_FAULT = 1 << 1,
};

/**
 * struct drm_panthor_group_get_state - Arguments passed to DRM_IOCTL_PANTHOR_GROUP_GET_STATE
 *
 * Used to query the state of a group and decide whether a new group should be created to
 * replace it.
 */
struct drm_panthor_group_get_state {
	/** @group_handle: Handle of the group to query state on. */
	__u32 group_handle;
	/**
	 * @state: Combination of DRM_PANTHOR_GROUP_STATE_* flags encoding the
	 * group state.
	 */
	__u32 state;
	/** @fatal_queues: Bitmask of queues that faced fatal faults. */
	__u32 fatal_queues;
	/** @pad: MBZ. */
	__u32 pad;
};

/**
 * struct drm_panthor_tiler_heap_create - Arguments passed to DRM_IOCTL_PANTHOR_TILER_HEAP_CREATE
 */
struct drm_panthor_tiler_heap_create {
	/** @vm_id: VM ID the tiler heap should be mapped to. */
	__u32 vm_id;
	/** @initial_chunk_count: Initial number of chunks to allocate. Must be at least one. */
	__u32 initial_chunk_count;
	/**
	 * @chunk_size: Chunk size.
	 *
	 * Must be page-aligned and lie in the [128k:8M] range.
	 */
	__u32 chunk_size;
	/**
	 * @max_chunks: Maximum number of chunks that can be allocated.
	 *
	 * Must be at least @initial_chunk_count.
	 */
	__u32 max_chunks;
	/**
	 * @target_in_flight: Maximum number of in-flight render passes.
	 *
	 * If the heap has more than @target_in_flight tiler jobs in-flight, the FW
	 * will wait for render passes to finish before queuing new tiler jobs.
	 */
	__u32 target_in_flight;
	/** @handle: Returned heap handle. Passed back to the TILER_HEAP_DESTROY ioctl. */
	__u32 handle;
	/** @tiler_heap_ctx_gpu_va: Returned heap GPU virtual address. */
	__u64 tiler_heap_ctx_gpu_va;
	/**
	 * @first_heap_chunk_gpu_va: First heap chunk.
	 *
	 * The tiler heap is formed of heap chunks forming a singly-linked list. This
	 * is the first element in the list.
	 */
	__u64 first_heap_chunk_gpu_va;
};
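
/*
 * Illustrative sketch (editor's addition): creating a growable tiler heap on
 * an existing VM. The chunk size and counts are arbitrary example values
 * within the documented constraints; vm_id and panthor_ioctl() come from the
 * earlier examples.
 *
 *	struct drm_panthor_tiler_heap_create heap_args = {
 *		.vm_id = vm_id,
 *		.initial_chunk_count = 4,
 *		.chunk_size = 2 * 1024 * 1024,	// page-aligned, within [128k:8M]
 *		.max_chunks = 64,
 *		.target_in_flight = 65535,
 *	};
 *
 *	if (panthor_ioctl(fd, DRM_IOCTL_PANTHOR_TILER_HEAP_CREATE, &heap_args) == 0) {
 *		// heap_args.tiler_heap_ctx_gpu_va and heap_args.first_heap_chunk_gpu_va
 *		// are typically passed to the tiler through the command stream.
 *	}
 */
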
/**
 * struct drm_panthor_tiler_heap_destroy - Arguments passed to DRM_IOCTL_PANTHOR_TILER_HEAP_DESTROY
 */
struct drm_panthor_tiler_heap_destroy {
	/**
	 * @handle: Handle of the tiler heap to destroy.
	 *
	 * Must be a valid heap handle returned by DRM_IOCTL_PANTHOR_TILER_HEAP_CREATE.
	 */
	__u32 handle;
	/** @pad: Padding field, MBZ. */
	__u32 pad;
};

#if defined(__cplusplus)
}
#endif

#endif /* _PANTHOR_DRM_H_ */