oasis-root

Compiled tree of Oasis Linux, based on its own branch at <https://hacktivis.me/git/oasis/>

git clone https://anongit.hacktivis.me/git/oasis-root.git

zpool-status.8 (12660B)


  1. .\"
  2. .\" CDDL HEADER START
  3. .\"
  4. .\" The contents of this file are subject to the terms of the
  5. .\" Common Development and Distribution License (the "License").
  6. .\" You may not use this file except in compliance with the License.
  7. .\"
  8. .\" You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
  9. .\" or https://opensource.org/licenses/CDDL-1.0.
  10. .\" See the License for the specific language governing permissions
  11. .\" and limitations under the License.
  12. .\"
  13. .\" When distributing Covered Code, include this CDDL HEADER in each
  14. .\" file and include the License file at usr/src/OPENSOLARIS.LICENSE.
  15. .\" If applicable, add the following below this CDDL HEADER, with the
  16. .\" fields enclosed by brackets "[]" replaced with your own identifying
  17. .\" information: Portions Copyright [yyyy] [name of copyright owner]
  18. .\"
  19. .\" CDDL HEADER END
  20. .\"
  21. .\" Copyright (c) 2007, Sun Microsystems, Inc. All Rights Reserved.
  22. .\" Copyright (c) 2012, 2018 by Delphix. All rights reserved.
  23. .\" Copyright (c) 2012 Cyril Plisko. All Rights Reserved.
  24. .\" Copyright (c) 2017 Datto Inc.
  25. .\" Copyright (c) 2018 George Melikov. All Rights Reserved.
  26. .\" Copyright 2017 Nexenta Systems, Inc.
  27. .\" Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
  28. .\"
  29. .Dd February 14, 2024
  30. .Dt ZPOOL-STATUS 8
  31. .Os
  32. .
  33. .Sh NAME
  34. .Nm zpool-status
  35. .Nd show detailed health status for ZFS storage pools
  36. .Sh SYNOPSIS
  37. .Nm zpool
  38. .Cm status
  39. .Op Fl dDegiLpPstvx
  40. .Op Fl T Sy u Ns | Ns Sy d
  41. .Op Fl c Op Ar SCRIPT1 Ns Oo , Ns Ar SCRIPT2 Oc Ns …
  42. .Oo Ar pool Oc Ns …
  43. .Op Ar interval Op Ar count
  44. .Op Fl j Op Ar --json-int, --json-flat-vdevs, --json-pool-key-guid
  45. .
  46. .Sh DESCRIPTION
  47. Displays the detailed health status for the given pools.
  48. If no
  49. .Ar pool
  50. is specified, then the status of each pool in the system is displayed.
  51. For more information on pool and device health, see the
  52. .Sx Device Failure and Recovery
  53. section of
  54. .Xr zpoolconcepts 7 .
  55. .Pp
  56. If a scrub or resilver is in progress, this command reports the percentage done
  57. and the estimated time to completion.
  58. Both of these are only approximate, because the amount of data in the pool and
  59. the other workloads on the system can change.
  60. .Bl -tag -width Ds
  61. .It Fl -power
  62. Display vdev enclosure slot power status (on or off).
  63. .It Fl c Op Ar SCRIPT1 Ns Oo , Ns Ar SCRIPT2 Oc Ns …
  64. Run a script (or scripts) on each vdev and include the output as a new column
  65. in the
  66. .Nm zpool Cm status
  67. output.
  68. See the
  69. .Fl c
  70. option of
  71. .Nm zpool Cm iostat
  72. for complete details.
  73. .It Fl j , -json Op Ar --json-int, --json-flat-vdevs, --json-pool-key-guid
  74. Display the status for ZFS pools in JSON format.
  75. Specify
  76. .Sy --json-int
  77. to display numbers in integer format instead of strings.
  78. Specify
  79. .Sy --json-flat-vdevs
  80. to display vdevs in flat hierarchy instead of nested vdev objects.
  81. Specify
  82. .Sy --json-pool-key-guid
  83. to set pool GUID as key for pool objects instead of pool names.
.It Fl d
Display the number of Direct I/O read/write checksum verify errors that have
occurred on a top-level VDEV.
See
.Sx zfs_vdev_direct_write_verify
in
.Xr zfs 4
for details about the conditions that can cause Direct I/O write checksum
verify failures to occur.
Direct I/O read checksum verify errors can also occur if the contents of the
buffer are being manipulated after the I/O has been issued and is in flight.
In the case of Direct I/O read checksum verify errors, the I/O will be reissued
through the ARC.
.It Fl D
Display a histogram of deduplication statistics, showing the allocated
.Pq physically present on disk
and referenced
.Pq logically referenced in the pool
block counts and sizes by reference count.
If repeated (-DD), also shows statistics on how much of the DDT is resident
in the ARC.
.It Fl e
Only show unhealthy vdevs (not-ONLINE or with errors).
.It Fl g
Display vdev GUIDs instead of the normal device names.
These GUIDs can be used in place of device names for the zpool
detach/offline/remove/replace commands.
.It Fl i
Display vdev initialization status.
.It Fl L
Display real paths for vdevs resolving all symbolic links.
This can be used to look up the current block device name regardless of the
.Pa /dev/disk/
path used to open it.
.It Fl p
Display numbers in parsable (exact) values.
.It Fl P
Display full paths for vdevs instead of only the last component of
the path.
This can be used in conjunction with the
.Fl L
flag.
.It Fl s
Display the number of leaf vdev slow I/O operations.
This is the number of I/O operations that didn't complete in
.Sy zio_slow_io_ms
milliseconds
.Pq Sy 30000 No by default .
This does not necessarily mean the I/O operations failed to complete, just that
they took an unreasonably long amount of time.
This may indicate a problem with the underlying storage.
.It Fl t
Display vdev TRIM status.
.It Fl T Sy u Ns | Ns Sy d
Display a time stamp.
Specify
.Sy u
for a printed representation of the internal representation of time.
See
.Xr time 1 .
Specify
.Sy d
for standard date format.
See
.Xr date 1 .
.It Fl v
Displays verbose data error information, printing out a complete list of all
data errors since the last complete pool scrub.
If the head_errlog feature is enabled and files containing errors have been
removed, then the respective filenames will not be reported in subsequent runs
of this command.
.It Fl x
Only display status for pools that are exhibiting errors or are otherwise
unavailable.
Warnings about pools not using the latest on-disk format will not be included.
.El
.
.Sh EXAMPLES
.\" These are, respectively, examples 16 from zpool.8
.\" Make sure to update them bidirectionally
.Ss Example 1 : No Adding output columns
Additional columns can be added to the
.Nm zpool Cm status No and Nm zpool Cm iostat No output with Fl c .
.Bd -literal -compact -offset Ds
.No # Nm zpool Cm status Fl c Pa vendor , Ns Pa model , Ns Pa size
NAME     STATE  READ WRITE CKSUM vendor  model        size
tank     ONLINE 0    0     0
mirror-0 ONLINE 0    0     0
U1       ONLINE 0    0     0     SEAGATE ST8000NM0075 7.3T
U10      ONLINE 0    0     0     SEAGATE ST8000NM0075 7.3T
U11      ONLINE 0    0     0     SEAGATE ST8000NM0075 7.3T
U12      ONLINE 0    0     0     SEAGATE ST8000NM0075 7.3T
U13      ONLINE 0    0     0     SEAGATE ST8000NM0075 7.3T
U14      ONLINE 0    0     0     SEAGATE ST8000NM0075 7.3T
.No # Nm zpool Cm iostat Fl vc Pa size
              capacity     operations     bandwidth
pool        alloc   free   read  write   read  write  size
----------  -----  -----  -----  -----  -----  -----  ----
rpool       14.6G  54.9G      4     55   250K  2.69M
  sda1      14.6G  54.9G      4     55   250K  2.69M    70G
----------  -----  -----  -----  -----  -----  -----  ----
.Ed
.
.Ss Example 2 : No Display the status output in JSON format
.Nm zpool Cm status No can output in JSON format if
.Fl j
is specified.
.Fl c
can be used to run a script on each VDEV.
.Bd -literal -compact -offset Ds
.No # Nm zpool Cm status Fl j Fl c Pa vendor , Ns Pa model , Ns Pa size | Nm jq
{
  "output_version": {
    "command": "zpool status",
    "vers_major": 0,
    "vers_minor": 1
  },
  "pools": {
    "tank": {
      "name": "tank",
      "state": "ONLINE",
      "guid": "3920273586464696295",
      "txg": "16597",
      "spa_version": "5000",
      "zpl_version": "5",
      "status": "OK",
      "vdevs": {
        "tank": {
          "name": "tank",
          "alloc_space": "62.6G",
          "total_space": "15.0T",
          "def_space": "11.3T",
          "read_errors": "0",
          "write_errors": "0",
          "checksum_errors": "0",
          "vdevs": {
            "raidz1-0": {
              "name": "raidz1-0",
              "vdev_type": "raidz",
              "guid": "763132626387621737",
              "state": "HEALTHY",
              "alloc_space": "62.5G",
              "total_space": "10.9T",
              "def_space": "7.26T",
              "rep_dev_size": "10.9T",
              "read_errors": "0",
              "write_errors": "0",
              "checksum_errors": "0",
              "vdevs": {
                "ca1eb824-c371-491d-ac13-37637e35c683": {
                  "name": "ca1eb824-c371-491d-ac13-37637e35c683",
                  "vdev_type": "disk",
                  "guid": "12841765308123764671",
                  "path": "/dev/disk/by-partuuid/ca1eb824-c371-491d-ac13-37637e35c683",
                  "state": "HEALTHY",
                  "rep_dev_size": "3.64T",
                  "phys_space": "3.64T",
                  "read_errors": "0",
                  "write_errors": "0",
                  "checksum_errors": "0",
                  "vendor": "ATA",
                  "model": "WDC WD40EFZX-68AWUN0",
                  "size": "3.6T"
                },
                "97cd98fb-8fb8-4ac4-bc84-bd8950a7ace7": {
                  "name": "97cd98fb-8fb8-4ac4-bc84-bd8950a7ace7",
                  "vdev_type": "disk",
                  "guid": "1527839927278881561",
                  "path": "/dev/disk/by-partuuid/97cd98fb-8fb8-4ac4-bc84-bd8950a7ace7",
                  "state": "HEALTHY",
                  "rep_dev_size": "3.64T",
                  "phys_space": "3.64T",
                  "read_errors": "0",
                  "write_errors": "0",
                  "checksum_errors": "0",
                  "vendor": "ATA",
                  "model": "WDC WD40EFZX-68AWUN0",
                  "size": "3.6T"
                },
                "e9ddba5f-f948-4734-a472-cb8aa5f0ff65": {
                  "name": "e9ddba5f-f948-4734-a472-cb8aa5f0ff65",
                  "vdev_type": "disk",
                  "guid": "6982750226085199860",
                  "path": "/dev/disk/by-partuuid/e9ddba5f-f948-4734-a472-cb8aa5f0ff65",
                  "state": "HEALTHY",
                  "rep_dev_size": "3.64T",
                  "phys_space": "3.64T",
                  "read_errors": "0",
                  "write_errors": "0",
                  "checksum_errors": "0",
                  "vendor": "ATA",
                  "model": "WDC WD40EFZX-68AWUN0",
                  "size": "3.6T"
                }
              }
            }
          }
        }
      },
      "dedup": {
        "mirror-2": {
          "name": "mirror-2",
          "vdev_type": "mirror",
          "guid": "2227766268377771003",
          "state": "HEALTHY",
          "alloc_space": "89.1M",
          "total_space": "3.62T",
          "def_space": "3.62T",
          "rep_dev_size": "3.62T",
          "read_errors": "0",
          "write_errors": "0",
          "checksum_errors": "0",
          "vdevs": {
            "db017360-d8e9-4163-961b-144ca75293a3": {
              "name": "db017360-d8e9-4163-961b-144ca75293a3",
              "vdev_type": "disk",
              "guid": "17880913061695450307",
              "path": "/dev/disk/by-partuuid/db017360-d8e9-4163-961b-144ca75293a3",
              "state": "HEALTHY",
              "rep_dev_size": "3.63T",
              "phys_space": "3.64T",
              "read_errors": "0",
              "write_errors": "0",
              "checksum_errors": "0",
              "vendor": "ATA",
              "model": "WDC WD40EFZX-68AWUN0",
              "size": "3.6T"
            },
            "952c3baf-b08a-4a8c-b7fa-33a07af5fe6f": {
              "name": "952c3baf-b08a-4a8c-b7fa-33a07af5fe6f",
              "vdev_type": "disk",
              "guid": "10276374011610020557",
              "path": "/dev/disk/by-partuuid/952c3baf-b08a-4a8c-b7fa-33a07af5fe6f",
              "state": "HEALTHY",
              "rep_dev_size": "3.63T",
              "phys_space": "3.64T",
              "read_errors": "0",
              "write_errors": "0",
              "checksum_errors": "0",
              "vendor": "ATA",
              "model": "WDC WD40EFZX-68AWUN0",
              "size": "3.6T"
            }
          }
        }
      },
      "special": {
        "25d418f8-92bd-4327-b59f-7ef5d5f50d81": {
          "name": "25d418f8-92bd-4327-b59f-7ef5d5f50d81",
          "vdev_type": "disk",
          "guid": "3935742873387713123",
          "path": "/dev/disk/by-partuuid/25d418f8-92bd-4327-b59f-7ef5d5f50d81",
          "state": "HEALTHY",
          "alloc_space": "37.4M",
          "total_space": "444G",
          "def_space": "444G",
          "rep_dev_size": "444G",
          "phys_space": "447G",
          "read_errors": "0",
          "write_errors": "0",
          "checksum_errors": "0",
          "vendor": "ATA",
          "model": "Micron_5300_MTFDDAK480TDS",
          "size": "447.1G"
        }
      },
      "error_count": "0"
    }
  }
}
.Ed
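.
.Ss Example 3 : No Repeating the status report with a timestamp
The following sketch combines
.Fl T Sy d
with an
.Ar interval
and
.Ar count
to print a time stamp in
.Xr date 1
format before each of two reports taken five seconds apart.
The body of each report depends on the pools present on the system, so no
output is shown here.
.Bd -literal -compact -offset Ds
.No # Nm zpool Cm status Fl T Sy d Ar 5 2
.Ed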
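.
.Ss Example 4 : No Checking only for problems
As a quick health check,
.Fl x
restricts the report to pools that are exhibiting errors or are otherwise
unavailable, and
.Fl e
further restricts the vdev listing to unhealthy vdevs.
The following sketch assumes every pool is healthy, in which case the command
simply says so; otherwise the usual per-pool report is printed for the
affected pools.
.Bd -literal -compact -offset Ds
.No # Nm zpool Cm status Fl x
all pools are healthy
.Ed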
.
.Sh SEE ALSO
.Xr zpool-events 8 ,
.Xr zpool-history 8 ,
.Xr zpool-iostat 8 ,
.Xr zpool-list 8 ,
.Xr zpool-resilver 8 ,
.Xr zpool-scrub 8 ,
.Xr zpool-wait 8