-
Notifications
You must be signed in to change notification settings - Fork 0
/
zfs.txt
99 lines (67 loc) · 2.9 KB
/
zfs.txt
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
#Install ZFS support on Ubuntu
sudo apt install zfsutils-linux
#List current zpools
zpool list
#Create a new zpool
zpool create <pool name> /dev/sdb
#Check the status of a zpool
zpool status <pool name>
#Deleting the zpool
zpool destroy <pool name>
#Create a mirrored zpool
zpool create <pool name> mirror /dev/sdb /dev/sdc
#Expanding existing zpool with additional mirror
zpool add <pool name> mirror /dev/sdd /dev/sde
#Create a RAID 5 zpool
zpool create <pool name> raidz /dev/sdb /dev/sdc /dev/sdd
#Adding disk to raidz, this does not grow existing raid set, but creates additional set
zpool add <pool name> raidz /dev/sde /dev/sdf /dev/sdg
#Create a RAID 10 zpool
zpool create <pool name> mirror /dev/sdb /dev/sdc mirror /dev/sdd /dev/sde
#Simply replace failed disk sdc with sdd
zpool replace <pool name> /dev/sdc /dev/sdd
#Take a disk offline that is failing, then you can pull disk and replace
zpool offline <pool name> /dev/sdc
#Put replaced disk back online
zpool online <pool name> /dev/sdc
#If using a SSD log disk, be sure to mirror it up
#Add log to pool
zpool add <pool> log mirror /dev/sdd /dev/sde
#If using an SSD read cache, a single drive is ok; if it fails, ZFS reads from the actual disks
#Add cache to pool
zpool add <pool> cache /dev/sde
#Scrub filesystem, similar to fsck in other filesystems, data is already scrubbed at read, this just does all
zpool scrub <pool>
-----------------------------------------------------------------------------------
#Create a zfs dataset within the pool
zfs create <pool>/<dataset>
#Delete zfs dataset
zfs destroy <pool>/<dataset>
#Create dataset within a dataset, inheritance is turned on by default
zfs create <pool>/<dataset>/<dataset>
#List all properties of a ZFS dataset
zfs get all <pool>/<dataset>
#Change mount point of a zfs dataset
sudo zfs set mountpoint=<new mount location> <pool>/<dataset>
#Set quota on dataset, setting it to 100m
zfs set quota=100m <pool>/<dataset>
#Reserve space out of main pool or dataset for this dataset in the future
zfs set reservation=100m <pool>/<dataset>
#Snapshots
zfs snapshot <pool>/<filesystem>@<snapname>
#Snapdir hidden by default
cd .zfs
cd <snapname>
#List snapshots
zfs list -t snapshot
#Clone, creates read/write version of snapshot
zfs clone <pool>/<dataset>@<snapname> <pool>/<newfilesystem>
#ZFS Send/Receive to another pool
zfs send <pool>/<filesystem>@<snap> | zfs recv <secondpool>/<filesystem>
#ZFS Send/Recv to another pool on remote host
zfs send <pool>/<filesystem>@<snap> | ssh <username>@<hostname> zfs recv <secondpool>/<filesystem>
#ZFS Send/Recv to another pool on remote host incremental changes between snapshots
zfs send -i <pool>/<filesystem>@<oldsnap> <pool>/<filesystem>@<newsnap> | ssh <username>@<hostname> zfs recv <secondpool>/<filesystem>
#ZFS Export/Import - used as an alternative way to move a pool from one system to another
zpool export <pool>
zpool import <pool>
#Create a ZFS Volume, size can be something like 10G.
zfs create -V <size> <pool>/<name>