http://zfsonlinux.org/

https://launchpad.net/~zfs-native/+archive/stable

# old Ubuntu PPA method (kept for reference; superseded by the Debian packages below):
#echo deb http://ppa.launchpad.net/zfs-native/stable/ubuntu lucid main > /etc/apt/sources.list.d/zfs.list
#echo deb-src http://ppa.launchpad.net/zfs-native/stable/ubuntu lucid main >> /etc/apt/sources.list.d/zfs.list
#apt-key adv --keyserver keyserver.ubuntu.com --recv-keys F6B0FC61

wget http://archive.zfsonlinux.org/debian/pool/main/z/zfsonlinux/zfsonlinux_2%7Ewheezy_all.deb
dpkg -i zfsonlinux_2~wheezy_all.deb

aptitude update
aptitude install pve-headers-2.6.32-20-pve # headers for the latest pve kernel, if running on pve systems; dkms only pulls in standard debian kernel headers on its own
ln -s /lib/modules/2.6.32-12-pve/build /lib/modules/2.6.32-12-pve/source # pve headers don't add this necessary symlink - ZoL worked around this on 20120409, so it should be in their next release... see https://github.com/zfsonlinux/zfs/issues/630
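# generic form of the above for whatever kernel is currently running (untested sketch):
ln -s /lib/modules/$(uname -r)/build /lib/modules/$(uname -r)/source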
#aptitude install ubuntu-zfs
aptitude install debian-zfs

aptitude install nfs-server # if sharing pool via NFS

...edit /etc/default/zfs and set ZFS_MOUNT and ZFS_UNMOUNT to 'yes'
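# non-interactive version of the above (assumes the variables are already present in the file, possibly commented out):
sed -i -e "s/^#\?ZFS_MOUNT=.*/ZFS_MOUNT='yes'/" -e "s/^#\?ZFS_UNMOUNT=.*/ZFS_UNMOUNT='yes'/" /etc/default/zfs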

# apparently no longer needed with debian-zfs:
# update-rc.d zfs-mount defaults

see http://pve.proxmox.com/wiki/ZFS

aptitude install smartmontools

uncomment start_smartd="yes" in /etc/default/smartmontools
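# non-interactive version (assumes the stock commented-out line is present):
sed -i 's/^#start_smartd/start_smartd/' /etc/default/smartmontools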

/etc/init.d/smartmontools start

# for reading disk serial numbers:
aptitude install --without-recommends hdparm
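# e.g., to read the serial number of one disk (device name is just an example):
hdparm -I /dev/sdc | grep 'Serial Number'
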
zpool create -f -o ashift=12 tank raidz2 /dev/sdc /dev/sdd /dev/sde /dev/sdf /dev/sdg /dev/sdh /dev/sdi /dev/sdj /dev/sdk /dev/sdl /dev/sdm /dev/sdn /dev/sdo /dev/sdp /dev/sdq /dev/sdr /dev/sds /dev/sdt /dev/sdu /dev/sdv
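
# sanity check: zdb should report ashift: 12 for the vdev
zdb -C tank | grep ashift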

# tune for performance -- see http://www.nerdblog.com/2013/10/zfs-xattr-tuning-on-linux.html
zfs set xattr=sa tank

# add l2arc on ssd ("ssd" here is an existing LVM volume group)
lvcreate --size=30G --name=l2arc ssd
zfs set secondarycache=metadata tank # by default, this is set to "all" (caches both data and metadata) -- we have metadata performance problems, so we'll rely on RAM (primary cache) for caching data
zpool add tank cache /dev/ssd/l2arc
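# verify: the LV should now be listed under a "cache" section
zpool status tank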

# create zfs filesystem:
zfs create tank/<filesystem_name>

# setting filesystem quota:
zfs set quota=100g tank/<filesystem_name>
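zfs get quota,used tank/<filesystem_name> # confirm it took / compare usage to quota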

# create zvol:
zfs create -V <size> tank/<volume_name>
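# the zvol then appears as a block device:
ls -l /dev/zvol/tank/<volume_name>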

# disable atime for a filesystem:
zfs set atime=off tank/<filesystem_name>

# reset local value and inherit from parent instead
zfs inherit atime tank/<filesystem_name>

# enable deduplication on a filesystem:
zfs set dedup=on tank/<filesystem_name>
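# check whether dedup is actually paying off (the dedup table must fit in RAM/ARC):
zpool get dedupratio tank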

# enable compression on a filesystem:
zfs set compression=lz4 tank/<filesystem_name>
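# check how well compression is working:
zfs get compressratio tank/<filesystem_name>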

when updating kernel…

ln -s /lib/modules/2.6.32-12-pve/build /lib/modules/2.6.32-12-pve/source # pve headers don't add this necessary symlink - ZoL worked around this on 20120409, so it should be in their next release... see https://github.com/zfsonlinux/zfs/issues/630
dkms status # to figure out what version of spl and zfs to use
dkms build -m spl -v 0.6.2 -k 2.6.32-24-pve
dkms install -m spl -v 0.6.2 -k 2.6.32-24-pve
dkms build -m zfs -v 0.6.2 -k 2.6.32-24-pve
dkms install -m zfs -v 0.6.2 -k 2.6.32-24-pve
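# same thing as a loop (spl must come before zfs, which this ordering preserves):
for m in spl zfs; do
  dkms build -m $m -v 0.6.2 -k 2.6.32-24-pve && dkms install -m $m -v 0.6.2 -k 2.6.32-24-pve
done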

importing pools with different device names…

zpool export tank
zpool import -d /dev/disk/by-path/
#zpool import -d /dev/disk/by-id/  # use only on squeeze-based systems which don't create by-path nodes
(this will show a preview of what it found / what names it will use)
zpool import -d /dev/disk/by-path/ tank
#zpool import -d /dev/disk/by-id/ tank  # use only on squeeze-based systems which don't create by-path nodes

replacing a failed device…

# if the failure was temporary (e.g. bad cable), try clearing errors to rebuild array
zpool clear tank

# test to make sure it's working
zpool scrub tank
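# watch scrub progress and per-device error counts:
zpool status tank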

# if clearing doesn't help (the scrub turns up errors again), offline the device:
zpool offline tank /dev/disk/by-path/whatever
zpool offline tank wwn-{whatever}
# swap disks, then replace it:
zpool replace tank /dev/disk/by-path/whatever
zpool replace tank wwn-{whatever} /dev/disk/by-id/wwn-{new_whatever}
# (the one-argument form is enough when the replacement disk shows up under the same
#  device name; if it shows up under a new name, give both old and new, as in the wwn line)

# improve resilver speed:
# (see https://www.reddit.com/r/zfs/comments/4192js/resilvering_raidz_why_so_incredibly_slow/cz0z0xj/)
echo 0 > /sys/module/zfs/parameters/zfs_resilver_delay
echo 512 > /sys/module/zfs/parameters/zfs_top_maxinflight
echo 5000 > /sys/module/zfs/parameters/zfs_resilver_min_time_ms
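
# watch the resilver estimate while it runs:
watch -n 60 zpool status tank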

# put them back to defaults (when done):
echo 2 > /sys/module/zfs/parameters/zfs_resilver_delay
echo 32 > /sys/module/zfs/parameters/zfs_top_maxinflight
echo 3000 > /sys/module/zfs/parameters/zfs_resilver_min_time_ms

# temporarily disconnecting a device:
zpool offline tank /dev/disk/by-path/whatever
# inspect disk, then reconnect:
zpool online tank /dev/disk/by-path/whatever

migrating data from one system to another…

# (you probably want to run this stuff in screen or tmux)
# migrate is snapshot name
# tank/filesystem is filesystem to migrate
# 44332 is random TCP port number

node1# zfs list # to display filesystem size for comparison to pv output
node1# zfs snapshot tank/filesystem@migrate
node2# nc -l -p 44332 | zfs receive tank/filesystem
node1# zfs send tank/filesystem@migrate | pv | nc node2 44332
# piping through pv is optional, but handy to monitor transfer speed/progress
node1# zfs destroy tank/filesystem@migrate
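
# untested sketch: on big/busy filesystems, an incremental follow-up pass (run
# before the destroy above, after stopping writers) keeps the cutover window short:
node1# zfs snapshot tank/filesystem@migrate2
node2# nc -l -p 44332 | zfs receive -F tank/filesystem
node1# zfs send -i migrate tank/filesystem@migrate2 | pv | nc node2 44332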

#or just use rsync:
newnode# rsync -aixXHWv --del --numeric-ids --progress -e "ssh -T -c arcfour -o Compression=No -x" oldnode:/tank/{filesystem} /tank/
#rsync all:
newnode# rsync -aiXHWv --del --numeric-ids --progress -e "ssh -T -c arcfour -o Compression=No -x" oldnode:/tank/ /tank/
# (the arcfour cipher is chosen for speed on the old host; newer OpenSSH releases have dropped arcfour, so a modern fast cipher like aes128-gcm@openssh.com may be needed instead... also recommended to run multiple rsync instances, since a single SSH connection is CPU-bound on one core -- see the sketch below)
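# untested sketch of the multiple-instance idea: one rsync per top-level directory,
# 4 at a time (paths are examples; breaks on names containing whitespace):
ssh oldnode ls /tank/filesystem | xargs -P4 -I{} \
  rsync -aiXHW --del --numeric-ids -e "ssh -T -o Compression=No -x" \
    oldnode:/tank/filesystem/{} /tank/filesystem/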

other cool stuff…

# shows statistics:
zpool iostat -v
iostat -x
# or, auto-updating version:
zpool iostat -v 1
iostat -x 1

# shows previous actions taken on a pool:
zpool history

# monitor network usage:
nload