* Test environment
Host | OS | IP
---|---|---
glusterfs-server1 | CentOS 6.4 64bit | 192.168.119.11
glusterfs-server2 | CentOS 6.4 64bit | 192.168.119.12
glusterfs-server3 | CentOS 6.4 64bit | 192.168.119.13
glusterfs-server4 | CentOS 6.4 64bit | 192.168.119.14
glusterfs-client | CentOS 6.4 64bit | 192.168.119.100
※ Tested on VMware; at least 4 server nodes are required. The GlusterFS version must also be identical between the servers and the client, otherwise the mount will not work properly.
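A quick way to confirm that the versions match (this check is not part of the original notes) is to run the following on every server and on the client once the packages are installed:
# prints the installed GlusterFS version; all nodes should report the same release
glusterfs --version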
1. GlusterFS server
## Apply the same setup on all of glusterfs-server1~4
[root@localhost / ]# cd /etc/yum.repos.d/
[root@localhost yum.repos.d]# wget http://download.gluster.org/pub/gluster/glusterfs/3.5/3.5.1/EPEL.repo/glusterfs-epel.repo
[root@localhost yum.repos.d]# yum repolist
[root@localhost yum.repos.d]# yum install glusterfs-server
==============================================
** If the install fails with an error, edit the glusterfs-epel.repo file as shown below
[glusterfs-epel]
name=GlusterFS is a clustered file-system capable of scaling to several petabytes.
baseurl=http://download.gluster.org/pub/gluster/glusterfs/LATEST/EPEL.repo/epel-$releasever/$basearch/
enabled=1
skip_if_unavailable=1
gpgcheck=1
gpgkey=http://download.gluster.org/pub/gluster/glusterfs/LATEST/EPEL.repo/pub.key
[glusterfs-noarch-epel]
name=GlusterFS is a clustered file-system capable of scaling to several petabytes.
baseurl=http://download.gluster.org/pub/gluster/glusterfs/LATEST/EPEL.repo/epel-$releasever/noarch
enabled=1
skip_if_unavailable=1
gpgcheck=1
gpgkey=http://download.gluster.org/pub/gluster/glusterfs/LATEST/EPEL.repo/pub.key
[glusterfs-source-epel]
name=GlusterFS is a clustered file-system capable of scaling to several petabytes. - Source
baseurl=http://download.gluster.org/pub/gluster/glusterfs/LATEST/EPEL.repo/epel-$releasever/SRPMS
enabled=1
skip_if_unavailable=1
gpgcheck=1
gpgkey=http://download.gluster.org/pub/gluster/glusterfs/LATEST/EPEL.repo/pub.key
=============================================================
[root@localhost yum.repos.d]# chkconfig --level 235 glusterd on
[root@localhost yum.repos.d]# service glusterd start
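On a stock CentOS 6.4 install, iptables may block glusterd's management port (24007/tcp) and the brick ports, which makes peer probe and client mounts fail. Firewall handling is not shown in the original notes; for a throwaway VMware test the simplest (insecure, test-only) workaround is:
# test-only: disable the firewall instead of opening individual GlusterFS ports
service iptables stop
chkconfig iptables off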
## Run on glusterfs-server1 (192.168.119.11); any one of the 4 servers will do
Peer registration is done by IP here, but hostnames work just as well.
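If hostnames are used instead, every node must be able to resolve them (for example via /etc/hosts). A hedged sketch, assuming an /etc/hosts entry such as "192.168.119.12 glusterfs-server2" exists on all nodes:
# glusterfs-server2 is the hostname assumed above, not taken from the original run
gluster peer probe glusterfs-server2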
[root@localhost ~]# gluster peer probe 192.168.119.12
peer probe: success.
[root@localhost ~]# gluster peer probe 192.168.119.13
peer probe: success.
[root@localhost ~]# gluster peer probe 192.168.119.14
peer probe: success.
[root@localhost ~]# gluster peer status
Number of Peers: 3
Hostname: 192.168.119.12
Uuid: 9045836a-5d65-4dd9-86c1-0c019de6e62e
State: Peer in Cluster (Connected)
Hostname: 192.168.119.13
Uuid: 383426cf-002b-4e34-8336-cb6aa7f26863
State: Peer in Cluster (Connected)
Hostname: 192.168.119.14
Uuid: 4027ebf4-23b5-4c89-a0d8-d93a5939b5e4
State: Peer in Cluster (Connected)
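The brick directories (/data2/data in this test) are assumed to already exist on every node; creating them is not shown in the notes. If needed, run on each of the four servers:
# create the brick directory used by the volume below
mkdir -p /data2/data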
[root@localhost ~]# gluster volume create dist_vol 192.168.119.11:/data2/data 192.168.119.12:/data2/data 192.168.119.13:/data2/data 192.168.119.14:/data2/data
volume create: dist_vol: success: please start the volume to access data
[root@localhost ~]# gluster volume info dist_vol
Volume Name: dist_vol
Type: Distribute
Volume ID: 0556da7f-126e-458e-a3ac-cd1018c52586
Status: Created
Number of Bricks: 4
Transport-type: tcp
Bricks:
Brick1: 192.168.119.11:/data2/data
Brick2: 192.168.119.12:/data2/data
Brick3: 192.168.119.13:/data2/data
Brick4: 192.168.119.14:/data2/data
[root@localhost ~]# gluster volume delete dist_vol
Deleting volume will erase all information about the volume. Do you want to continue? (y/n) y
volume delete: dist_vol: success
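dist_vol could be deleted immediately only because it was never started (Status: Created). A volume that is already started has to be stopped first; a hedged example with a placeholder name:
# <volname> is a placeholder; a started volume must be stopped before delete
gluster volume stop <volname>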
## Creating volumes
In GlusterFS, a volume (a clustered storage building block) is made up of members called Bricks.
A Brick is a specific directory on a GlusterFS server, written as <server IP>:/<directory>; several Bricks are grouped into a cluster and exposed as a single volume.
GlusterFS offers five main volume types:
distributed volume
-> Files are distributed across the Bricks of the gluster server nodes (the default type).
replicated volume
-> Files are written to the Bricks of the gluster server nodes with as many copies as the specified replica count.
stripe volume
-> Each file is split into stripe-count pieces and stored across Bricks on multiple servers. The number of Bricks given at volume creation must match the stripe count.
distributed stripe volume
-> Files are split into stripe-count pieces and additionally distributed across the Bricks of multiple gluster server nodes.
distributed replicated volume
-> Used to build multiple replicated sets and distribute files across them.
A distributed replicated volume can be created with a command like the sketch below.
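This exact command was not run in the test; it is a hedged sketch that assumes a hypothetical brick directory /data2/data3 exists on every node. With 4 Bricks and replica 2, gluster pairs the Bricks into two replica sets and distributes files across them (Type: Distributed-Replicate).
# repl_dist_vol and /data2/data3 are illustrative names, not from the original test
gluster volume create repl_dist_vol replica 2 transport tcp 192.168.119.11:/data2/data3 192.168.119.12:/data2/data3 192.168.119.13:/data2/data3 192.168.119.14:/data2/data3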
A Distributed Stripe Volume can be created with the following command (stripe 2 over 4 Bricks):
[root@localhost ~]# gluster volume create stri_dist_vol stripe 2 transport tcp 192.168.119.11:/data2/data2 192.168.119.12:/data2/data2 192.168.119.13:/data2/data2 192.168.119.14:/data2/data2
volume create: stri_dist_vol: success: please start the volume to access data
[root@localhost ~]# gluster volume info stri_dist_vol
Volume Name: stri_dist_vol
Type: Distributed-Stripe
Volume ID: 334fd296-2272-446f-a39c-7300de39898a
Status: Created
Number of Bricks: 2 x 2 = 4
Transport-type: tcp
Bricks:
Brick1: 192.168.119.11:/data2/data2
Brick2: 192.168.119.12:/data2/data2
Brick3: 192.168.119.13:/data2/data2
Brick4: 192.168.119.14:/data2/data2
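The create output says the volume still has to be started; a client mount only succeeds against a started volume. The step is implied by the successful mount later on but is not captured in the notes:
gluster volume start stri_dist_vol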
## Steps on the client
[root@localhost ~]# cd /usr/local/src/
[root@localhost src]# wget http://bits.gluster.com/gluster/glusterfs/3.2.6/x86_64/glusterfs-core-3.2.6-1.x86_64.rpm
[root@localhost src]# wget http://bits.gluster.com/gluster/glusterfs/3.2.6/x86_64/glusterfs-fuse-3.2.6-1.x86_64.rpm
[root@localhost src]# rpm -Uvh glusterfs-core-3.2.6-1.x86_64.rpm
[root@localhost src]# rpm -Uvh glusterfs-fuse-3.2.6-1.x86_64.rpm
[root@localhost src]# modprobe fuse
mount -t glusterfs 192.168.119.11:/stri_dist_vol /mnt
Ran the mount, but it does not work --;;
The log shows:
[root@localhost glusterfs]# vi data-.log
[2014-11-22 17:50:44.328277] E [glusterfsd-mgmt.c:621:mgmt_getspec_cbk] 0-glusterfs: XDR decoding error
The versions do not match, so remove the packages and reinstall.
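The removal command is not recorded in the notes; a minimal sketch, assuming the two 3.2.6 RPMs installed above are the only GlusterFS packages present:
# remove the mismatched 3.2.6 client packages before installing 3.5 from the repo
rpm -e glusterfs-fuse glusterfs-core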
[root@localhost / ]# cd /etc/yum.repos.d/
[root@localhost yum.repos.d]# wget http://download.gluster.org/pub/gluster/glusterfs/3.5/3.5.1/EPEL.repo/glusterfs-epel.repo
[root@localhost yum.repos.d]# yum repolist
[root@localhost yum.repos.d]# yum install glusterfs*
[root@localhost glusterfs]# mount -t glusterfs 192.168.119.11:/stri_dist_vol /data/
[root@localhost glusterfs]# df -h
Filesystem Size Used Avail Use% Mounted on
/dev/sda2 16G 876M 15G 6% /
tmpfs 495M 0 495M 0% /dev/shm
192.168.119.11:/stri_dist_vol
79G 689M 75G 1% /data
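To make the mount persist across reboots, an /etc/fstab entry along these lines can be added (not part of the original test; _netdev defers the mount until networking is up):
# illustrative fstab line for the test volume
192.168.119.11:/stri_dist_vol  /data  glusterfs  defaults,_netdev  0 0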