all of this, mons have more mem!

2020-03-28 18:19:05 +01:00
parent 2e9a31540f
commit d952990929
3 changed files with 9 additions and 8 deletions

View File

@@ -1,11 +1,10 @@
 [global]
-err_to_syslog = false
 fsid = f33952a3-e074-458b-9ce8-e0e035a2f8c4
 log_to_file = true
-log_to_syslog = false
 mon_host = [v2:192.168.10.160:3300,v1:192.168.10.160:6789], [v2:192.168.10.161:3300,v1:192.168.10.161:6789]
 mon_initial_members = pine01, pine02
+mon_memory_target = 1610612736
 
 [mon]
 cluster_log_file_level = warn
 
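The new mon_memory_target of 1610612736 bytes is 1.5 GiB (1.5 * 1073741824), up from the 1 GiB value it replaces in the template below. As a sketch of how the same bump could be applied to a running cluster without editing ceph.conf (assuming a Nautilus-or-later release, where the centralized config database is available):

# Raise the monitor memory target to 1.5 GiB for all mons
sudo ceph config set mon mon_memory_target 1610612736
# Check the value one of the mons (pine01 here) actually resolves
sudo ceph config get mon.pine01 mon_memory_target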

View File

@@ -19,7 +19,8 @@ auth_client_required = cephx
 public network = 192.168.10.0/24
 cluster network = 10.255.255.0/27
 
 # MON
-mon memory target = 1073741824
+#mon memory target = 1073741824
+mon memory target = 1610612736
 mon osd cache size min = 134217728 #default 128Mb
 mon pg warn min per osd = 10
@@ -28,7 +29,7 @@ osd pool default pg num = 8
 osd pool default pgp num = 8
 osd pool default size = 2 # Write an object 3 times.
 osd pool default min size = 1 # Allow writing two copies in a degraded state.
-osd max backfills = 1
+osd max backfills = 2
 
 #MEM
 #50mb / 5mb / 64mb
@@ -57,6 +58,7 @@ debug rocksdb = 0/0
 debug paxos = 0/0
 debug auth = 0/0
 cluster log file level = warn
+mon cluster log to file = false
 
 [osd]
 debug osd = 0/0
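Two behavioural changes in this file beyond the memory bump: osd max backfills goes from 1 to 2, letting each OSD take part in two concurrent backfill operations (faster recovery at the cost of more I/O during rebalancing), and mon cluster log to file = false stops the mons from writing the cluster log to disk. A hedged runtime equivalent, again assuming the centralized config database:

# Allow two concurrent backfills per OSD
sudo ceph config set osd osd_max_backfills 2
# Stop writing the cluster log to a file on the mons
sudo ceph config set mon mon_cluster_log_to_file false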

View File

@@ -2,14 +2,14 @@
 ./shutdown-cluster.sh
 sudo ceph osd lspools
-sudo ceph osd pool set default.rgw.buckets.data target_size_ratio 0.5
-sudo ceph osd pool set cephfs.ceph.data target_size_ratio 0.5
+sudo ceph osd pool set default.rgw.buckets.data target_size_ratio 0.3
+sudo ceph osd pool set cephfs.ceph.data target_size_ratio 0.3
 sudo ceph osd pool set cephfs.ceph.meta target_size_ratio 0.1
 sudo ceph osd pool set .rgw.root target_size_ratio 0.1
 sudo ceph osd pool set default.rgw.control target_size_ratio 0.01
 sudo ceph osd pool set default.rgw.log target_size_ratio 0.01
 sudo ceph osd pool set default.rgw.meta target_size_ratio 0.1
 sudo ceph osd pool set default.rgw.buckets.index target_size_ratio 0.01
-sudo ceph osd pool set scbench-hdd target_size_ratio 0.1
-sudo ceph osd pool set scbench-ssd target_size_ratio 0.1
+sudo ceph osd pool set scbench-hdd target_size_ratio 0.01
+sudo ceph osd pool set scbench-ssd target_size_ratio 0.01
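target_size_ratio values are relative weights the pg_autoscaler uses to estimate each pool's eventual share of capacity and pre-size its PG count accordingly, so lowering the two data pools from 0.5 to 0.3 and the bench pools from 0.1 to 0.01 shifts the estimate toward the pools that will actually hold data. One way to verify how the autoscaler reads the new ratios (assuming the pg_autoscaler manager module is enabled, which using target_size_ratio implies):

# Show per-pool size, target ratio, and suggested PG count
sudo ceph osd pool autoscale-status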