diff --git a/bench-hdd.txt b/bench-hdd.txt
new file mode 100644
index 0000000..883a1a4
--- /dev/null
+++ b/bench-hdd.txt
@@ -0,0 +1,168 @@
+do@lenny ~$ sudo rados bench -p scbench-hdd 10 write --no-cleanup
+hints = 1
+Maintaining 16 concurrent writes of 4194304 bytes to objects of size 4194304 for up to 10 seconds or 0 objects
+Object prefix: benchmark_data_lenny_3637180
+ sec Cur ops started finished avg MB/s cur MB/s last lat(s) avg lat(s)
+ 0 0 0 0 0 0 - 0
+ 1 16 17 1 3.99925 4 0.854905 0.854905
+ 2 16 20 4 7.99866 12 1.70749 1.1992
+ 3 16 24 8 10.6648 16 2.98984 1.90804
+ 4 16 26 10 9.99819 8 3.75672 2.24896
+ 5 16 29 13 10.3981 12 4.60426 2.55642
+ 6 16 32 16 10.6648 12 1.08795 2.82301
+ 7 16 34 18 10.2839 8 1.35716 2.947
+ 8 16 40 24 11.9979 24 5.58327 3.60565
+ 9 16 43 27 11.9979 12 1.13108 3.66818
+ 10 16 47 31 12.3977 16 6.76096 3.84986
+ 11 16 48 32 11.6343 4 5.62342 3.90528
+ 12 16 48 32 10.6648 0 - 3.90528
+ 13 16 48 32 9.8444 0 - 3.90528
+ 14 15 48 33 9.42689 1.33333 5.38488 3.95012
+Total time run: 14.4781
+Total writes made: 48
+Write size: 4194304
+Object size: 4194304
+Bandwidth (MB/sec): 13.2614
+Stddev Bandwidth: 6.96687
+Max bandwidth (MB/sec): 24
+Min bandwidth (MB/sec): 0
+Average IOPS: 3
+Stddev IOPS: 1.77281
+Max IOPS: 6
+Min IOPS: 0
+Average Latency(s): 4.79904
+Stddev Latency(s): 2.39027
+Max latency(s): 9.87363
+Min latency(s): 0.854905
+do@lenny ~$ sudo rados bench -p scbench-hdd 10 rand
+hints = 1
+ sec Cur ops started finished avg MB/s cur MB/s last lat(s) avg lat(s)
+ 0 12 12 0 0 0 - 0
+ 1 16 19 3 11.9907 12 0.78026 0.756034
+ 2 16 22 6 11.994 12 1.4872 1.10345
+ 3 16 28 12 15.9936 24 2.78407 1.7731
+ 4 16 31 15 14.995 12 3.40679 2.09867
+ 5 16 37 21 16.7951 24 3.9861 2.44746
+ 6 16 40 24 15.9956 12 3.92352 2.58089
+ 7 16 46 30 17.1384 24 2.61075 2.64985
+ 8 16 51 35 17.4956 20 1.70534 2.6997
+ 9 16 54 38 16.8848 12 1.71139 2.67779
+ 10 16 60 44 17.5958 24 1.41101 2.7275
+ 11 15 61 46 16.7234 8 3.04846 2.81068
+ 12 15 61 46 15.3298 0 - 2.81068
+ 13 7 61 54 16.6116 16 2.80271 3.05892
+Total time run: 13.3231
+Total reads made: 61
+Read size: 4194304
+Object size: 4194304
+Bandwidth (MB/sec): 18.3141
+Average IOPS: 4
+Stddev IOPS: 1.86396
+Max IOPS: 6
+Min IOPS: 0
+Average Latency(s): 3.31305
+Max latency(s): 8.39827
+Min latency(s): 0.612968
+do@lenny ~$ sudo rados bench -p scbench-hdd 10 seq
+hints = 1
+ sec Cur ops started finished avg MB/s cur MB/s last lat(s) avg lat(s)
+ 0 0 0 0 0 0 - 0
+ 1 16 19 3 11.9949 12 0.746218 0.681344
+ 2 16 24 8 15.9955 20 1.96857 1.21254
+ 3 16 28 12 15.9962 16 2.74538 1.63604
+ 4 16 34 18 17.996 24 1.93976 2.11562
+ 5 16 38 22 17.5963 16 3.60988 2.31159
+ 6 16 43 27 17.9962 20 2.51393 2.4823
+ 7 16 48 32 18.2821 20 3.88385 2.64439
+ 8 15 48 33 16.497 4 3.35054 2.66579
+ 9 14 48 34 15.1084 4 3.5924 2.69305
+ 10 6 48 42 16.7969 32 5.34022 3.01666
+Total time run: 10.0544
+Total reads made: 48
+Read size: 4194304
+Object size: 4194304
+Bandwidth (MB/sec): 19.0961
+Average IOPS: 4
+Stddev IOPS: 2.14994
+Max IOPS: 8
+Min IOPS: 1
+Average Latency(s): 3.21351
+Max latency(s): 6.37929
+Min latency(s): 0.626458
+do@lenny ~$ sudo rados bench -p scbench-hdd 30 rand
+hints = 1
+ sec Cur ops started finished avg MB/s cur MB/s last lat(s) avg lat(s)
+ 0 0 0 0 0 0 - 0
+ 1 16 19 3 11.9948 12 0.703546 0.689649
+ 2 16 25 9 17.9902 24 1.91925 1.27537
+ 3 16 28 12 15.9933 12 2.58048 1.59284
+ 4 16 34 18 17.9935 24 2.41106 2.0864
+ 5 16 40 24 19.1939 24 2.87461 2.26437
+ 6 16 46 30 19.9941 24 1.17153 2.34959
+ 7 16 49 33 18.8519 12 1.77618 2.39425
+ 8 16 55 39 19.4948 24 1.7475 2.49689
+ 9 16 61 45 19.995 24 2.29268 2.60587
+ 10 16 65 49 19.5953 16 4.70785 2.67741
+ 11 16 70 54 19.6317 20 0.846898 2.64328
+ 12 16 75 59 19.6621 20 4.68773 2.6837
+ 13 16 81 65 19.9954 24 0.591887 2.68116
+ 14 16 84 68 19.4241 12 1.23455 2.68813
+ 15 16 90 74 19.729 24 0.746046 2.70456
+ 16 16 94 78 19.4958 16 0.400425 2.75191
+ 17 16 100 84 19.7605 24 0.667928 2.72239
+ 18 16 106 90 19.9958 24 1.25919 2.72179
+ 19 16 111 95 19.9959 20 2.24736 2.75825
+2020-03-26 19:59:45.994068 min lat: 0.400425 max lat: 6.62169 avg lat: 2.74983
+ sec Cur ops started finished avg MB/s cur MB/s last lat(s) avg lat(s)
+ 20 16 115 99 19.7959 16 0.736072 2.74983
+ 21 16 121 105 19.9959 24 6.73841 2.82779
+ 22 15 125 110 19.996 20 7.33018 2.85325
+ 23 16 132 116 20.1699 24 2.1527 2.87804
+ 24 16 137 121 20.1627 20 0.588809 2.87121
+ 25 16 143 127 20.316 24 1.81338 2.90248
+ 26 16 149 133 20.4575 24 1.49447 2.88299
+ 27 16 154 138 20.4404 20 1.68318 2.87505
+ 28 16 159 143 20.4245 20 0.628533 2.87339
+ 29 16 165 149 20.5477 24 1.10452 2.86992
+ 30 16 171 155 20.6626 24 0.547918 2.86919
+ 31 16 172 156 20.1251 4 6.05471 2.88961
+ 32 16 172 156 19.4963 0 - 2.88961
+Total time run: 32.8608
+Total reads made: 172
+Read size: 4194304
+Object size: 4194304
+Bandwidth (MB/sec): 20.9368
+Average IOPS: 5
+Stddev IOPS: 1.5606
+Max IOPS: 6
+Min IOPS: 0
+Average Latency(s): 3.02835
+Max latency(s): 7.60182
+Min latency(s): 0.400425
+do@lenny ~$ sudo rados bench -p scbench-hdd 30 seq
+hints = 1
+ sec Cur ops started finished avg MB/s cur MB/s last lat(s) avg lat(s)
+ 0 0 0 0 0 0 - 0
+ 1 16 19 3 11.9943 12 0.936671 0.832956
+ 2 16 22 6 11.9958 12 1.67656 1.20164
+ 3 16 26 10 13.3289 16 2.88472 1.71382
+ 4 16 31 15 14.9955 20 3.89267 2.31528
+ 5 16 35 19 15.1959 16 1.89476 2.48036
+ 6 16 40 24 15.9959 20 4.37326 2.73131
+ 7 16 46 30 17.1387 24 4.70735 2.85483
+ 8 16 48 32 15.9964 8 4.15251 2.83948
+ 9 13 48 35 15.5522 12 5.24734 2.93356
+ 10 11 48 37 14.7969 8 4.84108 2.98921
+Total time run: 10.4787
+Total reads made: 48
+Read size: 4194304
+Object size: 4194304
+Bandwidth (MB/sec): 18.3229
+Average IOPS: 4
+Stddev IOPS: 1.33749
+Max IOPS: 6
+Min IOPS: 2
+Average Latency(s): 3.33472
+Max latency(s): 6.0753
+Min latency(s): 0.661349
+
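Note: the summary figures above are self-consistent — rados bench reports bandwidth as total data moved over total runtime. For the HDD write pass, a quick check (not part of the original capture):

    # 48 writes of 4 MiB objects in 14.4781 s
    echo 'scale=4; 48 * 4 / 14.4781' | bc    # 13.2614, matching Bandwidth (MB/sec)
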
diff --git a/bench-ssd.txt b/bench-ssd.txt
new file mode 100644
index 0000000..27011f7
--- /dev/null
+++ b/bench-ssd.txt
@@ -0,0 +1,172 @@
+do@lenny ~$ sudo rados bench -p scbench-ssd 10 write --no-cleanup
+hints = 1
+Maintaining 16 concurrent writes of 4194304 bytes to objects of size 4194304 for up to 10 seconds or 0 objects
+Object prefix: benchmark_data_lenny_3639885
+ sec Cur ops started finished avg MB/s cur MB/s last lat(s) avg lat(s)
+ 0 0 0 0 0 0 - 0
+ 1 16 16 0 0 0 - 0
+ 2 16 20 4 7.99861 8 1.55338 1.33348
+ 3 16 23 7 9.3317 12 2.58585 1.8076
+ 4 16 25 9 8.99783 8 1.93891 1.9839
+ 5 16 30 14 11.1975 20 4.94566 2.74343
+ 6 16 34 18 11.9974 16 1.56079 2.84698
+ 7 16 36 20 11.4262 8 2.35802 3.01545
+ 8 16 39 23 11.4976 12 7.98993 3.41017
+ 9 16 41 25 11.1088 8 0.890175 3.29592
+ 10 16 45 29 11.5977 16 1.70533 3.3532
+ 11 15 46 31 11.2704 8 2.52783 3.36889
+ 12 14 46 32 10.6646 4 3.09921 3.36046
+ 13 14 46 32 9.84422 0 - 3.36046
+ 14 10 46 36 10.2837 8 3.90924 3.76006
+Total time run: 14.7675
+Total writes made: 46
+Write size: 4194304
+Object size: 4194304
+Bandwidth (MB/sec): 12.4598
+Stddev Bandwidth: 5.74934
+Max bandwidth (MB/sec): 20
+Min bandwidth (MB/sec): 0
+Average IOPS: 3
+Stddev IOPS: 1.43734
+Max IOPS: 5
+Min IOPS: 0
+Average Latency(s): 4.80864
+Stddev Latency(s): 3.27751
+Max latency(s): 13.214
+Min latency(s): 0.845786
+do@lenny ~$ sudo rados bench -p scbench-ssd 10 rand
+hints = 1
+ sec Cur ops started finished avg MB/s cur MB/s last lat(s) avg lat(s)
+ 0 0 0 0 0 0 - 0
+ 1 16 19 3 11.995 12 0.898158 0.82445
+ 2 16 23 7 13.9958 16 1.88579 1.25992
+ 3 16 26 10 13.3299 12 2.64588 1.59851
+ 4 16 30 14 13.9966 16 3.83668 1.93553
+ 5 16 35 19 15.1964 20 1.47049 2.20992
+ 6 16 38 22 14.6634 12 1.45423 2.42733
+ 7 16 44 28 15.9965 24 0.959714 2.73725
+ 8 16 47 31 15.4966 12 1.37284 2.85643
+ 9 16 52 36 15.9966 20 5.0076 3.03584
+ 10 16 55 39 15.5967 12 0.690651 3.07763
+ 11 16 56 40 14.5424 4 4.92254 3.12375
+ 12 16 56 40 13.3305 0 - 3.12375
+ 13 16 56 40 12.3051 0 - 3.12375
+Total time run: 13.9669
+Total reads made: 56
+Read size: 4194304
+Object size: 4194304
+Bandwidth (MB/sec): 16.0379
+Average IOPS: 4
+Stddev IOPS: 1.84669
+Max IOPS: 6
+Min IOPS: 0
+Average Latency(s): 3.94751
+Max latency(s): 8.45673
+Min latency(s): 0.686362
+do@lenny ~$ sudo rados bench -p scbench-ssd 10 seq
+hints = 1
+ sec Cur ops started finished avg MB/s cur MB/s last lat(s) avg lat(s)
+ 0 0 0 0 0 0 - 0
+ 1 16 19 3 11.9959 12 0.689586 0.66343
+ 2 16 25 9 17.9956 24 1.21362 1.19095
+ 3 16 28 12 15.9964 12 1.30662 1.43019
+ 4 16 32 16 15.9968 16 3.99621 1.7762
+ 5 16 36 20 15.9969 16 4.64822 2.13385
+ 6 16 41 25 16.6635 20 5.0968 2.33332
+ 7 16 44 28 15.9971 12 5.4352 2.45223
+ 8 16 46 30 14.9972 8 1.53563 2.46951
+ 9 13 46 33 14.664 12 7.01039 2.61443
+ 10 8 46 38 15.1972 20 5.85954 2.99876
+Total time run: 10.3346
+Total reads made: 46
+Read size: 4194304
+Object size: 4194304
+Bandwidth (MB/sec): 17.8043
+Average IOPS: 4
+Stddev IOPS: 1.22927
+Max IOPS: 6
+Min IOPS: 2
+Average Latency(s): 3.43487
+Max latency(s): 9.03273
+Min latency(s): 0.634308
+do@lenny ~$ sudo rados bench -p scbench-ssd 30 rand
+hints = 1
+ sec Cur ops started finished avg MB/s cur MB/s last lat(s) avg lat(s)
+ 0 0 0 0 0 0 - 0
+ 1 16 19 3 11.9952 12 0.68926 0.625227
+ 2 16 24 8 15.9953 20 1.49809 1.03518
+ 3 16 31 15 19.9948 28 2.71333 1.53183
+ 4 16 37 21 20.9949 24 1.75632 1.86349
+ 5 16 43 27 21.5948 24 3.75659 2.05061
+ 6 16 49 33 21.995 24 2.48837 2.11767
+ 7 16 54 38 21.7095 20 0.659536 2.213
+ 8 16 60 44 21.9952 24 0.614797 2.27264
+ 9 16 65 49 21.7731 20 3.18292 2.34831
+ 10 16 71 55 21.9954 24 2.73803 2.45288
+ 11 16 77 61 22.1773 24 3.25429 2.47174
+ 12 16 82 66 21.9956 20 3.3704 2.50903
+ 13 16 88 72 22.1494 24 3.89392 2.54965
+ 14 16 93 77 21.9957 20 2.76956 2.58599
+ 15 16 99 83 22.129 24 2.89327 2.60995
+ 16 16 105 89 22.2457 24 2.88837 2.6081
+ 17 16 110 94 22.1134 20 3.16836 2.62964
+ 18 16 116 100 22.218 24 3.01529 2.64267
+ 19 16 121 105 22.1011 20 3.52018 2.63621
+2020-03-26 20:04:22.748117 min lat: 0.533279 max lat: 4.52016 avg lat: 2.62519
+ sec Cur ops started finished avg MB/s cur MB/s last lat(s) avg lat(s)
+ 20 16 127 111 22.1958 24 0.555885 2.62519
+ 21 16 132 116 22.091 20 3.24145 2.658
+ 22 16 138 122 22.1776 24 3.53678 2.65736
+ 23 16 143 127 22.0827 20 3.10933 2.66557
+ 24 16 149 133 22.1625 24 3.74029 2.67077
+ 25 16 155 139 22.2359 24 0.582075 2.66862
+ 26 16 160 144 22.1497 20 3.49143 2.68488
+ 27 16 166 150 22.2181 24 3.25106 2.70005
+ 28 16 172 156 22.2816 24 0.5923 2.69024
+ 29 16 177 161 22.2028 20 3.58342 2.70912
+ 30 16 181 165 21.996 16 1.84561 2.70472
+ 31 16 182 166 21.4154 4 2.98895 2.70643
+ 32 16 182 166 20.7462 0 - 2.70643
+ 33 16 182 166 20.1176 0 - 2.70643
+ 34 16 182 166 19.5259 0 - 2.70643
+Total time run: 34.0496
+Total reads made: 182
+Read size: 4194304
+Object size: 4194304
+Bandwidth (MB/sec): 21.3806
+Average IOPS: 5
+Stddev IOPS: 1.87107
+Max IOPS: 7
+Min IOPS: 0
+Average Latency(s): 2.99141
+Max latency(s): 8.15608
+Min latency(s): 0.533279
+do@lenny ~$ sudo rados bench -p scbench-ssd 30 seq
+hints = 1
+ sec Cur ops started finished avg MB/s cur MB/s last lat(s) avg lat(s)
+ 0 0 0 0 0 0 - 0
+ 1 16 19 3 11.9894 12 0.917781 0.83888
+ 2 16 22 6 11.9939 12 1.66418 1.21039
+ 3 16 25 9 11.9954 12 1.56822 1.49919
+ 4 16 30 14 13.9953 20 3.92662 2.08455
+ 5 16 34 18 14.3956 16 1.75657 2.29691
+ 6 16 36 20 13.3295 8 4.73111 2.57761
+ 7 16 39 23 13.1393 12 6.17507 2.89999
+ 8 16 42 26 12.9966 12 6.40956 2.97197
+ 9 16 46 30 13.33 16 4.90467 3.18248
+ 10 16 46 30 11.9971 0 - 3.18248
+ 11 12 46 34 12.3607 8 5.35723 3.54036
+ 12 12 46 34 11.3307 0 - 3.54036
+ 13 12 46 34 10.4591 0 - 3.54036
+Total time run: 13.3978
+Total reads made: 46
+Read size: 4194304
+Object size: 4194304
+Bandwidth (MB/sec): 13.7336
+Average IOPS: 3
+Stddev IOPS: 1.61325
+Max IOPS: 5
+Min IOPS: 0
+Average Latency(s): 4.40677
+Max latency(s): 11.0811
+Min latency(s): 0.790116
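Note: only the pool names scbench-hdd and scbench-ssd appear in the captures. They were presumably created on device-class CRUSH rules roughly like this (a sketch — the rule names replicated-hdd/replicated-ssd are assumptions; pg num 8 follows the osd pool default pg num in the config below):

    ceph osd crush rule create-replicated replicated-hdd default host hdd
    ceph osd crush rule create-replicated replicated-ssd default host ssd
    ceph osd pool create scbench-hdd 8 8 replicated replicated-hdd
    ceph osd pool create scbench-ssd 8 8 replicated replicated-ssd
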
diff --git a/bench/run.sh b/bench/run.sh
new file mode 100755
index 0000000..87504bc
--- /dev/null
+++ b/bench/run.sh
@@ -0,0 +1,13 @@
+#!/bin/bash
+set -euo pipefail
+
+# One write pass (kept with --no-cleanup so the read passes have objects
+# to read), then random and sequential read passes at 10 s and 30 s.
+POOL=${1:?usage: $0 <pool>}
+
+rados bench -p "$POOL" 10 write --no-cleanup
+rados bench -p "$POOL" 10 rand
+rados bench -p "$POOL" 10 seq
+rados bench -p "$POOL" 30 rand
+rados bench -p "$POOL" 30 seq
+
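Note: the bench-hdd.txt and bench-ssd.txt captures above match one run of this script per pool, presumably along the lines of:

    sudo ./bench/run.sh scbench-hdd | tee bench-hdd.txt
    sudo ./bench/run.sh scbench-ssd | tee bench-ssd.txt

The benchmark objects left behind by --no-cleanup can be removed afterwards with rados -p <pool> cleanup.
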
diff --git a/ceph.conf b/ceph.conf
index 1f182d2..89f0cbd 100644
--- a/ceph.conf
+++ b/ceph.conf
@@ -1,52 +1,21 @@
-[global]
-fsid = f33952a3-e074-458b-9ce8-e0e035a2f8c4
-log to file = false
-#ms_bind_ipv6 = true
-mon_initial_members = pine01, pine02
-mon_host = [v2:192.168.10.160:3300,v1:192.168.10.160:6789], [v2:192.168.10.161:3300,v1:192.168.10.161:6789]
-#mon_initial_members = pine01, pine02, pine03
-#mon_host = [v2:192.168.10.160:3300,v1:192.168.10.160:6789], [v2:192.168.10.161:3300,v1:192.168.10.161:6789], [v2:192.168.10.19:3300,v1:192.168.10.19:6789]
-auth_cluster_required = cephx
-auth_service_required = cephx
-auth_client_required = cephx
-#auth cluster required = none
-#auth service required = none
-#auth client required = none
-public network = 192.168.10.0/24
-cluster network = 10.255.255.0/27
-osd pool default pg num = 8
-osd pool default pgp num = 8
-osd pool default size = 2 # Write an object 3 times.
-osd pool default min size = 1 # Allow writing two copies in a degraded state.
-osd max backfills = 1
-mon pg warn min per osd = 10
-#50mb / 5mb / 64mb
-mon memory target = 1073741824
-mon_osd_cache_size_min = 134217728 #default 128Mb
-osd_memory_target = 134217728 #128 Mb/ 5 sata ports
-osd_memory_base = 134217728 #128 Mb/ 5 sata ports
-osd_memory_cache_min = 67108864 #64Mb
-bluestore_cache_autotune = true
-bluestore_cache_size = 52428800
-bluestore_cache_size_hdd = 5242880
-bluestore_cache_size_ssd = 5242880
-#bluestore_cache_kv_max = 67108864
-#rocksdb_cache_size = 5242880
-#LOGGING off
-#debug ms = 0/0
-#
-#[mon]
-# debug mon = 0/0
-# debug paxos = 0/0
-# debug auth = 0/0
-#
-#[osd]
-# debug osd = 0/0
-# debug filestore = 0/0
-# debug journal = 0
-# debug monc = 0/0
-#
-#[mds]
-# debug mds = 0
-# debug mds balancer = 0
-
+[global]
+	debug_ms = 1/1
+	fsid = f33952a3-e074-458b-9ce8-e0e035a2f8c4
+	mon_host = [v2:192.168.10.160:3300,v1:192.168.10.160:6789], [v2:192.168.10.161:3300,v1:192.168.10.161:6789]
+	mon_initial_members = pine01, pine02
+
+[mds]
+	debug_mds = 1
+	debug_mds_balancer = 1
+
+[mon]
+	debug_auth = 1/1
+	debug_mon = 1/1
+	debug_paxos = 1/1
+
+[osd]
+	debug_filestore = 1/1
+	debug_journal = 1
+	debug_monc = 1/1
+	debug_osd = 1/1
+
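Note: the slimmed-down ceph.conf keeps only what a daemon needs to find the monitors, plus debug levels; everything else presumably lives in the monitors' centralized config database now, where options can be inspected and changed at runtime, e.g.:

    ceph config dump
    ceph config set osd osd_memory_target 134217728
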
diff --git a/ceph.conf.assimilate.we-are-the-borg b/ceph.conf.assimilate.we-are-the-borg
new file mode 100644
index 0000000..5a3d93a
--- /dev/null
+++ b/ceph.conf.assimilate.we-are-the-borg
@@ -0,0 +1,59 @@
+[global]
+fsid = f33952a3-e074-458b-9ce8-e0e035a2f8c4
+log to file = false
+log to stderr = false
+log to syslog = true
+#ms_bind_ipv6 = true
+mon_initial_members = pine01, pine02
+mon_host = [v2:192.168.10.160:3300,v1:192.168.10.160:6789], [v2:192.168.10.161:3300,v1:192.168.10.161:6789]
+#mon_initial_members = pine01, pine02, pine03
+#mon_host = [v2:192.168.10.160:3300,v1:192.168.10.160:6789], [v2:192.168.10.161:3300,v1:192.168.10.161:6789], [v2:192.168.10.19:3300,v1:192.168.10.19:6789]
+auth_cluster_required = cephx
+auth_service_required = cephx
+auth_client_required = cephx
+#auth cluster required = none
+#auth service required = none
+#auth client required = none
+public network = 192.168.10.0/24
+cluster network = 10.255.255.0/27
+# MON
+mon memory target = 1073741824
+mon osd cache size min = 134217728 # default 128 MiB
+mon pg warn min per osd = 10
+
+#OSD
+osd pool default pg num = 8
+osd pool default pgp num = 8
+osd pool default size = 2 # Write an object 2 times.
+osd pool default min size = 1 # Allow writing one copy in a degraded state.
+osd max backfills = 1
+
+#MEM
+#50mb / 5mb / 64mb
+osd memory target = 134217728 # 128 MiB / 5 SATA ports
+osd memory base = 134217728 # 128 MiB / 5 SATA ports
+osd memory cache min = 67108864 # 64 MiB
+bluestore cache autotune = true
+bluestore cache size = 52428800
+bluestore cache size hdd = 5242880
+bluestore cache size ssd = 5242880
+#bluestore cache kv max = 67108864
+#rocksdb cache size = 5242880
+
+#LOGGING off
+debug ms = 1/1
+
+[mon]
+	debug mon = 1/1
+	debug paxos = 1/1
+	debug auth = 1/1
+
+[osd]
+	debug osd = 1/1
+	debug filestore = 1/1
+	debug journal = 1
+	debug monc = 1/1
+
+[mds]
+	debug mds = 1
+	debug mds balancer = 1
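
Note: going by its name, ceph.conf.assimilate.we-are-the-borg is the input for ceph config assimilate-conf, which imports a flat ceph.conf into the mon config database and writes back whatever could not be assimilated (a sketch, assuming the file is fed in from the repo root; the output filename is arbitrary):

    sudo ceph config assimilate-conf -i ceph.conf.assimilate.we-are-the-borg -o ceph.conf.residue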