#######################################################################
# Sample generated from a ceph-chef cookbook on vagrant cluster
#######################################################################

[global]
# This value must be set before the mon.rb recipe is called. Do this in your own recipe where you set your own
# variables. For an example, see ceph-chef/recipes/ceph-mon.rb
fsid = e0a71d2c-a147-49ec-805e-ab8388aac890

keyring = /etc/ceph/$cluster.$name.keyring

auth cluster required = cephx
auth service required = cephx
auth client required = cephx
#cephx require signatures = true
#cephx cluster require signatures = true
#cephx service require signatures = false
#cephx sign messages = true

# Note: mon host (required) and mon initial members (optional) should be in the global section in addition
# to the cluster and public network options since they are all critical to every node.

# List of all of the monitor nodes in the given cluster
mon host = 10.0.100.21:6789, 10.0.100.22:6789, 10.0.100.23:6789

# Suppress warning of too many pgs
mon pg warn max per osd = 0

max open files = 131072

cluster network = 10.121.2.0/24
public network = 10.0.100.0/24

# This is a very flexible section. You can add more options OR override options from above simply by
# specifying the values in your wrapper cookbook or your "chef-repo". If you override values then
# you may see a warning in the logs letting you know you have overridden them.
rgw override bucket index max shards = 5

[osd]
keyring = /var/lib/ceph/osd/$cluster-$id/keyring

# Set the default values here if no values are provided to override them
# Need 'filestore xattr use omap = true' for RGW
osd op threads = 16
osd map dedup = true
osd max write size = 256
osd disk threads = 1
osd client message size cap = 1073741824
filestore xattr use omap = true
filestore merge threshold = 40
filestore split multiple = 8
filestore op threads = 32
filestore min sync interval = 10
filestore max sync interval = 15
filestore queue max ops = 2500
filestore queue max bytes = 10485760
filestore queue committing max ops = 5000
filestore queue committing max bytes = 10485760000

osd journal size = 3000
journal max write bytes = 1073741824
journal max write entries = 10000
journal queue max ops = 50000
journal queue max bytes = 10485760000
journal block align = true
journal dio = true
journal aio = true

osd mkfs type = xfs
#osd mount options xfs = rw,noexec,nodev,noatime,nodiratime,nobarrier
osd mount options xfs = rw,noatime,inode64,logbufs=8,logbsize=256k,delaylog,allocsize=4M
# Example: osd mkfs options xfs = -f -i size=2048
# Example: osd mount options xfs = noexec,nodev,noatime,nodiratime,barrier=0,discard

# IMPORTANT: If you modify the crush map with the automation then uncomment the line below (osd crush update ...)
# otherwise the crush map will not get created correctly and the PGs will stay in an inactive/unclean state.
# In a production system it's good to set this value to 'false' and modify the crush map to fit your environment.
#osd crush update on start = false

# You can change the replica count via config or cli
osd pool default size = 3
osd pool default min size = 2
osd pool default pg num = 64
osd pool default pgp num = 64
# Default is 0 for the default crush rule.
# Example: osd pool default crush rule = 1
# Host = 1, Rack = 3 - Default is 1. This can have an impact on the crush map if not the default.
osd crush chooseleaf type = 1

# Note: All of these values can be overridden in your wrapper cookbook or "chef-repo" project
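# Example (a sketch, not part of the generated file; 'mypool' is a hypothetical pool name): the pool
# defaults above only apply at pool creation; an existing pool's replica count can be changed via the cli, e.g.
#   ceph osd pool set mypool size 3
#   ceph osd pool set mypool min_size 2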
[mon]
# Change the allow pool delete to false once you have everything where you want it. It keeps someone from
# accidentally deleting a pool!
mon allow pool delete = true
mon clock drift allowed = 5.0
mon osd down out interval = 60
mon osd full ratio = 0.95
mon osd min down reporters = 1
mon osd nearfull ratio = 0.85
mon pg warn max object skew = 10
mon pg warn max per osd = 0

# Any items in the 'client' section will apply to all sub-client sections unless overridden in the sub-client section.
[client]
# The admin socket must be writable by QEMU and allowed by SELinux or AppArmor
# admin socket = /var/run/ceph/$cluster-$type.$id.$pid.$cctid.asok
admin socket = /var/run/ceph/$cluster-$type.$id.$pid.asok
# NOTE: You can null out the 'admin socket' below if you wish. If present, when running 'ceph -s' without 'sudo'
# you will receive an 'unable to bind' error message.

[client.admin]
keyring = /etc/ceph/$cluster.client.admin.keyring
admin socket =
log file =

[client.radosgw.gateway]
host = ceph-vm1
rgw opstate ratelimit = 0
keyring = /etc/ceph/$cluster.client.radosgw.keyring
rgw num rados handles = 5
rgw frontends = civetweb port=8080 num_threads=100 error_log_file=/var/log/radosgw/civetweb.error.log access_log_file=/var/log/radosgw/civetweb.access.log
rgw enable ops log = false
rgw enable usage log = false
pid file = /var/run/ceph/$name.pid
log file = /var/log/radosgw/$cluster.client.radosgw.log
# Increased to 1 to log HTTP return codes - http://tracker.ceph.com/issues/12432
debug rgw = 1/0
rgw dns name = ceph-vm1.ceph.example.com
rgw resolve cname = true
cache max file size = 20000000

[client.restapi]
public addr = 10.0.100.21:5080
keyring = /etc/ceph/$cluster.client.restapi.keyring
restapi base url = /api/v0.1
log file = /var/log/ceph/$cluster.client.restapi.log
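# Example (a sketch; assumes the stock daemon socket path and an osd.0 running on the local host): any value
# in this file can be verified against a running daemon through its admin socket, e.g.
#   ceph daemon osd.0 config get osd_op_threads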