Commit
Merge pull request #1009 from ceph/clarify-osd-scenarios
osd: clarify osd scenarios
leseb authored Oct 6, 2016
2 parents 9d296a7 + 9753e29 commit 2a7a54c
Showing 3 changed files with 101 additions and 38 deletions.
1 change: 1 addition & 0 deletions group_vars/common-coreoss.sample
@@ -14,3 +14,4 @@ dummy:
#pypy_binary_directory: /opt/bin
#pip_url: https://bootstrap.pypa.io/get-pip.py
#local_temp_directory: /tmp

69 changes: 50 additions & 19 deletions group_vars/osds.sample
@@ -7,9 +7,18 @@
# file as a good configuration file when no variable is in it.
dummy:

# Variables here are applicable to all host groups NOT roles

# This sample file was generated by generate_group_vars_sample.sh

# Dummy variable to avoid error because ansible does not recognize the
# file as a good configuration file when no variable is in it.
#dummy:

# You can override default vars defined in defaults/main.yml here,
# but I would advise using host or group vars instead


###########
# GENERAL #
###########
@@ -22,10 +31,22 @@ dummy:
# will copy the admin key to the /etc/ceph/ directory
#copy_admin_key: false
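To make that override concrete, here is a minimal sketch of a group_vars file that enables the admin-key copy; the file name is illustrative, and `true` is simply the non-default value being demonstrated:

```yaml
# group_vars/osds.yml (hypothetical override file)
---
copy_admin_key: true   # copy the client.admin keyring to /etc/ceph/ on each OSD host
```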


####################
# OSD CRUSH LOCATION
####################

# /!\
#
# BE EXTREMELY CAREFUL WITH THIS OPTION
# DO NOT USE IT UNLESS YOU KNOW WHAT YOU ARE DOING
#
# /!\
#
# It is probably best to leave this option set to 'false', as the
# default suggests. It should only be used when building a complex
# CRUSH map, as it allows you to force a specific location for a set of OSDs.
#
# The following options will build a ceph.conf with OSD sections
# Example:
# [osd.X]
@@ -40,6 +61,7 @@ dummy:
#crush_location: false
#osd_crush_location: "'root={{ ceph_crush_root }} rack={{ ceph_crush_rack }} host={{ ansible_hostname }}'"
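Putting the pieces together, a hedged sketch of a group_vars override with this option enabled; the root and rack values are invented for illustration, and only the variable names come from the sample above:

```yaml
# Hypothetical group_vars sketch: force these OSDs under a chosen root/rack
---
crush_location: true
ceph_crush_root: default   # assumed CRUSH root name
ceph_crush_rack: rack1     # assumed rack name
osd_crush_location: "'root={{ ceph_crush_root }} rack={{ ceph_crush_rack }} host={{ ansible_hostname }}'"
```

The doubled quoting mirrors the sample above; it keeps the whole location string as a single value when it is written into the generated [osd.X] section.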


##############
# CEPH OPTIONS
##############
@@ -55,52 +77,61 @@ dummy:
#


# !! WARNING !!
#
# /!\ ENABLE ONLY ONE SCENARIO AT A TIME /!\
#
# !! WARNING !!
# Declare devices to be used as OSDs
# All scenarios (except the 3rd) inherit from the following device declaration
#
#devices:
# - /dev/sdb
# - /dev/sdc
# - /dev/sdd
# - /dev/sde


#'osd_auto_discovery' mode prevents you from filling out the 'devices' variable above.
# You can use this option with the First, Fourth and Fifth OSD scenarios.
# Device discovery is based on the Ansible fact 'ansible_devices',
# which reports all the devices on a system. If chosen, all the disks
# found will be passed to ceph-disk. You should not worry about using
# this option, since ceph-disk has a built-in check which looks for empty devices.
# Thus devices with existing partition tables will not be used.
#
#osd_auto_discovery: false
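As a sketch, enabling discovery means the 'devices' list stays undefined and a scenario that needs no explicit journal list is picked; pairing it with the First scenario here is an assumption based on the comment above:

```yaml
# Sketch: auto-discover empty disks and collocate journals on them
---
osd_auto_discovery: true
journal_collocation: true   # scenario I; no per-device journal list required
# 'devices' is deliberately left out -- discovery fills that role
```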



# I. First scenario: journal and osd_data on the same device
# Use 'true' to enable this scenario
# This will collocate both journal and data on the same disk
# creating a partition at the beginning of the device
# List devices under the 'devices' variable above, or choose 'osd_auto_discovery'


#journal_collocation: false
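A minimal sketch of the First scenario, reusing the example 'devices' list from above; each listed disk would carry both its journal partition and its data:

```yaml
# Sketch: scenario I -- journal and data collocated on each listed disk
---
journal_collocation: true
devices:
  - /dev/sdb
  - /dev/sdc
```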


# II. Second scenario: N journal devices for N OSDs
# Use 'true' for 'raw_multi_journal' to enable this scenario
# List devices under the 'devices' variable above and
# list journal devices for those under 'raw_journal_devices'
# In the following example:
# * sdb and sdc will get sdf as a journal
# * sdd and sde will get sdg as a journal

# When starting, you have 2 options:
# 1. Pre-allocate all the devices
# 2. Progressively add new devices

#raw_multi_journal: false
#raw_journal_devices:
# - /dev/sdf
# - /dev/sdf
# - /dev/sdg
# - /dev/sdg
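The pairing between the two lists is positional: the Nth entry of 'devices' journals to the Nth entry of 'raw_journal_devices'. Restating the example above with that mapping spelled out:

```yaml
# Scenario II as configured above, with the positional pairing annotated
---
raw_multi_journal: true
devices:
  - /dev/sdb        # -> journal on /dev/sdf
  - /dev/sdc        # -> journal on /dev/sdf
  - /dev/sdd        # -> journal on /dev/sdg
  - /dev/sde        # -> journal on /dev/sdg
raw_journal_devices:
  - /dev/sdf
  - /dev/sdf
  - /dev/sdg
  - /dev/sdg
```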


# III. Use directory instead of disk for OSDs
69 changes: 50 additions & 19 deletions roles/ceph-osd/defaults/main.yml
@@ -1,7 +1,16 @@
---
# Variables here are applicable to all host groups NOT roles

# This sample file was generated by generate_group_vars_sample.sh

# Dummy variable to avoid error because ansible does not recognize the
# file as a good configuration file when no variable is in it.
dummy:

# You can override default vars defined in defaults/main.yml here,
# but I would advise using host or group vars instead


###########
# GENERAL #
###########
@@ -14,10 +23,22 @@ fetch_directory: fetch/
# will copy the admin key to the /etc/ceph/ directory
copy_admin_key: false


####################
# OSD CRUSH LOCATION
####################

# /!\
#
# BE EXTREMELY CAREFUL WITH THIS OPTION
# DO NOT USE IT UNLESS YOU KNOW WHAT YOU ARE DOING
#
# /!\
#
# It is probably best to leave this option set to 'false', as the
# default suggests. It should only be used when building a complex
# CRUSH map, as it allows you to force a specific location for a set of OSDs.
#
# The following options will build a ceph.conf with OSD sections
# Example:
# [osd.X]
@@ -32,6 +53,7 @@ copy_admin_key: false
crush_location: false
osd_crush_location: "'root={{ ceph_crush_root }} rack={{ ceph_crush_rack }} host={{ ansible_hostname }}'"
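Since these are role defaults, per-host placement would normally be expressed in host_vars rather than by editing this file; a hedged sketch follows, with an invented hostname and rack:

```yaml
# Hypothetical host_vars/osd1.yml -- per-host CRUSH placement override
---
crush_location: true
ceph_crush_root: default   # assumed root name
ceph_crush_rack: rack2     # assumed rack for this host
```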


##############
# CEPH OPTIONS
##############
@@ -47,52 +69,61 @@ cephx: true
#


# !! WARNING !!
#
# /!\ ENABLE ONLY ONE SCENARIO AT A TIME /!\
#
# !! WARNING !!
# Declare devices to be used as OSDs
# All scenarios (except the 3rd) inherit from the following device declaration
#
#devices:
# - /dev/sdb
# - /dev/sdc
# - /dev/sdd
# - /dev/sde


#'osd_auto_discovery' mode prevents you from filling out the 'devices' variable above.
# You can use this option with the First, Fourth and Fifth OSD scenarios.
# Device discovery is based on the Ansible fact 'ansible_devices',
# which reports all the devices on a system. If chosen, all the disks
# found will be passed to ceph-disk. You should not worry about using
# this option, since ceph-disk has a built-in check which looks for empty devices.
# Thus devices with existing partition tables will not be used.
#
osd_auto_discovery: false



# I. First scenario: journal and osd_data on the same device
# Use 'true' to enable this scenario
# This will collocate both journal and data on the same disk
# creating a partition at the beginning of the device
# List devices under the 'devices' variable above, or choose 'osd_auto_discovery'


journal_collocation: false


# II. Second scenario: N journal devices for N OSDs
# Use 'true' for 'raw_multi_journal' to enable this scenario
# List devices under the 'devices' variable above and
# list journal devices for those under 'raw_journal_devices'
# In the following example:
# * sdb and sdc will get sdf as a journal
# * sdd and sde will get sdg as a journal

# When starting, you have 2 options:
# 1. Pre-allocate all the devices
# 2. Progressively add new devices (see the sketch after the device list below)

raw_multi_journal: false
#raw_journal_devices:
# - /dev/sdf
# - /dev/sdf
# - /dev/sdg
# - /dev/sdg
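For option 2 above (progressively adding devices), the sketch below assumes a cluster that started with four OSDs and later appends a fifth disk/journal pair; per the ceph-disk note earlier, the already-partitioned original devices should be skipped on the next run. Device names here are illustrative:

```yaml
# Sketch: appending a fifth OSD to an existing scenario II layout
---
raw_multi_journal: true
devices:
  - /dev/sdb
  - /dev/sdc
  - /dev/sdd
  - /dev/sde
  - /dev/sdh        # new data disk added later
raw_journal_devices:
  - /dev/sdf
  - /dev/sdf
  - /dev/sdg
  - /dev/sdg
  - /dev/sdi        # journal for the new disk
```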


# III. Use directory instead of disk for OSDs
