Merge pull request #4 from rhcarvalho/std-1

Improvements
Ben Parees 2015-06-06 00:29:15 -04:00
commit aa6c823256
7 changed files with 260 additions and 147 deletions

.sti/bin/assemble

@@ -1,5 +1,10 @@
 #!/bin/bash
+
+# This script uses these environment variables:
+#
+# DISABLE_COLLECTSTATIC: if not empty, inhibits execution of 'manage.py collectstatic'.
+
+function assemble() {
 # For SCL enablement
 source .bashrc
@@ -19,34 +24,40 @@ fi
 # set permissions for any installed artifacts
 chmod -R og+rwx /opt/openshift
 
-# Support for Django
-# Take shallowest manage.py script
-MANAGE_FILE=$(find . -maxdepth 3 -type f -name 'manage.py' -printf '%d\t%P\n' | sort -nk1 | cut -f2 | head -1)
-if pip show -q django && [ -f "$MANAGE_FILE" ]; then
 set +e
-
-  # Check if collectstatic can be run.
-  python $MANAGE_FILE collectstatic --dry-run --noinput &> /dev/null && RUN_COLLECTSTATIC=true
-
-  # Collect assets if settings seems okay.
-  if [ "$RUN_COLLECTSTATIC" ]; then
-    echo "---> Collecting static assets ..."
-    python $MANAGE_FILE collectstatic --noinput 2>&1 | sed '/^Copying/d;/^$/d;/^ /d'
-    [ $? -ne 0 ] && {
-      echo "ERROR: 'manage.py collectstatic' failed. See the build logs for more info."
-      exit 1
-    } || true
-  else
-    echo "WARNING: 'manage.py collectstatic' ignored. To debug, run:"
-    echo "  $ python $MANAGE_FILE collectstatic --noinput"
-    echo "Ignore this warning if you're not serving static files with Django."
-  fi
-fi
+django_collectstatic
+set -e
 
 # remove pip temporary directory
 rm -rf /tmp/pip_build_default
+}
+
+function django_collectstatic() {
+  [ -n "$DISABLE_COLLECTSTATIC" ] && return
+  ! pip show -q django && return
+
+  # Find shallowest manage.py script, either ./manage.py or <project>/manage.py
+  local MANAGE_FILE=$(find . -maxdepth 2 -type f -name 'manage.py' -printf '%d\t%P\n' | sort -nk1 | cut -f2 | head -1)
+
+  if [ ! -f "$MANAGE_FILE" ]; then
+    echo "WARNING: seems that you're using Django, but we could not find a 'manage.py' file."
+    echo "'manage.py collectstatic' ignored."
+    return
+  fi
+
+  echo "---> Collecting Django static files ..."
+
+  if ! python $MANAGE_FILE collectstatic --dry-run --noinput &> /dev/null; then
+    echo "WARNING: could not run 'manage.py collectstatic'. To debug, run:"
+    echo "  $ python $MANAGE_FILE collectstatic --noinput"
+    echo "Ignore this warning if you're not serving static files with Django."
+    return
+  fi
+
+  if ! python $MANAGE_FILE collectstatic --noinput 2>&1 | sed '/^Copying/d;/^$/d;/^ /d'; then
+    local status=$?
+    echo "ERROR: 'manage.py collectstatic' failed."
+    return $status
+  fi
+}
+
+assemble
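A quick sketch of how the new `DISABLE_COLLECTSTATIC` switch behaves; the local path below is hypothetical, and the script normally runs inside the builder image rather than on a workstation:

```bash
# Hypothetical invocation of the assemble logic outside of a build, only to
# illustrate the switch; inside OpenShift the script runs automatically.
cd /path/to/your/django/source            # hypothetical source checkout

# Any non-empty value makes django_collectstatic return immediately,
# so 'manage.py collectstatic' is never executed.
DISABLE_COLLECTSTATIC=1 bash .sti/bin/assemble

# With the variable unset, collectstatic runs only if Django is installed
# and a manage.py is found at ./manage.py or <project>/manage.py.
bash .sti/bin/assemble
```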

.sti/bin/run

@@ -1,38 +1,109 @@
 #!/bin/bash
-function is_gunicorn_installed() {
-  pip show gunicorn
-}
+
+# This script uses these environment variables:
+#
+# APP_MODULE: Python dotted path to your WSGI application.
+#             If not provided, tries to find the Python path to a 'wsgi.py' file
+#             in the source tree.
+#             That file is present in Django projects by default.
+# APP_CONFIG: Optional configuration file for gunicorn.
+# APP_FILE: Optional path to Python script to run your application.
+#           Defaults to 'app.py' if it exists.
+#
+# DISABLE_MIGRATE: if not empty, inhibits execution of 'manage.py migrate'.
+
+BIND_ADDR="0.0.0.0:8080"
+
+function run() {
 # For SCL enablement
 source .bashrc
 
 set -e
 
-# Support for Django
-
-# Take shallowest manage.py script
-MANAGE_FILE=$(find . -maxdepth 3 -type f -name 'manage.py' -printf '%d\t%P\n' | sort -nk1 | cut -f2 | head -1)
-if pip show -q django && [ -f "$MANAGE_FILE" ]; then
-  set -x
-  python $MANAGE_FILE migrate --noinput
-  set +x
-fi
-
-export APP_FILE=${APP_FILE:-"app.py"}
-
-if [[ ! -v APP_MODULE && -f setup.py ]]; then
-  APP_MODULE=`python setup.py --name`":application"
-fi
-
-if is_gunicorn_installed && [[ -v APP_MODULE ]]; then
-  if [[ -v APP_CONFIG ]]; then
-    export CONFIG="--config ${APP_CONFIG}"
-  fi
-  exec gunicorn ${APP_MODULE} --bind=:8080 ${CONFIG}
-fi
-
-exec python -u ${APP_FILE}
+# Try to run application with the strategies below, in precedence order.
+# The first successful strategy takes over this script's process via exec.
+# If no strategy succeeds, report an error message and terminate.
+run_django
+run_gunicorn "$APP_MODULE"
+run_python_script "${APP_FILE:-app.py}"
+
+echo "ERROR: don't know how to run your application."
+echo "Please set either APP_MODULE or APP_FILE environment variables,"
+echo "or create a file 'app.py' to launch your application."
+return 1
+}
+
+function run_django() {
+  ! pip show -q django && return
+
+  # Find shallowest manage.py script, either ./manage.py or <project>/manage.py
+  local MANAGE_FILE=$(find . -maxdepth 2 -type f -name 'manage.py' -printf '%d\t%P\n' | sort -nk1 | cut -f2 | head -1)
+
+  django_migrate "$MANAGE_FILE"
+
+  # try to use gunicorn
+  run_gunicorn "$APP_MODULE"
+  # or fallback to Django's development server
+  django_runserver "$MANAGE_FILE"
+}
+
+function django_migrate() {
+  [ -n "$DISABLE_MIGRATE" ] && return
+  echo "---> Migrating database ..."
+  django_manage_cmd "$1" migrate --noinput
+}
+
+function django_runserver() {
+  echo "---> Serving application with 'manage.py runserver' ..."
+  echo "---> WARNING: this is NOT a recommended way to run your application in production!"
+  django_manage_cmd "$1" runserver "$BIND_ADDR"
+}
+
+function django_manage_cmd() {
+  local MANAGE_FILE="$1"
+  local CMD="${@:2}"
+
+  if [ ! -f "$MANAGE_FILE" ]; then
+    echo "WARNING: seems that you're using Django, but we could not find a 'manage.py' file."
+    echo "'manage.py $CMD' ignored."
+    return
+  fi
+
+  if [[ "$CMD" =~ ^runserver ]]; then
+    exec python "$MANAGE_FILE" $CMD
+  else
+    python "$MANAGE_FILE" $CMD
+  fi
+}
+
+function run_gunicorn() {
+  ! pip show -q gunicorn && return
+
+  local APP_MODULE="$1"
+  if [ -z "$APP_MODULE" ]; then
+    # Find shallowest wsgi.py file, one of ./wsgi.py, <project>/wsgi.py or <project>/<project>/wsgi.py,
+    # replace "/" with "." and remove ".py" suffix
+    APP_MODULE=$(find . -maxdepth 3 -type f -name 'wsgi.py' -printf '%d\t%P\n' | sort -nk1 | cut -f2 | head -1 | sed 's:/:.:;s:.py$::')
+  fi
+
+  if [ -z "$APP_MODULE" ]; then
+    echo "WARNING: seems that you're trying to use gunicorn, but no WSGI application module was specified."
+    return
+  fi
+
+  echo "---> Serving application with gunicorn ..."
+  exec gunicorn "$APP_MODULE" --bind="$BIND_ADDR" --access-logfile=- --config "$APP_CONFIG"
+}
+
+function run_python_script() {
+  local APP_FILE="$1"
+  [ ! -f "$APP_FILE" ] && return
+
+  echo "---> Running application from Python script ..."
+  exec python -u "$APP_FILE"
+}
+
+run
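For reference, a hedged sketch of how the environment variables steer the rewritten run script; the module and file names below are made up:

```bash
# 1. Explicit WSGI module: gunicorn serves it on 0.0.0.0:8080 (BIND_ADDR).
APP_MODULE=myproject.wsgi ./.sti/bin/run

# 2. Nothing set: for a Django project the script finds the shallowest wsgi.py
#    (e.g. myproject/wsgi.py) and derives the dotted path itself.
./.sti/bin/run

# 3. Neither Django nor gunicorn available: fall back to a plain Python script,
#    app.py by default or whatever APP_FILE points to.
APP_FILE=server.py ./.sti/bin/run

# 4. Skip 'manage.py migrate' when the container starts.
DISABLE_MIGRATE=1 ./.sti/bin/run
```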

README.md

@@ -24,6 +24,24 @@ From this initial state you can:
 * install more Python libraries and add them to the `requirements.txt` file
 
+## Special files in this repository
+
+Apart from the regular files created by Django (`project/*`, `welcome/*`, `manage.py`), this repository contains:
+
+```
+.sti/
+└── bin/             - scripts used by source-to-image
+    ├── assemble     - executed to produce a Docker image with your code and dependencies during build
+    └── run          - executed to start your app during deployment
+openshift/           - OpenShift-specific files
+├── scripts          - helper scripts
+└── templates        - application templates
+requirements.txt     - list of dependencies
+```
+
 ## Local development
 
 To run this project in your development machine, follow these steps:
@@ -42,11 +60,11 @@ To run this project in your development machine, follow these steps:
 
     `./manage.py migrate`
 
-4. If everything is alright, you should be able to start the Django development server:
+5. If everything is alright, you should be able to start the Django development server:
 
     `./manage.py runserver`
 
-5. Open your browser and go to http://127.0.0.1:8000, you will be greeted with a welcome page.
+6. Open your browser and go to http://127.0.0.1:8000, you will be greeted with a welcome page.
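Put together, the local workflow described in this section looks roughly like the sketch below; the clone and dependency-installation steps are assumptions, since only the later steps appear in this hunk:

```bash
git clone <repository-url> django-ex && cd django-ex   # assumed initial step
pip install -r requirements.txt                        # assumed: install the listed dependencies
./manage.py migrate                                    # create the database schema
./manage.py runserver                                  # then browse to http://127.0.0.1:8000
```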
 ## Deploying to OpenShift
 
@@ -56,13 +74,13 @@ To follow the next steps, you need to be logged in to an OpenShift cluster and h
 ### Using an application template
 
-The directory `openshift/` contains OpenShift application template files that you can add to your OpenShift project with:
+The directory `openshift/templates/` contains OpenShift application templates that you can add to your OpenShift project with:
 
-    osc create -f openshift/<TEMPLATE_NAME>.json
+    osc create -f openshift/templates/<TEMPLATE_NAME>.json
 
-The template `django-source.json` contains just a minimal set of components to get your Django application into OpenShift.
+The template `django.json` contains just a minimal set of components to get your Django application into OpenShift.
 
-The template `django-source-postgresql.json` contains all of the components from `django-source.json`, plus a PostgreSQL database service and an Image Stream for the Python base image.
+The template `django-postgresql.json` contains all of the components from `django.json`, plus a PostgreSQL database service and an Image Stream for the Python base image. For simplicity, the PostgreSQL database in this template uses ephemeral storage and, therefore, is not production ready.
 
 After adding your templates, you can go to your OpenShift web console, browse to your project and click the create button. Create a new app from one of the templates that you have just added.
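With the renamed files, adding both templates to a project looks like this:

```bash
osc create -f openshift/templates/django.json
osc create -f openshift/templates/django-postgresql.json   # also adds an ephemeral PostgreSQL service
```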
@@ -104,24 +122,82 @@ Service "django-ex" created at 172.30.16.213 with port mappings 8080.
 You can access your application by browsing to the service's IP address and port.
 
-## Special files in this repository
-
-Apart from the regular files created by Django (`project/*`, `welcome/*`, `manage.py`), this repository contains:
-
-```
-.sti/
-└── bin/             - scripts used by source-to-image
-    ├── assemble     - executed to produce a Docker image with your code and dependencies during build
-    └── run          - executed to start your app during deployment
-openshift/           - application templates for OpenShift
-scripts/             - helper scripts to automate some tasks
-gunicorn_conf.py     - configuration for the gunicorn HTTP server
-requirements.txt     - list of dependencies
-```
-
+## Logs
+
+By default your Django application is served with gunicorn and configured to output its access log to stderr.
+You can look at the combined stdout and stderr of a given pod with this command:
+
+    osc get pods         # list all pods in your project
+    osc logs <pod-name>
+
+This can be useful to observe the correct functioning of your application.
+
+## Special environment variables
+
+### APP_CONFIG
+
+You can fine tune the gunicorn configuration through the environment variable `APP_CONFIG` that, when set, should point to a config file as documented [here](http://docs.gunicorn.org/en/latest/settings.html).
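As an illustration of the new `APP_CONFIG` variable, here is a minimal gunicorn settings file it could point to; the file name is arbitrary and the values are only examples:

```bash
# Write an example settings file at the root of the source tree.
cat > gunicorn_conf.py <<'EOF'
# Plain gunicorn settings, as documented in the gunicorn settings reference.
workers = 4          # number of worker processes
loglevel = "info"    # log verbosity
accesslog = "-"      # keep the access log on the container output so 'osc logs' can show it
EOF

# Then point the application at it, e.g. APP_CONFIG=gunicorn_conf.py
```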
+
+### DJANGO_SECRET_KEY
+
+When using one of the templates provided in this repository, this environment variable has its value automatically generated. For security purposes, make sure to set this to a random string as documented [here](https://docs.djangoproject.com/en/1.8/ref/settings/#std:setting-SECRET_KEY).
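If you are not using the templates, one way to produce a suitable value is Django's own random-string helper (assuming Django is installed locally):

```bash
python -c "from django.utils.crypto import get_random_string; print(get_random_string(50))"
# export the result, e.g. DJANGO_SECRET_KEY=<generated value>
```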
+
+## One-off command execution
+
+At times you might want to manually execute some command in the context of a running application in OpenShift.
+You can drop into a Python shell for debugging, create a new user for the Django Admin interface, or perform any other task.
+
+You can do all that by using regular CLI commands from OpenShift.
+To make it a little more convenient, you can use the script `openshift/scripts/run-in-container.sh` that wraps some calls to `osc`.
+In the future, the `osc` CLI tool might incorporate changes that make this script obsolete.
+
+Here is how you would run a command in a pod specified by label:
+
+1. Inspect the output of the command below to find the name of a pod that matches a given label:
+
+        osc get pods -l <your-label-selector>
+
+2. Open a shell in the pod of your choice:
+
+        osc exec -p <pod-name> -it -- bash
+
+3. Because of how `kubectl exec` and `osc exec` work right now, your current working directory is root (/). Change it to where your code lives:
+
+        cd $HOME
+
+4. Because of how the images produced with CentOS and RHEL work currently, you need to manually enable any Software Collections you need to use:
+
+        source scl_source enable python33
+
+5. Finally, execute any command that you need and exit the shell.
+
+Related GitHub issues:
+
+1. https://github.com/GoogleCloudPlatform/kubernetes/issues/8876
+2. https://github.com/GoogleCloudPlatform/kubernetes/issues/7770
+3. https://github.com/openshift/origin/issues/2001
+
+The wrapper script combines the steps above into one. You can use it like this:
+
+    ./run-in-container.sh ./manage.py migrate          # manually migrate the database
+                                                       # (done for you as part of the deployment process)
+    ./run-in-container.sh ./manage.py createsuperuser  # create a user to access Django Admin
+    ./run-in-container.sh ./manage.py shell            # open a Python shell in the context of your app
+
+If your Django pods are labeled with a name other than "django", you can use:
+
+    POD_NAME=name ./run-in-container.sh ./manage.py check
+
+If there is more than one replica, you can also specify a POD by index:
+
+    POD_INDEX=1 ./run-in-container.sh ./manage.py shell
+
+Or both together:
+
+    POD_NAME=frontend POD_INDEX=2 ./run-in-container.sh ./manage.py shell
+
 ## Data persistence

gunicorn_conf.py (deleted file)

@@ -1,2 +0,0 @@
-# Send the access log to stderr so it can be consumed by `osc logs`.
-accesslog = "-"

openshift/scripts/run-in-container.sh

@@ -8,52 +8,29 @@
 # convenient to use. In the future, the `osc` cli tool might incorporate changes
 # that make this script obsolete.
 
-# Here is how you would run a command in a pod specified by label [1]:
-#
-# 1. Inpect the output of the command below to find the name of a pod that
-#    matches a given label:
-#
-#     osc get pods -l <your-label-selector>
-#
-# 2. Open a bash shell in the pod of your choice:
-#
-#     osc exec -p <pod-name> -it -- bash
-#
-# 3. Because of how `kubectl exec` and `osc exec` work right now [2], your
-#    current working directory is root (/). Change it to where your code lives:
-#
-#     cd $HOME
-#
-# 4. Because of how the images produced with CentOS and RHEL work currently [3],
-#    you need to manually enable any Software Collections you need to use:
-#
-#     source scl_source enable python33
-#
-# 5. Finally, execute any command that you need and exit the shell.
-#
 # Related GitHub issues:
 # [1] https://github.com/GoogleCloudPlatform/kubernetes/issues/8876
 # [2] https://github.com/GoogleCloudPlatform/kubernetes/issues/7770
 # [3] https://github.com/openshift/origin/issues/2001
 
-# You can use this wrapper like this:
+# Usage examples:
 #
 # ./run-in-container.sh ./manage.py migrate
 # ./run-in-container.sh ./manage.py createsuperuser
-# ./run-in-container.sh tail -f access.log
+# ./run-in-container.sh ./manage.py shell
 #
 # If your Python pods are labeled with a name other than "django", you can use:
 #
-# POD_NAME=something ./run-in-container.sh ./manage.py check
+# POD_NAME=name ./run-in-container.sh ./manage.py check
 #
-# You can also specify a POD by index:
+# If there is more than one replica, you can also specify a POD by index:
 #
-# POD_INDEX=1 ./run-in-container.sh tail -f access.log
+# POD_INDEX=1 ./run-in-container.sh ./manage.py shell
 #
 # Or both together:
 #
-# POD_NAME=frontend POD_INDEX=2 ./run-in-container.sh tail -f access.log
+# POD_NAME=frontend POD_INDEX=2 ./run-in-container.sh ./manage.py shell
 
 # Get name of a currently deployed pod by label and index

openshift/templates/ (application template)

@@ -181,10 +181,6 @@
       "name": "DATABASE_PASSWORD",
       "value": "${DATABASE_PASSWORD}"
     },
-    {
-      "name": "APP_MODULE",
-      "value": "${APP_MODULE}"
-    },
     {
       "name": "APP_CONFIG",
       "value": "${APP_CONFIG}"
@@ -327,19 +323,13 @@
       "generate": "expression",
       "from": "[a-zA-Z0-9]{16}"
     },
-    {
-      "name": "APP_MODULE",
-      "description": "Python dotted path to your Django WSGI application",
-      "value": "project.wsgi"
-    },
     {
       "name": "APP_CONFIG",
-      "description": "Relative path to Gunicorn configuration file (optional)",
-      "value": "gunicorn_conf.py"
+      "description": "Relative path to Gunicorn configuration file (optional)"
     },
     {
       "name": "DJANGO_SECRET_KEY",
-      "description": "Django secret key",
+      "description": "Set this to a long random string",
       "generate": "expression",
       "from": "[\\w]{50}"
     }

openshift/templates/ (application template)

@@ -159,10 +159,6 @@
       "name": "DATABASE_PASSWORD",
       "value": "${DATABASE_PASSWORD}"
     },
-    {
-      "name": "APP_MODULE",
-      "value": "${APP_MODULE}"
-    },
     {
       "name": "APP_CONFIG",
       "value": "${APP_CONFIG}"
@@ -229,19 +225,13 @@
       "description": "Image Stream of the builder image",
       "value": "python-33-centos7"
     },
-    {
-      "name": "APP_MODULE",
-      "description": "Python dotted path to your Django WSGI application",
-      "value": "project.wsgi"
-    },
     {
       "name": "APP_CONFIG",
-      "description": "Relative path to Gunicorn configuration file (optional)",
-      "value": "gunicorn_conf.py"
+      "description": "Relative path to Gunicorn configuration file (optional)"
    },
     {
       "name": "DJANGO_SECRET_KEY",
-      "description": "Django secret key",
+      "description": "Set this to a long random string",
       "generate": "expression",
       "from": "[\\w]{50}"
     }
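Since both templates drop the APP_MODULE parameter (the run script now discovers the WSGI module on its own) and APP_CONFIG no longer has a default, here is a minimal sketch of instantiating a template from the command line while still supplying APP_CONFIG; the `-v` parameter-override flag is an assumption and may differ between `osc` releases:

```bash
# Process the template with an explicit APP_CONFIG and create the resulting objects.
osc process -f openshift/templates/django.json -v APP_CONFIG=gunicorn_conf.py | osc create -f -
```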