Commit 2960a68d authored by Alessandro CERIONI's avatar Alessandro CERIONI
Browse files

Improved logging.

parent c41cf1f3
Pipeline #3196 passed with stage
in 45 seconds
......@@ -4,12 +4,12 @@ WORKDIR /app
RUN mkdir cache
# Install dependencies before copying application code so the pip layer
# is cached and only rebuilt when requirements.txt changes.
COPY requirements.txt .
RUN pip install -r requirements.txt
COPY api.py .
COPY main.py .
EXPOSE 8000
VOLUME /cache
# --preload loads the app once before forking workers; --log-level=info
# makes gunicorn's logger emit the app's info-level messages.
CMD gunicorn --workers=2 --timeout 300 -b 0.0.0.0:8000 --log-level=info --preload api:api
......@@ -4,6 +4,8 @@ import os.path
from apscheduler.schedulers.background import BackgroundScheduler
import atexit
import random
import logging
#logging.basicConfig(format="[%(asctime)s] [%(process)d] [%(levelname)s] %(message)s", datefmt="%Y-%m-%d %H:%M:%S %z")
from main import refresh_cache
......@@ -15,10 +17,9 @@ scheduler = BackgroundScheduler()
# We let the various instances of this service refresh the cache at different
# times, with at least 5 minutes between each request. The following
# configuration allows for 10 concurrent instances (minute = 0, 5, ..., 45).
random_minute = 5 * random.randint(0, 9)
# Refresh the cache once a day, at a randomized minute past 6 AM.
scheduler.add_job(refresh_cache, 'cron', hour=6, minute=random_minute)
scheduler.start()
# Shut down the scheduler when exiting the app
......@@ -26,6 +27,15 @@ atexit.register(lambda: scheduler.shutdown())
api = Flask(__name__, static_url_path='')

# When running under gunicorn (i.e. not executed directly), hand Flask's
# logger over to gunicorn's error logger so all output shares one
# format/level and actually reaches the gunicorn log stream.
if __name__ != '__main__':
    gunicorn_logger = logging.getLogger('gunicorn.error')
    api.logger.handlers = gunicorn_logger.handlers
    api.logger.setLevel(gunicorn_logger.level)

api.logger.info("Cache will be refreshed every day at %s minutes past 6 AM (GMT)." % random_minute)
@api.route("/")
def index():
......
......@@ -16,7 +16,14 @@ except ImportError:
with open("config.yaml", 'r') as yamlfile:
    cfg = load(yamlfile, Loader=Loader)

# Module-level logger used by refresh_cache(). Define it unconditionally so
# the module also works when executed directly (previously `log` was only
# bound under gunicorn, causing a NameError when run as a script).
log = logging.getLogger("cache-refresher")

# Under gunicorn, reuse its handlers/level so this module's messages share
# the gunicorn log format and destination.
if __name__ != '__main__':
    gunicorn_logger = logging.getLogger('gunicorn.error')
    log.handlers = gunicorn_logger.handlers
    log.setLevel(gunicorn_logger.level)
def refresh_cache():
......@@ -37,18 +44,21 @@ def refresh_cache():
#
# if file_found == False or age > 86400:
logging.info('Fetching a new file...')
log.info('Refreshing cache...')
done = False
while not done:
res = requests.get(url)
if res.status_code != 200:
logging.error('Something went wrong. Sleeping for 5 seconds before retrying...')
log.error('Something went wrong when hitting the following URL: %s' % url)
log.error('Here is the response:')
log.error(res)
log.error('Sleeping for 5 seconds before retrying...')
time.sleep(5)
done = False
else:
logging.info('Done.')
log.info('Done.')
done = True
break
......
Supports Markdown
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment