import pika
import msgpack
import time
import json
from elasticsearch import Elasticsearch
from elasticsearch.exceptions import AuthorizationException
import hashlib
from utils.exit_gracefully import exit_gracefully
from utils.my_logging import logging
from pprint import pprint
def tag_doc( the_doc ):
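    """Derive editorial tags (isOpenAccess, isRealTime, isQueryable, isSearchable,
    isPunctual, isLinear, isAreal) from the document's metadata-fr / data-fr fields
    and return a copy of the document with them prepended under 'editorial-metadata-en'."""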
    # tag_list = ['isOpenAccess', 'isRealTime', 'isQueryable', 'isSearchable', 'isPunctual', 'isLinear', 'isAreal']
    tag_dict = {}
    # isOpenAccess?
    if 'legalConstraints' not in the_doc['metadata-fr'].keys() or not any( [x in the_doc['metadata-fr']['legalConstraints'] for x in ['Licence Associée', 'Licence Engagée']] ):
        tag_dict['isOpenAccess'] = True
# isRealTime?
if 'updateFrequency' in the_doc['metadata-fr'].keys() and 'continual' in the_doc['metadata-fr']['updateFrequency']:
tag_dict['isRealTime'] = True
    # isQueryable?
if 'link' in the_doc['metadata-fr'].keys():
for link in the_doc['metadata-fr']['link']:
#print(link)
if any( [x in link['protocol'] for x in ['OGC:WFS', 'OGC:WMS', 'KML', 'WS']] ):
tag_dict['isQueryable'] = True
break
# N.B.: in order to determine the following tags, we need the data-fr field;
# in case the data-fr field is absent, the tags 'isSearchable',
# 'isPunctual', 'isLinear', 'isAreal' will be absent instead of being 'falsely' set to false!
# isSearchable?
if 'data-fr' in the_doc.keys():
tag_dict['isSearchable'] = True
# init
tag_dict['isPunctual'] = False
tag_dict['isLinear'] = False
tag_dict['isAreal'] = False
# isPunctual?
if any( [x in the_doc['data-fr']['geometry']['type'] for x in ['Point', 'MultiPoint']] ):
tag_dict['isPunctual'] = True
# isLinear?
if any( [x in the_doc['data-fr']['geometry']['type'] for x in ['LineString', 'MultiLineString']] ):
tag_dict['isLinear'] = True
# isAreal?
        if any( [x in the_doc['data-fr']['geometry']['type'] for x in ['Polygon', 'MultiPolygon']] ):
            tag_dict['isAreal'] = True

    tagged_doc = {'editorial-metadata-en': tag_dict, **the_doc}
    return tagged_doc
def index_docs(channel, method, properties, body, es):
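    """RabbitMQ callback: tag the incoming documents, bulk-index them into
    Elasticsearch, and ack (or nack + requeue) the message depending on the outcome."""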
    t1 = time.time()
    # the message body is msgpack-encoded; raw=False is assumed here so that
    # strings are decoded to str rather than left as bytes
    decoded_body = msgpack.unpackb(body, raw=False)
    if isinstance(decoded_body['body'], list):
#docs_to_index = decoded_body['body']
docs_to_index = [tag_doc(doc) for doc in decoded_body['body']]
else:
#docs_to_index = [decoded_body['body']]
docs_to_index = [tag_doc(decoded_body['body'])]
#print(docs_to_index)
es_index = decoded_body['header']['index']['_index']
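    # settings and mappings applied when creating the target index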
es_body = { "settings" : {
"number_of_shards" : 1,
"number_of_replicas" : 0,
"index.mapping.total_fields.limit": 10000,
"refresh_interval": "30s"
},
"mappings": {
"_doc": {
"dynamic_templates": [ # priority is given by order!
{
"uuid" : {
"path_match": "uuid",
"mapping": {
"type": "keyword",
}
}
},
{
"default" : {
"path_match": "*",
"mapping": {
}
}
}
]
}
}
}
#es_body.update({"mappings": {"_doc": {"dynamic_date_formats": ["strict_date_optional_time"]}}})
    try:
        rep = es.indices.create(es_index, es_body)#, wait_for_active_shards=0)
    except Exception:
        # the index may already exist; in that case, keep going and reuse it
        pass
logging.info("Pushing %i documents to Elasticsearch..." % len(docs_to_index))
es_body = ''
header = decoded_body['header']
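    # build the NDJSON payload expected by the Elasticsearch Bulk API:
    # one action line (the 'index' header) followed by the document itself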
for doc in docs_to_index:
# try:
# header['index']['_id'] = doc['_id'] #hashlib.md5( json.dumps(doc, sort_keys=True).encode("utf-8") ).hexdigest()
# del doc['_id']
# except:
# header['index']['_id'] = hashlib.md5( json.dumps(doc, sort_keys=True).encode("utf-8") ).hexdigest()
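        # deterministic _id: the MD5 of the canonicalized document, so that
        # re-indexing the same document overwrites it instead of duplicating it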
header['index']['_id'] = hashlib.md5( json.dumps(doc, sort_keys=True).encode("utf-8") ).hexdigest()
es_body += '{0}\n{1}\n'.format(json.dumps(header), json.dumps(doc))
rep = es.bulk(body=es_body)
#print(rep)
t2 = time.time()
    if not rep['errors']:
        channel.basic_ack(delivery_tag = method.delivery_tag)
        #print("")
        logging.info("Done in %s seconds." % (t2-t1))
    else:
        channel.basic_nack(delivery_tag = method.delivery_tag, requeue=True)
#print("")
logging.error(json.dumps(rep, indent=4))
        logging.error("Bulk indexing failed; the message was requeued.")
#time.sleep(5)
return
def main(cfg):
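    """Connect to Elasticsearch and RabbitMQ, then consume and index documents."""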
es = Elasticsearch([cfg['indexer']['url']], timeout=60)
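    # raise the elasticsearch client's logger to INFO to suppress per-request DEBUG chatter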
es_logger = logging.getLogger('elasticsearch')
es_logger.setLevel(logging.INFO)
connection = pika.BlockingConnection(pika.ConnectionParameters(host=cfg['rabbitmq']['host']))
channel = connection.channel()
exchange = cfg['rabbitmq']['exchange']
# the queue this program will be consuming messages from
docs_to_index_qn = cfg['session']['id'] + '_' + cfg['rabbitmq']['queue_name_2_suffix']
channel.basic_qos(prefetch_count=1)
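    # N.B.: this callback signature targets pika < 1.0; with pika >= 1.0,
    # basic_consume takes (queue, on_message_callback) instead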
channel.basic_consume(lambda ch, method, properties, body: index_docs(ch, method, properties, body, es), queue=docs_to_index_qn)#, no_ack=True)
channel.start_consuming()
connection.close()
if __name__ == '__main__':
import yaml
import time
import signal
signal.signal(signal.SIGINT, exit_gracefully)
with open("config.yaml", 'r') as yamlfile:
        cfg = yaml.safe_load(yamlfile)
while True:
try:
main(cfg)
except pika.exceptions.ChannelClosed:
logging.info("Waiting for tasks...")
time.sleep(5)
except AuthorizationException as e:
logging.error(e)
exit(1)