Commit d169b823 authored by Sébastien DA ROCHA

Merge branch 'development' into 'master'

Fix #10877: treat arrays of str as str instead of json

See merge request !12
parents f927f33e 116a94d5
2 merge requests: !14 fix #11163 restricted access, !12 Fix #10877: treat arrays of str as str instead of json
Pipeline #14080 passed
Showing 46 additions and 12 deletions
@@ -5,9 +5,13 @@ import msgpack
 from lib.serializers import encode_datetime, decode_datetime
 import re
 import os
+import json
 import pytest

+# Set to True to generate JSON files next to the MP files (more readable)
+DEBUG_OUTPUT_JSON = False
+

 def _sanitize(name):
     """
     Replace certain characters (which might be problematic when contained in
@@ -19,8 +23,14 @@ def _sanitize(name):

 @pytest.fixture
-def load_object():
-    def load_file(file_path):
+def load_object(request):
+    module = request.module.__name__
+    node = _sanitize(request.node.name).replace("test_fix_field_types",
+                                                'test_get_entries_from_postgis')
+
+    def load_file(file_path_template):
+        file_path = file_path_template.format(node)
         with open(file_path, 'rb') as file_stream:
             expected_data = msgpack.unpackb(file_stream.read(), raw=False, object_hook=decode_datetime)
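Pieced together, the reworked fixture derives a per-parameter file name from the requesting test's node id, so both test modules can read the same recorded files. A minimal sketch of the result, assuming load_file returns the unpacked data (the hunk cuts off before the return) and that _sanitize is the helper defined above in the same conftest:

import msgpack
import pytest

from lib.serializers import decode_datetime

@pytest.fixture
def load_object(request):
    # A node id such as "test_fix_field_types[/rdata-apd_apidae.apdevenement]"
    # is sanitized, then its prefix rewritten so the doc_processor tests reuse
    # the files recorded by the doc_enricher tests
    node = _sanitize(request.node.name).replace("test_fix_field_types",
                                                'test_get_entries_from_postgis')

    def load_file(file_path_template):
        # e.g. 'tests/data/test_doc_enricher.{}.0.mp' -> concrete per-test path
        file_path = file_path_template.format(node)
        with open(file_path, 'rb') as file_stream:
            return msgpack.unpackb(file_stream.read(), raw=False,
                                   object_hook=decode_datetime)

    return load_file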
@@ -51,9 +61,13 @@ def verify_objects(request):
             the_body = msgpack.packb(data, use_bin_type=True, default=encode_datetime)
             file_stream.write(the_body)
+        if DEBUG_OUTPUT_JSON:
+            with open(file_path + ".json", "w") as file_stream:
+                json.dump(data, file_stream, default=encode_datetime, indent=4)
         with open(file_path, 'rb') as file_stream:
             expected_data = msgpack.unpackb(file_stream.read(), raw=False, object_hook=decode_datetime)
         assert data == expected_data
-    yield check_data
+    return check_data
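encode_datetime and decode_datetime come from lib.serializers and are not shown in this diff. A hypothetical hook pair in the usual msgpack style (an assumption, not the project's actual code) would look like this:

from datetime import datetime

def encode_datetime(obj):
    # Hypothetical stand-in for lib.serializers.encode_datetime: called by
    # msgpack.packb(default=...) and json.dump(default=...) only for values
    # they cannot serialize natively
    if isinstance(obj, datetime):
        return {'__datetime__': True, 'as_str': obj.isoformat()}
    raise TypeError(f'cannot serialize {type(obj)}')

def decode_datetime(obj):
    # Hypothetical stand-in for lib.serializers.decode_datetime: called by
    # msgpack.unpackb(object_hook=...) for every decoded map
    if obj.get('__datetime__'):
        return datetime.fromisoformat(obj['as_str'])
    return obj

Reusing the same default for json.dump is what lets the DEBUG_OUTPUT_JSON branch work without extra code, since both serializers invoke it only for unknown types. The switch from yield to return is equivalent here because check_data needs no teardown.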
Source diff could not be displayed: it is too large.
File added
File added
@@ -4,9 +4,12 @@ from workers.doc_enricher import get_entries_from_postgis

 @pytest.mark.vcr()
-def test_get_entries_from_postgis(verify_objects):
-    link = dict(url='/rdata',
-                name='apd_apidae.apdevenement_2_0_0')
+@pytest.mark.parametrize("url,name", [
+    ('/rdata', "apd_apidae.apdevenement"),
+    ('/grandlyon', 'eco_ecologie.ecoannuproducteur_latest')
+])
+def test_get_entries_from_postgis(verify_objects, url, name):
+    link = dict(url=url, name=name)
     cfg = dict()
     cfg['host'] = "147.135.219.0"
...
-from workers.doc_processor import fix_field_types
 import os
 import json
+import pytest
+from workers.doc_processor import fix_field_types

-def test_fix_field_types(load_object, verify_objects):
+@pytest.mark.parametrize("url,name", [
+    ('/rdata', "apd_apidae.apdevenement"),
+    ('/grandlyon', 'eco_ecologie.ecoannuproducteur_latest')
+])
+def test_fix_field_types(load_object, verify_objects, url, name):
     link = dict()
-    link['url'] = "/rdata"
-    link['name'] = "apd_apidae.apdevenement_2_0_0"
+    link['url'] = url
+    link['name'] = name
     filename = os.path.join('tests/data/prod_working_directory/',
                             'field_catalog_by_dbschematable.json')
...
@@ -15,7 +22,7 @@ def test_fix_field_types(load_object, verify_objects):
     with open(filename, 'r') as fp:
         catalog = json.load(fp)

-    docs = load_object('tests/data/test_doc_enricher.test_get_entries_from_postgis.0.mp')
+    docs = load_object('tests/data/test_doc_enricher.{}.0.mp')
     doc_page = [{'data-fr': feature} for feature in docs]
     out_docs = fix_field_types(doc_page, catalog, link)
...
@@ -78,6 +78,16 @@ def fix_field_types(in_docs, field_catalog, link):

     def typed_name(name, types):
         type_of_name = types.get(name)
+        # Redmine 10877
+        # If the name appears with a ".0" suffix, it is most likely an array
+        # of str: file it under _str instead of json so as not to confuse
+        # Elasticsearch (to Elasticsearch, an array of str is type str, not json).
+        type_of_name = type_of_name or types.get(f"{name}.0")
+        # If the type does not exist (address, for instance), the property has
+        # been split into address.code_poste, address.address,
+        # so we say it is a json.
         if not type_of_name:
             return f'{name}_json'
...
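Read as a whole, the patched helper now falls back to the ".0"-suffixed catalog key before giving up. A minimal, self-contained sketch of the resulting lookup; the final return line is an assumption inferred from the _json naming above, since the hunk is truncated:

def typed_name(name, types):
    type_of_name = types.get(name)
    # Arrays of str are flattened to "<name>.0" keys in the catalog;
    # Elasticsearch treats an array of str as type str, not json
    type_of_name = type_of_name or types.get(f"{name}.0")
    if not type_of_name:
        # e.g. "address" split into "address.code_poste", "address.address"
        return f'{name}_json'
    return f'{name}_{type_of_name}'  # assumed suffix convention

# Hypothetical catalog illustrating the fix for Redmine 10877:
types = {'title': 'str', 'tags.0': 'str'}
assert typed_name('title', types) == 'title_str'
assert typed_name('tags', types) == 'tags_str'       # previously 'tags_json'
assert typed_name('address', types) == 'address_json'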