Commit 18b6f0d

Merge branch 'release/5.18.0'
2 parents 532cfd6 + b91e975 commit 18b6f0d

173 files changed: +2220 −951 lines changed


CITATION.cff

Lines changed: 2 additions & 2 deletions
@@ -35,5 +35,5 @@ keywords:
   - elasticsearch
   - natural language processing
 license: MIT
-version: 5.17.0
-date-released: '2025-02-13'
+version: 5.18.0
+date-released: '2025-04-10'

LICENSE

Lines changed: 1 addition & 1 deletion
@@ -1,5 +1,5 @@
 Copyright (c) 2017-2022 Digital Humanities Lab, Faculty of Humanities, Utrecht University
-Copyright (c) 2023-2024 Research Software Lab, Centre for Digital Humanities, Utrecht University
+Copyright (c) 2023-2025 Research Software Lab, Centre for Digital Humanities, Utrecht University
 
 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal

README.md

Lines changed: 9 additions & 1 deletion
@@ -31,7 +31,15 @@ The [documentation directory](./documentation/) contains documentation for devel
 
 ## Licence
 
-I-analyzer is shared under an MIT licence. See [LICENSE](./LICENSE) for more information.
+The source code of I-analyzer is shared under an MIT licence. See [LICENSE](./LICENSE) for the full licence statement.
+
+### Images
+
+This licence does *not* cover the images used for corpora, which are licensed individually. These images are located in the [corpora directory](/backend/corpora/), in the "images" folder for each corpus.
+
+Each image is accompanied by a `*.license` file that provides information on licensing for that image. If you wish to reuse or distribute this repository including these images, you will have to ensure that you comply with the license terms of the image as well.
+
+Some images currently lack a licence file. We are working on providing clear copyright information for all images; until then, assume that these images are protected under copyright.
 
 ## Citation
 
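As a quick illustration of the layout described in the README change above (not part of this commit), the following sketch lists image folders that do not yet contain any `*.license` file; the exact pairing of individual images to licence files is left out because the naming convention is not spelled out here:

    from pathlib import Path

    # run from the repository root; layout as described in the README above
    for images_dir in Path('backend/corpora').glob('*/images'):
        if not any(images_dir.glob('*.license')):
            print(f'no licence information yet in {images_dir}')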

backend/addcorpus/models.py

Lines changed: 1 addition & 1 deletion
@@ -32,7 +32,7 @@
     validate_default_sort,
     validate_ngram_has_date_field,
 )
-from ianalyzer.elasticsearch import elasticsearch
+from es.client import elasticsearch
 
 MAX_LENGTH_NAME = 126
 MAX_LENGTH_DESCRIPTION = 254

backend/addcorpus/python_corpora/tests/test_times_source.py

Lines changed: 1 addition & 1 deletion
@@ -12,7 +12,7 @@ def times_test_settings(settings):
         'times': join(settings.BASE_DIR, 'corpora/times/times.py')
     }
     settings.TIMES_DATA = join(settings.BASE_DIR, 'addcorpus/python_corpora/tests')
-    settings.TIMES_ES_INDEX = 'times-test'
+    settings.TIMES_ES_INDEX = 'test-times'
 
 
 
backend/addcorpus/tests/test_corpus_views.py

Lines changed: 5 additions & 0 deletions
@@ -6,6 +6,7 @@
 from users.models import CustomUser
 from addcorpus.models import Corpus, CorpusDocumentationPage
 from addcorpus.python_corpora.save_corpus import load_and_save_all_corpora
+from addcorpus.json_corpora.validate import corpus_schema
 
 def test_no_corpora(db, settings, admin_client):
     Corpus.objects.all().delete()
@@ -160,3 +161,7 @@ def test_corpus_edit_views(admin_client: Client, json_corpus_definition: Dict, j
     assert status.is_success(response.status_code)
     assert len(response.data) == 1
 
+
+def test_corpus_schema_view(client):
+    response = client.get('/api/corpus/definition-schema')
+    assert response.data == corpus_schema()

backend/addcorpus/urls.py

Lines changed: 4 additions & 1 deletion
@@ -1,8 +1,11 @@
 from django.urls import path
-from addcorpus.views import CorpusImageView, CorpusView, CorpusDocumentationPageViewset, CorpusDocumentView
+from addcorpus.views import (
+    CorpusImageView, CorpusView, CorpusDocumentView, CorpusDefinitionSchemaView
+)
 
 urlpatterns = [
     path('', CorpusView.as_view({'get': 'list'})),
     path('image/<str:corpus>', CorpusImageView.as_view()),
     path('document/<str:corpus>/<str:filename>', CorpusDocumentView.as_view()),
+    path('definition-schema', CorpusDefinitionSchemaView.as_view()),
 ]

backend/addcorpus/views.py

Lines changed: 11 additions & 0 deletions
@@ -19,6 +19,7 @@
 from rest_framework.response import Response
 from rest_framework.status import HTTP_200_OK
 from rest_framework.views import APIView
+from addcorpus.json_corpora.validate import corpus_schema
 
 
 class CorpusView(viewsets.ReadOnlyModelViewSet):
@@ -124,3 +125,13 @@ def info(self, request, pk):
         info = get_csv_info(obj.file.path, sep=delimiter if delimiter else ',')
 
         return Response(info, HTTP_200_OK)
+
+
+class CorpusDefinitionSchemaView(APIView):
+    '''
+    View the JSON schema for corpus definitions
+    '''
+
+    def get(self, request, *args, **kwargs):
+        schema = corpus_schema()
+        return Response(schema, HTTP_200_OK)
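The new view simply returns the output of `corpus_schema()` as a JSON response. As a rough illustration (not part of this commit), a client could fetch the schema and validate a local corpus definition against it; the `requests` and `jsonschema` packages, the host, and the file name below are assumptions:

    import json
    import requests
    import jsonschema

    # fetch the published schema (assumes a local dev server on port 8000)
    schema = requests.get('http://localhost:8000/api/corpus/definition-schema').json()

    # validate a local corpus definition file (hypothetical file name)
    with open('my_corpus.json') as f:
        definition = json.load(f)

    jsonschema.validate(instance=definition, schema=schema)  # raises ValidationError on mismatch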

backend/conftest.py

Lines changed: 16 additions & 4 deletions
@@ -7,14 +7,18 @@
 from elasticsearch import Elasticsearch
 import warnings
 
-from ianalyzer.elasticsearch import client_from_config
+from es.client import client_from_config
 from addcorpus.python_corpora.save_corpus import load_and_save_all_corpora
-from es import es_index as index, sync
+from es import sync
+from indexing.create_job import create_indexing_job
+from indexing.run_job import perform_indexing
 from django.conf import settings
 from django.contrib.auth.models import Group
 from addcorpus.models import Corpus
 from addcorpus.serializers import CorpusJSONDefinitionSerializer
 from es.models import Server
+from rest_framework.test import APIClient
+
 
 @pytest.fixture(autouse=True)
 def media_dir(tmpdir, settings):
@@ -165,8 +169,8 @@ def _index_test_corpus(es_client: Elasticsearch, corpus_name: str):
 
     if not es_client.indices.exists(index=corpus.configuration.es_index):
         with warnings.catch_warnings():
-            job = index.create_indexing_job(corpus)
-            index.perform_indexing(job)
+            job = create_indexing_job(corpus)
+            perform_indexing(job)
 
     # ES is "near real time", so give it a second before we start searching the index
     sleep(2)
@@ -236,3 +240,11 @@ def json_mock_corpus(db, json_corpus_definition) -> Corpus:
     corpus.configuration.save()
 
     return corpus
+
+@pytest.fixture(scope='session')
+def celery_config():
+    return {
+        'task_serializer': 'pickle',
+        'result_serializer': 'pickle',
+        'accept_content': ['json', 'pickle'],
+    }
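The added `celery_config` fixture is the override hook recognised by Celery's pytest plugin, so tests that request the `celery_app` and `celery_worker` fixtures get an embedded worker configured with the pickle serializers above. A minimal sketch of such a test (not from this commit; the task is hypothetical and the plugin's fixtures are assumed to be enabled):

    def test_pickle_serialization(celery_app, celery_worker):
        @celery_app.task
        def echo(value):
            return value

        celery_worker.reload()  # let the embedded worker pick up the task defined above
        # a dict argument survives the pickle round trip configured in celery_config
        assert echo.delay({'nested': [1, 2, 3]}).get(timeout=10) == {'nested': [1, 2, 3]}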

backend/corpora/dbnl/dbnl.py

Lines changed: 24 additions & 1 deletion
@@ -360,9 +360,31 @@ def _xml_files(self):
         extractor=XML(
             Tag(utils.LINE_TAG),
             TransformTag(utils.pad_content),
+            TransformTag(utils.replace_notes_with_ref),
             multiple=True,
             flatten=True,
-            transform=lambda lines: '\n'.join(lines).strip() if lines else None,
+            transform=utils.join_paragraphs,
+        ),
+        es_mapping=main_content_mapping(token_counts=True),
+        visualizations=['wordcloud'],
+        language='dynamic',
+    )
+
+    notes = FieldDefinition(
+        name='notes',
+        display_name='Notes',
+        description='Notes (e.g. footnotes) added to the content',
+        display_type='text_content',
+        results_overview=False,
+        search_field_core=True,
+        csv_core=True,
+        extractor=XML(
+            Tag('note'),
+            TransformTag(utils.pad_content),
+            TransformTag(utils.insert_ref),
+            multiple=True,
+            flatten=True,
+            transform=utils.join_paragraphs,
         ),
         es_mapping=main_content_mapping(token_counts=True),
         visualizations=['wordcloud'],
@@ -418,6 +440,7 @@ def _xml_files(self):
         chapter_title,
         chapter_index,
         content,
+        notes,
         has_content,
         is_primary,
     ]
Lines changed: 3 additions & 0 deletions
@@ -0,0 +1,3 @@
+No copyright restrictions.
+
+Image source: https://www.dbnl.org/tekst/alph002vana01_01/alph002vana01_01_0001.php

backend/corpora/dbnl/tests/test_dbnl_extraction.py

Lines changed: 22 additions & 0 deletions
@@ -107,6 +107,7 @@ def test_append_to_tag(xml, tag, padding, original_output, new_output):
             'Cornelis Maertsz. tot Wervers hoof.',
             '\'t Amsterdam Voor Michiel de Groot, Boek-Verkooper op den Nieuwen Dijck, 1671.',
         ]),
+        'notes': None,
         'chapter_title': None,
         'chapter_index': 1,
         'has_content': True,
@@ -199,4 +200,25 @@ def test_dbnl_extraction(dbnl_corpus):
         assert expected[key] == actual[key]
     assert expected.items() <= actual.items()
 
+section_with_footnote = '''
+<div>
+<p>
+<pb n="128"/>geen zekerlijk in de twee bedoelde taalen zeer in elkaâr loopt. Althans in de Constructie geloof ik niet dat de reden kan gezocht worden<note n="a" place="foot">Eenigsints belagchelijk wordt het, wanneer men zich te Stockholm geduurig, tot bewijs der overëenkomst tusschen Zweedsch en Engelsch, de zelfde drie of vier woorden hoort voorzeggen, zonder dat men, om meerdere voorbeelden vraagende, zoo ligtelijk antwoord ontvangt.</note>. Voor 't overige kan ieder Hollander, die er nog een paar der gewoonste Europaesche taalen bij bezit, het Zweedsch, even als het Deensch, zich zelve leeren. In de Poësie evenwel ontbreekt het niet aan eene meenigte woorden, die men vruchteloos uit de Analogie zou willen verklaaren, en die het leezen der Dichters zeer vermoeiëlijken.<note n="b" place="foot">Van een Hoogduitsch - Zweedsch, en Zweedsch - Hoogduitsch Woordenboek van <hi rend="sc">möller</hi> bezit ik nog maar de twee eerste deelen in 40., welke het Hoogduitsch gedeelte bevatten; ik weet niet of het overige reeds gevolgd is, of nog volgen zal. Het is reeds van 1785. Eene kleine <hi rend="i">Grammatica</hi> van <hi rend="sc">abr. Sahlstedt</hi> is in 1796 in 't Hoogduitsch overgezet. Over het Lapsch en Finsch, twee van het Zweedsch geheel onderscheidene taalen, welke ook in dit Koninkrijk gesproken worden, zal het voegsaamer zijn op eene andere plaats te handelen.</note>
+</p>
+</div>
+'''
 
+expected_content = '''geen zekerlijk in de twee bedoelde taalen zeer in elkaâr loopt. Althans in de Constructie geloof ik niet dat de reden kan gezocht worden[1]. Voor 't overige kan ieder Hollander, die er nog een paar der gewoonste Europaesche taalen bij bezit, het Zweedsch, even als het Deensch, zich zelve leeren. In de Poësie evenwel ontbreekt het niet aan eene meenigte woorden, die men vruchteloos uit de Analogie zou willen verklaaren, en die het leezen der Dichters zeer vermoeiëlijken.[2]'''
+
+expected_notes = '''[1] Eenigsints belagchelijk wordt het, wanneer men zich te Stockholm geduurig, tot bewijs der overëenkomst tusschen Zweedsch en Engelsch, de zelfde drie of vier woorden hoort voorzeggen, zonder dat men, om meerdere voorbeelden vraagende, zoo ligtelijk antwoord ontvangt.
+[2] Van een Hoogduitsch - Zweedsch, en Zweedsch - Hoogduitsch Woordenboek van möller bezit ik nog maar de twee eerste deelen in 40., welke het Hoogduitsch gedeelte bevatten; ik weet niet of het overige reeds gevolgd is, of nog volgen zal. Het is reeds van 1785. Eene kleine Grammatica van abr. Sahlstedt is in 1796 in 't Hoogduitsch overgezet. Over het Lapsch en Finsch, twee van het Zweedsch geheel onderscheidene taalen, welke ook in dit Koninkrijk gesproken worden, zal het voegsaamer zijn op eene andere plaats te handelen.'''
+
+def test_footnotes_extraction(dbnl_corpus):
+    corpus = load_corpus_definition(dbnl_corpus)
+    soup = BeautifulSoup(section_with_footnote, 'lxml-xml')
+
+    content = corpus.content.extractor.apply(None, soup)
+    assert content == expected_content
+
+    notes = corpus.notes.extractor.apply(None, soup)
+    assert notes == expected_notes

backend/corpora/dbnl/utils.py

Lines changed: 40 additions & 7 deletions
@@ -2,7 +2,9 @@
 from bs4 import BeautifulSoup
 import os
 from langcodes import standardize_tag, Language
+from typing import List
 
+from copy import copy
 from addcorpus.python_corpora.extract import Pass, Combined, CSV
 
 # === METADATA EXTRACTION ===
@@ -179,13 +181,6 @@ def append_to_tag(soup, tag, padding):
         tag.append(padding)
 
     return soup
-
-def pad_content(node):
-    pad_cells = lambda n: append_to_tag(n, 'cell', ' ')
-    pad_linebreaks = lambda n: append_to_tag(n, 'lb', '\n')
-    pad_cells(pad_linebreaks(node))
-    return [node]
-
 def standardize_language_code(code):
     if code:
         return standardize_tag(code)
@@ -205,3 +200,41 @@ def language_name(code):
         codes
     ))
     return ', '.join(names)
+
+## ======== TEXT FORMATTING =============
+
+def pad_content(node):
+    pad_cells = lambda n: append_to_tag(n, 'cell', ' ')
+    pad_linebreaks = lambda n: append_to_tag(n, 'lb', '\n')
+    pad_cells(pad_linebreaks(node))
+    return [node]
+
+
+def get_ref(node):
+    prev = node.find_all_previous('note')
+    return len(prev) + 1
+
+def insert_ref(node):
+    '''
+    Adds a reference, e.g. `[1]` at the start of a node's contents
+    '''
+    ref = get_ref(node)
+    node.insert(0, f'[{ref}] ')
+    yield node
+
+def replace_notes_with_ref(node):
+    '''
+    Replaces all `<note>` tags in the a beautiful soup node with a reference, e.g. `[1]`
+    '''
+    new_node = copy(node) # make a copy to avoid altering the original document
+    tags = zip(node.find_all('note'), new_node.find_all('note'))
+
+    for old_tag, to_replace in tags:
+        ref = get_ref(old_tag)
+        to_replace.replace_with(f'[{ref}]')
+
+    yield new_node
+
+def join_paragraphs(paragraphs: List[str]):
+    text = '\n'.join(paragraphs).strip()
+    return text if text else None
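As a rough sketch of what the new helpers do (not part of this commit; the import path and the sample markup are assumptions, and the `lxml-xml` parser matches the tests above):

    from bs4 import BeautifulSoup
    from corpora.dbnl.utils import insert_ref, join_paragraphs, replace_notes_with_ref

    soup = BeautifulSoup('<p>Some claim<note>a footnote</note> and more.</p>', 'lxml-xml')

    # replace_notes_with_ref yields a copy of the node with each <note> collapsed to '[n]'
    body, = replace_notes_with_ref(soup.p)
    print(join_paragraphs([body.get_text()]))  # Some claim[1] and more.

    # insert_ref prefixes the matching reference to the note itself
    note, = insert_ref(soup.find('note'))
    print(note.get_text())  # [1] a footnote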
Lines changed: 7 additions & 0 deletions
@@ -0,0 +1,7 @@
+(c) Massimo Catarinella
+
+Source: https://commons.wikimedia.org/wiki/File:RotterdamMaasNederland.jpg
+
+Licensed under a Creative Commons Attribution 3.0 Unported licence (https://creativecommons.org/licenses/by/3.0/deed.en)
+
+Modifications made from original: cropped image
Lines changed: 16 additions & 0 deletions
@@ -0,0 +1,16 @@
+This corpus contains the public portion of the [Delpher newspapers dataset](https://www.kb.nl/en/research-find/datasets/delpher-newspapers). This is a collection of Dutch newspapers published between 1618 and 1876, maintained by the KB, the National Library of the Netherlands.
+
+### What is in Delpher Newspapers?
+
+Are you interested to know which countries featured in the news in the 18th century, and which did not? Or how newspapers reported on reforms in the Batavian Republic? Delpher newspapers is a great resource for research into historical newspapers. But this dataset is also useful for broader-based history projects. It gives a peek behind the scenes of what was going on in society.
+
+The full *Delpher newspapers* collection comprises almost 2 million newspapers dating from 1618 until 1995. This collection contains publications published at least 140 years ago, which are no longer subject to copyright.
+
+It includes major national newspapers, such as the *Algemeen Handelsblad*, as well as regional newspapers, such as the *Leydse courant*, and colonial papers, such as the *Java-bode*.
+
+Please note that our collection currently includes only the newspaper set from Delpher and does not (yet) contain the [historical magazine collection](https://www.kb.nl/en/research-find/datasets/delpher-magazines) from the period 1800-2000.
+
+### Recent newspapers (1876-1995)
+
+All users have direct access to the Delpher newspaper dataset for the period 1600–1876. A more recent dataset, which includes newspapers up to 1995, is also available for exploration in I-Analyzer. However, access requires prior permission from the rightsholder, The National Library of the Netherlands (KB). To use the full newspaper dataset, you must first complete and sign an individual agreement to comply with the KB's usage conditions. This form can be requested via
+[[email protected]](mailto:[email protected]), after which we can assist you with finalizing the application process.

backend/corpora/dutchnewspapers/dutchnewspapers_public.py

Lines changed: 1 addition & 0 deletions
@@ -38,6 +38,7 @@ class DutchNewspapersPublic(XMLCorpusDefinition):
     image = 'dutchnewspapers.jpg'
     languages = ['nl']
     category = 'periodical'
+    description_page = 'description_public.md'
     citation_page = 'citation_public.md'
 
     @property
Lines changed: 5 additions & 0 deletions
@@ -0,0 +1,5 @@
+(c) Roman Kraft
+
+Source: https://unsplash.com/photos/man-sitting-on-bench-reading-newspaper-_Zua2hyvTBk
+
+Licensed under the Unsplash licence (https://unsplash.com/license)

backend/corpora/ecco/images/ecco.jpg

49.7 KB
Lines changed: 3 additions & 0 deletions
@@ -0,0 +1,3 @@
+No copyright restrictions
+
+Image source: https://commons.wikimedia.org/wiki/File:English_humorists_of_the_eighteenth_century_-_Sir_Richard_Steele,_Joseph_Addison,_Laurence_Sterne,_Oliver_Goldsmith_(1906)_(14802513503).jpg
Lines changed: 5 additions & 1 deletion
@@ -1 +1,5 @@
-DIOPTRA-L contains Goodreads reviews of contemporary literary texts that have been translated from and into the following languages: Afrikaans, Dutch, English, French, German, Italian, Portuguese and Spanish. A variety of genres are represented, and the corpus is searchable by, for example, language pair, author, title and genre. For more information, or suggestions, please contact Haidee Kotze ([email protected]) and Gys-Walt van Egdom ([email protected])
+DIOPTRA-L contains Goodreads reviews of contemporary literary texts that have been translated from and into the following languages: Afrikaans, Dutch, English, French, German, Italian, Portuguese and Spanish. A variety of genres are represented, and the corpus is searchable by, for example, language pair, author, title and genre. For more information, or suggestions, please contact Haidee Kotze ([email protected]) and Gys-Walt van Egdom ([email protected])
+
+### Image attribution
+
+The DIOPTRA-L logo used as an image for this corpus was created by Gys-Walt van Egdom and Haidee Kotze and is licensed under a [Creative Commons Attribution-Share Alike 4.0 licence](https://creativecommons.org/licenses/by-sa/4.0/deed.en).
Lines changed: 3 additions & 0 deletions
@@ -0,0 +1,3 @@
+(c) Gys-Walt van Egdom (https://www.uu.nl/staff/GMWvanEgdom) and Haidee Kotze (https://www.uu.nl/staff/HKotze)
+
+Licensed under a Creative Commons Attribution-Share Alike 4.0 International licence (https://creativecommons.org/licenses/by-sa/4.0/deed.en).

backend/corpora/guardianobserver/description/guardianobserver.md

Lines changed: 4 additions & 0 deletions
@@ -28,3 +28,7 @@ Political alignment: Centre-left; British republicanism
 ### Availability
 
 The Guardian/Observer corpus is published by [ProQuest](https://en.wikipedia.org/wiki/ProQuest) and is only available to members of Utrecht University.
+
+### Image attribution
+
+The image used for this corpus was created by Michael Brunton-Spall ([image source](https://www.flickr.com/photos/27778872@N08/5084043824)) and is shared under a [Creative Commons Attribution 2.0 licence](https://creativecommons.org/licenses/by/2.0/).

backend/corpora/guardianobserver/guardianobserver.py

Lines changed: 1 addition & 1 deletion
@@ -15,7 +15,7 @@
 
 from django.conf import settings
 
-from es.es_update import update_document
+from indexing.run_update_task import update_document
 from addcorpus.python_corpora import extract
 from addcorpus.python_corpora import filters
 from addcorpus.python_corpora.corpus import XMLCorpusDefinition, FieldDefinition, until, after, string_contains, consolidate_start_end_years
Lines changed: 5 additions & 0 deletions
@@ -0,0 +1,5 @@
+(c) Michael Brunton-Spall
+
+Image source: https://www.flickr.com/photos/27778872@N08/5084043824
+
+Licensed under a Creative Commons Attribution 2.0 licence (https://creativecommons.org/licenses/by/2.0/).
Lines changed: 5 additions & 0 deletions
@@ -0,0 +1,5 @@
+(c) Tadeáš Bednarz (https://www.wikidata.org/wiki/Q99692082)
+
+Image source: https://commons.wikimedia.org/wiki/File:Jewish_cemetery_in_Cieszyn_2017.jpg
+
+Licensed under a Creative Commons Attribution-Share Alike 4.0 International licence (https://creativecommons.org/licenses/by-sa/4.0/deed.en).

backend/corpora/jewishinscriptions/jewishinscriptions.py

Lines changed: 1 addition & 2 deletions
@@ -22,12 +22,11 @@ class JewishInscriptions(XMLCorpusDefinition):
     max_date = datetime(year=849, month=12, day=31)
     data_directory = settings.JEWISH_INSCRIPTIONS_DATA
     es_index = getattr(settings, 'JEWISH_INSCRIPTIONS_ES_INDEX', 'jewishinscriptions')
-    image = 'jewish_inscriptions.jpg'
+    image = 'jewishinscriptions.jpg'
     visualize = []
     languages = ['heb', 'lat']
     category = 'inscription'
 
-
     tag_toplevel = CurrentTag()
     tag_entry = Tag('TEI')
 
Lines changed: 5 additions & 0 deletions
@@ -0,0 +1,5 @@
+(c) KannanVM (https://commons.wikimedia.org/wiki/User:KannanVM)
+
+Image source: https://commons.wikimedia.org/wiki/File:Inscriptions_at_Jewish_Synagogue_at_Kottayil_Kovilakom1.jpg
+
+Licensed under a Creative Commons Attribution-Share Alike 4.0 International licence (https://creativecommons.org/licenses/by-sa/4.0/deed.en).
