- add image import script for Pechgraben images

Arno Kaimbacher 2022-03-11 16:10:52 +01:00
parent dc58b7235f
commit d33e9d2b55
9 changed files with 503 additions and 112 deletions

View file

@@ -10,6 +10,12 @@
    <gml:identifier codeSpace="uniqueID">{procedure_identifier}</gml:identifier>
    <sml:identification>
        <sml:IdentifierList>
            <sml:identifier>
                <sml:Term definition="urn:ogc:def:identifier:OGC:1.0:longName">
                    <sml:label>longName</sml:label>
                    <sml:value>{procedure_name}</sml:value>
                </sml:Term>
            </sml:identifier>
            <sml:identifier>
                <sml:Term definition="urn:ogc:def:identifier:OGC:1.0:shortName">
                    <sml:label>shortName</sml:label>
@@ -28,6 +34,20 @@
            </sml:capability>
        </sml:CapabilityList>
    </sml:capabilities>
    <sml:capabilities name="metadata">
        <sml:CapabilityList> <!-- status indicates whether the sensor is insitu (true) or remote (false) -->
            <sml:capability name="insitu">
                <swe:Boolean definition="insitu">
                    <swe:value>true</swe:value>
                </swe:Boolean>
            </sml:capability> <!-- status indicates whether the sensor is mobile (true) or fixed/stationary (false) -->
            <sml:capability name="mobile">
                <swe:Boolean definition="mobile">
                    <swe:value>false</swe:value>
                </swe:Boolean>
            </sml:capability>
        </sml:CapabilityList>
    </sml:capabilities>
    <sml:featuresOfInterest>
        <sml:FeatureList definition="http://www.opengis.net/def/featureOfInterest/identifier">
            <swe:label>featuresOfInterest</swe:label>
@@ -48,10 +68,10 @@
    </sml:featuresOfInterest>
    <sml:outputs>
        <sml:OutputList>
            <sml:output name="Image">
            <sml:output name="HumanVisualPerception">
                <swe:DataRecord>
                    <swe:field name="manuel_observation">
                        <swe:Text definition="manuel_observation"/>
                    <swe:field name="HumanVisualPerception">
                        <swe:Text definition="HumanVisualPerception"/>
                    </swe:field>
                </swe:DataRecord>
            </sml:output>
@@ -61,19 +81,19 @@
        <swe:Vector referenceFrame="urn:ogc:def:crs:EPSG::4326">
            <swe:coordinate name="easting">
                <swe:Quantity axisID="x">
                    <swe:uom code="degree" />
                    <swe:uom code="degree"/>
                    <swe:value>{cord_x}</swe:value>
                </swe:Quantity>
            </swe:coordinate>
            <swe:coordinate name="northing">
                <swe:Quantity axisID="y">
                    <swe:uom code="degree" />
                    <swe:uom code="degree"/>
                    <swe:value>{cord_y}</swe:value>
                </swe:Quantity>
            </swe:coordinate>
            <swe:coordinate name="altitude">
                <swe:Quantity axisID="z">
                    <swe:uom code="m" />
                    <swe:uom code="m"/>
                    <swe:value>{height}</swe:value>
                </swe:Quantity>
            </swe:coordinate>

View file

@@ -111,8 +111,8 @@
    <swes:observableProperty>http://www.opengis.net/def/property/humanVisualPerception</swes:observableProperty>
    <swes:metadata>
        <sos:SosInsertionMetadata>
            <sos:observationType>http://www.opengis.net/def/observationType/OGCOM/2.0/OM_CategoryObservation</sos:observationType>
            <sos:featureOfInterestType>http://www.opengis.net/def/samplingFeatureType/OGCOM/2.0/SF_SamplingPoint</sos:featureOfInterestType>
            <sos:observationType>http://www.opengis.net/def/observationType/OGC-OM/2.0/OM_CategoryObservation</sos:observationType>
            <sos:featureOfInterestType>http://www.opengis.net/def/samplingFeatureType/OGC-OM/2.0/SF_SamplingPoint</sos:featureOfInterestType>
        </sos:SosInsertionMetadata>
    </swes:metadata>
</swes:InsertSensor>

File diff suppressed because one or more lines are too long

View file

@@ -0,0 +1,165 @@
'''
Sqlalchemy version: 1.2.15
Python version: 3.7
'''
import os
import uuid
from datetime import datetime
from sqlalchemy.orm import session
from sqlalchemy import asc, desc
from exif import Image
from db.models import (
    create_pg_session, Observation,
    Dataset, Procedure, Phenomenon, Platform, Format)


def main():
    ''' main method '''
    pg_session: session = create_pg_session()
    platform_sta_identifier = "pechgraben_images"
    sensor = "camera2"

    pg_query = pg_session.query(Dataset) \
        .join(Procedure) \
        .join(Phenomenon) \
        .filter(Procedure.sta_identifier == sensor.lower())
    visual_perception_dataset: Dataset = pg_query.filter(
        Phenomenon.sta_identifier == "HumanVisualPerception").first()
    if not visual_perception_dataset:
        print("Sensor " + sensor + " has not been created yet!")
        exit()
    if not visual_perception_dataset.is_published:
        visual_perception_dataset.is_published = 1
        visual_perception_dataset.is_hidden = 0
        visual_perception_dataset.dataset_type = "timeseries"
        visual_perception_dataset.observation_type = "simple"
        visual_perception_dataset.value_type = "text"
        pg_session.commit()

    platform_exists: bool = pg_session.query(Platform.id).filter_by(
        sta_identifier=platform_sta_identifier).scalar() is not None
    if platform_exists:
        sensor_platform = pg_session.query(Platform.id) \
            .filter(Platform.sta_identifier == platform_sta_identifier) \
            .first()
        visual_perception_dataset.fk_platform_id = sensor_platform.id

    format_exists: bool = pg_session.query(Format.id).filter_by(
        definition="http://www.opengis.net/def/observationType/OGC-OM/2.0/OM_TextObservation"
    ).scalar() is not None
    if format_exists:
        sensor_format = pg_session.query(Format.id) \
            .filter(Format.definition == "http://www.opengis.net/def/observationType/OGC-OM/2.0/OM_TextObservation") \
            .first()
        visual_perception_dataset.fk_format_id = sensor_format.id

    # import all the images for the given sensor name
    import_images(visual_perception_dataset, pg_session)

    # save first and last values of all the observations
    first_observation: Observation = pg_session.query(Observation) \
        .filter(Observation.fk_dataset_id == visual_perception_dataset.id) \
        .order_by(asc('sampling_time_start')) \
        .first()
    if first_observation is not None:
        visual_perception_dataset.first_time = first_observation.sampling_time_start
        # visual_perception_dataset.first_value = first_observation.value_quantity
        visual_perception_dataset.fk_first_observation_id = first_observation.id

    last_observation: Observation = pg_session.query(Observation) \
        .filter(Observation.fk_dataset_id == visual_perception_dataset.id) \
        .order_by(desc('sampling_time_start')) \
        .first()
    if last_observation is not None:
        visual_perception_dataset.last_time = last_observation.sampling_time_start
        # visual_perception_dataset.last_value = last_observation.value_quantity
        visual_perception_dataset.fk_last_observation_id = last_observation.id

    pg_session.commit()
    pg_session.close()


def import_images(dataset: Dataset, pg_session):
    ''' import every image in the folder as an observation of the dataset '''
    folder_path = 'C:/Users/kaiarn/Documents/Fotos'
    # img_filename = '_DSC9548.JPG'
    # img_path = f'{folder_path}/{img_filename}'
    # Get the list of image files in the directory that the exif library supports
    directory = os.listdir(folder_path)
    for file_name in directory:
        if file_name.endswith(('jpg', 'JPG', 'png', 'PNG', 'tiff', 'TIFF')):
            file_path = os.path.join(folder_path, file_name)
            # print(file_path)
            img_file = open(file_path, 'rb')
            img: Image = Image(img_file)
            if img.has_exif:
                info = f" has the EXIF {img.exif_version}"
            else:
                info = "does not contain any EXIF information"
            # print(f"Image {img_file.name}: {info}")

            # Original datetime that the image was taken (photographed)
            # print(f'DateTime (Original): {img.get("datetime_original")}')
            datetime_original = img.get("datetime_original")
            # Grab the date
            date_obj = datetime.strptime(
                datetime_original, '%Y:%m:%d %H:%M:%S')
            # print(date_obj)
            create_observation(dataset, date_obj, file_name)
    pg_session.commit()


def create_observation(dataset: Dataset, datetime_original, file_name):
    """
    Create a new observation for the given dataset
    based on the capture time and file name of an image
    :param dataset: dataset the new observation is appended to
    :param datetime_original: EXIF capture time of the image
    :param file_name: image file name, used to build the observation URL
    """
    # build a new Observation ORM object
    new_observation: Observation = Observation()
    # new_observation.id = max_id
    new_observation.sta_identifier = str(uuid.uuid4())
    new_observation.result_time = datetime_original
    new_observation.sampling_time_start = new_observation.result_time
    new_observation.sampling_time_end = new_observation.result_time
    new_observation.value_type = "text"
    new_observation.value_text = "https://geomon.geologie.ac.at/images/" + file_name
    new_observation.fk_dataset_id = dataset.id
    # Add the observation to the dataset; it is persisted on session commit
    dataset.observations.append(new_observation)
    # db_session.commit()


if __name__ == "__main__":
    # load_dotenv(find_dotenv())
    # print('sensors: {}'.format(os.environ.get(
    #     'GLASFASER_GSCHLIEFGRABEN_SENSORS', [])))
    main()

    # print(img.list_all())
    # print(img.has_exif)
    # # Make of device which captured image: NIKON CORPORATION
    # print(f'Make: {img.get("make")}')
    # # Model of device: NIKON D7000
    # print(f'Model: {img.get("model")}')
    # # Software involved in uploading and digitizing image: Ver.1.04
    # print(f'Software: {img.get("software")}')
    # # Name of photographer who took the image: not defined
    # print(f'Artist: {img.get("artist")}')
    # # Original datetime that image was taken (photographed)
    # print(f'DateTime (Original): {img.get("datetime_original")}')
    # # Details of flash function
    # print(f'Flash Details: {img.get("flash")}')

    # print(f"Coordinates - Image")
    # print("---------------------")
    # print(f"Latitude: {img.copyright} {img.get('gps_latitude_ref')}")
    # print(f"Longitude: {img.get('gps_longitude')} {img.get('gps_longitude_ref')}\n")

View file

@@ -1,78 +0,0 @@
'''
Sqlalchemy version: 1.2.15
Python version: 3.7
'''
import os
from datetime import datetime
from exif import Image


def main():
    ''' main method '''
    folder_path = 'C:/Users/kaiarn/Documents/Fotos'
    # img_filename = '_DSC9548.JPG'
    # img_path = f'{folder_path}/{img_filename}'
    # Get the list of image files in the directory that exifread supports
    directory = os.listdir(folder_path)
    for files in directory:
        if files.endswith(('jpg', 'JPG', 'png', 'PNG', 'tiff', 'TIFF')):
            file_path = os.path.join(folder_path, files)
            # print(file_path)
            img_file = open(file_path, 'rb')
            img: Image = Image(img_file)
            if img.has_exif:
                info = f" has the EXIF {img.exif_version}"
            else:
                info = "does not contain any EXIF information"
            print(f"Image {img_file.name}: {info}")

            # Original datetime that image was taken (photographed)
            # print(f'DateTime (Original): {img.get("datetime_original")}')
            datetime_original = img.get("datetime_original")
            # print(datetime_original)
            # Grab the date
            date_obj = datetime.strptime(
                datetime_original, '%Y:%m:%d %H:%M:%S')
            print(date_obj)
            # print(f"Longitude: {img.get('gps_longitude')} {img.get('gps_longitude_ref')}\n")

    # with open(img_path, 'rb') as img_file:
    #     img = Image(img_file)
    #     if img.has_exif:
    #         info = f" has the EXIF {img.exif_version}"
    #     else:
    #         info = "does not contain any EXIF information"
    #     print(f"Image {img_file.name}: {info}")

    #     print(img.list_all())
    #     print(img.has_exif)
    #     # Make of device which captured image: NIKON CORPORATION
    #     print(f'Make: {img.get("make")}')
    #     # Model of device: NIKON D7000
    #     print(f'Model: {img.get("model")}')
    #     # Software involved in uploading and digitizing image: Ver.1.04
    #     print(f'Software: {img.get("software")}')
    #     # Name of photographer who took the image: not defined
    #     print(f'Artist: {img.get("artist")}')
    #     # Original datetime that image was taken (photographed)
    #     print(f'DateTime (Original): {img.get("datetime_original")}')
    #     # Details of flash function
    #     print(f'Flash Details: {img.get("flash")}')

    #     print(f"Coordinates - Image")
    #     print("---------------------")
    #     print(f"Latitude: {img.copyright} {img.get('gps_latitude_ref')}")
    #     print(f"Longitude: {img.get('gps_longitude')} {img.get('gps_longitude_ref')}\n")


if __name__ == "__main__":
    # load_dotenv(find_dotenv())
    # print('sensors: {}'.format(os.environ.get(
    #     'GLASFASER_GSCHLIEFGRABEN_SENSORS', [])))
    main()