Core

DHyDAMO Core API

drr

DRRModel

Main data structure for the RR model in DflowFM. Contains subclasses for unpaved, paved, greenhouse and open water nodes and external forcings (seepage, precipitation, evaporation).

Source code in hydrolib/dhydamo/core/drr.py
class DRRModel:
    """Main data structure for RR-model in DflowFM. Contains subclasses
    for unpaved, paved,greehouse and open water nodes and external forcings (seepage, precipitation, evaporation)
    """

    def __init__(self):
        """Initialize RR instances and arrays"""
        self.d3b_parameters = {}

        self.unpaved = Unpaved(self)

        self.paved = Paved(self)

        self.greenhouse = Greenhouse(self)

        self.openwater = Openwater(self)

        self.external_forcings = ExternalForcings(self)

        self.dimr_path = ""

    @validate_arguments
    def read_raster(self, file: str | Path, static: bool = False) -> tuple:
        """
        Method to read a raster. All rasterio-supported types are accepted, plus IDF;
        in that case the iMod package is used to read the IDF raster (IDF is customary
        for MODFLOW/SIMGRO models).

        Parameters
        ----------
        file : str or Path
            Path to the raster file.
        static : bool, optional
            If True, no time information is deduced from the filename. Defaults to False.

        Returns
        -------
        tuple
            The raster grid and affine transform and, if static is False, the timestamp
            parsed from the filename.

        """
        if isinstance(file, str):
            filename = Path(file)
        else:
            filename = file

        if not static:
            time = pd.Timestamp(os.path.split(file)[1].split("_")[1].split(".")[0])

        dataset = rasterio.open(filename)
        affine = dataset.transform
        grid = dataset.read(1)

        if static:
            return grid, affine
        else:
            return grid, affine, time
__init__()

Initialize RR instances and arrays

Source code in hydrolib/dhydamo/core/drr.py
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
def __init__(self):
    """Initialize RR instances and arrays"""
    self.d3b_parameters = {}

    self.unpaved = Unpaved(self)

    self.paved = Paved(self)

    self.greenhouse = Greenhouse(self)

    self.openwater = Openwater(self)

    self.external_forcings = ExternalForcings(self)

    self.dimr_path = ""
read_raster(file: str | Path, static: bool = False) -> tuple

Method to read a raster. All rasterio-supported types are accepted, plus IDF; in that case the iMod package is used to read the IDF raster (IDF is customary for MODFLOW/SIMGRO models).

Parameters:

    file (str | Path): path to the raster file [required]
    static (bool, optional): if True, no time information is deduced from the filename. Defaults to False.

Returns:

    The raster grid and affine transform and, if static is False, the timestamp parsed from the filename.

Source code in hydrolib/dhydamo/core/drr.py
@validate_arguments
def read_raster(self, file: str | Path, static: bool = False) -> tuple:
    """
    Method to read a raster. All rasterio-supported types are accepted, plus IDF;
    in that case the iMod package is used to read the IDF raster (IDF is customary
    for MODFLOW/SIMGRO models).

    Parameters
    ----------
    file : str or Path
        Path to the raster file.
    static : bool, optional
        If True, no time information is deduced from the filename. Defaults to False.

    Returns
    -------
    tuple
        The raster grid and affine transform and, if static is False, the timestamp
        parsed from the filename.

    """
    if isinstance(file, str):
        filename = Path(file)
    else:
        filename = file

    if not static:
        time = pd.Timestamp(os.path.split(file)[1].split("_")[1].split(".")[0])

    dataset = rasterio.open(filename)
    affine = dataset.transform
    grid = dataset.read(1)

    if static:
        return grid, affine
    else:
        return grid, affine, time
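
A short sketch of both call modes (the file names are hypothetical; a non-static raster is expected to carry a timestamp between the first underscore and the extension, as parsed in the source above):

# Static raster: two return values
grid, affine = drr.read_raster("soiltype.tif", static=True)

# Time-dependent raster: the timestamp is parsed from the file name
grid, affine, time = drr.read_raster("precip_20200101.tif")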

ExternalForcings

Class for external forcings, which contains the boundary conditions and the initial conditions.

Source code in hydrolib/dhydamo/core/drr.py
class ExternalForcings:
    """
    Class for external forcings, which contains the boundary
    conditions and the initial conditions.
    """

    def __init__(self, drrmodel):
        # Point to relevant attributes from parent
        self.drrmodel = drrmodel
        self.io = drrreader.ExternalForcingsIO(self)

        self.boundary_nodes = {}
        self.seepage = {}
        self.precip = {}
        self.evap = {}

    @validate_arguments(config=ConfigDict(arbitrary_types_allowed=True))
    def add_precip(self, id: str, series: pd.Series):
        self.precip[id] = {"precip": series}

    @validate_arguments(config=ConfigDict(arbitrary_types_allowed=True))
    def add_evap(self, id: str, series: pd.Series):
        self.evap[id] = {"evap": series}

    @validate_arguments(config=ConfigDict(arbitrary_types_allowed=True))
    def add_seepage(self, id: str, series: pd.Series):
        self.seepage[id] = {"seepage": series}

    @validate_arguments(config=ConfigDict(arbitrary_types_allowed=True))
    def add_boundary_node(self, id: str, px: str, py: str):
        self.boundary_nodes[id] = {"id": id, "px": px, "py": py}
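
A brief sketch of registering forcings (the ids and values are placeholders; each series is a pandas Series indexed by time):

import pandas as pd

index = pd.date_range("2020-01-01", periods=24, freq="h")
drr.external_forcings.add_precip(id="area1", series=pd.Series(0.5, index=index))
drr.external_forcings.add_evap(id="area1", series=pd.Series(0.1, index=index))
drr.external_forcings.add_seepage(id="cat01", series=pd.Series(0.0, index=index))
drr.external_forcings.add_boundary_node(id="bnd_cat01", px="140000", py="455000")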

Greenhouse

Class for greenhouse nodes

Source code in hydrolib/dhydamo/core/drr.py
class Greenhouse:
    """
    Class for greenhouse nodes
    """

    def __init__(self, drrmodel):
        self.drrmodel = drrmodel
        self.gh_nodes = {}

        # Create the io class
        self.io = drrreader.GreenhouseIO(self)

    #    GRHS id ’1’ na 10 ar 1000. 0. 0. 3000. 0. 0. 0. 0. 0. 0. sl 1.0 as 0. sd ’roofstor 1mm’ si
    #    ’silo typ1’ ms ’meteostat1’ is 50.0 grhs
    @validate_arguments
    def add_greenhouse(
        self,
        id: str,
        area: str,
        surface_level: str,
        roof_storage: str,
        basin_storage_class: str,
        meteo_area: str,
        px: str,
        py: str,
        boundary_node: str,
    ) -> None:
        """Add elements of a greenhouse node definition to a dataframe

        Args:
            id (str): catchment id
            area (str): greenhouse node area (m2)
            surface_level (str): surface level (m)
            roof_storage (str): roof storage (mm)
            basin_storage_class (str): id of the basin storage class definition
            meteo_area (str): id of meteo area to which a station in the meteo-file is assigned
            px (str): x-coordinate
            py (str): y-coordinate
            boundary_node (str): associated boundary node
        """
        self.gh_nodes[id] = {
            "id": "gh_" + id,
            "ar": area,
            "sl": surface_level,
            "sd": roof_storage,
            "ms": "ms_" + meteo_area,
            "is": "0",
            "px": px,
            "py": py,
            "boundary_node": boundary_node,
            "basin_storage_class": basin_storage_class,
        }
add_greenhouse(id: str, area: str, surface_level: str, roof_storage: str, basin_storage_class: str, meteo_area: str, px: str, py: str, boundary_node: str) -> None

Add elements of a greenhouse node definition to a dataframe

Parameters:

    id (str): catchment id [required]
    area (str): greenhouse node area (m2) [required]
    surface_level (str): surface level (m) [required]
    roof_storage (str): roof storage (mm) [required]
    basin_storage_class (str): id of the basin storage class definition [required]
    meteo_area (str): id of meteo area to which a station in the meteo-file is assigned [required]
    px (str): x-coordinate [required]
    py (str): y-coordinate [required]
    boundary_node (str): associated boundary node [required]

Source code in hydrolib/dhydamo/core/drr.py
@validate_arguments
def add_greenhouse(
    self,
    id: str,
    area: str,
    surface_level: str,
    roof_storage: str,
    basin_storage_class: str,
    meteo_area: str,
    px: str,
    py: str,
    boundary_node: str,
) -> None:
    """Add elements of a greenhouse node definition to a dataframe

    Args:
        id (str): catchment id
        area (str): greenhouse node area (m2)
        surface_level (str): surface level (m)
        roof_storage (str): roof storage (mm)
        basin_storage_class (str): id of the basin storage class definition
        meteo_area (str): id of meteo area to which a station in the meteo-file is assigned
        px (str): x-coordinate
        py (str): y-coordinate
        boundary_node (str): associated boundary node
    """
    self.gh_nodes[id] = {
        "id": "gh_" + id,
        "ar": area,
        "sl": surface_level,
        "sd": roof_storage,
        "ms": "ms_" + meteo_area,
        "is": "0",
        "px": px,
        "py": py,
        "boundary_node": boundary_node,
        "basin_storage_class": basin_storage_class,
    }
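
A usage sketch with placeholder values; note that all arguments are passed as strings, matching the validated signature, and that the "gh_" and "ms_" prefixes are added internally:

drr.greenhouse.add_greenhouse(
    id="cat01",
    area="1000.0",
    surface_level="1.2",
    roof_storage="1.0",
    basin_storage_class="silo typ1",  # placeholder class id, cf. the commented example above
    meteo_area="area1",
    px="140000",
    py="455000",
    boundary_node="bnd_cat01",
)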

Openwater

Class for open water nodes

Source code in hydrolib/dhydamo/core/drr.py
class Openwater:
    """
    Class for open water nodes
    """

    def __init__(self, drrmodel):
        self.drrmodel = drrmodel
        self.ow_nodes = {}

        # Create the io class
        self.io = drrreader.OpenwaterIO(self)

    @validate_arguments
    def add_openwater(
        self, id: str, area: str, meteo_area: str, px: str, py: str, boundary_node: str
    ) -> None:
        """Add elements of an open water node definition to a dataframe

        Args:
            id (str): catchment id
            area (str): open water node area (m2)
            meteo_area (str): id of meteo area to which a station in the meteo-file is assigned
            px (str): x-coordinate
            py (str): y-coordinate
            boundary_node (str): associated boundary node
        """
        self.ow_nodes[id] = {
            "id": "ow_" + id,
            "ar": area,
            "ms": "ms_" + meteo_area,
            "px": px,
            "py": py,
            "boundary_node": boundary_node,
        }
add_openwater(id: str, area: str, meteo_area: str, px: str, py: str, boundary_node: str) -> None

Add elements of an open water node definition to a dataframe

Parameters:

    id (str): catchment id [required]
    area (str): open water node area (m2) [required]
    meteo_area (str): id of meteo area to which a station in the meteo-file is assigned [required]
    px (str): x-coordinate [required]
    py (str): y-coordinate [required]
    boundary_node (str): associated boundary node [required]

Source code in hydrolib/dhydamo/core/drr.py
@validate_arguments
def add_openwater(
    self, id: str, area: str, meteo_area: str, px: str, py: str, boundary_node: str
) -> None:
    """Add elements of an open water node definition to a dataframe

    Args:
        id (str): catchment id
        area (str): open water node area (m2)
        meteo_area (str): id of meteo area to which a station in the meteo-file is assigned
        px (str): x-coordinate
        py (str): y-coordinate
        boundary_node (str): associated boundary node
    """
    self.ow_nodes[id] = {
        "id": "ow_" + id,
        "ar": area,
        "ms": "ms_" + meteo_area,
        "px": px,
        "py": py,
        "boundary_node": boundary_node,
    }
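
A usage sketch with placeholder values (the "ow_" and "ms_" prefixes are added internally):

drr.openwater.add_openwater(
    id="cat01",
    area="2500.0",
    meteo_area="area1",
    px="140000",
    py="455000",
    boundary_node="bnd_cat01",
)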

Paved

Class for paved nodes.

Source code in hydrolib/dhydamo/core/drr.py
class Paved:
    """
    Class for paved nodes.
    """

    def __init__(self, drrmodel):
        # Point to relevant attributes from parent
        self.drrmodel = drrmodel
        self.pav_nodes = {}

        # Create the io class
        self.io = drrreader.PavedIO(self)

        self.node_geom = {}
        self.link_geom = {}

    # PAVE id 'pav_Nde_n003' ar 16200 lv 1 sd '1' ss 0 qc 0 1.94E-05 0 qo 2 2 ms 'Station1' aaf 1 is 0 np 0 dw '1' ro 0 ru 0 qh '' pave#
    @validate_arguments
    def add_paved(
        self,
        id: str,
        area: str,
        surface_level: str,
        street_storage: str,
        sewer_storage: str,
        pump_capacity: str,
        meteo_area: str,
        px: str,
        py: str,
        boundary_node: str,
    ) -> None:
        """Add elements of a paved node definition to a dataframe

        Args:
            id (str): catchment id
            area (str): paved node area (m2)
            surface_level (str): surface level (m)
            street_storage (str): street storage (mm)
            sewer_storage (str): sewer storage (mm)
            pump_capacity (str): pump capacity (mm/d)
            meteo_area (str): id of meteo area to which a station in the meteo-file is assigned
            px (str): x-coordinate
            py (str): y-coordinate
            boundary_node (str): associated boundary node
        """

        self.pav_nodes[id] = {
            "id": "pav_" + id,
            "ar": area,
            "lv": surface_level,
            "qc": pump_capacity,
            "strs": street_storage,
            "sews": sewer_storage,
            "ms": "ms_" + meteo_area,
            "is": "0",
            "np": "0",
            "ro": "0",
            "ru": "0",
            "px": px,
            "py": py,
            "boundary_node": boundary_node,
        }
add_paved(id: str, area: str, surface_level: str, street_storage: str, sewer_storage: str, pump_capacity: str, meteo_area: str, px: str, py: str, boundary_node: str) -> None

Add elements of a paved node definition to a dataframe

Parameters:

    id (str): catchment id [required]
    area (str): paved node area (m2) [required]
    surface_level (str): surface level (m) [required]
    street_storage (str): street storage (mm) [required]
    sewer_storage (str): sewer storage (mm) [required]
    pump_capacity (str): pump capacity (mm/d) [required]
    meteo_area (str): id of meteo area to which a station in the meteo-file is assigned [required]
    px (str): x-coordinate [required]
    py (str): y-coordinate [required]
    boundary_node (str): associated boundary node [required]

Source code in hydrolib/dhydamo/core/drr.py
@validate_arguments
def add_paved(
    self,
    id: str,
    area: str,
    surface_level: str,
    street_storage: str,
    sewer_storage: str,
    pump_capacity: str,
    meteo_area: str,
    px: str,
    py: str,
    boundary_node: str,
) -> None:
    """Add elements of a paved node definition to a dataframe

    Args:
        id (str): catchment id
        area (str): paved node area (m2)
        surface_level (str): surface level (m)
        street_storage (str): street storage (mm)
        sewer_storage (str): sewer storage (mm)
        pump_capacity (str): pump capacity (mm/d)
        meteo_area (str): id of meteo area to which a station in the meteo-file is assigned
        px (str): x-coordinate
        py (str): y-coordinate
        boundary_node (str): associated boundary node
    """

    self.pav_nodes[id] = {
        "id": "pav_" + id,
        "ar": area,
        "lv": surface_level,
        "qc": pump_capacity,
        "strs": street_storage,
        "sews": sewer_storage,
        "ms": "ms_" + meteo_area,
        "is": "0",
        "np": "0",
        "ro": "0",
        "ru": "0",
        "px": px,
        "py": py,
        "boundary_node": boundary_node,
    }
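
A usage sketch with placeholder values (all arguments are strings; the "pav_" and "ms_" prefixes are added internally):

drr.paved.add_paved(
    id="cat01",
    area="16200.0",
    surface_level="1.0",
    street_storage="1.0",
    sewer_storage="7.0",
    pump_capacity="0.7",
    meteo_area="area1",
    px="140000",
    py="455000",
    boundary_node="bnd_cat01",
)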

Unpaved

Class for unpaved nodes

Source code in hydrolib/dhydamo/core/drr.py
class Unpaved:
    """
    Class for unpaved nodes
    """

    def __init__(self, drrmodel):
        # Point to relevant attributes from parent
        self.drrmodel = drrmodel

        # initialize a dataframe for every type of nodes related to 'unpaved'
        self.unp_nodes = {}
        self.ernst_defs = {}
        # couple input class
        self.io = drrreader.UnpavedIO(self)

    @validate_arguments
    def add_unpaved(
        self,
        id: str,
        total_area: str,
        lu_areas: str,
        surface_level: str,
        soiltype: str,
        surface_storage: str,
        infiltration_capacity: str,
        initial_gwd: str,
        meteo_area: str,
        px: str,
        py: str,
        boundary_node: str,
    ) -> None:
        """Add elements of an unpaved node definition to a dataframe

        Args:
            id (str): catchment id
            total_area (str): total node area (m2)
            lu_areas (str): area per land use class (space separated; m2)
            surface_level (str): surface level (m)
            soiltype (str): soiltype (class id)
            surface_storage (str): surface storage (mm)
            infiltration_capacity (str): infiltration capacity (mm/d)
            initial_gwd (str): initial groundwater depth below surface (m, positive = below surface)
            meteo_area (str): id of meteo area to which a station in the meteo-file is assigned
            px (str): x-coordinate
            py (str): y-coordinate
            boundary_node (str): associated boundary node
        """
        self.unp_nodes[id] = {
            "id": "unp_" + id,
            "na": "16",
            "ar": lu_areas,
            "ga": total_area,
            "lv": surface_level,
            "co": "3",
            "su": "0",
            "sd": surface_storage,
            "sp": "sep_" + id,
            "ic": infiltration_capacity,
            "ed": "ernst_" + id,
            "bt": soiltype,
            "ig": initial_gwd,
            "mg": surface_level,
            "gl": "1.5",
            "is": "0",
            "ms": "ms_" + meteo_area,
            "px": px,
            "py": py,
            "boundary_node": boundary_node,
        }

    @validate_arguments
    def add_ernst_def(self, id: str, cvo: str, lv: str, cvi: str, cvs: str) -> None:
        """Add properties to a datafframe containing an Ernst definition.

        Args:
            id (str): catchment id
            cvo (str): Drainage resistance, space separated value per layer [d-1]
            lv (str): Layer thickness, space separated value per layer [m]
            cvi (str): Infiltration resistance [d-1]
            cvs (str): Surface runoff resistance [d-1]
        """
        self.ernst_defs[id] = {
            "id": "ernst_" + id,
            "cvi": cvi,
            "cvs": cvs,
            "cvo": cvo,
            "lv": lv,
        }
add_ernst_def(id: str, cvo: str, lv: str, cvi: str, cvs: str) -> None

Add properties to a dataframe containing an Ernst definition.

Parameters:

    id (str): catchment id [required]
    cvo (str): drainage resistance, space separated value per layer [d-1] [required]
    lv (str): layer thickness, space separated value per layer [m] [required]
    cvi (str): infiltration resistance [d-1] [required]
    cvs (str): surface runoff resistance [d-1] [required]

Source code in hydrolib/dhydamo/core/drr.py
@validate_arguments
def add_ernst_def(self, id: str, cvo: str, lv: str, cvi: str, cvs: str) -> None:
    """Add properties to a datafframe containing an Ernst definition.

    Args:
        id (str): catchment id
        cvo (str): Drainage resistance, space separated value per layer [d-1]
        lv (str): Layer thickness, space separated value per layer [m]
        cvi (str): Infiltration resistance [d-1]
        cvs (str): Surface runoff resistance [d-1]
    """
    self.ernst_defs[id] = {
        "id": "ernst_" + id,
        "cvi": cvi,
        "cvs": cvs,
        "cvo": cvo,
        "lv": lv,
    }
add_unpaved(id: str, total_area: str, lu_areas: str, surface_level: str, soiltype: str, surface_storage: str, infiltration_capacity: str, initial_gwd: str, meteo_area: str, px: str, py: str, boundary_node: str) -> None

Add elements of an unpaved node definition to a dataframe

Parameters:

    id (str): catchment id [required]
    total_area (str): total node area (m2) [required]
    lu_areas (str): area per land use class (space separated; m2) [required]
    surface_level (str): surface level (m) [required]
    soiltype (str): soiltype (class id) [required]
    surface_storage (str): surface storage (mm) [required]
    infiltration_capacity (str): infiltration capacity (mm/d) [required]
    initial_gwd (str): initial groundwater depth below surface (m, positive = below surface) [required]
    meteo_area (str): id of meteo area to which a station in the meteo-file is assigned [required]
    px (str): x-coordinate [required]
    py (str): y-coordinate [required]
    boundary_node (str): associated boundary node [required]

Source code in hydrolib/dhydamo/core/drr.py
@validate_arguments
def add_unpaved(
    self,
    id: str,
    total_area: str,
    lu_areas: str,
    surface_level: str,
    soiltype: str,
    surface_storage: str,
    infiltration_capacity: str,
    initial_gwd: str,
    meteo_area: str,
    px: str,
    py: str,
    boundary_node: str,
) -> None:
    """Add elements of an unpaved node definition to a dataframe

    Args:
        id (str): catchment id
        total_area (str): total node area (m2)
        lu_areas (str): area per land use class (space separated; m2)
        surface_level (str): surface level (m)
        soiltype (str): soiltype (class id)
        surface_storage (str): surface storage (mm)
        infiltration_capacity (str): infiltration capacity (mm/d)
        initial_gwd (str): initial groundwater depth below surface (m, positive = below surface)
        meteo_area (str): id of meteo area to which a station in the meteo-file is assigned
        px (str): x-coordinate
        py (str): y-coordinate
        boundary_node (str): associated boundary node
    """
    self.unp_nodes[id] = {
        "id": "unp_" + id,
        "na": "16",
        "ar": lu_areas,
        "ga": total_area,
        "lv": surface_level,
        "co": "3",
        "su": "0",
        "sd": surface_storage,
        "sp": "sep_" + id,
        "ic": infiltration_capacity,
        "ed": "ernst_" + id,
        "bt": soiltype,
        "ig": initial_gwd,
        "mg": surface_level,
        "gl": "1.5",
        "is": "0",
        "ms": "ms_" + meteo_area,
        "px": px,
        "py": py,
        "boundary_node": boundary_node,
    }
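
A combined usage sketch with placeholder values. The node definition assumes 16 land use classes (na = "16"), so lu_areas carries 16 space-separated areas, and every unpaved node refers to an Ernst definition with the same catchment id (ed = "ernst_" + id):

drr.unpaved.add_unpaved(
    id="cat01",
    total_area="100000.0",
    lu_areas=" ".join(["6250.0"] * 16),  # one area per land use class
    surface_level="1.0",
    soiltype="101",
    surface_storage="2.0",
    infiltration_capacity="100.0",
    initial_gwd="0.8",
    meteo_area="area1",
    px="140000",
    py="455000",
    boundary_node="bnd_cat01",
)
drr.unpaved.add_ernst_def(
    id="cat01",
    cvo="90 90",   # per-layer drainage resistance
    lv="1.0 2.0",  # per-layer thickness
    cvi="300",
    cvs="0.2",
)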

drtc

DRTCModel

Main class to generate RTC-module files.
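
A minimal construction sketch (assumes an existing HyDAMO object and a hydrolib-core FMModel built elsewhere; the output path is a placeholder):

from hydrolib.dhydamo.core.drtc import DRTCModel

drtc = DRTCModel(
    hydamo,                      # HyDAMO data structure
    fm,                          # FMModel providing refdate and tstop
    output_path="model_output",  # an "rtc" subfolder is created here
    rtc_timestep=60,
)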

Source code in hydrolib/dhydamo/core/drtc.py
class DRTCModel:
    """Main class to generate RTC-module files."""

    @validate_arguments(config=ConfigDict(arbitrary_types_allowed=True))
    def __init__(
        self,
        hydamo: HyDAMO,
        fm: FMModel,
        output_path: str | Path | None = None,
        rtc_onlytimeseries: bool = False,
        rtc_timeseriesdata: pd.DataFrame | None = None,
        complex_controllers_folder: list[str | Path] | str | Path | None = None,
        id_limit_complex_controllers: list[str] | None = None,
        rtc_timestep: int | float = 60,
    ) -> None:
        """Initialize the DRTCModel.

        Internal controller dictionaries and output paths are initialized, optional
        complex-controller XML is parsed, and RTC template files are copied to the
        output folder.

        Args:
            hydamo (instance of HyDAMO): data structure containing the HyDAMO DAMO2.2 data
            fm (instance of FMModel): model structure set up for hydrolib-core
            output_path (str or Path, optional): base path where an `rtc` subfolder
                is created for generated RTC files. Defaults to the current working
                directory.
            rtc_onlytimeseries (bool): if True, build RTC control from
                `rtc_timeseriesdata` only. If True, `complex_controllers_folder` is
                ignored. Defaults to False.
            rtc_timeseriesdata (pd.DataFrame, optional): time series data used when
                `rtc_onlytimeseries=True`. Column names are expected to match
                structure IDs. Defaults to None.
            complex_controllers_folder (list[Path or str] or Path or str, optional):
                folder(s) with custom RTC XML files to import when
                `rtc_onlytimeseries=False`. Defaults to None.
            id_limit_complex_controllers (list[str], optional): whitelist of IDs that
                may be coupled to complex controller logic. Required when
                `complex_controllers_folder` is provided. An empty list means no IDs
                are allowed.
            rtc_timestep (Union[int, float], optional): Time step of the RTC model.
                Defaults to 60 seconds.
        """
        self.hydamo = hydamo

        self.time_settings = {
            "start": pd.to_datetime(fm.time.refdate, format="%Y%m%d"),
            "end": pd.to_datetime(fm.time.refdate, format="%Y%m%d")
            + pd.to_timedelta(fm.time.tstop, unit="s"),
            "step": rtc_timestep,
        }

        self.pid_controllers = {}
        self.time_controllers = {}
        self.interval_controllers = {}

        # set up the output path
        base_output_path = Path(".") if output_path is None else Path(output_path)
        self.output_path = base_output_path / "rtc"
        self.output_path.mkdir(parents=True, exist_ok=True)

        # Save object id by type
        self.struct_ids_by_type = DRTCModel._get_struct_ids_by_type(self.hydamo)

        # parse user-provided controllers
        self.complex_controllers = None
        self.cc_structs = None
        self.cc_ids = None
        self.cc_id_limit = None
        if rtc_onlytimeseries and complex_controllers_folder is not None:
            # User supplied controllers in timeseries_only mode, emit warning
            logger.warning(
                "`complex_controllers_folder` is ignored because `rtc_onlytimeseries=True`. "
                "Set `rtc_onlytimeseries=False` to enable complex controllers."
            )
        elif not rtc_onlytimeseries and complex_controllers_folder is not None:
            if id_limit_complex_controllers is None:
                # When complex_controllers_folder is supplied, the whitelist
                # needs to be supplied as well
                raise SyntaxError(
                    "Missing required `id_limit_complex_controllers` while "
                    "`complex_controllers_folder` is provided. Supply a list of "
                    "allowed IDs to couple to complex controller logic."
                )

            # Discover all complex controller related structures and id's
            self.cc_structs, self.cc_ids = self._load_complex_controller_structs(
                complex_controllers_folder,
                self.struct_ids_by_type,
                log_validation=True,
            )
            logger.info(
                "Found %d complex controller structures referenced in XML: %s",
                len(self.cc_structs),
                self.cc_ids,
            )

            # Save whitelist of allowed controller ids.
            self.cc_id_limit = set(id_limit_complex_controllers)
            if len(self.cc_id_limit) == 0:
                logger.warning(
                    "`id_limit_complex_controllers` is empty. No IDs are allowed, "
                    "so all complex controller references will be filtered out."
                )
            else:
                logger.info(
                    "Applying complex controller ID filter with %d allowed IDs: %s",
                    len(self.cc_id_limit),
                    self.cc_id_limit,
                )

            # Load complex controllers
            self.complex_controllers = self._load_complex_controllers(complex_controllers_folder)

        # copy files from the template RTC-folder
        self.template_dir = Path(__file__).resolve().parent / ".." / "resources" / "RTC"

        generic_files = [p for p in self.template_dir.iterdir() if p.suffix in {".xsd", ".json"}]
        for filepath in generic_files:
            shutil.copy(filepath, self.output_path / filepath.name)

        if rtc_onlytimeseries:
            for name, data in rtc_timeseriesdata.items():
                if name in hydamo.structures.rweirs_df.id.to_list():
                    steering_var = "Crest level (s)"
                elif name in hydamo.structures.orifices_df.id.to_list():
                    steering_var = "Gate lower edge level (s)"
                elif name in hydamo.structures.pumps_df.id.to_list():
                    steering_var = "Capacity (p)"
                else:
                    # No matching structure: skip instead of hitting a NameError
                    # on an unbound steering_var below.
                    logger.warning(
                        f"No weir, orifice or pump found for timeseries column '{name}'; skipping."
                    )
                    continue
                self.add_time_controller(
                    structure_id=name, steering_variable=steering_var, data=data
                )
            self.check_timeseries(rtc_timeseriesdata)
            self.complex_controllers = None

    @validate_arguments
    def allow_struct(
        self,
        cc_id: str,
        allow_observations: bool = False,
        allow_if_filter_inactive: bool = True,
        allow_if_not_referenced: bool = False,
    ) -> bool:
        """Return whether a structure id is allowed by the complex-controller filter.

        Args:
            cc_id (str): Structure or observation id to evaluate.
            allow_observations (bool, optional): If True, pass through observation
                ids that exist in the HyDAMO model. Defaults to False.
            allow_if_filter_inactive (bool, optional): Return value when no complex
                controller filter is active (`cc_ids` or `cc_id_limit` is None).
                Defaults to True.
            allow_if_not_referenced (bool, optional): Return value when filtering is
                active, but `cc_id` is not part of `cc_ids`. Defaults to False.
        """
        if allow_observations and cc_id in self.struct_ids_by_type["observations"]:
            return True

        if self.cc_ids is None or self.cc_id_limit is None:
            return allow_if_filter_inactive

        if cc_id not in self.cc_ids:
            return allow_if_not_referenced

        return cc_id in self.cc_id_limit

    @validate_arguments
    def check_timeseries(self, timeseries):
        """Warn for every controlled structure in hydamo.management that has no column in the provided timeseries."""
        hydamo_controllers = self.hydamo.management[~self.hydamo.management.regelmiddelid.isna()].regelmiddelid
        for controller in hydamo_controllers:
            mandev = self.hydamo.management_device[self.hydamo.management_device.globalid == controller]
            if ~mandev.kunstwerkopeningid.isna().to_numpy()[0]:
                ko = self.hydamo.opening[self.hydamo.opening.globalid == mandev.kunstwerkopeningid.to_numpy()[0]]
                weir = self.hydamo.weirs[self.hydamo.weirs.globalid == ko.stuwid.to_numpy()[0]].code.to_numpy()[0]
                if weir not in timeseries.columns:
                    logger.warning(f"For {weir} a controller is defined in hydamo.management, but no timeseries is provided for it.")
            elif ~mandev.duikersifonhevelid.isna().to_numpy()[0]:
                # Take the scalar code; comparing a Series against the columns would silently evaluate to False
                dsh = self.hydamo.culvert[self.hydamo.culvert.globalid == mandev.duikersifonhevelid.to_numpy()[0]].code.to_numpy()[0]
                if dsh not in timeseries.columns:
                    logger.warning(f"For {dsh} a controller is defined in hydamo.management, but no timeseries is provided for it.")
            else:
                logger.warning(f"{mandev.code.to_numpy()[0]} is not associated with a weir opening or culvert.")
        hydamo_pumps = self.hydamo.management[~self.hydamo.management.pompid.isna()].pompid
        for pump in hydamo_pumps:
            pmp = self.hydamo.pumps[self.hydamo.pumps.globalid == pump].code.to_numpy()[0]
            if pmp not in timeseries.columns:
                logger.warning(f"For {pmp} a controller is defined in hydamo.management, but no timeseries is provided for it.")

    @validate_arguments
    def parse_complex_controller(
        self, xml_folder: Path | str
    ) -> dict[str, list[str | ET.Element]]:
        """Method to parse user-specified 'complex' controllers

        Args:
            xml_folder (Union[Path, str]): Folder where the user located the custom XML files

        Returns:
            dict: dict of list with the data in the files. Every key is a RTC-file, including the DIMR-config.
        """
        files = [p for p in Path(xml_folder).iterdir() if p.suffix == ".xml"]
        savedict = {
            "dataconfig_import": [],
            "dataconfig_export": [],
            "toolsconfig_rules": [],
            "toolsconfig_triggers": [],
            "timeseries": [],
            "state": [],
            "dimr_config": [],
        }

        handlers = {
            RTC_DATA_CONFIG_XML: self._parse_cc_rtc_dataconfig,
            RTC_TOOLS_CONFIG_XML: self._parse_cc_rtc_toolsconfig,
            TIMESERIES_IMPORT_XML: self._parse_cc_timeseries,
            STATE_IMPORT_XML: self._parse_cc_state,
            "dimr_config.xml": self._parse_cc_dimr_config,
        }

        for filepath in files:
            handler = handlers.get(filepath.name)
            if handler is None:
                continue
            root = ET.parse(filepath).getroot()
            savedict = handler(root, savedict)

        return savedict

    @validate_arguments(config=ConfigDict(arbitrary_types_allowed=True))
    def _parse_cc_rtc_dataconfig(
        self, root: ET.Element, savedict: dict[str, list[str | ET.Element]]
    ) -> dict[str, list[str | ET.Element]]:
        children = self._parse_unique_children(root)
        import_series = children.get("importSeries")
        if import_series is not None:
            for num, el in enumerate(import_series):
                xml_text = ET.tostring(el).decode()
                if "PITimeSeries" in xml_text and num == 0:
                    continue
                allow, el_text = self._parse_dataconfig_item(el)
                if not allow:
                    logger.info(
                        f"{RTC_DATA_CONFIG_XML}: Skipped importSeries item for elementId '%s' (not allowed by complex controller filter).",
                        el_text,
                    )
                    continue
                savedict["dataconfig_import"].append(xml_text)

        export_series = children.get("exportSeries")
        if export_series is not None:
            for el in export_series:
                xml_text = ET.tostring(el).decode()
                if "PITimeSeries" in xml_text or "CSVTimeSeries" in xml_text:
                    continue
                allow, el_text = self._parse_dataconfig_item(el)
                if not allow:
                    logger.info(
                        f"{RTC_DATA_CONFIG_XML}: Skipped exportSeries item for elementId '%s' (not allowed by complex controller filter).",
                        el_text,
                    )
                    continue
                savedict["dataconfig_export"].append(xml_text)

        return savedict

    @validate_arguments(config=ConfigDict(arbitrary_types_allowed=True))
    def _parse_cc_rtc_toolsconfig(
        self, root: ET.Element, savedict: dict[str, list[str | ET.Element]]
    ) -> dict[str, list[str | ET.Element]]:
        children = self._parse_unique_children(root)

        rules = children.get("rules")
        if rules is not None:
            for el in rules:
                allow, el_text = self._parse_toolsconfig_item(el)
                if not allow:
                    logger.info(
                        f"{RTC_TOOLS_CONFIG_XML}: Skipped rule element '%s' (not allowed by complex controller filter).",
                        el_text,
                    )
                    continue
                savedict["toolsconfig_rules"].append(ET.tostring(el).decode())

        triggers = children.get("triggers")
        if triggers is not None:
            for el in triggers:
                allow, el_text = self._parse_toolsconfig_item(el)
                if not allow:
                    logger.info(
                        f"{RTC_TOOLS_CONFIG_XML}: Skipped trigger element '%s' (not allowed by complex controller filter).",
                        el_text,
                    )
                    continue
                savedict["toolsconfig_triggers"].append(ET.tostring(el).decode())

        return savedict

    @validate_arguments(config=ConfigDict(arbitrary_types_allowed=True))
    def _parse_cc_timeseries(
        self, root: ET.Element, savedict: dict[str, list[str | ET.Element]]
    ) -> dict[str, list[str | ET.Element]]:
        for el in root:
            savedict["timeseries"].append(ET.tostring(el).decode())

        return savedict

    @validate_arguments(config=ConfigDict(arbitrary_types_allowed=True))
    def _parse_cc_state(
        self, root: ET.Element, savedict: dict[str, list[str | ET.Element]]
    ) -> dict[str, list[str | ET.Element]]:
        for el in root[0]:
            savedict["state"].append(ET.tostring(el).decode())

        return savedict

    @validate_arguments(config=ConfigDict(arbitrary_types_allowed=True))
    def _parse_cc_dimr_config(
        self, root: ET.Element, savedict: dict[str, list[str | ET.Element]]
    ) -> dict[str, list[str | ET.Element]]:
        red_root = copy.deepcopy(root)
        for coupler_name, coupler_target in (
            ("rtc_to_flow", "targetName"),
            ("flow_to_rtc", "sourceName"),
        ):
            self._filter_dimr_coupler_items(red_root, coupler_name, coupler_target)
        savedict["dimr_config"].append(red_root)

        return savedict

    @validate_arguments(config=ConfigDict(arbitrary_types_allowed=True))
    def _filter_dimr_coupler_items(
        self, red_root: ET.Element, coupler_name: str, coupler_target: str
    ) -> None:
        for coupler in list(red_root):
            if coupler.attrib.get("name") != coupler_name:
                continue
            for sub_el in list(coupler):
                target = sub_el.find(".//{*}" + coupler_target)
                allow, el_text = self._parse_dimr_item(target)
                if allow:
                    continue
                logger.info(
                    "dimr_config.xml: Skipped %s element with '%s' '%s' (not allowed by complex controller filter).",
                    coupler_name,
                    coupler_target,
                    el_text,
                )
                coupler.remove(sub_el)

    @staticmethod
    @validate_arguments(config=ConfigDict(arbitrary_types_allowed=True))
    def find_complex_controller_ids(
        complex_controllers_folder: list[str | Path] | str | Path,
        hydamo: HyDAMO,
    ) -> set[str]:
        # Do not log validation warnings in this method
        struct_ids_by_type = DRTCModel._get_struct_ids_by_type(hydamo)
        cc_structs, _ = DRTCModel._load_complex_controller_structs(complex_controllers_folder, struct_ids_by_type, log_validation=False)
        # Do not return observation point IDs in this method
        cc_ids = set([cc.struct_name for cc in cc_structs if cc.struct_type != "observations"])

        return cc_ids


    @staticmethod
    @validate_arguments
    def _load_complex_controller_structs(
        complex_controllers_folder: list[str | Path] | str | Path,
        struct_ids_by_type: dict[str, set[str]],
        log_validation: bool = True,
    ) -> tuple[list[DRTCStructure], set[str]]:
        folders = DRTCModel._as_folder_list(complex_controllers_folder)
        cc_structs = DRTCModel._collect_complex_controller_structs(folders)
        cc_structs, cc_ids = DRTCModel._deduplicate_complex_controller_structs(cc_structs)
        cc_structs, cc_ids = DRTCModel._validate_complex_controller_structs(cc_structs, struct_ids_by_type, log_validation)

        return cc_structs, cc_ids

    @staticmethod
    @validate_arguments
    def _as_folder_list(
        complex_controllers_folder: list[str | Path] | str | Path
    ) -> list[str | Path]:
        if isinstance(complex_controllers_folder, list):
            return complex_controllers_folder
        return [complex_controllers_folder]

    @staticmethod
    @validate_arguments
    def _collect_complex_controller_structs(
        folders: list[str | Path]
    ) -> list[DRTCStructure]:
        # Find complex controller structs and referred observation points.
        complex_controller_structs = []
        for folder in folders:
            for filepath in Path(folder).iterdir():
                if filepath.name != "dimr_config.xml":
                    continue
                root = ET.parse(filepath).getroot()
                complex_controller_structs.extend(DRTCModel._parse_referenced_structures(root))
        return complex_controller_structs

    @staticmethod
    @validate_arguments(config=ConfigDict(arbitrary_types_allowed=True))
    def _deduplicate_complex_controller_structs(
        complex_controller_structs: list[DRTCStructure]
    ) -> tuple[list[DRTCStructure], set[str]]:
        # Observation points can be referenced multiple times, but we keep one entry.
        # Other structures can only be defined once.
        duplicates = []
        unique_structs = {}
        for fs in complex_controller_structs:
            if fs.struct_name not in unique_structs:
                unique_structs[fs.struct_name] = fs
                continue
            if fs.struct_type != "observations":
                duplicates.append(fs.struct_name)

        if duplicates:
            msg = f"Duplicate complex controller ids found: {duplicates}"
            logger.error(msg)
            raise ValueError(msg)

        deduplicated_structs = list(unique_structs.values())
        complex_controller_ids = set(unique_structs.keys())
        return deduplicated_structs, complex_controller_ids

    @staticmethod
    @validate_arguments(config=ConfigDict(arbitrary_types_allowed=True))
    def _validate_complex_controller_structs(
        complex_controller_structs: list[DRTCStructure],
        struct_ids_by_type: dict[str, set[str]],
        log_validation: bool
    ) -> tuple[list[DRTCStructure], set[str]]:
        validated_cc_structs = []
        for fs in complex_controller_structs:
            if fs.struct_type not in struct_ids_by_type or fs.struct_name not in struct_ids_by_type[fs.struct_type]:
                msg = f"Complex controller structure not found in HyDAMO, will not be used: {fs.struct_type}/{fs.struct_name}"
                if log_validation:
                    logger.warning(msg)
            else:
                validated_cc_structs.append(fs)
        validated_cc_ids = set([fs.struct_name for fs in validated_cc_structs])

        return validated_cc_structs, validated_cc_ids

    @staticmethod
    @validate_arguments(config=ConfigDict(arbitrary_types_allowed=True))
    def _get_struct_ids_by_type(hydamo: HyDAMO) -> dict[str, set[str]]:
        return {
            "observations": set(hydamo.observationpoints.observation_points.get("name", [])),
            "weirs": set(hydamo.structures.rweirs_df.get("id", []))
            | set(hydamo.structures.uweirs_df.get("id", [])),
            "orifices": set(hydamo.structures.orifices_df.get("id", [])),
            "pumps": set(hydamo.structures.pumps_df.get("id", [])),
            "generalstructures": set(hydamo.structures.generalstructures_df.get("id", [])),
            "culverts": set(hydamo.structures.culverts_df.get("id", [])),
            "bridges": set(hydamo.structures.bridges_df.get("id", [])),
        }

    @validate_arguments
    def _load_complex_controllers(
        self, complex_controllers_folder: list[str | Path] | str | Path,
    ) -> dict[str, list[str | ET.Element]]:
        """Normalize input folders, merge parsed controllers, and validate unique IDs."""
        if isinstance(complex_controllers_folder, list):
            complex_controllers = {}
            for folder in complex_controllers_folder:
                # Merge controller XML fragments from multiple folders.
                controllers = self.parse_complex_controller(Path(folder))
                for key, items in controllers.items():
                    # Append controller XML fragments across folders by section key.
                    complex_controllers.setdefault(key, []).extend(items)
        else:
            complex_controllers = self.parse_complex_controller(Path(complex_controllers_folder))

        # Keep a single merged DIMR root so downstream writers can consume index 0.
        complex_controllers["dimr_config"] = self._merge_dimr_config_roots(
            complex_controllers.get("dimr_config", [])
        )

        return complex_controllers

    @staticmethod
    def get_item_pair(item: ET.Element) -> tuple[str, str] | None:
        source = item.find(".//{*}sourceName")
        target = item.find(".//{*}targetName")
        if source is None or target is None or source.text is None or target.text is None:
            return None
        return source.text, target.text

    @staticmethod
    @validate_arguments(config=ConfigDict(arbitrary_types_allowed=True))
    def _merge_dimr_config_roots(dimr_roots: list[ET.Element]) -> list[ET.Element]:
        """Merge multiple dimr_config roots into one by combining coupler items."""
        if len(dimr_roots) <= 1:
            return dimr_roots

        # Use the first root as canonical structure/template for the merged result.
        # Remove all couplers first
        merged_root = copy.deepcopy(dimr_roots[0])
        for coupler in merged_root.findall("./{*}coupler"):
            merged_root.remove(coupler)

        # Merge couplers and items
        seen_items_by_coupler = {}
        for root in dimr_roots:
            for coupler in root.findall("./{*}coupler"):
                coupler_name = coupler.attrib.get("name")
                if coupler_name is None:
                    continue

                if coupler_name not in seen_items_by_coupler:
                    # Add this coupler without items
                    mc = copy.deepcopy(coupler)
                    for item in mc.findall("./{*}item"):
                        mc.remove(item)
                    merged_root.append(mc)

                    # Initialize tracking reference
                    seen_items_by_coupler[coupler_name] = {
                        "reference": mc,
                        "items": set(),
                    }

                # Only add unseen coupler items
                for item in coupler.findall("./{*}item"):
                    mitem = copy.deepcopy(item)
                    pair = DRTCModel.get_item_pair(mitem)
                    if pair is None:
                        continue

                    if pair not in seen_items_by_coupler[coupler_name]["items"]:
                        seen_items_by_coupler[coupler_name]["items"].add(pair)
                        seen_items_by_coupler[coupler_name]["reference"].append(mitem)

        return [merged_root]

    @validate_arguments(config=ConfigDict(arbitrary_types_allowed=True))
    def _parse_dataconfig_item(self, el: ET.Element) -> tuple[bool, str | None]:
        allow = True
        el_text = None

        # Check if this is a complex controller but not in the whitelist
        # Always allow observation points
        el_id = el.find(".//{*}elementId")
        if el_id is not None:
            el_text = el_id.text
            # In complex-controller fragments: keep observation ids, keep all when no filter is configured,
            # but reject ids that are not part of referenced/validated complex-controller structures.
            allow = self.allow_struct(
                cc_id=el_text,
                allow_observations=True,
                allow_if_filter_inactive=True,
                allow_if_not_referenced=False,
            )

        return allow, el_text

    @validate_arguments(config=ConfigDict(arbitrary_types_allowed=True))
    def _parse_toolsconfig_item(self, el: ET.Element) -> tuple[bool, str | None]:
        allow = True
        el_firstchild_text = None

        el_firstchild = next(iter(el), None)
        el_tags = []
        for tag in ["input", "output", "trigger", "condition"]:
            el_tags += el.findall(".//{*}" + tag)
        for el_tag in el_tags:
            for child in el_tag:
                if child.text is None:
                    continue
                if child.text.startswith(INPUT_PREFIX) or child.text.startswith(OUTPUT_PREFIX):
                    child_text = child.text.replace(INPUT_PREFIX, "").replace(OUTPUT_PREFIX, "")
                    child_text = child_text.split("/")[0]

                    # Check if this is a complex controller but not in the whitelist
                    # Always allow observation points
                    if allow:
                        # In tools fragments: keep observation ids, keep all when no filter is configured,
                        # but reject ids that are not referenced by validated complex-controller structures.
                        allow = self.allow_struct(
                            cc_id=child_text,
                            allow_observations=True,
                            allow_if_filter_inactive=True,
                            allow_if_not_referenced=False,
                        )
                        el_firstchild_text = el_firstchild.get("id") if el_firstchild is not None else None

        return allow, el_firstchild_text

    @validate_arguments(config=ConfigDict(arbitrary_types_allowed=True))
    def _parse_dimr_item(self, target: ET.Element | None) -> tuple[bool, str | None]:
        allow = True
        el_text = None
        if target is None or not target.text:
            return allow, el_text

        parts = target.text.split("/")
        if len(parts) < 3:
            return allow, target.text

        struct_id = parts[1]
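        # struct_id is the middle component of a '<component>/<structure id>/<quantity>'
        # target (hypothetical example: 'rtc/weir_A/Crest level (s)')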

        # Check if this is a complex controller but not in the whitelist
        # For DIMR coupler items: keep observation ids and keep non-complex/non-referenced ids,
        # and only filter out referenced complex-controller ids that are not in the whitelist.
        if not self.allow_struct(
            cc_id=struct_id,
            allow_observations=True,
            allow_if_filter_inactive=True,
            allow_if_not_referenced=True,
        ):
            allow = False
            el_text = target.text

        return allow, el_text

    @staticmethod
    @validate_arguments(config=ConfigDict(arbitrary_types_allowed=True))
    def _parse_referenced_structures(root: ET.Element) -> list[DRTCStructure]:
        structures = []
        rtc_to_flow = root.findall(".//{*}coupler[@name='rtc_to_flow']/{*}item/{*}targetName")
        flow_to_rtc = root.findall(".//{*}coupler[@name='flow_to_rtc']/{*}item/{*}sourceName")
        for item in rtc_to_flow + flow_to_rtc:
            structures.append(DRTCStructure(*item.text.split("/")))

        return structures

    @staticmethod
    @validate_arguments(config=ConfigDict(arbitrary_types_allowed=True))
    def _parse_unique_children(root: ET.Element):
        # Store xml sections by tag, this function assumes unique children.
        children = {}
        for child in root:
            # remove namespace from tag
            tag = child.tag
            if tag.startswith("{"):
                tag = tag.split("}")[1]
            # Sanity check: ensure that tag does not exist yet
            if tag in children:
                raise KeyError(f"Duplicate tag '{tag}'")
            children[tag] = child

        return children

    @staticmethod
    @validate_arguments(config=ConfigDict(arbitrary_types_allowed=True))
    def _strip_namespace(tag: str) -> str:
        return tag.split("}", 1)[-1] if tag.startswith("{") else tag

    @staticmethod
    @validate_arguments(config=ConfigDict(arbitrary_types_allowed=True))
    def _dataconfig_timeseries_key(el: ET.Element) -> str | None:
        if DRTCModel._strip_namespace(el.tag) != "timeSeries":
            return None
        return el.attrib.get("id")

    @staticmethod
    @validate_arguments(config=ConfigDict(arbitrary_types_allowed=True))
    def _timeseries_series_key(el: ET.Element) -> str | None:
        if DRTCModel._strip_namespace(el.tag) != "series":
            return None
        location = el.find("./{*}header/{*}locationId")
        parameter = el.find("./{*}header/{*}parameterId")
        if location is None or parameter is None:
            return None
        if location.text is None or parameter.text is None:
            return None
        return f"{location.text}|{parameter.text}"

    @staticmethod
    @validate_arguments(config=ConfigDict(arbitrary_types_allowed=True))
    def _state_leaf_key(el: ET.Element) -> str | None:
        if DRTCModel._strip_namespace(el.tag) != "treeVectorLeaf":
            return None
        return el.attrib.get("id")

    @validate_arguments(config=ConfigDict(arbitrary_types_allowed=True))
    def _append_unique_elements(
        self,
        parent: ET.Element,
        elements: list[str | ET.Element],
        key_getter: Callable[[ET.Element], str | None],
        file_label: str,
    ) -> None:
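        # Append `elements` under `parent`, skipping entries whose key (or raw
        # XML string, when no key is available) is already present.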
        seen_keys = set()
        seen_raw_xml = set()

        for child in list(parent):
            key = key_getter(child)
            if key is not None:
                seen_keys.add(key)
            else:
                seen_raw_xml.add(ET.tostring(child, encoding="unicode"))

        for item in elements:
            element = ET.fromstring(item) if isinstance(item, str) else copy.deepcopy(item)
            key = key_getter(element)
            if key is not None:
                if key in seen_keys:
                    logger.warning("%s: Skipped writing %s, id already present", file_label, key)
                    continue
                seen_keys.add(key)
                parent.append(element)
                continue

            raw_xml = ET.tostring(element, encoding="unicode")
            if raw_xml in seen_raw_xml:
                logger.warning("%s: Skipped writing duplicate XML fragment", file_label)
                continue
            seen_raw_xml.add(raw_xml)
            parent.append(element)

    @validate_arguments(config=ConfigDict(arbitrary_types_allowed=True))
    def from_hydamo(
        self, pid_settings: dict | None=None, interval_settings: dict | None=None, timeseries: pd.DataFrame | None=None
    ) -> None:
        """Function to convert HyDAMO management data to controller-dictionaries. So far only time- and PID-controllers are implemented. PID settings can be specified globally or per structdure.

        Args:
            pid_settings (dict): RTC settings for PID controllers that are not in the HyDAMO format.
            interval_settings (dict): RTC settings for interval controllers that are not in the HyDAMO format.
            timeseries (pd.DataFrame): timeseries that are input to time controllers; column names should match structure ids.

        Raises:
            ValueError: errors are raised for inconsistent input data.

        """
        for _, management in self.hydamo.management.iterrows():
            # First get the structure ID through the coupled items; three structure types are currently supported.
            if not pd.isna(management.regelmiddelid):
                weir_code = management.stuwid

                if weir_code in list(self.hydamo.structures.rweirs_df.id):
                    weir = self.hydamo.structures.rweirs_df[
                        self.hydamo.structures.rweirs_df.id == weir_code
                    ]
                elif not self.hydamo.structures.uweirs_df.empty and weir_code in list(self.hydamo.structures.uweirs_df.id):
                    weir = self.hydamo.structures.uweirs_df[
                        self.hydamo.structures.uweirs_df.id == weir_code
                    ]
                elif not self.hydamo.structures.orifices_df.empty and weir_code in list(self.hydamo.structures.orifices_df.id):
                    weir = self.hydamo.structures.orifices_df[
                        self.hydamo.structures.orifices_df.id == weir_code
                    ]
                else:
                    logger.warning(
                        f"Management for management_device {management.regelmiddelid} could not be connnected to a structure. Skipping it."
                    )
                    continue
                struc_id = weir.id.to_numpy()[0]
            elif not pd.isna(management.pompid):
                if not self.hydamo.pumps.empty and management.pompid in list(self.hydamo.pumps.globalid):
                    struc_id = self.hydamo.pumps[self.hydamo.pumps.globalid == management.pompid].code.to_numpy()[0]
                else:
                    logger.warning(f"Management for pump {management.pompid} could not be connected to a structure. Skipping it.")
                    continue
            else:
                raise ValueError(
                    "Only management_devices and pumps can be connected to a management object."
                )
            if management.stuurvariabele == "bovenkant afsluitmiddel":
                steering_variable = "Crest level (s)"
            elif management.stuurvariabele == "hoogte opening":
                steering_variable = "Gate lower edge level (s)"
            elif management.stuurvariabele == "pompdebiet":
                steering_variable = "Capacity (p)"
            else:
                raise ValueError(
                    f"Invalid value for steering variable of {struc_id}: {management.stuurvariabele}."
                )

            if management.doelvariabele == "waterstand":
                target_variable = "Water level (op)"
            elif management.doelvariabele == "debiet":
                target_variable = "Discharge (op)"
            else:
                raise ValueError(
                    f"Invalid value for target variable of {struc_id}: {management.doelvariabele}."
                )

            if management.typecontroller == "PID":
                # if no settings are specified for this structure, use the global settings
                if pid_settings is None:
                    raise ValueError(f'{management.code} contains a PID controller, but no pid_settings were provided.')
                if struc_id not in pid_settings:
                    ki = pid_settings["global"]["ki"]
                    kp = pid_settings["global"]["kp"]
                    kd = pid_settings["global"]["kd"]
                    max_speed = pid_settings["global"]["maxspeed"]
                else:
                    ki = pid_settings[struc_id]['ki']
                    kp = pid_settings[struc_id]['kp']
                    kd = pid_settings[struc_id]['kd']
                    max_speed = pid_settings[struc_id]['maxspeed']

                self.add_pid_controller(
                    structure_id=struc_id,
                    steering_variable=steering_variable,
                    target_variable=target_variable,
                    ki=ki,
                    kp=kp,
                    kd=kd,
                    max_speed=max_speed,
                    setpoint=management.streefwaarde,
                    lower_bound=management.ondergrens,
                    upper_bound=management.bovengrens,
                    observation_location=management.meetlocatieid,
                )

            elif management.typecontroller == "interval":
                if interval_settings is None:
                    raise ValueError(f'{management.code} contains an interval controller, but no interval_settings were provided.')

                if struc_id not in interval_settings:
                    deadband = interval_settings["global"]["deadband"]
                    max_speed = interval_settings["global"]["maxspeed"]
                else:
                    deadband = interval_settings[struc_id]['deadband']
                    max_speed = interval_settings[struc_id]['maxspeed']

                self.add_interval_controller(
                    structure_id=struc_id,
                    steering_variable=steering_variable,
                    target_variable=target_variable,
                    deadband=deadband,
                    setting_above=management.bovengrens,
                    setting_below=management.ondergrens,
                    max_speed=max_speed,
                    setpoint=management.streefwaarde,
                    observation_location=management.meetlocatieid,
                )

            elif management.typecontroller == "time":
                if timeseries is None:
                     raise ValueError(f'{management.code} contains a time controller, but no time series are provided. Please do so.')
                else:
                    data = timeseries.loc[:, struc_id]
                    self.add_time_controller(
                        structure_id=struc_id,
                        steering_variable=steering_variable,
                        data=data,
                    )
            else:
                logger.warning(
                    f"{management.typecontroller} is not a valid controller type - skipped."
                )

    @validate_arguments(config=ConfigDict(arbitrary_types_allowed=True))
    def add_time_controller(
        self,
        structure_id: str = None,
        steering_variable: str = None,
        data: pd.Series = None,
        interpolation_option: str = 'LINEAR',
        extrapolation_option: str = 'BLOCK',
    ) -> None:
        """Functon to add a time controller to a certain structure.

        Args:
            structure_id (str): structure id.
            steering_variable (str): variable that is controlled, usually crest level.
            data (pd.Series): timeseries.
            interpolation_option (str): interpolation option used.
            extrapolation_option (str): extrapolation option used.
        """
        self.time_controllers[structure_id] = {
            "type": "Time",
            "data": data,
            "steering_variable": steering_variable,
            "interpolation_option": interpolation_option,
            "extrapolation_option": extrapolation_option,
        }

    @validate_arguments(config=ConfigDict(arbitrary_types_allowed=True))
    def add_pid_controller(
        self,
        structure_id: str = None,
        steering_variable: str = None,
        target_variable: str = None,
        setpoint: float | str | pd.Series = None,
        lower_bound: float | str = None,
        upper_bound: float | str = None,
        observation_location: str = None,
        ki: float = 0.001,
        kp: float = 0.0,
        kd: float = 0.0,
        max_speed: float=0.00033,
        interpolation_option: str = 'LINEAR',
        extrapolation_option: str = 'BLOCK',
    ) -> None:
        """Function a add PID controller.

        Args:
            structure_id (str): structure ID.
            steering_variable (str): variable to be controlled, usually crest level.
            target_variable (str): target variable (usually water level)
            setpoint (Union[float, str, pd.Series]): setpoint value or timeseries of setpoint values
            lower_bound (Union[float, str]): lowest value to be allowed
            upper_bound (Union[float, str]): highest value to be allowed
            observation_location (str): id of the observation point
            ki (float): gain factor ki
            kp (float): gain factor kp
            kd (float): gain factor kd
            max_speed (float): maximum speed to change target variable
            interpolation_option (str): interpolation option used
            extrapolation_option (str): extrapolation option used
        """
        self.pid_controllers[structure_id] = {
            "type": "PID",
            "steering_variable": steering_variable,
            "target_variable": target_variable,
            "setpoint": setpoint,
            "observation_point": observation_location,
            "lower_bound": lower_bound,
            "upper_bound": upper_bound,
            "ki": ki,
            "kp": kp,
            "kd": kd,
            'max_speed': max_speed,
            "interpolation_option": interpolation_option,
            "extrapolation_option": extrapolation_option,
        }

    @validate_arguments(config=ConfigDict(arbitrary_types_allowed=True))
    def add_interval_controller(
        self,
        structure_id: str = None,
        steering_variable: str = None,
        target_variable: str = None,
        deadband: float | str = None,
        setpoint: float | str | pd.Series = None,
        setting_below: float | str = None,
        setting_above: float | str = None,
        max_speed: float | str = None,
        observation_location: str = None,
        interpolation_option: str = 'LINEAR',
        extrapolation_option: str = 'BLOCK',
    ) -> None:
        """Function to add an Interval controller.

        Args:
            structure_id (str): structure ID.
            steering_variable (str): variable to be controlled, usually crest level.
            target_variable (str): target variable (usually water level)
            deadband (float): deadband around the setpoint
            setpoint (Union[float, str, pd.Series]): setpoint value (or timeseries of setpoint values)
            setting_below (Union[float,str]): value of target variable below setpoint
            setting_above (Union[float, str]): value of target variable above setpoint
            max_speed (Union[float,str]): maximum speed to change target variable
            observation_location (str): id of the observation point
            interpolation_option (str): interpolation option used
            extrapolation_option (str): extrapolation option used
        """
        self.interval_controllers[structure_id] = {
            "type": 'Interval',
            "steering_variable": steering_variable,
            "target_variable": target_variable,
            "setpoint": setpoint,
            "observation_point": observation_location,
            "setting_below": setting_below,
            "setting_above": setting_above,
            "max_speed": max_speed,
            "deadband": deadband,
            "interpolation_option": interpolation_option,
            "extrapolation_option": extrapolation_option,
        }

    @staticmethod
    @validate_arguments
    def finish_file(xmlroot, configfile, filename: Path | str) -> None:
        """Method to finish a XML file in the required namespace and format.

        Args:
            xmlroot: Xml Tree
            configfile : Xml file object
            filename (Union[Path, str]): filepath of the file to be written
        """
        configfile.write(filename)
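        # Rebuild the output as a single string with an explicit XML
        # declaration, then pretty-print it via minidom for readable output.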
        xmlstring = (
            bytes(
                '<?xml version="1.0" encoding="utf-8" standalone="yes" ?>',
                encoding="utf-8",
            )
            + ET.tostring(xmlroot)
        )
        xmlstring = xmlstring.decode("utf-8").replace('\n', '').replace("  ","")
        with open(filename, "w+") as f:
            f.write(xmlstring)
        with open(filename) as f:
            temp = xml.dom.minidom.parseString(f.read())
        with open(filename, "w+") as f:
            f.write(temp.toprettyxml())

    def write_xml_v1(self) -> None:
        """Wrapper function to write individual XML files."""
        self.write_runtimeconfig()
        self.write_toolsconfig()
        self.write_timeseries_import()
        self.write_dataconfig()
        self.write_state_import()

    def write_runtimeconfig(self) -> None:
        """Function to write RtcRunTimeConfig.xml from the created dictionaries. They are built from empty files in the template directory using the Etree-package."""

        # namespaces for all other xml files
        generalname = "http://www.wldelft.nl/fews"
        xsi_name = "http://www.w3.org/2001/XMLSchema-instance"
        gn_brackets = "{" + generalname + "}"

        # registering namespaces
        ET.register_namespace("", generalname)
        ET.register_namespace("xsi", xsi_name)

        # parsing xml file to python and get the root of the existing xml file
        configfile = ET.parse(
            os.path.join(self.template_dir, "rtcRuntimeConfig_empty.xml")
        )
        myroot = configfile.getroot()

        # convert date and runtime to required input for runtimeconfig file

        # replace start/stop dates and times in xml file
        for x in myroot.iter(gn_brackets + "startDate"):
            x.set("date", dt.strftime(self.time_settings["start"], format="%Y-%m-%d"))
            x.set("time", dt.strftime(self.time_settings["start"], format="%H:%M:%S"))
        for x in myroot.iter(gn_brackets + "endDate"):
            x.set("date", dt.strftime(self.time_settings["end"], format="%Y-%m-%d"))
            x.set("time", dt.strftime(self.time_settings["end"], format="%H:%M:%S"))
        for x in myroot.iter(gn_brackets + "timeStep"):
            x.set("unit", "second")
            x.set("divider", "1")
            x.set("multiplier", str(int(self.time_settings["step"])))

        # write new xml file
        self.finish_file(myroot, configfile, self.output_path / "rtcRuntimeConfig.xml")

    def write_toolsconfig(self) -> None:
        """Function to write RtcToolsConfig.xml from the created dictionaries. They are built from empty files in the template directory using the Etree-package."""
        generalname = "http://www.wldelft.nl/fews"
        xsi_name = "http://www.w3.org/2001/XMLSchema-instance"
        gn_brackets = "{" + generalname + "}"

        # registering namespaces
        ET.register_namespace("", generalname)
        ET.register_namespace("xsi", xsi_name)

        # parsing xml file
        configfile = ET.parse(self.template_dir / "rtcToolsConfig_empty.xml")
        myroot = configfile.getroot()

        self.all_controllers = self.time_controllers.copy()
        self.all_controllers.update(self.pid_controllers)
        self.all_controllers.update(self.interval_controllers)

        to_remove = []
        for key in self.all_controllers.keys():

            controller = self.all_controllers[key]
            # For simple controllers: skip only when this id has an allowed complex controller.
            # If filtering is inactive or the id is not a referenced complex id, do not skip.
            if self.allow_struct(
                cc_id=key,
                allow_observations=False,
                allow_if_filter_inactive=False,
                allow_if_not_referenced=False,
            ):
                logger.warning(
                    f"RtcToolsConfig.xml: Skipped writing {controller['type']} control for {key}, complex controller already present"
                )
                to_remove.append(key)
                continue

            a = ET.SubElement(myroot[1], gn_brackets + "rule")
            if controller['type'] == "PID":

                # rule type (PID)
                b = ET.SubElement(a, gn_brackets + "pid")
                b.set("id", "[PID]" + "Control group " + str(key) + "/PID Rule")

                # standard settings
                c = ET.SubElement(b, gn_brackets + "mode")
                c.text = "PIDVEL"

                d = ET.SubElement(b, gn_brackets + "settingMin")
                d.text = str(controller["lower_bound"])

                e = ET.SubElement(b, gn_brackets + "settingMax")
                e.text = str(controller["upper_bound"])

                f = ET.SubElement(b, gn_brackets + "settingMaxSpeed")
                f.text = str(controller["max_speed"])

                g = ET.SubElement(b, gn_brackets + "kp")
                g.text = str(controller["kp"])

                h = ET.SubElement(b, gn_brackets + "ki")
                h.text = str(controller["ki"])

                i = ET.SubElement(b, gn_brackets + "kd")
                i.text = str(controller["kd"])

                # input
                j = ET.SubElement(b, gn_brackets + "input")

                k = ET.SubElement(j, gn_brackets + "x")
                k.text = (
                    INPUT_PREFIX
                    + controller["observation_point"]
                    + "/"
                    + controller["target_variable"]
                )

                # If setpoint varies in time
                if isinstance(controller["setpoint"], pd.Series):
                    ll = ET.SubElement(j, gn_brackets + "setpointSeries")
                    ll.text = "[SP]" + "Control group " + str(key) + "/PID Rule"
                # Else fixed setpoint
                else:
                    ll = ET.SubElement(j, gn_brackets + "setpointValue")
                    ll.text = str(controller["setpoint"])

                # output
                m = ET.SubElement(b, gn_brackets + "output")

                o = ET.SubElement(m, gn_brackets + "y")
                o.text = OUTPUT_PREFIX + str(key) + "/" + controller["steering_variable"]

                p = ET.SubElement(m, gn_brackets + "integralPart")
                p.text = "[IP]" + "Control group " + str(key) + "/PID Rule"

                q = ET.SubElement(m, gn_brackets + "differentialPart")
                q.text = "[DP]" + "Control group " + str(key) + "/PID Rule"

            elif controller['type'] == 'Interval':
                # Interval RTC
                # rule type (Interval)
                b = ET.SubElement(a, gn_brackets + "interval")
                b.set("id", "[IntervalRule]" + "Control group " + str(key) + "/Interval Rule")

                # standard settings
                d = ET.SubElement(b, gn_brackets + "settingBelow")
                d.text = str(controller["setting_below"])

                e = ET.SubElement(b, gn_brackets + "settingAbove")
                e.text = str(controller["setting_above"])

                f = ET.SubElement(b, gn_brackets + "settingMaxSpeed")
                f.text = str(controller["max_speed"])

                g = ET.SubElement(b, gn_brackets + "deadbandSetpointAbsolute")
                g.text = str(controller["deadband"])

                # input
                j = ET.SubElement(b, gn_brackets + "input")

                k = ET.SubElement(j, gn_brackets + "x") # leave ref = "EXPLICIT" out for now
                k.text = (
                    INPUT_PREFIX
                    + controller["observation_point"]
                    + "/"
                    + controller["target_variable"]
                )
                # If setpoint varies in time
                ll = ET.SubElement(j, gn_brackets + "setpoint")
                ll.text = "[SP]" + "Control group " + str(key) + "/Interval Rule"

                # output
                m = ET.SubElement(b, gn_brackets + "output")

                o = ET.SubElement(m, gn_brackets + "y")
                o.text = OUTPUT_PREFIX + str(key) + "/" + controller["steering_variable"]

                p = ET.SubElement(m, gn_brackets + "status")
                p.text = "[Status]" + "Control group " + str(key) + "/Interval Rule"
            # Add time rule
            else:
                # rule type (timeabsolute)
                b = ET.SubElement(a, gn_brackets + "timeAbsolute")
                b.set("id", "[TimeRule]" + "Control group " + str(key) + "/Time Rule")

                # input
                c = ET.SubElement(b, gn_brackets + "input")

                d = ET.SubElement(c, gn_brackets + "x")
                d.text = "Control group " + str(key) + "/Time Rule"

                e = ET.SubElement(b, gn_brackets + "output")

                f = ET.SubElement(e, gn_brackets + "y")
                f.text = OUTPUT_PREFIX + str(key) + "/" + controller["steering_variable"]

        # remove controllers that have complex controllers
        for key in to_remove:
            del self.all_controllers[key]

        # elements that are parsed from user specified files should be inserted at the right place.
        if self.complex_controllers is not None:
            for ctl in self.complex_controllers["toolsconfig_rules"]:
                myroot[1].append(ET.fromstring(ctl))
            for ctl in self.complex_controllers["toolsconfig_triggers"]:
                # no trigger block present yet
                if len(myroot) == 2:
                    trigger = ET.Element(gn_brackets + "triggers")
                    myroot.append(trigger)
                    myroot[2].append(ET.fromstring(ctl))
                else:
                    myroot[2].append(ET.fromstring(ctl))

        self.finish_file(myroot, configfile, self.output_path / RTC_TOOLS_CONFIG_XML)

    def write_dataconfig(self) -> None:
        """Function to write RtcDataConfig.xml from the created dictionaries. They are built from empty files in the template directory using the Etree-package."""
        generalname = "http://www.wldelft.nl/fews"
        xsi_name = "http://www.w3.org/2001/XMLSchema-instance"
        gn_brackets = "{" + generalname + "}"
        m3unit = "m^3/s"
        munit = "m"

        # registering namespaces
        ET.register_namespace("", generalname)
        ET.register_namespace("xsi", xsi_name)

        # Parsing xml file
        configfile = ET.parse(self.template_dir / "rtcDataConfig_empty.xml")
        myroot = configfile.getroot()

        timeseries_length = len(ET.parse(self.output_path / 'timeseries_import.xml').getroot())

        # implement standard settings for import and export data
        a0 = ET.SubElement(myroot[1], gn_brackets + "CSVTimeSeriesFile")
        a0.set("decimalSeparator", ".")
        a0.set("delimiter", ",")
        a0.set("adjointOutput", "false")

        a1 = ET.SubElement(myroot[1], gn_brackets + "PITimeSeriesFile")

        a2 = ET.SubElement(a1, gn_brackets + "timeSeriesFile")
        a2.text = "timeseries_export.xml"

        a3 = ET.SubElement(a1, gn_brackets + "useBinFile")
        a3.text = "false"

        # reference the timeseries import file
        if timeseries_length > 0:
            # only if timeseries are written to the import
            a4 = ET.SubElement(myroot[0], gn_brackets + "PITimeSeriesFile")
            a5 = ET.SubElement(a4, gn_brackets + "timeSeriesFile")
            a5.text = TIMESERIES_IMPORT_XML
            a6 = ET.SubElement(a4, gn_brackets + "useBinFile")
            a6.text = "false"

        # weir-dependent data
        for key in self.all_controllers.keys():

            controller = self.all_controllers[key]
            # For simple controllers: skip only when this id has an allowed complex controller.
            # If filtering is inactive or the id is not a referenced complex id, do not skip.
            if self.allow_struct(
                cc_id=key,
                allow_observations=False,
                allow_if_filter_inactive=False,
                allow_if_not_referenced=False,
            ):
                logger.warning(
                    f"{RTC_DATA_CONFIG_XML}: Skipped writing {controller['type']} control for {key}, complex controller already present"
                )
                continue

            # data to import
            if controller['type'] == 'PID':

                input_id = INPUT_PREFIX + controller["observation_point"] + "/" +  controller["target_variable"]

                if myroot[0].find(f".//*[@id='{input_id}']") is None:
                    a = ET.SubElement(myroot[0], gn_brackets + "timeSeries")
                    a.set("id", input_id)

                    b = ET.SubElement(a, gn_brackets + "OpenMIExchangeItem")

                    c = ET.SubElement(b, gn_brackets + "elementId")
                    c.text = controller["observation_point"]

                    d = ET.SubElement(b, gn_brackets + "quantityId")
                    d.text = controller["target_variable"]

                    e = ET.SubElement(b, gn_brackets + "unit")
                    e.text = munit if controller['target_variable'] == 'Water level (op)' else m3unit

                else:
                    logger.warning(f"{RTC_DATA_CONFIG_XML}: Skipped writing {input_id}, observation point already present")

                # If a time dependent setpoint is required, add the Time Rule
                if isinstance(controller['setpoint'], pd.Series):
                    a2 = ET.SubElement(myroot[0], gn_brackets + "timeSeries")

                    a2.set("id", f"[SP]Control group {key}/PID Rule")
                    b2 = ET.SubElement(a2, gn_brackets + "PITimeSeries")

                    c2 = ET.SubElement(b2, gn_brackets + "locationId")
                    c2.text = f"[PID]Control group {key}/PID Rule"

                    d2 = ET.SubElement(b2, gn_brackets + "parameterId")
                    d2.text = "SP"

                    e2 = ET.SubElement(b2, gn_brackets + "interpolationOption")
                    e2.text = controller['interpolation_option']

                    f2 = ET.SubElement(b2, gn_brackets + "extrapolationOption")
                    f2.text = controller['extrapolation_option']

            elif controller['type'] == 'Interval':

                input_id = INPUT_PREFIX + controller["observation_point"] + "/" +  controller["target_variable"]
                if myroot[0].find(f".//*[@id='{input_id}']") is None:
                    a = ET.SubElement(myroot[0], gn_brackets + "timeSeries")

                    a.set("id", input_id)

                    b = ET.SubElement(a, gn_brackets + "OpenMIExchangeItem")

                    c = ET.SubElement(b, gn_brackets + "elementId")
                    c.text = controller["observation_point"]

                    d = ET.SubElement(b, gn_brackets + "quantityId")
                    d.text = controller["target_variable"]

                    e = ET.SubElement(b, gn_brackets + "unit")
                    e.text = munit if controller['target_variable'] == 'Water level (op)' else m3unit
                else:
                    logger.warning(f"{RTC_DATA_CONFIG_XML}: Skipped writing {input_id}, observation point already present")

                if isinstance(controller['setpoint'], pd.Series):
                    a2 = ET.SubElement(myroot[0], gn_brackets + "timeSeries")

                    a2.set("id", f"[SP]Control group {key}/Interval Rule")
                    b3 = ET.SubElement(a2, gn_brackets + "PITimeSeries")

                    c3 = ET.SubElement(b3, gn_brackets + "locationId")
                    c3.text = f"[IntervalRule]Control group {key}/Interval Rule"

                    d3 = ET.SubElement(b3, gn_brackets + "parameterId")
                    d3.text = "SP"

                    e3 = ET.SubElement(b3, gn_brackets + "interpolationOption")
                    e3.text = controller['interpolation_option']

                    f3 = ET.SubElement(b3, gn_brackets + "extrapolationOption")
                    f3.text = controller['extrapolation_option']

            else:
                a = ET.SubElement(myroot[0], gn_brackets + "timeSeries")
                a.set("id", "Control group " + str(key) + "/Time Rule")
                b = ET.SubElement(a, gn_brackets + "PITimeSeries")

                c = ET.SubElement(b, gn_brackets + "locationId")
                c.text = f"[TimeRule]Control group {key}/Time Rule"

                d = ET.SubElement(b, gn_brackets + "parameterId")
                d.text = "TimeSeries"

                e = ET.SubElement(b, gn_brackets + "interpolationOption")
                e.text = controller['interpolation_option']

                f = ET.SubElement(b, gn_brackets + "extrapolationOption")
                f.text = controller['extrapolation_option']

            # data to export:
            f = ET.SubElement(myroot[1], gn_brackets + "timeSeries")
            f.set("id", OUTPUT_PREFIX + str(key) + "/" + controller["steering_variable"])

            g = ET.SubElement(f, gn_brackets + "OpenMIExchangeItem")

            h = ET.SubElement(g, gn_brackets + "elementId")
            h.text = str(key)

            j = ET.SubElement(g, gn_brackets + "quantityId")
            j.text = controller["steering_variable"]

            k = ET.SubElement(g, gn_brackets + "unit")
            k.text = m3unit if controller["steering_variable"] == 'Capacity (p)' else munit

        for key in self.all_controllers.keys():
            controller = self.all_controllers[key]

            if controller['type'] == 'PID':
                i = ET.SubElement(myroot[1], gn_brackets + "timeSeries")
                i.set("id", "[IP]Control group " + str(key) + "/PID Rule")

                j = ET.SubElement(myroot[1], gn_brackets + "timeSeries")
                j.set("id", "[DP]Control group " + str(key) + "/PID Rule")

            elif controller['type'] == 'Interval':  # interval rules additionally export a status series
                j = ET.SubElement(myroot[1], gn_brackets + "timeSeries")
                j.set("id", "[Status]Control group " + str(key) + "/Interval Rule")

        # the parsed complex controllers should be inserted at the right place
        if self.complex_controllers is not None:
            self._append_unique_elements(
                parent=myroot[0],
                elements=self.complex_controllers["dataconfig_import"],
                key_getter=self._dataconfig_timeseries_key,
                file_label=RTC_DATA_CONFIG_XML,
            )
            self._append_unique_elements(
                parent=myroot[1],
                elements=self.complex_controllers["dataconfig_export"],
                key_getter=self._dataconfig_timeseries_key,
                file_label=RTC_DATA_CONFIG_XML,
            )

        self.finish_file(myroot, configfile, self.output_path / RTC_DATA_CONFIG_XML)

    def write_timeseries_import(self) -> None:
        """Function to write timeseries_import.xml from the created dictionaries. They are built from empty files in the template directory using the Etree-package."""
        generalname = "http://www.wldelft.nl/fews/PI"
        xsi_name = "http://www.w3.org/2001/XMLSchema-instance"
        gn_brackets = "{" + generalname + "}"

        # registering namespaces
        ET.register_namespace("", generalname)
        ET.register_namespace("xsi", xsi_name)

        # Parsing xml file
        configfile = ET.parse(self.template_dir / "timeseries_import_empty.xml")
        myroot = configfile.getroot()

        for key in self.all_controllers.keys():

            controller = self.all_controllers[key]
            # For simple controllers: skip only when this id has an allowed complex controller.
            # If filtering is inactive or the id is not a referenced complex id, do not skip.
            if self.allow_struct(
                cc_id=key,
                allow_observations=False,
                allow_if_filter_inactive=False,
                allow_if_not_referenced=False,
            ):
                logger.warning(
                    f"{TIMESERIES_IMPORT_XML}: Skipped writing {controller['type']} control for {key}, complex controller already present"
                )
                continue

            if controller['type'] == 'Time':
                # data to import
                dates = pd.to_datetime(controller["data"].index).strftime("%Y-%m-%d")
                times = pd.to_datetime(controller["data"].index).strftime("%H:%M:%S")
                timestep = (
                    pd.to_datetime(controller["data"].index)[1]
                    - pd.to_datetime(controller["data"].index)[0]
                ).total_seconds()
                a = ET.SubElement(myroot, gn_brackets + "series")
                b = ET.SubElement(a, gn_brackets + "header")
                c = ET.SubElement(b, gn_brackets + "type")
                c.text = "instantaneous"
                d = ET.SubElement(b, gn_brackets + "locationId")
                d.text = f"[TimeRule]Control group {key}/Time Rule"
                e = ET.SubElement(b, gn_brackets + "parameterId")
                e.text = "TimeSeries"
                f = ET.SubElement(b, gn_brackets + "timeStep")
                f.attrib = {
                    "unit": "minute",
                    "multiplier": str(int(timestep / 60.0)),
                    "divider": str(1),
                }
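                # e.g. hourly input data (timestep=3600 s) gives multiplier="60" (minutes)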
                g = ET.SubElement(b, gn_brackets + "startDate")
                g.attrib = {"date": dates[0], "time": times[0]}
                h = ET.SubElement(b, gn_brackets + "endDate")
                h.attrib = {"date": dates[-1], "time": times[-1]}
                i = ET.SubElement(b, gn_brackets + "missVal")
                i.text = "-999.0"
                j = ET.SubElement(b, gn_brackets + "stationName")
                j.text = ""
                for idx in range(len(controller["data"])):
                    k = ET.SubElement(a, gn_brackets + "event")
                    k.attrib = {
                        "date": dates[idx],
                        "time": times[idx],
                        "value": str(controller["data"].to_numpy()[idx]),
                    }
            elif controller['type'] == "Interval":
                if isinstance(controller['setpoint'], float):
                    controller['setpoint'] = pd.Series(
                        [controller['setpoint'], controller['setpoint']],
                        index=[self.time_settings['start'], self.time_settings['end']],
                    )

                # data to import
                dates = pd.to_datetime(controller["setpoint"].index).strftime("%Y-%m-%d")
                times = pd.to_datetime(controller["setpoint"].index).strftime("%H:%M:%S")
                timestep = (pd.to_datetime(f'{dates[1]} {times[1]}') - pd.to_datetime(f'{dates[0]} {times[0]}')).total_seconds()

                a = ET.SubElement(myroot, gn_brackets + "series")
                b = ET.SubElement(a, gn_brackets + "header")
                c = ET.SubElement(b, gn_brackets + "type")
                c.text = "instantaneous"

                d = ET.SubElement(b, gn_brackets + "locationId")
                d.text = f"[IntervalRule]Control group {key}/Interval Rule"

                e = ET.SubElement(b, gn_brackets + "parameterId")
                e.text = "SP"
                f = ET.SubElement(b, gn_brackets + "timeStep")
                f.attrib = {
                    "unit": "minute",
                    "multiplier": str(int(timestep / 60.0)),
                    "divider": str(1),
                }
                g = ET.SubElement(b, gn_brackets + "startDate")
                g.attrib = {"date": dates[0], "time": times[0]}
                h = ET.SubElement(b, gn_brackets + "endDate")
                h.attrib = {"date": dates[-1], "time": times[-1]}
                i = ET.SubElement(b, gn_brackets + "missVal")
                i.text = "-999.0"
                for idx in range(len(controller["setpoint"])):
                    k = ET.SubElement(a, gn_brackets + "event")
                    k.attrib = {
                        "date": dates[idx],
                        "time": times[idx],
                        "value": str(controller["setpoint"].to_numpy()[idx]),
                    }

            # Create a timeseries import if a time-dependent setpoint is used
            elif controller['type'] == 'PID' and isinstance(controller['setpoint'], pd.Series):
                # data to import
                dates = pd.to_datetime(controller["setpoint"].index).strftime("%Y-%m-%d")
                times = pd.to_datetime(controller["setpoint"].index).strftime("%H:%M:%S")
                timestep = (
                    pd.to_datetime(controller["setpoint"].index)[1]
                    - pd.to_datetime(controller["setpoint"].index)[0]
                ).total_seconds()
                a = ET.SubElement(myroot, gn_brackets + "series")
                b = ET.SubElement(a, gn_brackets + "header")
                c = ET.SubElement(b, gn_brackets + "type")
                c.text = "instantaneous"

                # only PID controllers with a pd.Series setpoint reach this branch
                d = ET.SubElement(b, gn_brackets + "locationId")
                d.text = f"[PID]Control group {key}/PID Rule"

                e = ET.SubElement(b, gn_brackets + "parameterId")
                e.text = "SP"
                f = ET.SubElement(b, gn_brackets + "timeStep")
                f.attrib = {
                    "unit": "minute",
                    "multiplier": str(int(timestep / 60.0)),
                    "divider": str(1),
                }
                g = ET.SubElement(b, gn_brackets + "startDate")
                g.attrib = {"date": dates[0], "time": times[0]}
                h = ET.SubElement(b, gn_brackets + "endDate")
                h.attrib = {"date": dates[-1], "time": times[-1]}
                i = ET.SubElement(b, gn_brackets + "missVal")
                i.text = "-999.0"
                j = ET.SubElement(b, gn_brackets + "stationName")
                j.text = ""
                for idx in range(len(controller["setpoint"])):
                    k = ET.SubElement(a, gn_brackets + "event")
                    k.attrib = {
                        "date": dates[idx],
                        "time": times[idx],
                        "value": str(controller["setpoint"].to_numpy()[idx]),
                    }

        if self.complex_controllers is not None:
            self._append_unique_elements(
                parent=myroot,
                elements=self.complex_controllers["timeseries"],
                key_getter=self._timeseries_series_key,
                file_label=TIMESERIES_IMPORT_XML,
            )

        self.finish_file(myroot, configfile, self.output_path / TIMESERIES_IMPORT_XML)

    def write_state_import(self) -> None:
        """Function to write state_import.xml from the created dictionaries. They are built from empty files in the template directory using the Etree-package."""
        generalname = "http://www.openda.org"
        xsi_name = "http://www.w3.org/2001/XMLSchema-instance"
        gn_brackets = "{" + generalname + "}"

        # registering namespaces
        ET.register_namespace("", generalname)
        ET.register_namespace("xsi", xsi_name)

        # Parsing xml file
        configfile = ET.parse(self.template_dir / "state_import_empty.xml")
        myroot = configfile.getroot()

        a0 = ET.SubElement(myroot, gn_brackets + "treeVector")

        for key in self.all_controllers.keys():

            controller = self.all_controllers[key]
            # For simple controllers: skip only when this id has an allowed complex controller.
            # If filtering is inactive or the id is not a referenced complex id, do not skip.
            if self.allow_struct(
                cc_id=key,
                allow_observations=False,
                allow_if_filter_inactive=False,
                allow_if_not_referenced=False,
            ):
                logger.warning(
                    f"{STATE_IMPORT_XML}: Skipped writing {controller['type']} control for {key}, complex controller already present"
                )
                continue

            # data to import
            a = ET.SubElement(a0, gn_brackets + "treeVectorLeaf")
            a.attrib = {"id": OUTPUT_PREFIX + key + "/" + controller["steering_variable"]}
            b = ET.SubElement(a, gn_brackets + "vector")
            if controller['type'] == 'PID':
                b.text = str(controller["upper_bound"])
            elif controller['type'] == 'Interval':
                b.text = str(max(controller['setting_above'], controller['setting_below'])) # Take the maximum value as a starting value
            else:
                b.text = str(controller["data"].to_numpy()[0])

        # the parsed complex controllers should be inserted at the right place
        if self.complex_controllers is not None:
            self._append_unique_elements(
                parent=myroot[0],
                elements=self.complex_controllers["state"],
                key_getter=self._state_leaf_key,
                file_label=STATE_IMPORT_XML,
            )

        self.finish_file(myroot, configfile, self.output_path / STATE_IMPORT_XML)
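A minimal end-to-end sketch (ids and settings are hypothetical; `hydamo` and `fm` are assumed to be initialized HyDAMO and FMModel instances):

drtc = DRTCModel(hydamo, fm, output_path="model")
pid_settings = {"global": {"ki": 0.001, "kp": 0.0, "kd": 0.0, "maxspeed": 0.00033}}
drtc.from_hydamo(pid_settings=pid_settings)
drtc.write_xml_v1()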
__init__(hydamo: HyDAMO, fm: FMModel, output_path: str | Path = None, rtc_onlytimeseries: bool = False, rtc_timeseriesdata: pd.DataFrame = None, complex_controllers_folder: list[str | Path] | str | Path = None, id_limit_complex_controllers: list[str] | None = None, rtc_timestep: int | float = 60) -> None

Initialize the DRTCModel.

Internal controller dictionaries and output paths are initialized, optional complex-controller XML is parsed, and RTC template files are copied to the output folder.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `hydamo` | instance of HyDAMO | data structure containing the HyDAMO DAMO2.2 | *required* |
| `fm` | instance of FMModel | model structure setup for Hydrolib-core | *required* |
| `output_path` | str or Path | base path where an `rtc` subfolder is created for generated RTC files. Defaults to the current working directory. | `None` |
| `rtc_onlytimeseries` | bool | if True, build RTC control from `rtc_timeseriesdata` only; `complex_controllers_folder` is then ignored. | `False` |
| `rtc_timeseriesdata` | DataFrame | time series data used when `rtc_onlytimeseries=True`. Column names are expected to match structure IDs. | `None` |
| `complex_controllers_folder` | list[Path or str] or Path or str | folder(s) with custom RTC XML files to import when `rtc_onlytimeseries=False`. | `None` |
| `id_limit_complex_controllers` | list[str] | whitelist of IDs that may be coupled to complex controller logic. Required when `complex_controllers_folder` is provided. An empty list means no IDs are allowed. | `None` |
| `rtc_timestep` | int or float | time step of the RTC model. Defaults to 60 seconds. | `60` |
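A construction sketch with user-supplied complex-controller XML (folder name and ids are hypothetical):

drtc = DRTCModel(
    hydamo,
    fm,
    output_path="model",
    complex_controllers_folder="rtc_xml",
    id_limit_complex_controllers=["weir_A", "pump_B"],
)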
Source code in hydrolib/dhydamo/core/drtc.py
@validate_arguments(config=ConfigDict(arbitrary_types_allowed=True))
def __init__(
    self,
    hydamo: HyDAMO,
    fm: FMModel,
    output_path: str | Path = None,
    rtc_onlytimeseries: bool = False,
    rtc_timeseriesdata: pd.DataFrame = None,
    complex_controllers_folder: list[str | Path] | str | Path = None,
    id_limit_complex_controllers: list[str] | None = None,
    rtc_timestep: int | float = 60,
) -> None:
    """Initialize the DRTCModel.

    Internal controller dictionaries and output paths are initialized, optional
    complex-controller XML is parsed, and RTC template files are copied to the
    output folder.

    Args:
        hydamo (instance of HyDAMO): data structure containing the HyDAMO DAMO2.2
        fm (instance of FMModel): model structure setup for Hydrolib-core
        output_path (str or Path, optional): base path where an `rtc` subfolder
            is created for generated RTC files. Defaults to the current working
            directory.
        rtc_onlytimeseries (bool): if True, build RTC control from
            `rtc_timeseriesdata` only. If True, `complex_controllers_folder` is
            ignored. Defaults to False.
        rtc_timeseriesdata (pd.DataFrame, optional): time series data used when
            `rtc_onlytimeseries=True`. Column names are expected to match
            structure IDs. Defaults to None.
        complex_controllers_folder (list[Path or str] or Path or str, optional):
            folder(s) with custom RTC XML files to import when
            `rtc_onlytimeseries=False`. Defaults to None.
        id_limit_complex_controllers (list[str], optional): whitelist of IDs that
            may be coupled to complex controller logic. Required when
            `complex_controllers_folder` is provided. An empty list means no IDs
            are allowed.
        rtc_timestep (Union[int, float], optional): Time step of the RTC model.
            Defaults to 60 seconds.
    """
    self.hydamo = hydamo

    self.time_settings = {
        "start": pd.to_datetime(fm.time.refdate, format="%Y%m%d"),
        "end": pd.to_datetime(fm.time.refdate, format="%Y%m%d")
        + pd.to_timedelta(fm.time.tstop, unit="s"),
        "step": rtc_timestep,
    }

    self.pid_controllers = {}
    self.time_controllers = {}
    self.interval_controllers = {}

    # set up the output path
    base_output_path = Path(".") if output_path is None else Path(output_path)
    self.output_path = base_output_path / "rtc"
    self.output_path.mkdir(parents=True, exist_ok=True)

    # Save object id by type
    self.struct_ids_by_type = DRTCModel._get_struct_ids_by_type(self.hydamo)

    # parse user-provided controllers
    self.complex_controllers = None
    self.cc_structs = None
    self.cc_ids = None
    self.cc_id_limit = None
    if rtc_onlytimeseries and complex_controllers_folder is not None:
        # User supplied controllers in timeseries_only mode, emit warning
        logger.warning(
            "`complex_controllers_folder` is ignored because `rtc_onlytimeseries=True`. "
            "Set `rtc_onlytimeseries=False` to enable complex controllers."
        )
    elif not rtc_onlytimeseries and complex_controllers_folder is not None:
        if id_limit_complex_controllers is None:
            # When complex_controllers_folder is supplied, the whitelist
            # needs to be supplied as well
            raise SyntaxError(
                "Missing required `id_limit_complex_controllers` while "
                "`complex_controllers_folder` is provided. Supply a list of "
                "allowed IDs to couple to complex controller logic."
            )

        # Discover all complex controller related structures and id's
        self.cc_structs, self.cc_ids = self._load_complex_controller_structs(
            complex_controllers_folder,
            self.struct_ids_by_type,
            log_validation=True,
        )
        logger.info(
            "Found %d complex controller structures referenced in XML: %s",
            len(self.cc_structs),
            self.cc_ids,
        )

        # Save whitelist of allowed controller ids.
        self.cc_id_limit = set(id_limit_complex_controllers)
        if len(self.cc_id_limit) == 0:
            logger.warning(
                "`id_limit_complex_controllers` is empty. No IDs are allowed, "
                "so all complex controller references will be filtered out."
            )
        else:
            logger.info(
                "Applying complex controller ID filter with %d allowed IDs: %s",
                len(self.cc_id_limit),
                self.cc_id_limit,
            )

        # Load complex controllers
        self.complex_controllers = self._load_complex_controllers(complex_controllers_folder)

    # copy files from the template RTC-folder
    self.template_dir = Path(__file__).resolve().parent / ".." / "resources" / "RTC"

    generic_files = [p for p in self.template_dir.iterdir() if p.suffix in {".xsd", ".json"}]
    for filepath in generic_files:
        shutil.copy(filepath, self.output_path / filepath.name)

    if rtc_onlytimeseries:
        for name, data in rtc_timeseriesdata.items():
            if name in hydamo.structures.rweirs_df.id.to_list():
                steering_var = "Crest level (s)"
            elif name in hydamo.structures.orifices_df.id.to_list():
                steering_var = 'Gate lower edge level (s)'
            elif name in hydamo.structures.pumps_df.id.to_list():
                steering_var = 'Capacity (p)'
            else:
                logger.warning(f"{name} does not match a known structure id - skipped.")
                continue
            self.add_time_controller(
                structure_id=name, steering_variable=steering_var, data=data
            )
        self.check_timeseries(rtc_timeseriesdata)
        self.complex_controllers = None
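For the timeseries-only mode handled at the end of `__init__`, a sketch with hypothetical data (column names must match structure ids):

import pandas as pd

ts = pd.DataFrame(
    {"weir_A": [1.20, 1.15, 1.10]},
    index=pd.date_range("2020-01-01", periods=3, freq="h"),
)
drtc = DRTCModel(hydamo, fm, rtc_onlytimeseries=True, rtc_timeseriesdata=ts)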
add_interval_controller(structure_id: str = None, steering_variable: str = None, target_variable: str = None, deadband: float | str = None, setpoint: float | str | pd.Series = None, setting_below: float | str = None, setting_above: float | str = None, max_speed: float | str = None, observation_location: str = None, interpolation_option: str = 'LINEAR', extrapolation_option: str = 'BLOCK') -> None

Function to add an Interval controller.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `structure_id` | str | structure ID | `None` |
| `steering_variable` | str | variable to be controlled, usually crest level | `None` |
| `target_variable` | str | target variable (usually water level) | `None` |
| `deadband` | float | deadband around the setpoint | `None` |
| `setpoint` | Union[float, str, Series] | setpoint value (or timeseries of setpoint values) | `None` |
| `setting_below` | Union[float, str] | value of target variable below setpoint | `None` |
| `setting_above` | Union[float, str] | value of target variable above setpoint | `None` |
| `max_speed` | Union[float, str] | maximum speed to change target variable | `None` |
| `observation_location` | str | id of the observation point | `None` |
| `interpolation_option` | str | interpolation option used | `'LINEAR'` |
| `extrapolation_option` | str | extrapolation option used | `'BLOCK'` |
Source code in hydrolib/dhydamo/core/drtc.py
@validate_arguments(config=ConfigDict(arbitrary_types_allowed=True))
def add_interval_controller(
    self,
    structure_id: str = None,
    steering_variable: str = None,
    target_variable: str = None,
    deadband: float | str = None,
    setpoint: float | str | pd.Series = None,
    setting_below: float | str = None,
    setting_above: float | str = None,
    max_speed: float | str = None,
    observation_location: str = None,
    interpolation_option: str = 'LINEAR',
    extrapolation_option: str = 'BLOCK',
) -> None:
    """Function to add an Interval controller.

    Args:
        structure_id (str): structure ID.
        steering_variable (str): variable to be controlled, usually crest level.
        target_variable (str): target variable (usually water level)
        deadband (float): deadband around the setpoint
        setpoint (Union[float, str, pd.Series]): setpoint value (or a timeseries of setpoint values)
        setting_below (Union[float, str]): steering-variable value applied below the setpoint
        setting_above (Union[float, str]): steering-variable value applied above the setpoint
        max_speed (Union[float, str]): maximum rate of change of the steering variable
        observation_location (str): id of the observation point
        interpolation_option (str): interpolation option used
        extrapolation_option (str): extrapolation option used
    """
    self.interval_controllers[structure_id] = {
        "type": 'Interval',
        "steering_variable": steering_variable,
        "target_variable": target_variable,
        "setpoint": setpoint,
        "observation_point": observation_location,
        "setting_below": setting_below,
        "setting_above": setting_above,
        "max_speed": max_speed,
        "deadband": deadband,
        "interpolation_option": interpolation_option,
        "extrapolation_option": extrapolation_option,
    }
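
A minimal usage sketch (the model instance `drtc`, structure id and observation id are placeholders, not part of the API):

    drtc.add_interval_controller(
        structure_id="weir_01",              # hypothetical structure id
        steering_variable="Crest level (s)",
        target_variable="Water level (op)",
        deadband=0.05,
        setpoint=1.20,
        setting_below=1.10,                  # steering value below the setpoint
        setting_above=1.30,                  # steering value above the setpoint
        max_speed=0.00033,
        observation_location="obs_01",       # hypothetical observation point id
    )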
add_pid_controller(structure_id: str = None, steering_variable: str = None, target_variable: str = None, setpoint: float | str | pd.Series = None, lower_bound: float | str = None, upper_bound: float | str = None, observation_location: str = None, ki: float = 0.001, kp: float = 0.0, kd: float = 0.0, max_speed: float = 0.00033, interpolation_option: str = 'LINEAR', extrapolation_option: str = 'BLOCK') -> None

Function to add a PID controller.

Parameters:

structure_id (str): structure ID. Default: None
steering_variable (str): variable to be controlled, usually crest level. Default: None
target_variable (str): target variable (usually water level). Default: None
setpoint (float | str | pd.Series): setpoint value, or a timeseries of setpoint values. Default: None
lower_bound (float | str): lowest value to be allowed. Default: None
upper_bound (float | str): highest value to be allowed. Default: None
observation_location (str): id of the observation point. Default: None
ki (float): gain factor ki. Default: 0.001
kp (float): gain factor kp. Default: 0.0
kd (float): gain factor kd. Default: 0.0
max_speed (float): maximum rate of change of the steering variable. Default: 0.00033
interpolation_option (str): interpolation option used. Default: 'LINEAR'
extrapolation_option (str): extrapolation option used. Default: 'BLOCK'
Source code in hydrolib/dhydamo/core/drtc.py
@validate_arguments(config=ConfigDict(arbitrary_types_allowed=True))
def add_pid_controller(
    self,
    structure_id: str = None,
    steering_variable: str = None,
    target_variable: str = None,
    setpoint: float | str | pd.Series = None,
    lower_bound: float | str = None,
    upper_bound: float | str = None,
    observation_location: str = None,
    ki: float = 0.001,
    kp: float = 0.0,
    kd: float = 0.0,
    max_speed: float = 0.00033,
    interpolation_option: str = 'LINEAR',
    extrapolation_option: str = 'BLOCK',
) -> None:
    """Function a add PID controller.

    Args:
        structure_id (str): structure ID.
        steering_variable (str): variable to be controlled, usually crest level.
        target_variable (str): target variable (usually water level)
        setpoint (Union[float, str, pd.Series]): setpoint value or a timeseries of setpoint values
        lower_bound (Union[float, str]): lowest value to be allowed
        upper_bound (Union[float, str]): highest value to be allowed
        observation_location (str): id of the observation point
        ki (float): gain factor ki
        kp (float): gain factor kp
        kd (float): gain factor kd
        max_speed (float): maximum rate of change of the steering variable
        interpolation_option (str): interpolation option used
        extrapolation_option (str): extrapolation option used
    """
    self.pid_controllers[structure_id] = {
        "type": "PID",
        "steering_variable": steering_variable,
        "target_variable": target_variable,
        "setpoint": setpoint,
        "observation_point": observation_location,
        "lower_bound": lower_bound,
        "upper_bound": upper_bound,
        "ki": ki,
        "kp": kp,
        "kd": kd,
        'max_speed': max_speed,
        "interpolation_option": interpolation_option,
        "extrapolation_option": extrapolation_option,
    }
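
A minimal usage sketch with a time-varying setpoint; ids and values are placeholders:

    import pandas as pd

    # passing a pd.Series makes the setpoint time-dependent; a float keeps it fixed
    setpoint = pd.Series(
        [1.20, 1.10],
        index=pd.to_datetime(["2024-01-01 00:00", "2024-01-10 00:00"]),
    )
    drtc.add_pid_controller(
        structure_id="weir_01",
        steering_variable="Crest level (s)",
        target_variable="Water level (op)",
        setpoint=setpoint,
        lower_bound=0.8,
        upper_bound=1.5,
        observation_location="obs_01",
    )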
add_time_controller(structure_id: str = None, steering_variable: str = None, data: pd.Series = None, interpolation_option: str = 'LINEAR', extrapolation_option: str = 'BLOCK') -> None

Function to add a time controller to a certain structure.

Parameters:

structure_id (str): structure id. Default: None
steering_variable (str): variable that is controlled, usually crest level. Default: None
data (pd.Series): timeseries. Default: None
interpolation_option (str): interpolation option used. Default: 'LINEAR'
extrapolation_option (str): extrapolation option used. Default: 'BLOCK'
Source code in hydrolib/dhydamo/core/drtc.py
@validate_arguments(config=ConfigDict(arbitrary_types_allowed=True))
def add_time_controller(
    self,
    structure_id: str = None,
    steering_variable: str = None,
    data: pd.Series = None,
    interpolation_option: str = 'LINEAR',
    extrapolation_option: str = 'BLOCK',
) -> None:
    """Functon to add a time controller to a certain structure.

    Args:
        structure_id (str): structure id.
        steering_variable (str): variable that is controlled, usually crest level.
        data (pd.Series): timeseries.
        interpolation_option (str): interpolation option used.
        extrapolation_option (str): extrapolation option used.
    """
    self.time_controllers[structure_id] = {
        "type": "Time",
        "data": data,
        "steering_variable": steering_variable,
        "interpolation_option": interpolation_option,
        "extrapolation_option": extrapolation_option,
    }
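
A minimal usage sketch; ids and values are placeholders. Note that write_timeseries_import derives the time step from the first two index entries, so the series should be evenly spaced:

    import pandas as pd

    data = pd.Series(
        [1.20, 1.15, 1.10],
        index=pd.date_range("2024-01-01", periods=3, freq="h"),
    )
    drtc.add_time_controller(
        structure_id="weir_01",
        steering_variable="Crest level (s)",
        data=data,
    )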
allow_struct(cc_id: str, allow_observations: bool = False, allow_if_filter_inactive: bool = True, allow_if_not_referenced: bool = False) -> bool

Return whether a structure id is allowed by the complex-controller filter.

Parameters:

cc_id (str): structure or observation id to evaluate. Required.
allow_observations (bool): if True, pass through observation ids that exist in the HyDAMO model. Default: False
allow_if_filter_inactive (bool): return value when no complex controller filter is active (cc_ids or cc_id_limit is None). Default: True
allow_if_not_referenced (bool): return value when filtering is active but cc_id is not part of cc_ids. Default: False
Source code in hydrolib/dhydamo/core/drtc.py
@validate_arguments
def allow_struct(
    self,
    cc_id: str,
    allow_observations: bool = False,
    allow_if_filter_inactive: bool = True,
    allow_if_not_referenced: bool = False,
) -> bool:
    """Return whether a structure id is allowed by the complex-controller filter.

    Args:
        cc_id (str): Structure or observation id to evaluate.
        allow_observations (bool, optional): If True, pass through observation
            ids that exist in the HyDAMO model. Defaults to False.
        allow_if_filter_inactive (bool, optional): Return value when no complex
            controller filter is active (`cc_ids` or `cc_id_limit` is None).
            Defaults to True.
        allow_if_not_referenced (bool, optional): Return value when filtering is
            active, but `cc_id` is not part of `cc_ids`. Defaults to False.
    """
    if allow_observations and cc_id in self.struct_ids_by_type["observations"]:
        return True

    if self.cc_ids is None or self.cc_id_limit is None:
        return allow_if_filter_inactive

    if cc_id not in self.cc_ids:
        return allow_if_not_referenced

    return cc_id in self.cc_id_limit
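
A sketch of the three outcomes, setting the filter state directly on a model instance `drtc` for illustration:

    # no filter active: fall back to allow_if_filter_inactive (True by default)
    drtc.cc_ids, drtc.cc_id_limit = None, None
    assert drtc.allow_struct("weir_01") is True

    # filter active, id referenced in the XML and whitelisted: allowed
    drtc.cc_ids, drtc.cc_id_limit = {"weir_01"}, {"weir_01"}
    assert drtc.allow_struct("weir_01") is True

    # filter active, id not referenced by any complex controller
    assert drtc.allow_struct("weir_02") is False

The write_* methods call this with allow_if_filter_inactive=False and allow_if_not_referenced=False, so a simple controller is only skipped when a whitelisted complex controller exists for the same id.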
finish_file(xmlroot, configfile, filename: Path | str) -> None staticmethod

Method to finish an XML file in the required namespace and format.

Parameters:

xmlroot: XML tree. Required.
configfile: XML file object. Required.
filename (Path | str): filepath of the file to be written. Required.
Source code in hydrolib/dhydamo/core/drtc.py
@staticmethod
@validate_arguments
def finish_file(xmlroot, configfile, filename: Path | str) -> None:
    """Method to finish a XML file in the required namespace and format.

    Args:
        xmlroot: Xml Tree
        configfile : Xml file object
        filename (Union[Path, str]): filepath of the file to be written
    """
    configfile.write(filename)
    xmlstring = (
        bytes(
            '<?xml version="1.0" encoding="utf-8" standalone="yes" ?>',
            encoding="utf-8",
        )
        + ET.tostring(xmlroot)
    )
    xmlstring = xmlstring.decode("utf-8").replace("\n", "").replace("  ", "")
    with open(filename, "w+") as f:
        f.write(xmlstring)
    with open(filename) as f:
        temp = xml.dom.minidom.parseString(f.read())
    with open(filename, "w+") as f:
        f.write(temp.toprettyxml())
from_hydamo(pid_settings: dict | None = None, interval_settings: dict | None = None, timeseries: pd.DataFrame | None = None) -> None

Function to convert HyDAMO management data to controller dictionaries. Time, PID and interval controllers are implemented. PID settings can be specified globally or per structure.

Parameters:

pid_settings (dict): RTC settings for PID controllers that are not in the HyDAMO format. Default: None
interval_settings (dict): RTC settings for interval controllers that are not in the HyDAMO format. Default: None
timeseries (pd.DataFrame): timeseries that are input to time controllers. Default: None

Raises:

ValueError: raised for inconsistent input data.

Source code in hydrolib/dhydamo/core/drtc.py
@validate_arguments(config=ConfigDict(arbitrary_types_allowed=True))
def from_hydamo(
    self,
    pid_settings: dict | None = None,
    interval_settings: dict | None = None,
    timeseries: pd.DataFrame | None = None,
) -> None:
    """Function to convert HyDAMO management data to controller-dictionaries. So far only time- and PID-controllers are implemented. PID settings can be specified globally or per structdure.

    Args:
        pid_settings (dict): RTC settings for PID controllers that are not in the HyDAMO format.
        interval_settings (dict): RTC settings for interval controllers that are not in the HyDAMO format.
        timeseries (pd.DataFrame): timeseries that are input to time controllers.

    Raises:
        ValueError: errors are raised for inconsistent input data.

    """
    for _, management in self.hydamo.management.iterrows():
        # first get the structure ID through the coupled items; it can be one of three structure types
        if not pd.isna(management.regelmiddelid):
            weir_code = management.stuwid

            if weir_code in list(self.hydamo.structures.rweirs_df.id):
                weir = self.hydamo.structures.rweirs_df[
                    self.hydamo.structures.rweirs_df.id == weir_code
                ]
            elif not self.hydamo.structures.uweirs_df.empty and weir_code in list(self.hydamo.structures.uweirs_df.id):
                weir = self.hydamo.structures.uweirs_df[
                    self.hydamo.structures.uweirs_df.id == weir_code
                ]
            elif not self.hydamo.structures.orifices_df.empty and weir_code in list(self.hydamo.structures.orifices_df.id):
                weir = self.hydamo.structures.orifices_df[
                    self.hydamo.structures.orifices_df.id == weir_code
                ]
            else:
                logger.warning(
                    f"Management for management_device {management.regelmiddelid} could not be connnected to a structure. Skipping it."
                )
                continue
            struc_id = weir.id.to_numpy()[0]
        elif not pd.isna(management.pompid):
            if not self.hydamo.pumps.empty and management.pompid in list(self.hydamo.pumps.globalid):
                struc_id = self.hydamo.pumps[
                    self.hydamo.pumps.globalid == management.pompid
                ].code.to_numpy()[0]
            else:
                # without a matching pump, struc_id would remain unbound below
                logger.warning(
                    f"Management for pump {management.pompid} could not be connected to a structure. Skipping it."
                )
                continue
        else:
            raise ValueError(
                "Only management_devices and pumps can be connected to a management object."
            )
        if management.stuurvariabele == "bovenkant afsluitmiddel":
            steering_variable = "Crest level (s)"
        elif management.stuurvariabele == "hoogte opening":
            steering_variable = "Gate lower edge level (s)"
        elif management.stuurvariabele == "pompdebiet":
            steering_variable = "Capacity (p)"
        else:
            raise ValueError(
                f"Invalid value for steering variable of {struc_id}: {management.stuurvariabele}."
            )

        if management.doelvariabele == "waterstand":
            target_variable = "Water level (op)"
        elif management.doelvariabele == "debiet":
            target_variable = "Discharge (op)"
        else:
            raise ValueError(
                f"Invalid value for target variable of {struc_id}: {management.doelvariabele}."
            )

        if management.typecontroller == "PID":
            #  if the ID is not specified separately, use the global settings
            if pid_settings is None:
                raise ValueError(f"{management.code} contains a PID controller, but no pid_settings were provided.")
            if struc_id not in pid_settings:
                ki = pid_settings["global"]["ki"]
                kp = pid_settings["global"]["kp"]
                kd = pid_settings["global"]["kd"]
                max_speed = pid_settings["global"]["maxspeed"]
            else:
                ki = pid_settings[struc_id]['ki']
                kp = pid_settings[struc_id]['kp']
                kd = pid_settings[struc_id]['kd']
                max_speed = pid_settings[struc_id]['maxspeed']

            self.add_pid_controller(
                structure_id=struc_id,
                steering_variable=steering_variable,
                target_variable=target_variable,
                ki=ki,
                kp=kp,
                kd=kd,
                max_speed=max_speed,
                setpoint=management.streefwaarde,
                lower_bound=management.ondergrens,
                upper_bound=management.bovengrens,
                observation_location=management.meetlocatieid,
            )

        elif management.typecontroller == "interval":
            if interval_settings is None:
                raise ValueError(f"{management.code} contains an interval controller, but no interval_settings were provided.")

            if struc_id not in interval_settings:
                deadband = interval_settings["global"]["deadband"]
                max_speed = interval_settings["global"]["maxspeed"]
            else:
                deadband = interval_settings[struc_id]['deadband']
                max_speed = interval_settings[struc_id]['maxspeed']

            self.add_interval_controller(
                structure_id=struc_id,
                steering_variable=steering_variable,
                target_variable=target_variable,
                deadband=deadband,
                setting_above=management.bovengrens,
                setting_below=management.ondergrens,
                max_speed=max_speed,
                setpoint=management.streefwaarde,
                observation_location=management.meetlocatieid,
            )

        elif management.typecontroller == "time":
            if timeseries is None:
                raise ValueError(f"{management.code} contains a time controller, but no time series were provided.")
            else:
                data = timeseries.loc[:, struc_id]
                self.add_time_controller(
                    structure_id=struc_id,
                    steering_variable=steering_variable,
                    data=data,
                )
        else:
            logger.warning(
                f"{management.typecontroller} is not a valid controller type - skipped."
            )
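
The settings dictionaries read by this loop use a "global" entry as the fallback, with optional per-structure overrides keyed by structure id. A sketch (the structure id is a placeholder):

    pid_settings = {
        "global": {"ki": 0.001, "kp": 0.0, "kd": 0.0, "maxspeed": 0.00033},
        "weir_01": {"ki": 0.002, "kp": 0.0, "kd": 0.0, "maxspeed": 0.0005},
    }
    interval_settings = {"global": {"deadband": 0.05, "maxspeed": 0.00033}}
    drtc.from_hydamo(pid_settings=pid_settings, interval_settings=interval_settings)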
parse_complex_controller(xml_folder: Path | str) -> dict[str, list[str | ET.Element]]

Method to parse user-specified 'complex' controllers

Parameters:

xml_folder (Path | str): folder where the user placed the custom XML files. Required.

Returns:

dict[str, list[str | ET.Element]]: dict of lists with the data in the files. Every key corresponds to an RTC file, including the DIMR config.

Source code in hydrolib/dhydamo/core/drtc.py
@validate_arguments
def parse_complex_controller(
    self, xml_folder: Path | str
) -> dict[str, list[str | ET.Element]]:
    """Method to parse user-specified 'complex' controllers

    Args:
        xml_folder (Union[Path, str]): Folder where the user located the custom XML files

    Returns:
        dict: dict of lists with the data in the files. Every key corresponds to an RTC file, including the DIMR config.
    """
    files = [p for p in Path(xml_folder).iterdir() if p.suffix == ".xml"]
    savedict = {
        "dataconfig_import": [],
        "dataconfig_export": [],
        "toolsconfig_rules": [],
        "toolsconfig_triggers": [],
        "timeseries": [],
        "state": [],
        "dimr_config": [],
    }

    handlers = {
        RTC_DATA_CONFIG_XML: self._parse_cc_rtc_dataconfig,
        RTC_TOOLS_CONFIG_XML: self._parse_cc_rtc_toolsconfig,
        TIMESERIES_IMPORT_XML: self._parse_cc_timeseries,
        STATE_IMPORT_XML: self._parse_cc_state,
        "dimr_config.xml": self._parse_cc_dimr_config,
    }

    for filepath in files:
        handler = handlers.get(filepath.name)
        if handler is None:
            continue
        root = ET.parse(filepath).getroot()
        savedict = handler(root, savedict)

    return savedict
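
A sketch of calling the parser directly; the folder name is a placeholder:

    savedict = drtc.parse_complex_controller("my_complex_rtc_xml")
    for section, elements in savedict.items():
        print(section, len(elements))
    # sections: dataconfig_import, dataconfig_export, toolsconfig_rules,
    # toolsconfig_triggers, timeseries, state, dimr_config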
write_dataconfig() -> None

Function to write RtcDataConfig.xml from the created dictionaries. The file is built from an empty template in the template directory using the ElementTree package.

Source code in hydrolib/dhydamo/core/drtc.py
def write_dataconfig(self) -> None:
    """Function to write RtcDataConfig.xml from the created dictionaries. They are built from empty files in the template directory using the Etree-package."""
    generalname = "http://www.wldelft.nl/fews"
    xsi_name = "http://www.w3.org/2001/XMLSchema-instance"
    gn_brackets = "{" + generalname + "}"
    m3unit = "m^3/s"
    munit = "m"

    # registering namespaces
    ET.register_namespace("", generalname)
    ET.register_namespace("xsi", xsi_name)

    # Parsing xml file
    configfile = ET.parse(self.template_dir / "rtcDataConfig_empty.xml")
    myroot = configfile.getroot()

    # timeseries_import.xml must already have been written (see write_timeseries_import)
    timeseries_length = len(
        ET.parse(self.output_path / "timeseries_import.xml").getroot()
    )

    # implementing standard settings import and exportdata
    a0 = ET.SubElement(myroot[1], gn_brackets + "CSVTimeSeriesFile")
    a0.set("decimalSeparator", ".")
    a0.set("delimiter", ",")
    a0.set("adjointOutput", "false")

    a1 = ET.SubElement(myroot[1], gn_brackets + "PITimeSeriesFile")

    a2 = ET.SubElement(a1, gn_brackets + "timeSeriesFile")
    a2.text = "timeseries_export.xml"

    a3 = ET.SubElement(a1, gn_brackets + "useBinFile")
    a3.text = "false"

    # standard settings for the import data
    if timeseries_length > 0:
        # only if timeseries are written to the import
        a4 = ET.SubElement(myroot[0], gn_brackets + "PITimeSeriesFile")
        a5 = ET.SubElement(a4, gn_brackets + "timeSeriesFile")
        a5.text = TIMESERIES_IMPORT_XML
        a6 = ET.SubElement(a4, gn_brackets + "useBinFile")
        a6.text = "false"

    # structure-dependent data
    for key in self.all_controllers:

        controller = self.all_controllers[key]
        # For simple controllers: skip only when this id has an allowed complex controller.
        # If filtering is inactive or the id is not a referenced complex id, do not skip.
        if self.allow_struct(
            cc_id=key,
            allow_observations=False,
            allow_if_filter_inactive=False,
            allow_if_not_referenced=False,
        ):
            logger.warning(
                f"{RTC_DATA_CONFIG_XML}: Skipped writing {controller['type']} control for {key}, complex controller already present"
            )
            continue

        # data to be imported
        if controller['type'] == 'PID':

            input_id = INPUT_PREFIX + controller["observation_point"] + "/" + controller["target_variable"]

            if myroot[0].find(f".//*[@id='{input_id}']") is None:
                a = ET.SubElement(myroot[0], gn_brackets + "timeSeries")
                a.set("id", input_id)

                b = ET.SubElement(a, gn_brackets + "OpenMIExchangeItem")

                c = ET.SubElement(b, gn_brackets + "elementId")
                c.text = controller["observation_point"]

                d = ET.SubElement(b, gn_brackets + "quantityId")
                d.text = controller["target_variable"]

                e = ET.SubElement(b, gn_brackets + "unit")
                e.text = munit if controller['target_variable'] == 'Water level (op)' else m3unit

            else:
                logger.warning(f"{RTC_DATA_CONFIG_XML}: Skipped writing {input_id}, observation point already present")

            # If a time dependent setpoint is required, add the Time Rule
            if isinstance(controller['setpoint'], pd.Series):
                a2 = ET.SubElement(myroot[0], gn_brackets + "timeSeries")

                a2.set("id", f"[SP]Control group {key}/PID Rule")
                b2 = ET.SubElement(a2, gn_brackets + "PITimeSeries")

                c2 = ET.SubElement(b2, gn_brackets + "locationId")
                c2.text = f"[PID]Control group {key}/PID Rule"

                d2 = ET.SubElement(b2, gn_brackets + "parameterId")
                d2.text = "SP"

                e2 = ET.SubElement(b2, gn_brackets + "interpolationOption")
                e2.text = controller['interpolation_option']

                e2 = ET.SubElement(b2, gn_brackets + "extrapolationOption")  
                e2.text = controller['extrapolation_option'] # Changed from Block: HL

        elif controller['type'] == 'Interval':

            input_id = INPUT_PREFIX + controller["observation_point"] + "/" + controller["target_variable"]
            # search the import section (myroot[0]), as in the PID branch
            if myroot[0].find(f".//*[@id='{input_id}']") is None:
                a = ET.SubElement(myroot[0], gn_brackets + "timeSeries")

                a.set("id", input_id)

                b = ET.SubElement(a, gn_brackets + "OpenMIExchangeItem")

                c = ET.SubElement(b, gn_brackets + "elementId")
                c.text = controller["observation_point"]

                d = ET.SubElement(b, gn_brackets + "quantityId")
                d.text = controller["target_variable"]

                e = ET.SubElement(b, gn_brackets + "unit")
                e.text = munit if controller['target_variable'] == 'Water level (op)' else m3unit
            else:
                logger.warning(f"{RTC_DATA_CONFIG_XML}: Skipped writing {input_id}, observation point already present")

            if isinstance(controller['setpoint'], pd.Series):
                a2 = ET.SubElement(myroot[0], gn_brackets + "timeSeries")

                a2.set("id", f"[SP]Control group {key}/Interval Rule")
                b3 = ET.SubElement(a2, gn_brackets + "PITimeSeries")

                c3 = ET.SubElement(b3, gn_brackets + "locationId")
                c3.text = f"[IntervalRule]Control group {key}/Interval Rule"

                d3 = ET.SubElement(b3, gn_brackets + "parameterId")
                d3.text = "SP"

                e3 = ET.SubElement(b3, gn_brackets + "interpolationOption")
                e3.text = controller['interpolation_option']

                f3 = ET.SubElement(b3, gn_brackets + "extrapolationOption")
                f3.text = controller['extrapolation_option'] # Changed from Block: HL

        else:
            a = ET.SubElement(myroot[0], gn_brackets + "timeSeries")
            a.set("id", "Control group " + str(key) + "/Time Rule")
            b = ET.SubElement(a, gn_brackets + "PITimeSeries")

            c = ET.SubElement(b, gn_brackets + "locationId")
            c.text = f"[TimeRule]Control group {key}/Time Rule"

            d = ET.SubElement(b, gn_brackets + "parameterId")
            d.text = "TimeSeries"

            e = ET.SubElement(b, gn_brackets + "interpolationOption")
            e.text = controller['interpolation_option']

            e = ET.SubElement(b, gn_brackets + "extrapolationOption")
            e.text = controller['extrapolation_option'] # Changed from Block: HL

        # data to be exported:
        f = ET.SubElement(myroot[1], gn_brackets + "timeSeries")
        f.set("id", OUTPUT_PREFIX + str(key) + "/" + controller["steering_variable"])

        g = ET.SubElement(f, gn_brackets + "OpenMIExchangeItem")

        h = ET.SubElement(g, gn_brackets + "elementId")
        h.text = str(key)

        j = ET.SubElement(g, gn_brackets + "quantityId")
        j.text = controller["steering_variable"]

        k = ET.SubElement(g, gn_brackets + "unit")
        k.text = m3unit if controller["steering_variable"] == 'Capacity (p)' else munit

    for key in self.all_controllers:
        controller = self.all_controllers[key]

        if controller['type'] == 'PID':
            i = ET.SubElement(myroot[1], gn_brackets + "timeSeries")
            i.set("id", "[IP]Control group " + str(key) + "/PID Rule")

            j = ET.SubElement(myroot[1], gn_brackets + "timeSeries")
            j.set("id", "[DP]Control group " + str(key) + "/PID Rule")

        elif controller['type'] == 'Interval':  # the interval rule exports a status series instead of IP/DP parts
            j = ET.SubElement(myroot[1], gn_brackets + "timeSeries")
            j.set("id", "[Status]Control group " + str(key) + "/Interval Rule")

    # the parsed complex controllers should be inserted at the right place
    if self.complex_controllers is not None:
        self._append_unique_elements(
            parent=myroot[0],
            elements=self.complex_controllers["dataconfig_import"],
            key_getter=self._dataconfig_timeseries_key,
            file_label=RTC_DATA_CONFIG_XML,
        )
        self._append_unique_elements(
            parent=myroot[1],
            elements=self.complex_controllers["dataconfig_export"],
            key_getter=self._dataconfig_timeseries_key,
            file_label=RTC_DATA_CONFIG_XML,
        )

    self.finish_file(myroot, configfile, self.output_path / RTC_DATA_CONFIG_XML)
write_runtimeconfig() -> None

Function to write RtcRunTimeConfig.xml from the created dictionaries. The file is built from an empty template in the template directory using the ElementTree package.

Source code in hydrolib/dhydamo/core/drtc.py
def write_runtimeconfig(self) -> None:
    """Function to write RtcRunTimeConfig.xml from the created dictionaries. They are built from empty files in the template directory using the Etree-package."""

    # namespaces for all other xml files
    generalname = "http://www.wldelft.nl/fews"
    xsi_name = "http://www.w3.org/2001/XMLSchema-instance"
    gn_brackets = "{" + generalname + "}"

    # registering namespaces
    ET.register_namespace("", generalname)
    ET.register_namespace("xsi", xsi_name)

    # parsing xml file to python and get the root of the existing xml file
    configfile = ET.parse(
        os.path.join(self.template_dir, "rtcRuntimeConfig_empty.xml")
    )
    myroot = configfile.getroot()

    # convert date and runtime to required input for runtimeconfig file

    # replace start/stop dates and times in xml file
    for x in myroot.iter(gn_brackets + "startDate"):
        x.set("date", dt.strftime(self.time_settings["start"], format="%Y-%m-%d"))
        x.set("time", dt.strftime(self.time_settings["start"], format="%H:%M:%S"))
    for x in myroot.iter(gn_brackets + "endDate"):
        x.set("date", dt.strftime(self.time_settings["end"], format="%Y-%m-%d"))
        x.set("time", dt.strftime(self.time_settings["end"], format="%H:%M:%S"))
    for x in myroot.iter(gn_brackets + "timeStep"):
        x.set("unit", "second")
        x.set("divider", "1")
        x.set("multiplier", str(int(self.time_settings["step"])))

    # write new xml file
    self.finish_file(myroot, configfile, self.output_path / "rtcRuntimeConfig.xml")
write_state_import() -> None

Function to write state_import.xml from the created dictionaries. The file is built from an empty template in the template directory using the ElementTree package.

Source code in hydrolib/dhydamo/core/drtc.py
def write_state_import(self) -> None:
    """Function to write state_import.xml from the created dictionaries. They are built from empty files in the template directory using the Etree-package."""
    generalname = "http://www.openda.org"
    xsi_name = "http://www.w3.org/2001/XMLSchema-instance"
    gn_brackets = "{" + generalname + "}"

    # registering namespaces
    ET.register_namespace("", generalname)
    ET.register_namespace("xsi", xsi_name)

    # Parsing xml file
    configfile = ET.parse(self.template_dir / "state_import_empty.xml")
    myroot = configfile.getroot()

    a0 = ET.SubElement(myroot, gn_brackets + "treeVector")

    for key in self.all_controllers.keys():

        controller = self.all_controllers[key]
        # For simple controllers: skip only when this id has an allowed complex controller.
        # If filtering is inactive or the id is not a referenced complex id, do not skip.
        if self.allow_struct(
            cc_id=key,
            allow_observations=False,
            allow_if_filter_inactive=False,
            allow_if_not_referenced=False,
        ):
            logger.warning(
                f"{STATE_IMPORT_XML}: Skipped writing {controller['type']} control for {key}, complex controller already present"
            )
            continue

        # data to be imported
        a = ET.SubElement(a0, gn_brackets + "treeVectorLeaf")
        a.attrib = {"id": OUTPUT_PREFIX + key + "/" + controller["steering_variable"]}
        b = ET.SubElement(a, gn_brackets + "vector")
        if controller['type'] == 'PID':
            b.text = str(controller["upper_bound"])
        elif controller['type'] == 'Interval':
            b.text = str(max(controller['setting_above'], controller['setting_below'])) # Take the maximum value as a starting value
        else:
            b.text = str(controller["data"].to_numpy()[0])

    # the parsed complex controllers should be inserted at the right place
    if self.complex_controllers is not None:
        self._append_unique_elements(
            parent=myroot[0],
            elements=self.complex_controllers["state"],
            key_getter=self._state_leaf_key,
            file_label=STATE_IMPORT_XML,
        )

    self.finish_file(myroot, configfile, self.output_path / STATE_IMPORT_XML)
write_timeseries_import() -> None

Function to write timeseries_import.xml from the created dictionaries. The file is built from an empty template in the template directory using the ElementTree package.

Source code in hydrolib/dhydamo/core/drtc.py
def write_timeseries_import(self) -> None:
    """Function to write timeseries_import.xml from the created dictionaries. They are built from empty files in the template directory using the Etree-package."""
    generalname = "http://www.wldelft.nl/fews/PI"
    xsi_name = "http://www.w3.org/2001/XMLSchema-instance"
    gn_brackets = "{" + generalname + "}"

    # registering namespaces
    ET.register_namespace("", generalname)
    ET.register_namespace("xsi", xsi_name)

    # Parsing xml file
    configfile = ET.parse(self.template_dir / "timeseries_import_empty.xml")
    myroot = configfile.getroot()

    for key in self.all_controllers.keys():

        controller = self.all_controllers[key]
        # For simple controllers: skip only when this id has an allowed complex controller.
        # If filtering is inactive or the id is not a referenced complex id, do not skip.
        if self.allow_struct(
            cc_id=key,
            allow_observations=False,
            allow_if_filter_inactive=False,
            allow_if_not_referenced=False,
        ):
            logger.warning(
                f"{TIMESERIES_IMPORT_XML}: Skipped writing {controller['type']} control for {key}, complex controller already present"
            )
            continue

        if controller['type'] == 'Time':
            # data to be imported
            dates = pd.to_datetime(controller["data"].index).strftime("%Y-%m-%d")
            times = pd.to_datetime(controller["data"].index).strftime("%H:%M:%S")
            timestep = (
                pd.to_datetime(controller["data"].index)[1]
                - pd.to_datetime(controller["data"].index)[0]
            ).total_seconds()
            a = ET.SubElement(myroot, gn_brackets + "series")
            b = ET.SubElement(a, gn_brackets + "header")
            c = ET.SubElement(b, gn_brackets + "type")
            c.text = "instantaneous"
            d = ET.SubElement(b, gn_brackets + "locationId")
            d.text = f"[TimeRule]Control group {key}/Time Rule"
            e = ET.SubElement(b, gn_brackets + "parameterId")
            e.text = "TimeSeries"
            f = ET.SubElement(b, gn_brackets + "timeStep")
            f.attrib = {
                "unit": "minute",
                "multiplier": str(int(timestep / 60.0)),
                "divider": str(1),
            }
            g = ET.SubElement(b, gn_brackets + "startDate")
            g.attrib = {"date": dates[0], "time": times[0]}
            h = ET.SubElement(b, gn_brackets + "endDate")
            h.attrib = {"date": dates[-1], "time": times[-1]}
            i = ET.SubElement(b, gn_brackets + "missVal")
            i.text = "-999.0"
            j = ET.SubElement(b, gn_brackets + "stationName")
            j.text = ""
            for i in range(len(controller["data"])):
                k = ET.SubElement(a, gn_brackets + "event")
                k.attrib = {
                    "date": dates[i],
                    "time": times[i],
                    "value": str(controller["data"].to_numpy()[i]),
                }
        elif controller['type'] == "Interval":
            if isinstance(controller['setpoint'], float):
                # expand a fixed setpoint into a two-point series spanning the run
                controller['setpoint'] = pd.Series(
                    [controller['setpoint'], controller['setpoint']],
                    index=[self.time_settings['start'], self.time_settings['end']],
                )

            # data to be imported
            dates = pd.to_datetime(controller["setpoint"].index).strftime("%Y-%m-%d")
            times = pd.to_datetime(controller["setpoint"].index).strftime("%H:%M:%S")
            timestep = (
                pd.to_datetime(f"{dates[1]} {times[1]}")
                - pd.to_datetime(f"{dates[0]} {times[0]}")
            ).total_seconds()

            a = ET.SubElement(myroot, gn_brackets + "series")
            b = ET.SubElement(a, gn_brackets + "header")
            c = ET.SubElement(b, gn_brackets + "type")
            c.text = "instantaneous"

            d = ET.SubElement(b, gn_brackets + "locationId")
            d.text = f"[IntervalRule]Control group {key}/Interval Rule"

            e = ET.SubElement(b, gn_brackets + "parameterId")
            e.text = "SP"
            f = ET.SubElement(b, gn_brackets + "timeStep")
            f.attrib = {
                "unit": "minute",
                "multiplier": str(int(timestep / 60.0)),
                "divider": str(1),
            }
            g = ET.SubElement(b, gn_brackets + "startDate")
            g.attrib = {"date": dates[0], "time": times[0]}
            h = ET.SubElement(b, gn_brackets + "endDate")
            h.attrib = {"date": dates[-1], "time": times[-1]}
            i = ET.SubElement(b, gn_brackets + "missVal")
            i.text = "-999.0"
            for i in range(len(controller["setpoint"])):
                k = ET.SubElement(a, gn_brackets + "event")
                k.attrib = {
                    "date": dates[i],
                    "time": times[i],
                    "value": str(controller["setpoint"].to_numpy()[i]),
                }

        # Create a timeseries import if a time-dependent setpoint is used
        elif controller['type'] == 'PID' and isinstance(controller['setpoint'], pd.Series):
            # data to be imported
            dates = pd.to_datetime(controller["setpoint"].index).strftime("%Y-%m-%d")
            times = pd.to_datetime(controller["setpoint"].index).strftime("%H:%M:%S")
            timestep = (
                pd.to_datetime(controller["setpoint"].index)[1]
                - pd.to_datetime(controller["setpoint"].index)[0]
            ).total_seconds()
            a = ET.SubElement(myroot, gn_brackets + "series")
            b = ET.SubElement(a, gn_brackets + "header")
            c = ET.SubElement(b, gn_brackets + "type")
            c.text = "instantaneous"

            # this branch is only reached for PID controllers, so the Interval
            # alternative of the original if/elif here was dead code
            d = ET.SubElement(b, gn_brackets + "locationId")
            d.text = f"[PID]Control group {key}/PID Rule"

            e = ET.SubElement(b, gn_brackets + "parameterId")
            e.text = "SP"
            f = ET.SubElement(b, gn_brackets + "timeStep")
            f.attrib = {
                "unit": "minute",
                "multiplier": str(int(timestep / 60.0)),
                "divider": str(1),
            }
            g = ET.SubElement(b, gn_brackets + "startDate")
            g.attrib = {"date": dates[0], "time": times[0]}
            h = ET.SubElement(b, gn_brackets + "endDate")
            h.attrib = {"date": dates[-1], "time": times[-1]}
            i = ET.SubElement(b, gn_brackets + "missVal")
            i.text = "-999.0"
            j = ET.SubElement(b, gn_brackets + "stationName")
            j.text = ""
            for i in range(len(controller["setpoint"])):
                k = ET.SubElement(a, gn_brackets + "event")
                k.attrib = {
                    "date": dates[i],
                    "time": times[i],
                    "value": str(controller["setpoint"].to_numpy()[i]),
                }

    if self.complex_controllers is not None:
        self._append_unique_elements(
            parent=myroot,
            elements=self.complex_controllers["timeseries"],
            key_getter=self._timeseries_series_key,
            file_label=TIMESERIES_IMPORT_XML,
        )

    self.finish_file(myroot, configfile, self.output_path / TIMESERIES_IMPORT_XML)
write_toolsconfig() -> None

Function to write RtcToolsConfig.xml from the created dictionaries. The file is built from an empty template in the template directory using the ElementTree package.

Source code in hydrolib/dhydamo/core/drtc.py
def write_toolsconfig(self) -> None:
    """Function to write RtcToolsConfig.xml from the created dictionaries. They are built from empty files in the template directory using the Etree-package."""
    generalname = "http://www.wldelft.nl/fews"
    xsi_name = "http://www.w3.org/2001/XMLSchema-instance"
    gn_brackets = "{" + generalname + "}"

    # registering namespaces
    ET.register_namespace("", generalname)
    ET.register_namespace("xsi", xsi_name)

    # parsing xml file
    configfile = ET.parse(self.template_dir / "rtcToolsConfig_empty.xml")
    myroot = configfile.getroot()

    self.all_controllers = self.time_controllers.copy()
    self.all_controllers.update(self.pid_controllers)
    self.all_controllers.update(self.interval_controllers)

    to_remove = []
    for key in self.all_controllers.keys():

        controller = self.all_controllers[key]
        # For simple controllers: skip only when this id has an allowed complex controller.
        # If filtering is inactive or the id is not a referenced complex id, do not skip.
        if self.allow_struct(
            cc_id=key,
            allow_observations=False,
            allow_if_filter_inactive=False,
            allow_if_not_referenced=False,
        ):
            logger.warning(
                f"RtcToolsConfig.xml: Skipped writing {controller['type']} control for {key}, complex controller already present"
            )
            to_remove.append(key)
            continue

        a = ET.SubElement(myroot[1], gn_brackets + "rule")
        if controller['type'] == "PID":

            # rule type (PID)
            b = ET.SubElement(a, gn_brackets + "pid")
            b.set("id", "[PID]" + "Control group " + str(key) + "/PID Rule")

            # standard settings
            c = ET.SubElement(b, gn_brackets + "mode")
            c.text = "PIDVEL"

            d = ET.SubElement(b, gn_brackets + "settingMin")
            d.text = str(controller["lower_bound"])

            e = ET.SubElement(b, gn_brackets + "settingMax")
            e.text = str(controller["upper_bound"])

            f = ET.SubElement(b, gn_brackets + "settingMaxSpeed")
            f.text = str(controller["max_speed"])

            g = ET.SubElement(b, gn_brackets + "kp")
            g.text = str(controller["kp"])

            h = ET.SubElement(b, gn_brackets + "ki")
            h.text = str(controller["ki"])

            i = ET.SubElement(b, gn_brackets + "kd")
            i.text = str(controller["kd"])

            # input
            j = ET.SubElement(b, gn_brackets + "input")

            k = ET.SubElement(j, gn_brackets + "x")
            k.text = (
                INPUT_PREFIX
                + controller["observation_point"]
                + "/"
                + controller["target_variable"]
            )

            # If setpoint varies in time
            if isinstance(controller["setpoint"], pd.Series):
                ll = ET.SubElement(j, gn_brackets + "setpointSeries")
                ll.text = "[SP]" + "Control group " + str(key) + "/PID Rule"
            # Else fixed setpoint
            else:
                ll = ET.SubElement(j, gn_brackets + "setpointValue")
                ll.text = str(controller["setpoint"])

            # output
            m = ET.SubElement(b, gn_brackets + "output")

            o = ET.SubElement(m, gn_brackets + "y")
            o.text = OUTPUT_PREFIX + str(key) + "/" + controller["steering_variable"]

            p = ET.SubElement(m, gn_brackets + "integralPart")
            p.text = "[IP]" + "Control group " + str(key) + "/PID Rule"

            q = ET.SubElement(m, gn_brackets + "differentialPart")
            q.text = "[DP]" + "Control group " + str(key) + "/PID Rule"

        elif controller['type'] == 'Interval':
            # Interval RTC
            # rule type (Interval)
            b = ET.SubElement(a, gn_brackets + "interval")
            b.set("id", "[IntervalRule]" + "Control group " + str(key) + "/Interval Rule")

            # standard settings
            d = ET.SubElement(b, gn_brackets + "settingBelow")
            d.text = str(controller["setting_below"])

            e = ET.SubElement(b, gn_brackets + "settingAbove")
            e.text = str(controller["setting_above"])

            f = ET.SubElement(b, gn_brackets + "settingMaxSpeed")
            f.text = str(controller["max_speed"])

            g = ET.SubElement(b, gn_brackets + "deadbandSetpointAbsolute")
            g.text = str(controller["deadband"])

            # input
            j = ET.SubElement(b, gn_brackets + "input")

            k = ET.SubElement(j, gn_brackets + "x") # leave ref = "EXPLICIT" out for now
            k.text = (
                INPUT_PREFIX
                + controller["observation_point"]
                + "/"
                + controller["target_variable"]
            )
            # If setpoint varies in time
            ll = ET.SubElement(j, gn_brackets + "setpoint")
            ll.text = "[SP]" + "Control group " + str(key) + "/Interval Rule"

            # output
            m = ET.SubElement(b, gn_brackets + "output")

            o = ET.SubElement(m, gn_brackets + "y")
            o.text = OUTPUT_PREFIX + str(key) + "/" + controller["steering_variable"]

            p = ET.SubElement(m, gn_brackets + "status")
            p.text = "[Status]" + "Control group " + str(key) + "/Interval Rule"
        # Add time rule
        else:
            # rule type (timeabsolute)
            b = ET.SubElement(a, gn_brackets + "timeAbsolute")
            b.set("id", "[TimeRule]" + "Control group " + str(key) + "/Time Rule")

            # input
            c = ET.SubElement(b, gn_brackets + "input")

            d = ET.SubElement(c, gn_brackets + "x")
            d.text = "Control group " + str(key) + "/Time Rule"

            e = ET.SubElement(b, gn_brackets + "output")

            f = ET.SubElement(e, gn_brackets + "y")
            f.text = OUTPUT_PREFIX + str(key) + "/" + controller["steering_variable"]

    # remove controllers that have complex controllers
    for key in to_remove:
        del self.all_controllers[key]

    # elements that are parsed from user specified files should be inserted at the right place.
    if self.complex_controllers is not None:
        for ctl in self.complex_controllers["toolsconfig_rules"]:
            myroot[1].append(ET.fromstring(ctl))
        for ctl in self.complex_controllers["toolsconfig_triggers"]:
            # no trigger block present yet
            if len(myroot) == 2:
                trigger = ET.Element(gn_brackets + "triggers")
                myroot.append(trigger)
                myroot[2].append(ET.fromstring(ctl))
            else:
                myroot[2].append(ET.fromstring(ctl))

    self.finish_file(myroot, configfile, self.output_path / RTC_TOOLS_CONFIG_XML)
write_xml_v1() -> None

Wrapper function to write individual XML files.

Source code in hydrolib/dhydamo/core/drtc.py
def write_xml_v1(self) -> None:
    """Wrapper function to write individual XML files."""
    self.write_runtimeconfig()
    self.write_toolsconfig()
    self.write_timeseries_import()
    self.write_dataconfig()
    self.write_state_import()
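
The call order matters: write_toolsconfig assembles self.all_controllers, over which the other writers iterate, and write_dataconfig reads timeseries_import.xml back from disk, so write_timeseries_import must run before it. A minimal sketch, assuming a fully configured model instance `drtc`:

    drtc.write_xml_v1()  # runtimeconfig, toolsconfig, timeseries_import,
                         # dataconfig and state_import, in that order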

DRTCStructure dataclass

Internal dataclass for flow structures referenced in complex controllers

Source code in hydrolib/dhydamo/core/drtc.py
@dataclass
class DRTCStructure:
    "Internal dataclass for flow structures referenced in complex controllers"
    struct_type: str
    struct_name: str
    struct_property: str

hydamo

CrossSections

Source code in hydrolib/dhydamo/core/hydamo.py
class CrossSections:
    def __init__(self, hydamo: HyDAMO) -> None:
        """Initiate class variables

        Args:
            hydamo (HyDAMO): input data structure
        """
        self.hydamo = hydamo
        self.crosssections = []
        self.default_definition = None
        self.default_definition_shift = 0.0
        self.default_location = ""

        self.crosssection_loc = {}
        self.crosssection_def = {}

        self.get_roughnessname = self.get_roughness_description

        self.convert = CrossSectionsIO(self)

    def get_roughness_description(self, roughnesstype, value):
        if np.isnan(float(value)):
            raise ValueError("Roughness value should not be NaN.")

        # map HyDAMO definition to D-Hydro definition
        roughnesstype = self.hydamo.roughness_mapping[roughnesstype]

        # Get name
        name = f"{roughnesstype}_{float(value)}"

        # Check if the description is already known
        if name.lower() in map(str.lower, self.hydamo.roughness_definitions.keys()):
            return name

        # Add to dict
        self.hydamo.roughness_definitions[name] = {
            "frictionid": name,
            "frictiontype": roughnesstype,
            "frictionvalue": value,
        }

        return name
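
A sketch of the caching behaviour; `cs` stands for a CrossSections instance, and "Manning" is assumed to be a key of cs.hydamo.roughness_mapping:

    name = cs.get_roughness_description("Manning", 0.03)
    # a second call with the same type and value returns the same friction id;
    # the definition itself is stored once in cs.hydamo.roughness_definitions
    assert cs.get_roughness_description("Manning", 0.03) == name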

    def set_default_definition(self, definition, shift=0.0):
        if definition not in self.crosssection_def.keys():
            raise KeyError(f'Cross section definition "{definition}" not found.')

        self.default_definition = definition
        self.default_definition_shift = shift

    def set_default_locations(self, locations):
        """
        Add default profile locations
        """
        self.default_locations = locations

    def add_yz_definition(
        self, yz=None, thalweg=None, roughnesstype=None, roughnessvalue=None, name=None
    ):
        """
        Add xyz crosssection

        Parameters
        ----------
        code : str
            Id of cross section
        branch : str
            Name of branch
        offset : float
            Position of cross section along branch. If not given, the position is determined
            from the branches in the network. These should thus be given in this case.
        crds : np.array
            Nx2 array with y, z coordinates
        """

        # get coordinates
        length, z = yz.T
        if name is None:
            name = f"yz_{yz}:08d"

        # Get roughnessname
        roughnessname = self.get_roughnessname(roughnesstype, roughnessvalue)

        # Add to dictionary
        self.crosssection_def[name] = {
            "id": name,
            "type": "yz",
            "thalweg": np.round(thalweg, decimals=3),
            "yzcount": len(z),
            "ycoordinates": self.hydamo.list_to_str(length),
            "zcoordinates": self.hydamo.list_to_str(z),
            "sectioncount": 1,
            "frictionids": roughnessname,
            "frictionpositions": self.hydamo.list_to_str([length[0], length[-1]]),
        }

        return name

    def add_circle_definition(self, diameter, roughnesstype, roughnessvalue, name=None):
        """
        Add circle cross section. The cross section name is derived from the shape and roughness,
        so similar cross sections will result in a single definition.
        """
        # Get name if not given
        if name is None:
            name = f"circ_d{diameter:.3f}"

        # Get roughnessname
        roughnessname = self.get_roughnessname(roughnesstype, roughnessvalue)

        # Add to dictionary
        self.crosssection_def[name] = {
            "id": name,
            "type": "circle",
            "thalweg": 0.0,
            "diameter": diameter,
            "frictionid": roughnessname,
        }

        return name

    def add_rectangle_definition(
        self, height, width, closed, roughnesstype, roughnessvalue, name=None
    ):
        """
        Add rectangle cross section. The cross section name is derived from the shape and roughness,
        so similar cross sections will result in a single definition.
        """
        # Get name if not given
        if name is None:
            name = f"rect_h{height:.3f}_w{width:.3f}"

        # Get roughnessname
        roughnessname = self.get_roughnessname(roughnesstype, roughnessvalue)

        # Add to dictionary
        self.crosssection_def[name] = {
            "id": name,
            "type": "rectangle",
            "thalweg": 0.0,
            "height": height,
            "width": width,
            "closed": int(closed),
            "frictionid": roughnessname,
        }

        return name

    def add_trapezium_definition(
        self,
        slope,
        maximumflowwidth,
        bottomwidth,
        closed,
        roughnesstype,
        roughnessvalue,
        bottomlevel=None,
        name=None,
    ):
        """
        Add trapezium cross section. The cross section name is derived from the shape and roughness,
        so similar cross sections will result in a single definition.
        """
        # Get name if not given
        if name is None:
            name = f"trapz_s{slope:.1f}_bw{bottomwidth:.1f}_bw{maximumflowwidth:.1f}"

        # Get roughnessname
        roughnessname = self.get_roughnessname(roughnesstype, roughnessvalue)

        if bottomlevel is None:
            bottomlevel = 0.0

        if not closed:
            levels = f"{bottomlevel} 100"
            flowwidths = (
                f"{bottomwidth:.2f} {bottomwidth + 2.*((100.0-bottomlevel)*slope):.2f}"
            )
        else:
            levels = f"0 {((maximumflowwidth - bottomwidth)/2.0) / slope:.2f}"
            flowwidths = f"{bottomwidth:.2f} {maximumflowwidth:.2f}"

        # Add to dictionary
        self.crosssection_def[name] = {
            "id": name,
            "type": "zw",
            "thalweg": 0.0,
            "numlevels": 2,
            "levels": levels,
            "flowwidths": flowwidths,
            "totalwidths": flowwidths,
            "frictionid": roughnessname,
        }

        return name

    def add_zw_definition(
        self,
        numLevels,
        levels,
        flowWidths,
        totalWidths,
        roughnesstype,
        roughnessvalue,
        name=None,
    ):
        """
        Add zw cross section. The cross section name is derived from the shape and roughness,
        so similar cross sections will result in a single definition.
        """
        # Get name if not given
        if name is None:
            # levels and flowWidths are strings, so no float format spec applies here
            name = f'zw_h{levels.replace(" ", "_")}_w{flowWidths.replace(" ", "_")}'

        # Get roughnessname
        roughnessname = self.get_roughnessname(roughnesstype, roughnessvalue)

        # Add to dictionary
        self.crosssection_def[name] = {
            "id": name,
            "type": "zw",
            "thalweg": 0.0,
            "numlevels": int(numLevels),
            "levels": levels,
            "flowwidths": flowWidths,
            "totalwidths": totalWidths,
            "frictionid": roughnessname,
        }

        return name

    def add_crosssection_location(
        self, branchid, chainage, definition, minz=np.nan, shift=0.0
    ):
        descr = f"{branchid}_{chainage:.1f}"
        # Add cross section location
        self.crosssection_loc[descr] = {
            "id": descr,
            "branchid": branchid,
            "chainage": chainage,
            "shift": shift,
            "definitionId": definition,
        }

    def get_branches_without_crosssection(self):
        # First find all branches that match a cross section
        branch_ids = {dct["branchid"] for _, dct in self.crosssection_loc.items()}
        # Select the branch-ids that do not have a matching cross section
        branches = self.hydamo.branches
        no_crosssection = branches.index[~np.isin(branches.index, list(branch_ids))]

        return no_crosssection.tolist()

    def get_structures_without_crosssection(self):
        csdef_ids = [dct["id"] for _, dct in self.crosssection_def.items()]
        no_crosssection = []
        bridge_ids = [
            dct["csdefid"] for _, dct in self.hydamo.structures.bridges_df.iterrows()
        ]
        no_cross_bridge = np.asarray(bridge_ids)[
            ~np.isin(bridge_ids, csdef_ids)
        ].tolist()
        no_crosssection = no_crosssection + no_cross_bridge
        culvert_ids = [
            dct["csdefid"] for _, dct in self.hydamo.structures.culverts_df.iterrows()
        ]
        no_cross_culvert = np.asarray(culvert_ids)[
            ~np.isin(culvert_ids, csdef_ids)
        ].tolist()
        no_crosssection = no_crosssection + no_cross_culvert
        return no_crosssection

    def get_bottom_levels(self):
        """Method to determine bottom levels from cross sections"""

        # Initialize lists
        data = []
        geometry = []

        for key, css in self.crosssection_loc.items():
            # Get location
            geometry.append(
                self.dflowfmmodel.network.schematised.at[
                    css["branchid"], "geometry"
                ].interpolate(css["chainage"])
            )
            shift = css["shift"]

            # Get depth from definition if yz and shift
            definition = self.crosssection_def[css["definitionId"]]
            minz = shift
            if definition["type"] == "yz":
                minz += min(float(z) for z in definition["zcoordinates"].split())

            data.append([css["branchid"], css["chainage"], minz])

        # Add to geodataframe
        gdf = gpd.GeoDataFrame(
            data=data, columns=["branchid", "chainage", "minz"], geometry=geometry
        )
        return gdf

    @validate_arguments(config=ConfigDict(arbitrary_types_allowed=True))
    def crosssection_to_yzprofiles(
        self,
        crosssections: gpd.GeoDataFrame | ExtendedGeoDataFrame,
        roughness: ExtendedDataFrame,
        branches: ExtendedGeoDataFrame | None,
        roughness_variant: RoughnessVariant = None,
    ) -> dict:
        """
        Function to convert hydamo cross sections 'dwarsprofiel' to
        dflowfm input.

        Parameters
        ----------
        crosssections : gpd.GeoDataFrame
            GeoDataFrame with x,y,z-coordinates of cross sections
        roughness : ExtendedDataFrame
            Roughness values ('ruwheidhoog'/'ruwheidlaag') per profile point
        branches : ExtendedGeoDataFrame, optional
            Branch geometries used to locate the thalweg; if None, the thalweg is set to 0.0
        roughness_variant : RoughnessVariant
            Whether to use the high or the low roughness value

        Returns
        -------
        dictionary
            Dictionary with attributes of cross sections, usable for dflowfm
        """
        cssdct = {}

        for css in crosssections.itertuples():
            # The cross sections from hydamo are all yz profiles

            # Determine yz_values
            xyz = np.vstack(css.geometry.coords[:])
            length = np.r_[
                0, np.cumsum(np.hypot(np.diff(xyz[:, 0]), np.diff(xyz[:, 1])))
            ]
            yz = np.c_[length, xyz[:, -1]]
            # The GUI cannot cope with identical y-coordinates; nudge any duplicate up by 1 cm.
            yz[:, 0] = np.round(yz[:, 0], 3)
            for i in range(1, yz.shape[0]):
                if yz[i, 0] <= yz[i - 1, 0]:
                    yz[i, 0] = yz[i - 1, 0] + 0.01

            # determine thalweg
            if branches is not None:
                branche_geom = branches[branches.code == css.branch_id].geometry.to_numpy()

                # Compute the intersection with the branch once
                intersection = css.geometry.intersection(branche_geom[0])
                if intersection.geom_type == "MultiPoint":
                    thalweg_xyz = intersection.geoms[0].coords[:][0]
                else:
                    thalweg_xyz = intersection.coords[:][0]
                # and the Y-coordinate of the thalweg
                thalweg = np.hypot(
                    thalweg_xyz[0] - xyz[0, 0], thalweg_xyz[1] - xyz[0, 1]
                )
            else:
                thalweg = 0.0

            # Use elif/else so ruwheid cannot be left unbound for an invalid variant
            if roughness_variant == RoughnessVariant.HIGH:
                ruwheid = roughness[
                    roughness["profielpuntid"] == css.globalid
                ].ruwheidhoog
            elif roughness_variant == RoughnessVariant.LOW:
                ruwheid = roughness[
                    roughness["profielpuntid"] == css.globalid
                ].ruwheidlaag
            else:
                raise ValueError(
                    'Invalid value for roughness_variant; should be "High" or "Low".'
                )

            # Add to dictionary
            cssdct[css.code] = {
                "branchid": css.branch_id,
                "chainage": css.branch_offset,
                "yz": yz,
                "thalweg": thalweg,
                "typeruwheid": roughness[
                    roughness["profielpuntid"] == css.globalid
                ].typeruwheid.to_numpy()[0],
                "ruwheid": float(ruwheid.iloc[0]),
            }

        return cssdct

    @validate_arguments(config=ConfigDict(arbitrary_types_allowed=True))
    def parametrised_to_profiles(
        self,
        parametrised: ExtendedDataFrame,
        parametrised_values: ExtendedDataFrame,
        branches: list,
        roughness_variant: RoughnessVariant = None,
    ) -> dict:
        """
        Generate parametrised cross sections for all branches,
        or the branches missing a cross section.

        Parameters
        ----------
        parametrised : ExtendedDataFrame
            DataFrame with attributes of parametrised profiles
        parametrised_values : ExtendedDataFrame
            DataFrame with parameter values ('soortparameter', 'waarde') per profile
        branches : list
            List of branches for which the parametrised profiles are derived
        roughness_variant : RoughnessVariant
            Whether to use the high or the low roughness value

        Returns
        -------
        dictionary
            Dictionary with attributes of cross sections, usable for dflowfm
        """

        cssdct = {}
        for param in parametrised.itertuples():
            branch = [
                branch for branch in branches if branch.globalid == param.hydroobjectid
            ]

            values = parametrised_values[
                parametrised_values.normgeparamprofielid == param.normgeparamprofielid
            ]

            # Drop profiles for which not enough data is available to write (as rectangle)
            # nulls = pd.isna(parambranches[['bodembreedte', 'bodemhoogtebenedenstrooms', 'bodemhoogtebovenstrooms']]).any(axis=1).to_numpy()
            # parambranches = parambranches.drop(ExtendedGeoDataFrame(geotype=LineString), parambranches.index[nulls], index_col='code',axis=0)
            # parambranches.drop(parambranches.index[nulls], inplace=True)

            if pd.isna(
                values[values.soortparameter == "bodemhoogte benedenstrooms"].waarde
            ).to_numpy()[0]:
                logger.warning(
                    "bodemhoogte benedenstrooms not available for profile %s.",
                    param.globalid,
                )
            if pd.isna(values[values.soortparameter == "bodembreedte"].waarde).to_numpy()[
                0
            ]:
                logger.warning(
                    "bodembreedte not available for profile %s.",
                    param.globalid,
                )
            if pd.isna(
                values[values.soortparameter == "bodemhoogte bovenstrooms"].waarde
            ).to_numpy()[0]:
                logger.warning(
                    "bodemhoogte bovenstrooms not available for profile %s.",
                    param.globalid,
                )

            # Determine characteristics
            botlev_upper = values[
                values.soortparameter == "bodemhoogte bovenstrooms"
            ].waarde.to_numpy()[0]
            botlev_lower = values[
                values.soortparameter == "bodemhoogte benedenstrooms"
            ].waarde.to_numpy()[0]

            if pd.isna(
                values[values.soortparameter == "taludhelling linkerzijde"].waarde
            ).to_numpy()[0]:
                css_type = "rectangle"
            else:
                css_type = "trapezium"
                dh1 = (
                    values[
                        values.soortparameter == "hoogte insteek linkerzijde"
                    ].waarde.to_numpy()[0]
                    - (botlev_upper + botlev_lower)/2.
                )
                dh2 = (
                    values[
                        values.soortparameter == "hoogte insteek rechterzijde"
                    ].waarde.to_numpy()[0]
                    - (botlev_upper + botlev_lower)/2.
                )
                # height = (dh1 + dh2) / 2.0
                # Determine maximum flow width and slope (both needed for output)
                maxflowwidth = (
                    values[values.soortparameter == "bodembreedte"].waarde.to_numpy()[0]
                    + values[
                        values.soortparameter == "taludhelling linkerzijde"
                    ].waarde.to_numpy()[0]
                    * dh1
                    + values[
                        values.soortparameter == "taludhelling rechterzijde"
                    ].waarde.to_numpy()[0]
                    * dh2
                )
                slope = (
                    values[
                        values.soortparameter == "taludhelling linkerzijde"
                    ].waarde.to_numpy()[0]
                    + values[
                        values.soortparameter == "taludhelling rechterzijde"
                    ].waarde.to_numpy()[0]
                ) / 2.0

            if roughness_variant == RoughnessVariant.LOW:
                roughness = values.ruwheidlaag.to_numpy()[0]
            elif roughness_variant == RoughnessVariant.HIGH:
                roughness = values.ruwheidhoog.to_numpy()[0]
            else:
                raise ValueError(
                    'Invalid value for roughness_variant; should be "High" or "Low".'
                )
            # Determine name for cross section
            if css_type == "trapezium":
                cssdct[branch[0].Index] = {
                    "type": css_type,
                    "slope": round(slope, 2),
                    "maximumflowwidth": round(maxflowwidth, 1),
                    "bottomwidth": round(
                        values[values.soortparameter == "bodembreedte"].waarde.to_numpy()[
                            0
                        ],
                        3,
                    ),
                    "closed": 0,
                    "thalweg": 0.0,
                    "typeruwheid": values.typeruwheid.to_numpy()[0],
                    "ruwheid": roughness,
                    "bottomlevel_upper": botlev_upper,
                    "bottomlevel_lower": botlev_lower,
                }
            elif css_type == "rectangle":
                cssdct[branch[0].Index] = {
                    "type": css_type,
                    "height": 5.0,
                    "width": round(
                        values[values.soortparameter == "bodembreedte"].waarde.to_numpy()[
                            0
                        ],
                        3,
                    ),
                    "closed": 0,
                    "thalweg": 0.0,
                    "typeruwheid": values.typeruwheid.to_numpy()[0],
                    "ruwheid": roughness,
                    "bottomlevel_upper": botlev_upper,
                    "bottomlevel_lower": botlev_lower,
                }

        return cssdct
__init__(hydamo: HyDAMO) -> None

Initialize class variables

Parameters:

- hydamo (HyDAMO): input data structure. Required.
Source code in hydrolib/dhydamo/core/hydamo.py
def __init__(self, hydamo: HyDAMO) -> None:
    """Initiate class variables

    Args:
        hydamo (HyDAMO): input data structure
    """
    self.hydamo = hydamo
    self.crosssections = []
    self.default_definition = None
    self.default_definition_shift = 0.0
    self.default_location = ""

    self.crosssection_loc = {}
    self.crosssection_def = {}

    self.get_roughnessname = self.get_roughness_description

    self.convert = CrossSectionsIO(self)
add_circle_definition(diameter, roughnesstype, roughnessvalue, name=None)

Add circle cross section. The cross section name is derived from the shape and roughness, so similar cross sections will result in a single definition.

Source code in hydrolib/dhydamo/core/hydamo.py
def add_circle_definition(self, diameter, roughnesstype, roughnessvalue, name=None):
    """
    Add circle cross section. The cross section name is derived from the shape and roughness,
    so similar cross sections will result in a single definition.
    """
    # Get name if not given
    if name is None:
        name = f"circ_d{diameter:.3f}"

    # Get roughnessname
    roughnessname = self.get_roughnessname(roughnesstype, roughnessvalue)

    # Add to dictionary
    self.crosssection_def[name] = {
        "id": name,
        "type": "circle",
        "thalweg": 0.0,
        "diameter": diameter,
        "frictionid": roughnessname,
    }

    return name
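
A minimal usage sketch, assuming an existing CrossSections instance named crosssections whose HyDAMO object maps the roughness type "Manning"; both the instance and the roughness type are assumptions, not part of the source above:

# Hypothetical usage: two identical culverts share one definition,
# because the name is derived from the diameter.
name_a = crosssections.add_circle_definition(
    diameter=0.5, roughnesstype="Manning", roughnessvalue=0.03
)
name_b = crosssections.add_circle_definition(
    diameter=0.5, roughnesstype="Manning", roughnessvalue=0.03
)
assert name_a == name_b == "circ_d0.500"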
add_rectangle_definition(height, width, closed, roughnesstype, roughnessvalue, name=None)

Add rectangle cross section. The cross section name is derived from the shape and roughness, so similar cross sections will result in a single definition.

Source code in hydrolib/dhydamo/core/hydamo.py
def add_rectangle_definition(
    self, height, width, closed, roughnesstype, roughnessvalue, name=None
):
    """
    Add rectangle cross section. The cross section name is derived from the shape and roughness,
    so similar cross sections will result in a single definition.
    """
    # Get name if not given
    if name is None:
        name = f"rect_h{height:.3f}_w{width:.3f}"

    # Get roughnessname
    roughnessname = self.get_roughnessname(roughnesstype, roughnessvalue)

    # Add to dictionary
    self.crosssection_def[name] = {
        "id": name,
        "type": "rectangle",
        "thalweg": 0.0,
        "height": height,
        "width": width,
        "closed": int(closed),
        "frictionid": roughnessname,
    }

    return name
add_trapezium_definition(slope, maximumflowwidth, bottomwidth, closed, roughnesstype, roughnessvalue, bottomlevel=None, name=None)

Add trapezium cross section. The cross section name is derived from the shape and roughness, so similar cross sections will result in a single definition.

Source code in hydrolib/dhydamo/core/hydamo.py
def add_trapezium_definition(
    self,
    slope,
    maximumflowwidth,
    bottomwidth,
    closed,
    roughnesstype,
    roughnessvalue,
    bottomlevel=None,
    name=None,
):
    """
    Add trapezium cross section. The cross section name is derived from the shape and roughness,
    so similar cross sections will result in a single definition.
    """
    # Get name if not given
    if name is None:
        name = f"trapz_s{slope:.1f}_bw{bottomwidth:.1f}_bw{maximumflowwidth:.1f}"

    # Get roughnessname
    roughnessname = self.get_roughnessname(roughnesstype, roughnessvalue)

    if bottomlevel is None:
        bottomlevel = 0.0

    if not closed:
        levels = f"{bottomlevel} 100"
        flowwidths = (
            f"{bottomwidth:.2f} {bottomwidth + 2.*((100.0-bottomlevel)*slope):.2f}"
        )
    else:
        levels = f"0 {((maximumflowwidth - bottomwidth)/2.0) / slope:.2f}"
        flowwidths = f"{bottomwidth:.2f} {maximumflowwidth:.2f}"

    # Add to dictionary
    self.crosssection_def[name] = {
        "id": name,
        "type": "zw",
        "thalweg": 0.0,
        "numlevels": 2,
        "levels": levels,
        "flowwidths": flowwidths,
        "totalwidths": flowwidths,
        "frictionid": roughnessname,
    }

    return name
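
For the open-profile branch above, the two-level tabulated (zw) strings can be reproduced with plain Python; the numbers below are made up for illustration:

# Worked sketch of the open (not closed) branch: the profile is extended
# to a dummy level of 100 m, widening by 2 * slope per metre of depth.
bottomlevel, bottomwidth, slope = 0.0, 10.0, 2.0
levels = f"{bottomlevel} 100"
flowwidths = f"{bottomwidth:.2f} {bottomwidth + 2.0 * ((100.0 - bottomlevel) * slope):.2f}"
print(levels)      # 0.0 100
print(flowwidths)  # 10.00 410.00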
add_yz_definition(yz=None, thalweg=None, roughnesstype=None, roughnessvalue=None, name=None)

Add YZ cross section definition

Parameters

yz : np.array
    Nx2 array with y, z coordinates
thalweg : float
    Y-position of the thalweg within the profile
roughnesstype : str
    HyDAMO roughness type
roughnessvalue : float
    Roughness value
name : str, optional
    Id of the definition; a sequential name is generated if not given

Source code in hydrolib/dhydamo/core/hydamo.py
def add_yz_definition(
    self, yz=None, thalweg=None, roughnesstype=None, roughnessvalue=None, name=None
):
    """
    Add xyz crosssection

    Parameters
    ----------
    code : str
        Id of cross section
    branch : str
        Name of branch
    offset : float
        Position of cross section along branch. If not given, the position is determined
        from the branches in the network. These should thus be given in this case.
    crds : np.array
        Nx2 array with y, z coordinates
    """

    # get coordinates
    length, z = yz.T
    if name is None:
        name = f"yz_{yz}:08d"

    # Get roughnessname
    roughnessname = self.get_roughnessname(roughnesstype, roughnessvalue)

    # Add to dictionary
    self.crosssection_def[name] = {
        "id": name,
        "type": "yz",
        "thalweg": np.round(thalweg, decimals=3),
        "yzcount": len(z),
        "ycoordinates": self.hydamo.list_to_str(length),
        "zcoordinates": self.hydamo.list_to_str(z),
        "sectioncount": 1,
        "frictionids": roughnessname,
        "frictionpositions": self.hydamo.list_to_str([length[0], length[-1]]),
    }

    return name
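
A minimal usage sketch, assuming an existing CrossSections instance named crosssections and a valid HyDAMO roughness type "Manning" (both assumptions):

import numpy as np

# Hypothetical 4-point profile: banks at 2.0 m, bed at 0.0 m,
# thalweg at y = 7.5 m along the profile.
yz = np.array([[0.0, 2.0], [5.0, 0.0], [10.0, 0.0], [15.0, 2.0]])
name = crosssections.add_yz_definition(
    yz=yz, thalweg=7.5, roughnesstype="Manning", roughnessvalue=0.03, name="prof_0001"
)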
add_zw_definition(numLevels, levels, flowWidths, totalWidths, roughnesstype, roughnessvalue, name=None)

Add zw cross section. The cross section name is derived from the shape and roughness, so similar cross sections will result in a single definition.

Source code in hydrolib/dhydamo/core/hydamo.py
def add_zw_definition(
    self,
    numLevels,
    levels,
    flowWidths,
    totalWidths,
    roughnesstype,
    roughnessvalue,
    name=None,
):
    """
    Add zw cross section. The cross section name is derived from the shape and roughness,
    so similar cross sections will result in a single definition.
    """
    # Get name if not given
    if name is None:
        # levels and flowWidths are strings, so no float format spec applies here
        name = f'zw_h{levels.replace(" ", "_")}_w{flowWidths.replace(" ", "_")}'

    # Get roughnessname
    roughnessname = self.get_roughnessname(roughnesstype, roughnessvalue)

    # Add to dictionary
    self.crosssection_def[name] = {
        "id": name,
        "type": "zw",
        "thalweg": 0.0,
        "numlevels": int(numLevels),
        "levels": levels,
        "flowwidths": flowWidths,
        "totalwidths": totalWidths,
        "frictionid": roughnessname,
    }

    return name
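
A minimal usage sketch (same assumptions as above: an existing crosssections instance and the "Manning" roughness type); note that levels and widths are passed as space-separated strings:

# Hypothetical three-level tabulated profile.
name = crosssections.add_zw_definition(
    numLevels=3,
    levels="0.0 1.0 2.0",
    flowWidths="5.0 10.0 15.0",
    totalWidths="5.0 12.0 18.0",
    roughnesstype="Manning",
    roughnessvalue=0.03,
)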
crosssection_to_yzprofiles(crosssections: gpd.GeoDataFrame | ExtendedGeoDataFrame, roughness: ExtendedDataFrame, branches: ExtendedGeoDataFrame | None, roughness_variant: RoughnessVariant = None) -> dict

Function to convert hydamo cross sections 'dwarsprofiel' to dflowfm input.

Parameters

crosssections : gpd.GeoDataFrame
    GeoDataFrame with x,y,z-coordinates of cross sections
roughness : ExtendedDataFrame
    Roughness values ('ruwheidhoog'/'ruwheidlaag') per profile point
branches : ExtendedGeoDataFrame, optional
    Branch geometries used to locate the thalweg; if None, the thalweg is set to 0.0
roughness_variant : RoughnessVariant
    Whether to use the high or the low roughness value

Returns

dictionary
    Dictionary with attributes of cross sections, usable for dflowfm

Source code in hydrolib/dhydamo/core/hydamo.py
@validate_arguments(config=ConfigDict(arbitrary_types_allowed=True))
def crosssection_to_yzprofiles(
    self,
    crosssections: gpd.GeoDataFrame | ExtendedGeoDataFrame,
    roughness: ExtendedDataFrame,
    branches: ExtendedGeoDataFrame | None,
    roughness_variant: RoughnessVariant = None,
) -> dict:
    """
    Function to convert hydamo cross sections 'dwarsprofiel' to
    dflowfm input.

    Parameters
    ----------
    crosssections : gpd.GeoDataFrame
        GeoDataFrame with x,y,z-coordinates of cross sections
    roughness : ExtendedDataFrame
        Roughness values ('ruwheidhoog'/'ruwheidlaag') per profile point
    branches : ExtendedGeoDataFrame, optional
        Branch geometries used to locate the thalweg; if None, the thalweg is set to 0.0
    roughness_variant : RoughnessVariant
        Whether to use the high or the low roughness value

    Returns
    -------
    dictionary
        Dictionary with attributes of cross sections, usable for dflowfm
    """
    cssdct = {}

    for css in crosssections.itertuples():
        # The cross sections from hydamo are all yz profiles

        # Determine yz_values
        xyz = np.vstack(css.geometry.coords[:])
        length = np.r_[
            0, np.cumsum(np.hypot(np.diff(xyz[:, 0]), np.diff(xyz[:, 1])))
        ]
        yz = np.c_[length, xyz[:, -1]]
        # The GUI cannot cope with identical y-coordinates; nudge any duplicate up by 1 cm.
        yz[:, 0] = np.round(yz[:, 0], 3)
        for i in range(1, yz.shape[0]):
            if yz[i, 0] <= yz[i - 1, 0]:
                yz[i, 0] = yz[i - 1, 0] + 0.01

        # determine thalweg
        if branches is not None:
            branche_geom = branches[branches.code == css.branch_id].geometry.to_numpy()

            # Compute the intersection with the branch once
            intersection = css.geometry.intersection(branche_geom[0])
            if intersection.geom_type == "MultiPoint":
                thalweg_xyz = intersection.geoms[0].coords[:][0]
            else:
                thalweg_xyz = intersection.coords[:][0]
            # and the Y-coordinate of the thalweg
            thalweg = np.hypot(
                thalweg_xyz[0] - xyz[0, 0], thalweg_xyz[1] - xyz[0, 1]
            )
        else:
            thalweg = 0.0

        # Use elif/else so ruwheid cannot be left unbound for an invalid variant
        if roughness_variant == RoughnessVariant.HIGH:
            ruwheid = roughness[
                roughness["profielpuntid"] == css.globalid
            ].ruwheidhoog
        elif roughness_variant == RoughnessVariant.LOW:
            ruwheid = roughness[
                roughness["profielpuntid"] == css.globalid
            ].ruwheidlaag
        else:
            raise ValueError(
                'Invalid value for roughness_variant; should be "High" or "Low".'
            )

        # Add to dictionary
        cssdct[css.code] = {
            "branchid": css.branch_id,
            "chainage": css.branch_offset,
            "yz": yz,
            "thalweg": thalweg,
            "typeruwheid": roughness[
                roughness["profielpuntid"] == css.globalid
            ].typeruwheid.to_numpy()[0],
            "ruwheid": float(ruwheid.iloc[0]),
        }

    return cssdct
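
The core of the conversion, the cumulative distance along the profile plus the duplicate nudge, can be reproduced standalone; the coordinates below are made up:

import numpy as np

# Two identical x,y points (a vertical wall) produce a duplicate y-coordinate,
# which is nudged up by 1 cm so the profile stays strictly increasing.
xyz = np.array([[0.0, 0.0, 2.0], [3.0, 4.0, 0.0], [3.0, 4.0, 0.5], [6.0, 8.0, 2.0]])
length = np.r_[0, np.cumsum(np.hypot(np.diff(xyz[:, 0]), np.diff(xyz[:, 1])))]
yz = np.c_[length, xyz[:, -1]]
yz[:, 0] = np.round(yz[:, 0], 3)
for i in range(1, yz.shape[0]):
    if yz[i, 0] <= yz[i - 1, 0]:
        yz[i, 0] = yz[i - 1, 0] + 0.01
print(yz)  # y-coordinates become 0.0, 5.0, 5.01, 10.0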
get_bottom_levels()

Method to determine bottom levels from cross sections

Source code in hydrolib/dhydamo/core/hydamo.py
def get_bottom_levels(self):
    """Method to determine bottom levels from cross sections"""

    # Initialize lists
    data = []
    geometry = []

    for key, css in self.crosssection_loc.items():
        # Get location
        geometry.append(
            self.dflowfmmodel.network.schematised.at[
                css["branchid"], "geometry"
            ].interpolate(css["chainage"])
        )
        shift = css["shift"]

        # Get depth from definition if yz and shift
        definition = self.crosssection_def[css["definitionId"]]
        minz = shift
        if definition["type"] == "yz":
            minz += min(float(z) for z in definition["zcoordinates"].split())

        data.append([css["branchid"], css["chainage"], minz])

    # Add to geodataframe
    gdf = gpd.GeoDataFrame(
        data=data, columns=["branchid", "chainage", "minz"], geometry=geometry
    )
    return gdf
parametrised_to_profiles(parametrised: ExtendedDataFrame, parametrised_values: ExtendedDataFrame, branches: list, roughness_variant: RoughnessVariant = None) -> dict

Generate parametrised cross sections for all branches, or the branches missing a cross section.

Parameters

parametrised : ExtendedDataFrame
    DataFrame with attributes of parametrised profiles
parametrised_values : ExtendedDataFrame
    DataFrame with parameter values ('soortparameter', 'waarde') per profile
branches : list
    List of branches for which the parametrised profiles are derived
roughness_variant : RoughnessVariant
    Whether to use the high or the low roughness value

Returns

dictionary
    Dictionary with attributes of cross sections, usable for dflowfm

Source code in hydrolib/dhydamo/core/hydamo.py
@validate_arguments(config=ConfigDict(arbitrary_types_allowed=True))
def parametrised_to_profiles(
    self,
    parametrised: ExtendedDataFrame,
    parametrised_values: ExtendedDataFrame,
    branches: list,
    roughness_variant: RoughnessVariant = None,
) -> dict:
    """
    Generate parametrised cross sections for all branches,
    or the branches missing a cross section.

    Parameters
    ----------
    parametrised : ExtendedDataFrame
        DataFrame with attributes of parametrised profiles
    parametrised_values : ExtendedDataFrame
        DataFrame with parameter values ('soortparameter', 'waarde') per profile
    branches : list
        List of branches for which the parametrised profiles are derived
    roughness_variant : RoughnessVariant
        Whether to use the high or the low roughness value

    Returns
    -------
    dictionary
        Dictionary with attributes of cross sections, usable for dflowfm
    """

    cssdct = {}
    for param in parametrised.itertuples():
        branch = [
            branch for branch in branches if branch.globalid == param.hydroobjectid
        ]

        values = parametrised_values[
            parametrised_values.normgeparamprofielid == param.normgeparamprofielid
        ]

        # Drop profiles for which not enough data is available to write (as rectangle)
        # nulls = pd.isna(parambranches[['bodembreedte', 'bodemhoogtebenedenstrooms', 'bodemhoogtebovenstrooms']]).any(axis=1).to_numpy()
        # parambranches = parambranches.drop(ExtendedGeoDataFrame(geotype=LineString), parambranches.index[nulls], index_col='code',axis=0)
        # parambranches.drop(parambranches.index[nulls], inplace=True)

        if pd.isna(
            values[values.soortparameter == "bodemhoogte benedenstrooms"].waarde
        ).to_numpy()[0]:
            logger.warning(
                "bodemhoogte benedenstrooms not available for profile %s.",
                param.globalid,
            )
        if pd.isna(values[values.soortparameter == "bodembreedte"].waarde).to_numpy()[
            0
        ]:
            logger.warning(
                "bodembreedte not available for profile %s.",
                param.globalid,
            )
        if pd.isna(
            values[values.soortparameter == "bodemhoogte bovenstrooms"].waarde
        ).to_numpy()[0]:
            logger.warning(
                "bodemhoogte bovenstrooms not available for profile %s.",
                param.globalid,
            )

        # Determine characteristics
        botlev_upper = values[
            values.soortparameter == "bodemhoogte bovenstrooms"
        ].waarde.to_numpy()[0]
        botlev_lower = values[
            values.soortparameter == "bodemhoogte benedenstrooms"
        ].waarde.to_numpy()[0]

        if pd.isna(
            values[values.soortparameter == "taludhelling linkerzijde"].waarde
        ).to_numpy()[0]:
            css_type = "rectangle"
        else:
            css_type = "trapezium"
            dh1 = (
                values[
                    values.soortparameter == "hoogte insteek linkerzijde"
                ].waarde.to_numpy()[0]
                - (botlev_upper + botlev_lower)/2.
            )
            dh2 = (
                values[
                    values.soortparameter == "hoogte insteek rechterzijde"
                ].waarde.to_numpy()[0]
                - (botlev_upper + botlev_lower)/2.
            )
            # height = (dh1 + dh2) / 2.0
            # Determine maximum flow width and slope (both needed for output)
            maxflowwidth = (
                values[values.soortparameter == "bodembreedte"].waarde.to_numpy()[0]
                + values[
                    values.soortparameter == "taludhelling linkerzijde"
                ].waarde.to_numpy()[0]
                * dh1
                + values[
                    values.soortparameter == "taludhelling rechterzijde"
                ].waarde.to_numpy()[0]
                * dh2
            )
            slope = (
                values[
                    values.soortparameter == "taludhelling linkerzijde"
                ].waarde.to_numpy()[0]
                + values[
                    values.soortparameter == "taludhelling rechterzijde"
                ].waarde.to_numpy()[0]
            ) / 2.0

        if roughness_variant == RoughnessVariant.LOW:
            roughness = values.ruwheidlaag.to_numpy()[0]
        elif roughness_variant == RoughnessVariant.HIGH:
            roughness = values.ruwheidhoog.to_numpy()[0]
        else:
            raise ValueError(
                'Invalid value for roughness_variant; should be "High" or "Low".'
            )
        # Determine name for cross section
        if css_type == "trapezium":
            cssdct[branch[0].Index] = {
                "type": css_type,
                "slope": round(slope, 2),
                "maximumflowwidth": round(maxflowwidth, 1),
                "bottomwidth": round(
                    values[values.soortparameter == "bodembreedte"].waarde.to_numpy()[
                        0
                    ],
                    3,
                ),
                "closed": 0,
                "thalweg": 0.0,
                "typeruwheid": values.typeruwheid.to_numpy()[0],
                "ruwheid": roughness,
                "bottomlevel_upper": botlev_upper,
                "bottomlevel_lower": botlev_lower,
            }
        elif css_type == "rectangle":
            cssdct[branch[0].Index] = {
                "type": css_type,
                "height": 5.0,
                "width": round(
                    values[values.soortparameter == "bodembreedte"].waarde.to_numpy()[
                        0
                    ],
                    3,
                ),
                "closed": 0,
                "thalweg": 0.0,
                "typeruwheid": values.typeruwheid.to_numpy()[0],
                "ruwheid": roughness,
                "bottomlevel_upper": botlev_upper,
                "bottomlevel_lower": botlev_lower,
            }

    return cssdct
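
The trapezium arithmetic above can be checked with plain Python; all parameter values below are made up:

# Sketch of the trapezium derivation: dh1/dh2 are bank heights above the
# mean bed level; slopes widen the profile by talud * dh on each side.
bodembreedte = 10.0                       # bottom width
botlev_upper, botlev_lower = 1.2, 0.8     # bed level up- and downstream
insteek_links, insteek_rechts = 3.0, 3.2  # bank levels left/right
talud_links, talud_rechts = 2.0, 3.0      # slopes (horizontal per vertical)

mean_botlev = (botlev_upper + botlev_lower) / 2.0                     # 1.0
dh1 = insteek_links - mean_botlev                                     # 2.0
dh2 = insteek_rechts - mean_botlev                                    # 2.2
maxflowwidth = bodembreedte + talud_links * dh1 + talud_rechts * dh2  # 20.6
slope = (talud_links + talud_rechts) / 2.0                            # 2.5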
set_default_locations(locations)

Add default profile locations

Source code in hydrolib/dhydamo/core/hydamo.py
def set_default_locations(self, locations):
    """
    Add default profile locations
    """
    self.default_locations = locations

ExternalForcings

Source code in hydrolib/dhydamo/core/hydamo.py
class ExternalForcings:
    def __init__(self, hydamo):
        self.hydamo = hydamo

        self.initial_waterlevel_polygons = gpd.GeoDataFrame(
            columns=["waterlevel", "geometry", "locationtype"]
        )
        self.initial_waterdepth_polygons = gpd.GeoDataFrame(
            columns=["waterdepth", "geometry", "locationtype"]
        )
        self.missing = None

        self.boundary_nodes = {}
        self.lateral_nodes = {}
        self.pattern = "^[{]?[0-9a-fA-F]{8}-([0-9a-fA-F]{4}-){3}[0-9a-fA-F]{12}[}]?$"

        self.convert = ExternalForcingsIO(self)

    def set_initial_waterlevel(self, level, polygon=None, name=None, locationtype="1d"):
        """
        Method to set initial water level. A polygon can be given to
        limit the initial water level to a certain extent.

        """
        # Get name if not given as input
        if name is None:
            name = f"wlevpoly{len(self.initial_waterlevel_polygons) + 1:04d}"

        # Add to geodataframe
        if polygon is None:
            new_df = pd.DataFrame(
                {
                    "waterlevel": level,
                    "geometry": polygon,
                    "locationtype": locationtype,
                },
                index=[name],
            )
            self.initial_waterlevel_polygons = new_df
        else:
            self.initial_waterlevel_polygons.loc[name] = {
                "waterlevel": level,
                "geometry": polygon,
                "locationtype": locationtype,
            }

    def set_missing_waterlevel(self, missing):
        """
        Method to set the missing value for the water level.
        This overwrites the missing-value water level in the MDU file.

        Parameters
        ----------
        missing : float
            Water level used for missing values
        """
        self.mdu_parameters["WaterLevIni"] = missing

    def set_initial_waterdepth(self, depth, polygon=None, name=None, locationtype="1d"):
        """
        Method to set the initial water depth in the 1d model. The water depth is
        set by determining the water level at the locations of the cross sections.

        Parameters
        ----------
        depth : float
            Water depth
        """
        # Get name if not given as input
        if name is None:
            name = f"wlevpoly{len(self.initial_waterdepth_polygons) + 1:04d}"
        # Add to geodataframe
        if polygon is None:
            new_df = pd.DataFrame(
                {
                    "waterdepth": depth,
                    "geometry": polygon,
                    "locationtype": locationtype,
                },
                index=[name],
            )

            self.initial_waterdepth_polygons = new_df
        else:
            self.initial_waterdepth_polygons.loc[name] = {
                "waterdepth": depth,
                "geometry": polygon,
                "locationtype": locationtype,
            }

    def add_rainfall_2D(self, fName, bctype="rainfall"):
        """
        Parameters
        ----------
        fName : str
            Location of netcdf file containing rainfall rasters
        bctype : str
            Type of boundary condition. Currently only rainfall is supported
        """
        assert bctype in ["rainfall"]

        # Add boundary condition
        self.boundaries["rainfall_2D"] = {
            "file_name": fName,
            "bctype": bctype + "bnd",
        }

    @validate_arguments
    def add_boundary_condition(
        self, name: str, pt, quantity: str, value, mesh1d=None
    ) -> None:
        """
        Add a boundary condition to the model:
        - The boundary condition can be a discharge, a water level, or a Q-H relation
        - It is specified by a geographical location (pt)
        - The condition is attached to the nearest 1d mesh node.
        Value can be one of three options:
        - timeseries: a pandas Series with a datetime index
        - constant: a float
        - Q-H boundary: a dictionary with "Q" and "H" keys

        Parameters
        ----------
        name : str
            ID of the boundary condition
        pt : tuple or shapely.geometry.Point
            Location of the boundary condition
        quantity : str
            Type of boundary condition: "dischargebnd", "waterlevelbnd" or "qhbnd"
        value : pd.Series, dict or float
            If a float, a constant-in-time boundary condition is used. If a pandas
            Series, the values per time step are used and the index should be in
            datetime format. If a dict, a Q-H table is assumed.
        mesh1d : optional
            Network object containing the 1d mesh; used to find the nearest node
        """

        assert quantity in ["dischargebnd", "waterlevelbnd", "qhbnd"]

        if isinstance(value, pd.Series):
            vec1 = ((value.index - value.index[0]).total_seconds() / 60.0).tolist()
            vec2 = value.to_numpy().tolist()
            startdate = value.index[0].strftime("%Y-%m-%d %H:%M:%S")
            unit1 = f"minutes since {startdate}"
            unit2 = "m3/s" if quantity == "dischargebnd" else "m"
        elif isinstance(value, numbers.Real):
            vec1 = None
            vec2 = float(value)
            startdate = "0000-00-00 00:00:00"
            unit1 = f"minutes since {startdate}"
            unit2 = "m3/s" if quantity == "dischargebnd" else "m"
        else:
            vec1 = value["Q"]
            vec2 = value["H"]
            unit1 = "m3/s"
            unit2 = "m"

        if name in self.boundary_nodes.keys():
            raise KeyError(
                f'A boundary condition with name "{name}" is already present.'
            )

        if isinstance(pt, tuple):
            pt = Point(*pt)

        # Find the nearest node
        if len(mesh1d._mesh1d.mesh1d_node_id) == 0:
            raise KeyError(
                "To find the closest node a 1d mesh should be created first."
            )
        nodes1d = np.asarray(
            [
                n
                for n in zip(
                    mesh1d._mesh1d.mesh1d_node_x,
                    mesh1d._mesh1d.mesh1d_node_y,
                    mesh1d._mesh1d.mesh1d_node_id,
                )
            ]
        )
        get_nearest = KDTree(nodes1d[:, 0:2])
        _, idx_nearest = get_nearest.query(pt.coords[:])
        nodeid = f"{float(nodes1d[idx_nearest[0],0]):12.6f}_{float(nodes1d[idx_nearest[0],1]):12.6f}"

        # Add boundary condition
        self.boundary_nodes[name] = {
            "id": name,
            "quantity": quantity,
            "vec1": vec1,
            "vec2": vec2,
            "unit1": unit1,
            "unit2": unit2,
            "nodeid": nodeid,
        }


    @validate_arguments
    def add_rain_series(self, name: str, values: list, times: list) -> None:
        """
        Adds a rain series as a boundary condition.
        Specify name, values, and times.

        Parameters
        ----------
        name : str
            ID of the condition
        values : list of floats
            Values of the rain intensity
        times : list of datetime
            Times for the values
        """
        # Add boundary condition
        self.boundary_nodes[name] = {
            "code": name,
            "bctype": "rainfall",
            "filetype": 1,
            "method": 1,
            "operand": "O",
            "value": values,
            "time": times,
            "geometry": None,
            "branchid": None,
        }

    @validate_arguments(config=ConfigDict(arbitrary_types_allowed=True))
    def add_lateral(
        self,
        id: str,
        branchid: str,
        chainage: str,
        discharge: pd.Series | float | str,
    ) -> None:
        """Add a lateral to an FM model

        Args:
            id (str): Id of the lateral node
            branchid (str): Id of the branch the lateral is snapped to
            chainage (str): Chainage along the branch
            discharge (str, float, or pd.Series): the string "REALTIME" when coupled
                to an RR model, a float for a constant discharge, or a pd.Series
                with a time index
        """
        # Convert time to minutes
        if isinstance(discharge, pd.Series):
            times = (
                (discharge.index - discharge.index[0]).total_seconds() / 60.0
            ).tolist()
            values = discharge.to_numpy().tolist()
            startdate = discharge.index[0].strftime("%Y-%m-%d %H:%M:%S")
        else:
            times = None
            values = None
            startdate = "0000-00-00 00:00:00"

        self.lateral_nodes[id] = {
            "id": id,
            "type": "discharge",
            "locationtype": "1d",
            "branchid": branchid,
            "chainage": chainage,
            "time": times,
            "time_unit": f"minutes since {startdate}",
            "value_unit": "m3/s",
            "value": values,
            "discharge": discharge,
        }
add_boundary_condition(name: str, pt, quantity: str, value, mesh1d=None) -> None

Add a boundary condition to the model:

- The boundary condition can be a discharge, a water level, or a Q-H relation
- It is specified by a geographical location (pt)
- The condition is attached to the nearest 1d mesh node.

Value can be one of three options:

- timeseries: a pandas Series with a datetime index
- constant: a float
- Q-H boundary: a dictionary with "Q" and "H" keys

Parameters

name : str
    ID of the boundary condition
pt : tuple or shapely.geometry.Point
    Location of the boundary condition
quantity : str
    Type of boundary condition: "dischargebnd", "waterlevelbnd" or "qhbnd"
value : pd.Series, dict or float
    If a float, a constant-in-time boundary condition is used. If a pandas Series, the values per time step are used and the index should be in datetime format. If a dict, a Q-H table is assumed.
mesh1d : optional
    Network object containing the 1d mesh; used to find the nearest node

Source code in hydrolib/dhydamo/core/hydamo.py
@validate_arguments
def add_boundary_condition(
    self, name: str, pt, quantity: str, value, mesh1d=None
) -> None:
    """
    Add a boundary condition to the model:
    - The boundary condition can be a discharge, a water level, or a Q-H relation
    - It is specified by a geographical location (pt)
    - The condition is attached to the nearest 1d mesh node.
    Value can be one of three options:
    - timeseries: a pandas Series with a datetime index
    - constant: a float
    - Q-H boundary: a dictionary with "Q" and "H" keys

    Parameters
    ----------
    name : str
        ID of the boundary condition
    pt : tuple or shapely.geometry.Point
        Location of the boundary condition
    quantity : str
        Type of boundary condition: "dischargebnd", "waterlevelbnd" or "qhbnd"
    value : pd.Series, dict or float
        If a float, a constant-in-time boundary condition is used. If a pandas
        Series, the values per time step are used and the index should be in
        datetime format. If a dict, a Q-H table is assumed.
    mesh1d : optional
        Network object containing the 1d mesh; used to find the nearest node
    """

    assert quantity in ["dischargebnd", "waterlevelbnd", "qhbnd"]

    if isinstance(value, pd.Series):
        vec1 = ((value.index - value.index[0]).total_seconds() / 60.0).tolist()
        vec2 = value.to_numpy().tolist()
        startdate = value.index[0].strftime("%Y-%m-%d %H:%M:%S")
        unit1 = f"minutes since {startdate}"
        unit2 = "m3/s" if quantity == "dischargebnd" else "m"
    elif isinstance(value, numbers.Real):
        vec1 = None
        vec2 = float(value)
        startdate = "0000-00-00 00:00:00"
        unit1 = f"minutes since {startdate}"
        unit2 = "m3/s" if quantity == "dischargebnd" else "m"
    else:
        vec1 = value["Q"]
        vec2 = value["H"]
        unit1 = "m3/s"
        unit2 = "m"

    if name in self.boundary_nodes.keys():
        raise KeyError(
            f'A boundary condition with name "{name}" is already present.'
        )

    if isinstance(pt, tuple):
        pt = Point(*pt)

    # Find the nearest node
    if len(mesh1d._mesh1d.mesh1d_node_id) == 0:
        raise KeyError(
            "To find the closest node a 1d mesh should be created first."
        )
    nodes1d = np.asarray(
        [
            n
            for n in zip(
                mesh1d._mesh1d.mesh1d_node_x,
                mesh1d._mesh1d.mesh1d_node_y,
                mesh1d._mesh1d.mesh1d_node_id,
            )
        ]
    )
    get_nearest = KDTree(nodes1d[:, 0:2])
    _, idx_nearest = get_nearest.query(pt.coords[:])
    nodeid = f"{float(nodes1d[idx_nearest[0],0]):12.6f}_{float(nodes1d[idx_nearest[0],1]):12.6f}"

    # Add boundary condition
    self.boundary_nodes[name] = {
        "id": name,
        "quantity": quantity,
        "vec1": vec1,
        "vec2": vec2,
        "unit1": unit1,
        "unit2": unit2,
        "nodeid": nodeid,
    }
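
A usage sketch for the three value types, assuming an ExternalForcings instance named ef and a network object with a previously generated 1d mesh (both assumptions; coordinates are made up):

import pandas as pd

# Constant water level of 1.2 m:
ef.add_boundary_condition("bc_wlev", (140000.0, 455000.0), "waterlevelbnd", 1.2, mesh1d=network)

# Discharge time series with a datetime index (values in m3/s):
series = pd.Series(
    [2.0, 3.5, 2.5],
    index=pd.to_datetime(["2020-01-01 00:00", "2020-01-01 01:00", "2020-01-01 02:00"]),
)
ef.add_boundary_condition("bc_q", (141000.0, 455500.0), "dischargebnd", series, mesh1d=network)

# Q-H relation as a dict with "Q" and "H" keys:
qh = {"Q": [0.0, 5.0, 10.0], "H": [0.5, 1.0, 1.4]}
ef.add_boundary_condition("bc_qh", (142000.0, 456000.0), "qhbnd", qh, mesh1d=network)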
add_lateral(id: str, branchid: str, chainage: str, discharge: pd.Series | float | str) -> None

Add a lateral to an FM model

Parameters:

- id (str): Id of the lateral node. Required.
- branchid (str): Id of the branch the lateral is snapped to. Required.
- chainage (str): Chainage along the branch. Required.
- discharge (str, float, or pd.Series): the string "REALTIME" when coupled to an RR model, a float for a constant discharge, or a pd.Series with a time index. Required.
Source code in hydrolib/dhydamo/core/hydamo.py
@validate_arguments(config=ConfigDict(arbitrary_types_allowed=True))
def add_lateral(
    self,
    id: str,
    branchid: str,
    chainage: str,
    discharge: pd.Series | float | str,
) -> None:
    """Add a lateral to an FM model

    Args:
        id (str): Id of the lateral node
        branchid (str): Id of the branch the lateral is snapped to
        chainage (str): Chainage along the branch
        discharge (str, float, or pd.Series): the string "REALTIME" when coupled
            to an RR model, a float for a constant discharge, or a pd.Series
            with a time index
    """
    # Convert time to minutes
    if isinstance(discharge, pd.Series):
        times = (
            (discharge.index - discharge.index[0]).total_seconds() / 60.0
        ).tolist()
        values = discharge.to_numpy().tolist()
        startdate = discharge.index[0].strftime("%Y-%m-%d %H:%M:%S")
    else:
        times = None
        values = None
        startdate = "0000-00-00 00:00:00"

    self.lateral_nodes[id] = {
        "id": id,
        "type": "discharge",
        "locationtype": "1d",
        "branchid": branchid,
        "chainage": chainage,
        "time": times,
        "time_unit": f"minutes since {startdate}",
        "value_unit": "m3/s",
        "value": values,
        "discharge": discharge,
    }
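
A usage sketch for the three discharge types, assuming an ExternalForcings instance named ef and existing branch ids (assumptions; values are made up). Note that chainage is typed as a string in the signature:

import pandas as pd

# Constant lateral of 0.5 m3/s:
ef.add_lateral(id="lat_01", branchid="branch_A", chainage="150.0", discharge=0.5)

# Realtime coupling to an RR model:
ef.add_lateral(id="lat_02", branchid="branch_A", chainage="300.0", discharge="REALTIME")

# Time series lateral:
series = pd.Series(
    [0.1, 0.4, 0.2],
    index=pd.to_datetime(["2020-01-01 00:00", "2020-01-01 01:00", "2020-01-01 02:00"]),
)
ef.add_lateral(id="lat_03", branchid="branch_B", chainage="75.0", discharge=series)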
add_rain_series(name: str, values: list, times: list) -> None

Adds a rain series as a boundary condition. Specify name, values, and times.

Parameters

name : str
    ID of the condition
values : list of floats
    Values of the rain intensity
times : list of datetime
    Times for the values

Source code in hydrolib/dhydamo/core/hydamo.py
@validate_arguments
def add_rain_series(self, name: str, values: list, times: list) -> None:
    """
    Adds a rain series as a boundary condition.
    Specify name, values, and times.

    Parameters
    ----------
    name : str
        ID of the condition
    values : list of floats
        Values of the rain intensity
    times : list of datetime
        Times for the values
    """
    # Add boundary condition
    self.boundary_nodes[name] = {
        "code": name,
        "bctype": "rainfall",
        "filetype": 1,
        "method": 1,
        "operand": "O",
        "value": values,
        "time": times,
        "geometry": None,
        "branchid": None,
    }
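
A minimal usage sketch, assuming an ExternalForcings instance named ef (an assumption; values and times are made up):

import datetime

# Three hourly rain intensity values starting at the model start time.
times = [datetime.datetime(2020, 1, 1) + datetime.timedelta(hours=h) for h in range(3)]
ef.add_rain_series("rain_01", values=[0.0, 2.5, 1.0], times=times)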
add_rainfall_2D(fName, bctype='rainfall')

Parameters

fName : str
    Location of netcdf file containing rainfall rasters
bctype : str
    Type of boundary condition. Currently only rainfall is supported

Source code in hydrolib/dhydamo/core/hydamo.py
def add_rainfall_2D(self, fName, bctype="rainfall"):
    """
    Parameters
    ----------
    fName : str
        Location of netcdf file containing rainfall rasters
    bctype : str
        Type of boundary condition. Currently only rainfall is supported
    """
    assert bctype in ["rainfall"]

    # Add boundary condition
    self.boundaries["rainfall_2D"] = {
        "file_name": fName,
        "bctype": bctype + "bnd",
    }
set_initial_waterdepth(depth, polygon=None, name=None, locationtype='1d')

Method to set the initial water depth in the 1d model. The water depth is set by determining the water level at the locations of the cross sections.

Parameters

depth : float
    Water depth

Source code in hydrolib/dhydamo/core/hydamo.py
def set_initial_waterdepth(self, depth, polygon=None, name=None, locationtype="1d"):
    """
    Method to set the initial water depth in the 1d model. The water depth is
    set by determining the water level at the locations of the cross sections.

    Parameters
    ----------
    depth : float
        Water depth
    polygon : Polygon, optional
        Polygon limiting the extent of the initial water depth. If None, the value is applied without a spatial limit.
    name : str, optional
        Name of the initial condition. Generated automatically if not given.
    locationtype : str, optional
        Location type. Defaults to "1d".
    """
    # Generate a name if not given as input
    if name is None:
        name = f"wlevpoly{len(self.initial_waterdepth_polygons) + 1:04d}"
    # Add to geodataframe
    if polygon is None:
        new_df = pd.DataFrame(
            {
                "waterdepth": depth,
                "geometry": polygon,
                "locationtype": locationtype,
            },
            index=[name],
        )

        self.initial_waterdepth_polygons = new_df
    else:
        self.initial_waterdepth_polygons.loc[name] = {
            "waterdepth": depth,
            "geometry": polygon,
            "locationtype": locationtype,
        }
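
A minimal sketch (assuming the method is reached through hydamo.external_forcings; the value is hypothetical):

# A uniform initial water depth of 0.5 m, applied without a spatial limit
hydamo.external_forcings.set_initial_waterdepth(0.5)
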
set_initial_waterlevel(level, polygon=None, name=None, locationtype='1d')

Method to set initial water level. A polygon can be given to limit the initial water level to a certain extent.

Source code in hydrolib/dhydamo/core/hydamo.py
def set_initial_waterlevel(self, level, polygon=None, name=None, locationtype="1d"):
    """
    Method to set initial water level. A polygon can be given to
    limit the initial water level to a certain extent.

    """
    # Generate a name if not given as input
    if name is None:
        name = f"wlevpoly{len(self.initial_waterlevel_polygons) + 1:04d}"

    # Add to geodataframe
    if polygon is None:
        new_df = pd.DataFrame(
            {
                "waterlevel": level,
                "geometry": polygon,
                "locationtype": locationtype,
            },
            index=[name],
        )
        self.initial_waterlevel_polygons = new_df
    else:
        self.initial_waterlevel_polygons.loc[name] = {
            "waterlevel": level,
            "geometry": polygon,
            "locationtype": locationtype,
        }
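
A minimal sketch with a spatial limit (assuming the method is reached through hydamo.external_forcings; the polygon coordinates and name are hypothetical):

from shapely.geometry import Polygon

# Hypothetical extent within which the initial level applies
extent = Polygon([(0, 0), (1000, 0), (1000, 1000), (0, 1000)])
hydamo.external_forcings.set_initial_waterlevel(1.25, polygon=extent, name="init_west")
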
set_missing_waterlevel(missing)

Method to set the missing value for the water level. This overwrites the water level missing value in the MDU file.

Parameters

missing : float
    Missing value for the water level

Source code in hydrolib/dhydamo/core/hydamo.py
def set_missing_waterlevel(self, missing):
    """
    Method to set the missing value for the water level.
    This overwrites the water level missing value in the MDU file.

    Parameters
    ----------
    missing : float
        Missing value for the water level
    """
    self.mdu_parameters["WaterLevIni"] = missing

HyDAMO

Main data structure for both the HyDAMO input data and the intermediate dataframes. Contains subclasses for network, structures, cross sections, observation points, storage nodes and external forcings.

Source code in hydrolib/dhydamo/core/hydamo.py
class HyDAMO:
    """Main data structure for both the HyDAMO input data and the intermediate dataframes. Contains subclasses
    for network, structures, cross sections, observation points, storage nodes and external forcings.
    """

    @validate_arguments(config=ConfigDict(arbitrary_types_allowed=True))
    def __init__(self, extent_file: Path | str = None) -> None:
        """Initiate subclasses and IO-methods

        Args:
            extent_file (Union[Path, str], optional): model extent, used to clip datasets. Defaults to None.
        """
        self.network = Network(self)

        self.structures = Structures(self)

        self.crosssections = CrossSections(self)  # Add all items

        self.observationpoints = ObservationPoints(self)

        self.external_forcings = ExternalForcings(self)

        self.storagenodes = StorageNodes(self)

        self.roughness_mapping = {
            "Chezy": "Chezy",
            "Manning": "Manning",
            "StricklerKn": "StricklerNikuradse",
            "StricklerKs": "Strickler",
            "White Colebrook": "WhiteColebrook",
            "Bos en Bijkerk": "deBosBijkerk",
            "Onbekend": "Strickler",
            "Overig": "Strickler",
        }

        # Dictionary for roughness definitions
        self.roughness_definitions = {}

        # Read geometry to clip data
        if extent_file is not None:
            self.clipgeo = gpd.read_file(extent_file).union_all()
        else:
            self.clipgeo = None

        # versioning info
        self.version = {
            "number": dhydamo.__version__,
            "date": datetime.strftime(datetime.now(timezone.utc), "%Y-%m-%dT%H:%M:%S.%fZ"),
            "dimr_version": "Deltares, DIMR_EXE Version 2.00.00.140737 (Win64) (Win64)",
            "suite_version": "D-HYDRO Suite 2024.03 1D2D,",
        }

        # Create standard dataframe for network, crosssections, orifices, weirs
        self.branches = ExtendedGeoDataFrame(
            geotype=LineString,
            required_columns=[
                "code",
                "geometry"
            ],
            related=None
        )

        self.profile = ExtendedGeoDataFrame(
            geotype=LineString,
            required_columns=["code", "geometry", "globalid", "profiellijnid"],
            related={
                "profile_roughness": {
                    "via": "globalid",
                    "on": "profielpuntid",
                    "coupled_to": None
                },
                "profile_line": {
                    "via": "profiellijnid",
                    "on": "globalid",
                    "coupled_to": {
                        "profile_group": {
                            "via": "profielgroepid",
                            "on": "globalid",
                            "coupled_to": None
                        }
                    }
                }
            }
        )
        self.profile_roughness = ExtendedDataFrame(
            required_columns=["profielpuntid"]
        )

        self.profile_line = ExtendedGeoDataFrame(
            geotype=LineString,
            required_columns=["globalid", "profielgroepid"],
            related={
                "profile_group": {
                    "via": "profielgroepid",
                    "on": "globalid",
                    "coupled_to": None
                },
                "profile": {
                    "via": "globalid",
                    "on": "profiellijnid",
                    "coupled_to": {
                        "profile_roughness": {
                            "via": "globalid",
                            "on": "profielpuntid",
                            "coupled_to": None
                        }
                    }
                }
            }
        )

        self.profile_group = ExtendedDataFrame(
            required_columns=[]
        )

        self.param_profile = ExtendedDataFrame(
            required_columns=["globalid", "normgeparamprofielid", "hydroobjectid"]
        )

        self.param_profile_values = ExtendedDataFrame(
            required_columns=[
                "normgeparamprofielid",
                "soortparameter",
                "waarde",
                "ruwheidlaag",
                "ruwheidhoog",
                "typeruwheid",
            ]
        )

        # Weirs
        self.weirs = ExtendedGeoDataFrame(
            geotype=Point,
            required_columns=[
                "code",
                "geometry",
                "globalid",                
                "afvoercoefficient",
            ],
            related={
                "opening": {
                    "via": "globalid",
                    "on": "stuwid",
                    "coupled_to": {
                        "management_device": {
                            "via": "globalid",
                            "on": "kunstwerkopeningid",
                            "coupled_to": None
                        }
                    }
                }
            }
        )

        # opening
        self.opening = ExtendedDataFrame(
            required_columns=[            
                "globalid",
                "laagstedoorstroombreedte",
                "laagstedoorstroomhoogte",
                "afvoercoefficient",
            ]
        )

        # opening
        self.closing_device = ExtendedDataFrame(
            required_columns=["code"]
        )

        # opening
        self.management_device = ExtendedDataFrame(
            required_columns=["code", "overlaatonderlaat"]
        )

        # Bridges
        self.bridges = ExtendedGeoDataFrame(
            geotype=Point,
            required_columns=[
                "code",
                "globalid",
                "geometry",
                "lengte",
                "intreeverlies",
                "uittreeverlies",
                "ruwheid",
                "typeruwheid",
            ],
            related=None
        )

        # Culverts
        self.culverts = ExtendedGeoDataFrame(
            geotype=LineString,
            required_columns=[
                "code",
                "geometry",
                "lengte",
                "hoogteopening",
                "breedteopening",
                "hoogtebinnenonderkantbene",
                "hoogtebinnenonderkantbov",
                "vormkoker",
                "intreeverlies",
                "uittreeverlies",
                "typeruwheid",
                "ruwheid",
            ],
            related={
                "management_device": {
                    "via": "globalid",
                    "on": "duikersifonhevelid",
                    "coupled_to": None
                }
            }
        )

        # Gemalen
        self.pumpstations = ExtendedGeoDataFrame(
            geotype=Point,
            required_columns=[
                "code",
                "globalid",
                "geometry",
            ],
            related={
                "pumps": {
                    "via": "globalid",
                    "on": "gemaalid",
                    "coupled_to": {
                        "management": {
                            "via": "globalid",
                            "on": "pompid",
                            "coupled_to": None
                        }
                    }
                }
            }
        )
        self.pumps = ExtendedDataFrame(
            required_columns=["code", "globalid", "gemaalid", "maximalecapaciteit"]
        )
        self.management = ExtendedDataFrame(
            required_columns=["code", "globalid"]
        )

        # Hydraulische randvoorwaarden
        self.boundary_conditions = ExtendedGeoDataFrame(
            geotype=Point,
            required_columns=["code", "typerandvoorwaarde", "geometry"],
            related=None
        )

        # RR catchments
        self.catchments = ExtendedGeoDataFrame(
            geotype=Polygon | MultiPolygon,
            required_columns=["code", "geometry", "globalid", "lateraleknoopid"],
            related={
                "laterals": {
                    "via": "lateraleknoopid",
                    "on": "globalid",
                    "coupled_to": None
                }
            }
        )

        # Laterals
        self.laterals = ExtendedGeoDataFrame(
            geotype=Point,
            required_columns=["code", "geometry", "globalid"],
            related={
                "catchments": {
                    "via": "globalid",
                    "on": "lateraleknoopid",
                    "coupled_to": None
                }
            }
        )

        # RR overflows
        self.overflows = ExtendedGeoDataFrame(
            geotype=Point,
            required_columns=["code", "geometry", "codegerelateerdobject", "fractie"],
            related={
                "sewer_areas": {
                    "via": "codegerelateerdobject",
                    "on": "code",
                    "coupled_to": None
                }
            }
        )

        # RR sewer areas
        self.sewer_areas = ExtendedGeoDataFrame(
            geotype=Polygon,
            required_columns=["code", "geometry"],
            related={
                "overflows": {
                    "via": "code",
                    "on": "codegerelateerdobject",
                    "coupled_to": None
                }
            }
        )

        # RR greenhouse areas
        self.greenhouse_areas = ExtendedGeoDataFrame(
            geotype=Polygon,
            required_columns=["code", "geometry"],
            related={
                "greenhouse_laterals": {
                    "via": "code",
                    "on": "codegerelateerdobject",
                    "coupled_to": None
                }
            }
        )

        # RR greenhouse laterals
        self.greenhouse_laterals = ExtendedGeoDataFrame(
            geotype=Point,
            required_columns=["code", "geometry", "codegerelateerdobject"],
            related={
                "greenhouse_areas": {
                    "via": "codegerelateerdobject",
                    "on": "code",
                    "coupled_to": None
                }
            }
        )

        # RR storage areas
        self.storage_areas = ExtendedGeoDataFrame(
            geotype=Polygon,
            required_columns=["code", "geometry"],            
        )

    @validate_arguments(config=ConfigDict(arbitrary_types_allowed=True))
    def list_to_str(self, lst: list | np.ndarray) -> str:
        """Converts list to string

        Args:
            lst (list): The list

        Returns:
            str: The output string
        """
        if len(lst) == 1:
            string = str(lst[0])
        else:
            string = " ".join([f"{number:6.3f}" for number in lst])
        return string

    @validate_arguments
    def dict_to_dataframe(self, dictionary: dict) -> pd.DataFrame:
        """Converts a dictionary to dataframe, using index as rows

        Args:
            dictionary (dict): Input dictionary

        Returns:
            pd.DataFrame: Output dataframe
        """

        return pd.DataFrame.from_dict(dictionary, orient="index")

    def snap_to_branch_and_drop(self, extendedgdf, branches, snap_method: str, maxdist=5, drop_related=True):
        """Snap the geometries to the branch and drop loose objects"""

        # Snap the extended geodataframe to branches
        extendedgdf.snap_to_branch(branches, snap_method, maxdist=maxdist)

        # Determine which labels need to be dropped for the first object based on
        # nan values for branch_offset.
        drop_idx = extendedgdf[pd.isna(extendedgdf.branch_offset)].index.to_numpy()
        drop_list = [(extendedgdf, drop_idx)]
        logger.info("dropping objects with indices: %s", drop_idx)

        # Find out which labels need to be dropped from related objects
        if drop_related and extendedgdf.related is not None:
            for target_str, relation in extendedgdf.related.items():
                self._recursive_drop_related(drop_list, extendedgdf, drop_idx, target_str, **relation)

        # Drop the relevant rows with the list of labels
        for source, drop_idx in drop_list:
            source.drop(labels=drop_idx, inplace=True)

    def _recursive_drop_related(self, drop_list, source, drop_idx, target_str, via, on, coupled_to):
        target = getattr(self, target_str)
        drop_related = source.loc[drop_idx, via].to_numpy()
        drop_idx = target[target[on].isin(drop_related)].index.to_numpy()
        drop_list.append((target, drop_idx))
        logger.info(
            "  - dropping objects from '%s' with indices: %s",
            target_str,
            drop_idx,
        )

        if coupled_to is not None:
            for next_target_str, next_relation in coupled_to.items():
                self._recursive_drop_related(drop_list, target, drop_idx, next_target_str, **next_relation)

    def create_laterals(self, qspec_file=None):
        """Create lateral nodes by snapping each catchment centroid to a branch.
        If a specific-discharge raster (mm/d) is given, a mean discharge per
        catchment is derived from it with zonal statistics."""
        ## Read the specific discharges
        if qspec_file is not None:
            rr = DRRModel()
            qspec, affine = rr.read_raster(qspec_file, static=True)
            fill_value_specifieke_afvoeren = 0
            qspec = np.where(qspec < 0, fill_value_specifieke_afvoeren, qspec)

            ## Determine the discharge per catchment with zonal statistics
            afvoer_per_gebied = zonal_stats(self.catchments, qspec, affine=affine, stats="mean", all_touched=True, nodata=-2147483647)

        self.laterals = self.catchments.copy()

        for num, cat in enumerate(self.catchments.itertuples()):
            ## Couple the discharges to the catchments
            if qspec_file is not None:
                q = afvoer_per_gebied[num]['mean']  # mm/d
            else:
                q = np.nan

            area = cat.geometry.area           # m2
            q_m3s = q * area / (1000 * 86400)  # from mm/d to m3/s
            self.laterals.at[cat.Index, 'afvoer'] = q_m3s
            knoopid = cat.lateraleknoopid

            ## Distance between branch and catchment. If a branch lies within
            ## the catchment, this distance is 0.
            distances = self.branches.distance(cat.geometry)
            ## One or more branches within the catchment:
            if sum(distances == 0) > 0:
                ## Intersect the branches with the catchment
                selectie = self.branches.intersection(cat.geometry)
                ## Index of the branch to snap to: the branch that lies within
                ## the catchment and is closest to the catchment centroid.
                index_watergang = selectie.distance(cat.geometry.centroid).idxmin()
                ## Combine the intersected branches with the index to find the
                ## branch segment that may be snapped to.
                watergang = selectie.at[index_watergang]
            ## No intersections
            else:
                ## Find the branch closest to the catchment centroid
                index_watergang = self.branches.distance(cat.geometry.centroid).idxmin()
                ## Select this branch to snap to
                watergang = self.branches.at[index_watergang, 'geometry']

            ## Snap the catchment centroid onto the selected branch
            lateral = watergang.interpolate(watergang.project(cat.geometry.centroid))
            ## Write the snap location to the geodataframe
            self.laterals.at[cat.Index, 'geometry'] = lateral
            self.laterals.at[cat.Index, 'globalid'] = knoopid
            self.laterals.at[cat.Index, 'code'] = f'lat_{cat.code}'
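
A minimal usage sketch (the raster path is hypothetical; catchments and branches must already be loaded on the HyDAMO object):

hydamo.create_laterals(qspec_file="data/specific_discharge.idf")
# Without a raster, laterals are still placed but receive a NaN discharge:
hydamo.create_laterals()
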
__init__(extent_file: Path | str = None) -> None

Initiate subclasses and IO-methods

Parameters:

extent_file : Path | str, optional
    Model extent, used to clip datasets. Defaults to None.
Source code in hydrolib/dhydamo/core/hydamo.py
@validate_arguments(config=ConfigDict(arbitrary_types_allowed=True))
def __init__(self, extent_file: Path | str = None) -> None:
    """Initiate subclasses and IO-methods

    Args:
        extent_file (Union[Path, str], optional): model extent, used to clip datasets. Defaults to None.
    """
    self.network = Network(self)

    self.structures = Structures(self)

    self.crosssections = CrossSections(self)  # Add all items

    self.observationpoints = ObservationPoints(self)

    self.external_forcings = ExternalForcings(self)

    self.storagenodes = StorageNodes(self)

    self.roughness_mapping = {
        "Chezy": "Chezy",
        "Manning": "Manning",
        "StricklerKn": "StricklerNikuradse",
        "StricklerKs": "Strickler",
        "White Colebrook": "WhiteColebrook",
        "Bos en Bijkerk": "deBosBijkerk",
        "Onbekend": "Strickler",
        "Overig": "Strickler",
    }

    # Dictionary for roughness definitions
    self.roughness_definitions = {}

    # Read geometry to clip data
    if extent_file is not None:
        self.clipgeo = gpd.read_file(extent_file).union_all()
    else:
        self.clipgeo = None

    # versioning info
    self.version = {
        "number": dhydamo.__version__,
        "date": datetime.strftime(datetime.now(timezone.utc), "%Y-%m-%dT%H:%M:%S.%fZ"),
        "dimr_version": "Deltares, DIMR_EXE Version 2.00.00.140737 (Win64) (Win64)",
        "suite_version": "D-HYDRO Suite 2024.03 1D2D,",
    }

    # Create standard dataframe for network, crosssections, orifices, weirs
    self.branches = ExtendedGeoDataFrame(
        geotype=LineString,
        required_columns=[
            "code",
            "geometry"
        ],
        related=None
    )

    self.profile = ExtendedGeoDataFrame(
        geotype=LineString,
        required_columns=["code", "geometry", "globalid", "profiellijnid"],
        related={
            "profile_roughness": {
                "via": "globalid",
                "on": "profielpuntid",
                "coupled_to": None
            },
            "profile_line": {
                "via": "profiellijnid",
                "on": "globalid",
                "coupled_to": {
                    "profile_group": {
                        "via": "profielgroepid",
                        "on": "globalid",
                        "coupled_to": None
                    }
                }
            }
        }
    )
    self.profile_roughness = ExtendedDataFrame(
        required_columns=["profielpuntid"]
    )

    self.profile_line = ExtendedGeoDataFrame(
        geotype=LineString,
        required_columns=["globalid", "profielgroepid"],
        related={
            "profile_group": {
                "via": "profielgroepid",
                "on": "globalid",
                "coupled_to": None
            },
            "profile": {
                "via": "globalid",
                "on": "profiellijnid",
                "coupled_to": {
                    "profile_roughness": {
                        "via": "globalid",
                        "on": "profielpuntid",
                        "coupled_to": None
                    }
                }
            }
        }
    )

    self.profile_group = ExtendedDataFrame(
        required_columns=[]
    )

    self.param_profile = ExtendedDataFrame(
        required_columns=["globalid", "normgeparamprofielid", "hydroobjectid"]
    )

    self.param_profile_values = ExtendedDataFrame(
        required_columns=[
            "normgeparamprofielid",
            "soortparameter",
            "waarde",
            "ruwheidlaag",
            "ruwheidhoog",
            "typeruwheid",
        ]
    )

    # Weirs
    self.weirs = ExtendedGeoDataFrame(
        geotype=Point,
        required_columns=[
            "code",
            "geometry",
            "globalid",                
            "afvoercoefficient",
        ],
        related={
            "opening": {
                "via": "globalid",
                "on": "stuwid",
                "coupled_to": {
                    "management_device": {
                        "via": "globalid",
                        "on": "kunstwerkopeningid",
                        "coupled_to": None
                    }
                }
            }
        }
    )

    # opening
    self.opening = ExtendedDataFrame(
        required_columns=[            
            "globalid",
            "laagstedoorstroombreedte",
            "laagstedoorstroomhoogte",
            "afvoercoefficient",
        ]
    )

    # opening
    self.closing_device = ExtendedDataFrame(
        required_columns=["code"]
    )

    # opening
    self.management_device = ExtendedDataFrame(
        required_columns=["code", "overlaatonderlaat"]
    )

    # Bridges
    self.bridges = ExtendedGeoDataFrame(
        geotype=Point,
        required_columns=[
            "code",
            "globalid",
            "geometry",
            "lengte",
            "intreeverlies",
            "uittreeverlies",
            "ruwheid",
            "typeruwheid",
        ],
        related=None
    )

    # Culverts
    self.culverts = ExtendedGeoDataFrame(
        geotype=LineString,
        required_columns=[
            "code",
            "geometry",
            "lengte",
            "hoogteopening",
            "breedteopening",
            "hoogtebinnenonderkantbene",
            "hoogtebinnenonderkantbov",
            "vormkoker",
            "intreeverlies",
            "uittreeverlies",
            "typeruwheid",
            "ruwheid",
        ],
        related={
            "management_device": {
                "via": "globalid",
                "on": "duikersifonhevelid",
                "coupled_to": None
            }
        }
    )

    # Gemalen
    self.pumpstations = ExtendedGeoDataFrame(
        geotype=Point,
        required_columns=[
            "code",
            "globalid",
            "geometry",
        ],
        related={
            "pumps": {
                "via": "globalid",
                "on": "gemaalid",
                "coupled_to": {
                    "management": {
                        "via": "globalid",
                        "on": "pompid",
                        "coupled_to": None
                    }
                }
            }
        }
    )
    self.pumps = ExtendedDataFrame(
        required_columns=["code", "globalid", "gemaalid", "maximalecapaciteit"]
    )
    self.management = ExtendedDataFrame(
        required_columns=["code", "globalid"]
    )

    # Hydraulische randvoorwaarden
    self.boundary_conditions = ExtendedGeoDataFrame(
        geotype=Point,
        required_columns=["code", "typerandvoorwaarde", "geometry"],
        related=None
    )

    # RR catchments
    self.catchments = ExtendedGeoDataFrame(
        geotype=Polygon | MultiPolygon,
        required_columns=["code", "geometry", "globalid", "lateraleknoopid"],
        related={
            "laterals": {
                "via": "lateraleknoopid",
                "on": "globalid",
                "coupled_to": None
            }
        }
    )

    # Laterals
    self.laterals = ExtendedGeoDataFrame(
        geotype=Point,
        required_columns=["code", "geometry", "globalid"],
        related={
            "catchments": {
                "via": "globalid",
                "on": "lateraleknoopid",
                "coupled_to": None
            }
        }
    )

    # RR overflows
    self.overflows = ExtendedGeoDataFrame(
        geotype=Point,
        required_columns=["code", "geometry", "codegerelateerdobject", "fractie"],
        related={
            "sewer_areas": {
                "via": "codegerelateerdobject",
                "on": "code",
                "coupled_to": None
            }
        }
    )

    # RR sewer areas
    self.sewer_areas = ExtendedGeoDataFrame(
        geotype=Polygon,
        required_columns=["code", "geometry"],
        related={
            "overflows": {
                "via": "code",
                "on": "codegerelateerdobject",
                "coupled_to": None
            }
        }
    )

    # RR greenhouse areas
    self.greenhouse_areas = ExtendedGeoDataFrame(
        geotype=Polygon,
        required_columns=["code", "geometry"],
        related={
            "greenhouse_laterals": {
                "via": "code",
                "on": "codegerelateerdobject",
                "coupled_to": None
            }
        }
    )

    # RR greenhouse laterals
    self.greenhouse_laterals = ExtendedGeoDataFrame(
        geotype=Point,
        required_columns=["code", "geometry", "codegerelateerdobject"],
        related={
            "greenhouse_areas": {
                "via": "codegerelateerdobject",
                "on": "code",
                "coupled_to": None
            }
        }
    )

    # RR storage areas
    self.storage_areas = ExtendedGeoDataFrame(
        geotype=Polygon,
        required_columns=["code", "geometry"],            
    )
dict_to_dataframe(dictionary: dict) -> pd.DataFrame

Converts a dictionary to dataframe, using index as rows

Parameters:

dictionary : dict
    Input dictionary (required)

Returns:

pd.DataFrame
    Output dataframe

Source code in hydrolib/dhydamo/core/hydamo.py
@validate_arguments
def dict_to_dataframe(self, dictionary: dict) -> pd.DataFrame:
    """Converts a dictionary to dataframe, using index as rows

    Args:
        dictionary (dict): Input dictionary

    Returns:
        pd.DataFrame: Output dataframe
    """

    return pd.DataFrame.from_dict(dictionary, orient="index")
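
A short sketch (the inner keys are hypothetical); the outer keys become the row index and the inner keys become columns:

df = hydamo.dict_to_dataframe(
    {
        "w1": {"code": "w1", "crestlevel": 1.2},
        "w2": {"code": "w2", "crestlevel": 0.8},
    }
)
# df has index ["w1", "w2"] and columns ["code", "crestlevel"]
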
list_to_str(lst: list | np.ndarray) -> str

Converts list to string

Parameters:

lst : list | np.ndarray
    The list (required)

Returns:

str
    The output string

Source code in hydrolib/dhydamo/core/hydamo.py
@validate_arguments(config=ConfigDict(arbitrary_types_allowed=True))
def list_to_str(self, lst: list | np.ndarray) -> str:
    """Converts list to string

    Args:
        lst (list): The list

    Returns:
        str: The output string
    """
    if len(lst) == 1:
        string = str(lst[0])
    else:
        string = " ".join([f"{number:6.3f}" for number in lst])
    return string
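
A short sketch showing the fixed-width formatting:

hydamo.list_to_str([1.0, 2.5, 3.75])  # -> " 1.000  2.500  3.750"
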
snap_to_branch_and_drop(extendedgdf, branches, snap_method: str, maxdist=5, drop_related=True)

Snap the geometries to the branch and drop loose objects

Source code in hydrolib/dhydamo/core/hydamo.py
def snap_to_branch_and_drop(self, extendedgdf, branches, snap_method: str, maxdist=5, drop_related=True):
    """Snap the geometries to the branch and drop loose objects"""

    # Snap the extended geodataframe to branches
    extendedgdf.snap_to_branch(branches, snap_method, maxdist=maxdist)

    # Determine which labels need to be dropped for the first object based on
    # nan values for branch_offset.
    drop_idx = extendedgdf[pd.isna(extendedgdf.branch_offset)].index.to_numpy()
    drop_list = [(extendedgdf, drop_idx)]
    logger.info("dropping objects with indices: %s", drop_idx)

    # Find out which labels need to be dropped from related objects
    if drop_related and extendedgdf.related is not None:
        for target_str, relation in extendedgdf.related.items():
            self._recursive_drop_related(drop_list, extendedgdf, drop_idx, target_str, **relation)

    # Drop the relevant rows with the list of labels
    for source, drop_idx in drop_list:
        source.drop(labels=drop_idx, inplace=True)
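
A minimal sketch (the snap_method value is an assumption; see snap_to_branch for the supported options). Culverts that cannot be snapped within 5 m are dropped, together with their related management devices:

hydamo.snap_to_branch_and_drop(
    hydamo.culverts, hydamo.branches, snap_method="ends", maxdist=5
)
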

Network

Source code in hydrolib/dhydamo/core/hydamo.py
class Network:
    def __init__(self, hydamo: HyDAMO) -> None:
        """Set class variables

        Args:
            hydamo (HyDAMO): HyDAMO object containing all input data
        """
        self.hydamo = hydamo

        # Mesh 1d offsets
        self.offsets = {}

    @validate_arguments
    def set_branch_order(self, branchids: list, idx: int = None) -> None:
        """
        Group branch ids so that the cross sections are
        interpolated along the branch.

        Parameters
        ----------
        branchids : list
            List of branches to group
        idx : int
            Order number with which to update a branch
        """
        # Get the ids (integers) of the branch names given by the user
        branchidx = np.isin(self.mesh1d.description1d["network_branch_ids"], branchids)
        # Get current order
        branchorder = self.mesh1d.get_values("nbranchorder", as_array=True)
        # Update
        if idx is None:
            branchorder[branchidx] = branchorder.max() + 1
        else:
            if not isinstance(idx, int):
                raise TypeError("Expected integer.")
            branchorder[branchidx] = idx
        # Save
        self.mesh1d.set_values("nbranchorder", branchorder)

    def set_branch_interpolation_modelwide(self) -> None:
        """
        Set cross-section interpolation over nodes on all branches, model-wide.

        Note:
            - Interpolation will be set using branchorder property.
            - Branches are grouped between bifurcations where multiple branches meet.
            - No interpolation is applied over these type of bifurcations.
            - No branch order is set on branch groups consisting of 1 branch.
        """
        self.get_grouped_branches()
        for group in self.branch_groups.values():
            if len(group) > 1:
                self.set_branch_order(group)

    def make_nodes_to_branch_map(self) -> None:
        """Map nodes connected to each branch"""
        # Note: first node is upstream, second node is downstream
        self.nodes_to_branch_map = {
            b: [self.mesh1d.description1d["network_node_ids"][_idx - 1] for _idx in idx]
            for b, idx in zip(
                self.mesh1d.description1d["network_branch_ids"],
                self.mesh1d.get_values("nedge_nodes", as_array=True),
            )
        }

    def make_branches_to_node_map(self) -> None:
        """Map branches connected to each node"""
        self.make_nodes_to_branch_map()
        self.branches_to_node_map = {
            n: [k for k, v in self.nodes_to_branch_map.items() if n in v]
            for n in self.mesh1d.description1d["network_node_ids"]
        }

    @validate_arguments
    def generate_nodes_with_bedlevels(
        self,
        resolve_at_bifurcation_method: str = "min",
        return_reversed_branches: bool = False,
    ):
        """
        Generate nodes with upstream and downstream bed levels derived from the cross-sections set on each branch.
        It takes into account whether or not a branch order is specified (i.e. whether interpolation over nodes is
        set).

        Nodes between cross-sections on the same branch or branch group are linearly interpolated; outside that
        range, values are extrapolated as constants (e.g. at the end points of a branch).

        Branch groups that include a branch whose direction is reversed compared to the group as a whole are taken
        into account. Use return_reversed_branches=True to return that information.

        Specify with resolve_at_bifurcation_method how to resolve the bed level at a bifurcation of more than 2
        branches, using the options 'min' (minimum), 'max' (maximum) or 'mean' (average).
        """
        assert resolve_at_bifurcation_method in ["min", "max", "mean"], (
            "Incorrect value for "
            "'resolve_at_bifurcation_method' supplied. "
            "Either use 'min', 'max' or 'mean'"
        )
        bedlevels_crs_branches = self.hydamo.crosssections.get_bottom_levels()
        branch_order = self.mesh1d.get_values("nbranchorder", as_array=True)
        self.make_branches_to_node_map()
        self.make_nodes_to_branch_map()
        nodes_dict = {
            n: {"up": [], "down": []} for n in self.branches_to_node_map.keys()
        }
        reversed_branches = []
        for order, (branch, nodes) in tqdm(
            zip(branch_order, self.nodes_to_branch_map.items()),
            total=len(branch_order),
            desc="Getting bedlevels",
        ):
            if order == -1:
                # No branch order so just get upstream and downstream levels
                branch_length = self.branches.loc[branch, "geometry"].length
                subset = bedlevels_crs_branches.loc[
                    bedlevels_crs_branches["branchid"] == branch
                ]
                if subset.empty:
                    continue  # if this happens, no cross-section is defined. This can be a problem.
                nodes_dict[nodes[0]]["up"].append(
                    np.interp(0.0, subset["chainage"], subset["minz"])
                )
                nodes_dict[nodes[1]]["down"].append(
                    np.interp(branch_length, subset["chainage"], subset["minz"])
                )
            else:
                # In case of a branch order, first collect all branches and sort them in order from up- to downstream
                all_branches = [
                    self.mesh1d.description1d["network_branch_ids"][i]
                    for i in np.argwhere(order == branch_order).ravel()
                ]
                all_nodes = [self.nodes_to_branch_map[b] for b in all_branches]
                # First check if any of the branches has a bedlevel from a cross-section profile otherwise skip
                check = all(
                    [
                        bedlevels_crs_branches.loc[
                            bedlevels_crs_branches["branchid"] == b
                        ].empty
                        for b in all_branches
                    ]
                )
                if check:
                    continue  # if this happens, cross-section is not defined. This can be a problem.

                # Check if every branch is from up to down direction. Otherwise fix by reversing
                n = 0
                n_length = len(all_nodes)
                direction = list(np.ones(n_length))
                max_tries = 0
                while n < n_length:
                    up = (
                        np.count_nonzero(
                            all_nodes[n][0] == np.array([x[0] for x in all_nodes])
                        )
                        == 1
                    )
                    down = (
                        np.count_nonzero(
                            all_nodes[n][1] == np.array([x[1] for x in all_nodes])
                        )
                        == 1
                    )
                    if (not up) or (not down):
                        # Reverse
                        all_nodes[n] = list(np.flip(all_nodes[n]))
                        direction[n] = direction[n] * -1
                    n += 1
                    # Check if indeed everything is now in proper direction. Otherwise try again
                    if n == n_length:
                        up = all(
                            [
                                np.count_nonzero(
                                    node[0] == np.array([x[0] for x in all_nodes])
                                )
                                == 1
                                for node in all_nodes
                            ]
                        )
                        down = all(
                            [
                                np.count_nonzero(
                                    node[1] == np.array([x[1] for x in all_nodes])
                                )
                                == 1
                                for node in all_nodes
                            ]
                        )
                        if (not up) or (not down):
                            n = 0
                            max_tries += 1
                    if max_tries > 500:
                        logger.warning(
                            "Can't fix correct directions branch groups %s",
                            all_branches,
                        )
                        break

                # Record branches with reversed direction to return
                reversed_branches.extend(
                    [b for b, d in zip(all_branches, direction) if d == -1]
                )

                # Get most upstream node. Otherwise just pick i_upstream = 0 as starting point
                i_upstream = [
                    i
                    for i, n in enumerate([x[0] for x in all_nodes])
                    if n not in [x[1] for x in all_nodes]
                ]
                if len(i_upstream) == 1:
                    i_upstream = i_upstream[0]
                else:
                    # It could be that branch order group forms a ring. In this case check first which node has more
                    # than 2 branches (bifurcation) or just 1 branch (boundary) connected.
                    i_upstream = [
                        i
                        for i, n in enumerate([x[0] for x in all_nodes])
                        if (len(self.branches_to_node_map[n]) > 2)
                        or (len(self.branches_to_node_map[n]) == 1)
                    ]
                    if len(i_upstream) == 1:
                        i_upstream = i_upstream[0]
                    else:
                        raise ValueError(
                            f"Something is not right with the branch order group {all_branches}"
                        )

                # Now put branch list in correct order
                all_branches_sorted = []
                all_nodes_sorted = []
                direction_sorted = []
                for _ in range(len(all_branches)):
                    all_branches_sorted.append(all_branches[i_upstream])
                    all_nodes_sorted.append(all_nodes[i_upstream])
                    direction_sorted.append(direction[i_upstream])
                    try:
                        i_upstream = [
                            i
                            for i, n in enumerate([x[0] for x in all_nodes])
                            if [x[1] for x in all_nodes][i_upstream] == n
                        ][0]
                    except IndexError:
                        break
                # Stitch chainages and bedlevels together
                chainage, bedlevel = [], []
                branch_length = 0
                for b, d in zip(all_branches_sorted, direction_sorted):
                    subset = bedlevels_crs_branches.loc[
                        bedlevels_crs_branches["branchid"] == b
                    ]
                    chain, bed = subset["chainage"], subset["minz"]
                    # Reverse chainage and bedlevel arrays
                    if d == -1:
                        chain = np.flip(chain)
                        bed = np.flip(bed)
                    chainage.extend(chain + branch_length)
                    bedlevel.extend(bed)
                    branch_length = self.branches.loc[b, "geometry"].length
                # Get chainage of up- and downstream node of loop branch within the overall branch
                if len(all_branches_sorted) == 1:
                    up_node_chainage = 0
                    down_node_chainage = self.branches.loc[
                        all_branches_sorted[0], "geometry"
                    ].length
                else:
                    i = np.argmax(
                        [
                            1 if ((nodes == n) or (list(np.flip(nodes)) == n)) else 0
                            for n in all_nodes_sorted
                        ]
                    )
                    up_node_chainage = sum(
                        [0]
                        + [
                            self.branches.loc[b, "geometry"].length
                            for b, n in zip(
                                all_branches_sorted[:-1], all_nodes_sorted[:-1]
                            )
                        ][: i + 1]
                    )
                    down_node_chainage = sum(
                        [
                            self.branches.loc[b, "geometry"].length
                            for b, n in zip(all_branches_sorted, all_nodes_sorted)
                        ][: i + 1]
                    )
                # Finally interpolate
                nodes_dict[nodes[0]]["up"].append(
                    np.interp(up_node_chainage, chainage, bedlevel)
                )
                nodes_dict[nodes[1]]["down"].append(
                    np.interp(down_node_chainage, chainage, bedlevel)
                )

        # Summarize everything and save
        nodes = list(nodes_dict.keys())
        node_geom = [
            Point(x, y)
            for x, y in zip(
                self.mesh1d.get_values("nnodex"), self.mesh1d.get_values("nnodey")
            )
        ]
        if resolve_at_bifurcation_method == "min":
            upstream_bedlevel = [
                np.min(v["up"]) if len(v["up"]) > 0 else np.nan
                for v in nodes_dict.values()
            ]
            downstream_bedlevel = [
                np.min(v["down"]) if len(v["down"]) > 0 else np.nan
                for v in nodes_dict.values()
            ]
        elif resolve_at_bifurcation_method == "max":
            upstream_bedlevel = [
                np.max(v["up"]) if len(v["up"]) > 0 else np.nan
                for v in nodes_dict.values()
            ]
            downstream_bedlevel = [
                np.max(v["down"]) if len(v["down"]) > 0 else np.nan
                for v in nodes_dict.values()
            ]
        elif resolve_at_bifurcation_method == "mean":
            upstream_bedlevel = [
                np.average(["up"]) if len(v["up"]) > 0 else np.nan
                for v in nodes_dict.values()
            ]
            downstream_bedlevel = [
                np.average(v["down"]) if len(v["down"]) > 0 else np.nan
                for v in nodes_dict.values()
            ]
        else:
            raise NotImplementedError

        self.nodes = gpd.GeoDataFrame(
            index=nodes_dict.keys(),
            data={
                "code": nodes_dict.keys(),
                "upstream_bedlevel": upstream_bedlevel,
                "downstream_bedlevel": downstream_bedlevel,
            },
            geometry=node_geom,
        )

        if return_reversed_branches:
            return list(np.unique(reversed_branches))

    def get_grouped_branches(self) -> None:
        """
        Get grouped branch ids to use in set_branch_order function
        """
        # Get all network data
        branch_ids = self.mesh1d.description1d["network_branch_ids"]
        # node_ids = self.mesh1d.description1d["network_node_ids"]
        # branch_edge_nodes_idx = self.mesh1d.get_values("nedge_nodes", as_array=True)
        # Collect all node ids per branch and all branches per node id
        self.make_nodes_to_branch_map()
        self.make_branches_to_node_map()

        branch_ids_checked = []
        groups = {0: []}
        for branch_id in branch_ids:
            if branch_id in branch_ids_checked:
                continue

            connected_nodes = self.nodes_to_branch_map[branch_id]  # get connected nodes
            for n in connected_nodes:
                b = self.branches_to_node_map[n]  # get connected branches
                if len(b) > 2:
                    continue  # in this case there's a bifurcation so skip
                elif len(b) == 1:
                    groups[list(groups.keys())[-1] + 1] = b  # b is already a list
                    branch_ids_checked.extend(b)
                    continue  # in this case the branch is not connected to other branches. make separate group
                else:
                    # remove branch from connected_branches because we are not interested in it
                    b = [_b for _b in b if _b != branch_id][0]
                    if b in branch_ids_checked:
                        # connected branch is already added to a group, so this means that branch should be added to
                        # that group
                        groups[[k for k, v in groups.items() if b in v][0]].append(
                            branch_id
                        )
                        branch_ids_checked.extend([branch_id])
                    else:
                        groups[list(groups.keys())[-1] + 1] = [
                            branch_id
                        ]  # otherwise add to group
                        branch_ids_checked.extend([branch_id])
                branch_ids_checked = list(np.unique(branch_ids_checked))
        groups.pop(0)  # remove the 0th group because empty

        # The branches are grouped but not fully connected (although routine above should do the trick in theory).
        # Try merging groups if a branch is in multiple groups.
        # In that case we know that the branch groups are connected to each other and in fact form a bigger group
        for b in branch_ids:
            _groups = groups.copy()  # make copy to apply changes on
            _k = -1  # default index
            # Loop over groups
            for k, v in groups.items():
                if b in v:  # if branch in grouped branches
                    if _k == -1:
                        _k = k  # set index to first group found
                    else:
                        _groups[_k].extend(
                            v
                        )  # otherwise add group to first found group
                        _groups[_k] = list(
                            np.unique(_groups[_k])
                        )  # remove duplicates due to add
                        _groups.pop(k)  # and remove group from groups
            groups = _groups.copy()  # copy changed dict over original
        # One pass over all branches should be sufficient to group everything together. Otherwise raise error
        if (
            max(
                [
                    sum([1 if b in v else 0 for k, v in groups.items()])
                    for b in branch_ids
                ]
            )
            > 1
        ):
            raise ValueError(
                f"Still branches contained in multiple groups. Maximum number of groups where this "
                f"happens: {max([sum([1 if b in v else 0 for k, v in groups.items()]) for b in branch_ids])}"
            )

        # save
        self.branch_groups = groups.copy()

    @validate_arguments(config=ConfigDict(arbitrary_types_allowed=True))
    def get_node_idx_offset(
        self, branch_id: str, pt: shapely.geometry.Point, nnodes: int = 1
    ) -> tuple:
        """
        Get the index and offset of a node on a 1d branch.
        The nearest node is looked for.
        """

        # Project the point on the branch
        dist = self.schematised[branch_id].project(pt)

        # Get the branch data from the networkdata
        branchidx = (
            self.mesh1d.description1d["network_branch_ids"].index(
                self.str2chars(branch_id, self.idstrlength)
            )
            + 1
        )
        pt_branch_id = self.mesh1d.get_values("branchidx", as_array=True)
        idx = np.nonzero(pt_branch_id == branchidx)

        # Find nearest offset
        offsets = self.mesh1d.get_values("branchoffset", as_array=True)[idx]
        isorted = np.argsort(np.absolute(offsets - dist))
        isorted = isorted[: min(nnodes, len(isorted))]

        # Get the offset
        offset = [offsets[imin] for imin in isorted]
        # Get the id of the node
        node_id = [idx[0][imin] + 1 for imin in isorted]

        return node_id, offset
__init__(hydamo: HyDAMO) -> None

Set class variables

Parameters:

hydamo : HyDAMO
    HyDAMO object containing all input data (required)
Source code in hydrolib/dhydamo/core/hydamo.py
def __init__(self, hydamo: HyDAMO) -> None:
    """Set class variables

    Args:
        hydamo (HyDAMO): HyDAMO object containing all input data
    """
    self.hydamo = hydamo

    # Mesh 1d offsets
    self.offsets = {}
generate_nodes_with_bedlevels(resolve_at_bifurcation_method: str = 'min', return_reversed_branches: bool = False)

Generate nodes with upstream and downstream bedlevels derived from the cross-sections set on each branch. It takes into account whether or not a branch order is specified (i.e. whether interpolation over nodes is enabled).

Nodes in between cross-sections on the same branch or branch group are linearly interpolated; outside the cross-sections, values are extrapolated as constants (e.g. at the end points of a branch).

Branch groups that include a branch with a reversed direction compared to the group as a whole are handled correctly; use return_reversed_branches=True to return which branches these are.

Specify with resolve_at_bifurcation_method how to resolve the bedlevel at a bifurcation of more than two branches, using one of the options 'min' (minimum), 'max' (maximum) or 'mean' (average).
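
A minimal usage sketch (not part of the library source); network stands for an already-initialized instance of this class with cross-sections and a populated mesh1d, and the variable name is illustrative:

# Hypothetical instance name; "mean" picked for illustration.
reversed_branches = network.generate_nodes_with_bedlevels(
    resolve_at_bifurcation_method="mean",  # or the default "min", or "max"
    return_reversed_branches=True,         # also return branches with reversed direction
)
# Results are stored on the instance as a GeoDataFrame:
print(network.nodes[["code", "upstream_bedlevel", "downstream_bedlevel"]].head())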

Source code in hydrolib/dhydamo/core/hydamo.py
@validate_arguments
def generate_nodes_with_bedlevels(
    self,
    resolve_at_bifurcation_method: str = "min",
    return_reversed_branches: bool = False,
):
    """
    Generate nodes with upstream and downstream bedlevels derived from the cross-sections set on each
    branch. It takes into account whether or not a branch order is specified (i.e. whether interpolation
    over nodes is enabled).

    Nodes in between cross-sections on the same branch or branch group are linearly interpolated; outside
    the cross-sections, values are extrapolated as constants (e.g. at the end points of a branch).

    Branch groups that include a branch with a reversed direction compared to the group as a whole are
    handled correctly; use return_reversed_branches=True to return which branches these are.

    Specify with resolve_at_bifurcation_method how to resolve the bedlevel at a bifurcation of more than
    two branches, using one of the options 'min' (minimum), 'max' (maximum) or 'mean' (average).
    """
    assert resolve_at_bifurcation_method in ["min", "max", "mean"], (
        "Incorrect value for "
        "'resolve_at_bifurcation_method' supplied. "
        "Either use 'min', 'max' or 'mean'"
    )
    bedlevels_crs_branches = self.hydamo.crosssections.get_bottom_levels()
    branch_order = self.mesh1d.get_values("nbranchorder", as_array=True)
    self.make_branches_to_node_map()
    self.make_nodes_to_branch_map()
    nodes_dict = {
        n: {"up": [], "down": []} for n in self.branches_to_node_map.keys()
    }
    reversed_branches = []  # branches whose direction is reversed relative to their group
    for order, (branch, nodes) in tqdm(
        zip(branch_order, self.nodes_to_branch_map.items()),
        total=len(branch_order),
        desc="Getting bedlevels",
    ):
        if order == -1:
            # No branch order so just get upstream and downstream levels
            branch_length = self.branches.loc[branch, "geometry"].length
            subset = bedlevels_crs_branches.loc[
                bedlevels_crs_branches["branchid"] == branch
            ]
            if subset.empty:
                continue  # if this happens, no cross-section is defined for this branch. This can be a problem.
            nodes_dict[nodes[0]]["up"].append(
                np.interp(0.0, subset["chainage"], subset["minz"])
            )
            nodes_dict[nodes[1]]["down"].append(
                np.interp(branch_length, subset["chainage"], subset["minz"])
            )
        else:
            # In case of branch order, first collect all branches and put them in order from up- to downstream
            all_branches = [
                self.mesh1d.description1d["network_branch_ids"][i]
                for i in np.argwhere(order == branch_order).ravel()
            ]
            all_nodes = [self.nodes_to_branch_map[b] for b in all_branches]
            # First check if any of the branches has a bedlevel from a cross-section profile otherwise skip
            check = all(
                [
                    bedlevels_crs_branches.loc[
                        bedlevels_crs_branches["branchid"] == b
                    ].empty
                    for b in all_branches
                ]
            )
            if check:
                continue  # if this happens, no cross-section is defined for any branch in this group. This can be a problem.

            # Check if every branch is from up to down direction. Otherwise fix by reversing
            n = 0
            n_length = len(all_nodes)
            direction = list(np.ones(n_length))
            max_tries = 0
            while n < n_length:
                up = (
                    np.count_nonzero(
                        all_nodes[n][0] == np.array([x[0] for x in all_nodes])
                    )
                    == 1
                )
                down = (
                    np.count_nonzero(
                        all_nodes[n][1] == np.array([x[1] for x in all_nodes])
                    )
                    == 1
                )
                if (not up) or (not down):
                    # Reverse
                    all_nodes[n] = list(np.flip(all_nodes[n]))
                    direction[n] = direction[n] * -1
                n += 1
                # Check if indeed everything is now in proper direction. Otherwise try again
                if n == n_length:
                    up = all(
                        [
                            np.count_nonzero(
                                node[0] == np.array([x[0] for x in all_nodes])
                            )
                            == 1
                            for node in all_nodes
                        ]
                    )
                    down = all(
                        [
                            np.count_nonzero(
                                node[1] == np.array([x[1] for x in all_nodes])
                            )
                            == 1
                            for node in all_nodes
                        ]
                    )
                    if (not up) or (not down):
                        n = 0
                        max_tries += 1
                if max_tries > 500:
                    logger.warning(
                        "Can't fix correct directions branch groups %s",
                        all_branches,
                    )
                    break

            # Add reversed branches to the list that is returned
            reversed_branches.extend(
                [b for b, d in zip(all_branches, direction) if d == -1]
            )

            # Get the most upstream node (the node that never occurs as a downstream node)
            i_upstream = [
                i
                for i, n in enumerate([x[0] for x in all_nodes])
                if n not in [x[1] for x in all_nodes]
            ]
            if len(i_upstream) == 1:
                i_upstream = i_upstream[0]
            else:
                # It could be that the branch order group forms a ring. In that case, check which node has more
                # than 2 branches (bifurcation) or just 1 branch (boundary) connected.
                i_upstream = [
                    i
                    for i, n in enumerate([x[0] for x in all_nodes])
                    if (len(self.branches_to_node_map[n]) > 2)
                    or (len(self.branches_to_node_map[n]) == 1)
                ]
                if len(i_upstream) == 1:
                    i_upstream = i_upstream[0]
                else:
                    raise ValueError(
                        f"Something is not right with the branch order group {all_branches}"
                    )

            # Now put branch list in correct order
            all_branches_sorted = []
            all_nodes_sorted = []
            direction_sorted = []
            for _ in range(len(all_branches)):
                all_branches_sorted.append(all_branches[i_upstream])
                all_nodes_sorted.append(all_nodes[i_upstream])
                direction_sorted.append(direction[i_upstream])
                try:
                    i_upstream = [
                        i
                        for i, n in enumerate([x[0] for x in all_nodes])
                        if [x[1] for x in all_nodes][i_upstream] == n
                    ][0]
                except IndexError:
                    break
            # Stitch chainages and bedlevels together
            chainage, bedlevel = [], []
            branch_length = 0
            for b, d in zip(all_branches_sorted, direction_sorted):
                subset = bedlevels_crs_branches.loc[
                    bedlevels_crs_branches["branchid"] == b
                ]
                chain, bed = subset["chainage"], subset["minz"]
                # Reverse chainage and bedlevel arrays
                if d == -1:
                    chain = np.flip(chain)
                    bed = np.flip(bed)
                chainage.extend(chain + branch_length)
                bedlevel.extend(bed)
                # Accumulate the offset so chainages of subsequent branches continue where this branch ends
                branch_length += self.branches.loc[b, "geometry"].length
            # Get the chainage of the up- and downstream node of the current branch within the overall, stitched branch
            if len(all_branches_sorted) == 1:
                up_node_chainage = 0
                down_node_chainage = self.branches.loc[
                    all_branches_sorted[0], "geometry"
                ].length
            else:
                i = np.argmax(
                    [
                        1 if ((nodes == n) or (list(np.flip(nodes)) == n)) else 0
                        for n in all_nodes_sorted
                    ]
                )
                up_node_chainage = sum(
                    [0]
                    + [
                        self.branches.loc[b, "geometry"].length
                        for b, n in zip(
                            all_branches_sorted[:-1], all_nodes_sorted[:-1]
                        )
                    ][: i + 1]
                )
                down_node_chainage = sum(
                    [
                        self.branches.loc[b, "geometry"].length
                        for b, n in zip(all_branches_sorted, all_nodes_sorted)
                    ][: i + 1]
                )
            # Finally interpolate
            nodes_dict[nodes[0]]["up"].append(
                np.interp(up_node_chainage, chainage, bedlevel)
            )
            nodes_dict[nodes[1]]["down"].append(
                np.interp(down_node_chainage, chainage, bedlevel)
            )

    # Summarize everything and save
    nodes = list(nodes_dict.keys())
    node_geom = [
        Point(x, y)
        for x, y in zip(
            self.mesh1d.get_values("nnodex"), self.mesh1d.get_values("nnodey")
        )
    ]
    if resolve_at_bifurcation_method == "min":
        upstream_bedlevel = [
            np.min(v["up"]) if len(v["up"]) > 0 else np.nan
            for v in nodes_dict.values()
        ]
        downstream_bedlevel = [
            np.min(v["down"]) if len(v["down"]) > 0 else np.nan
            for v in nodes_dict.values()
        ]
    elif resolve_at_bifurcation_method == "max":
        upstream_bedlevel = [
            np.max(v["up"]) if len(v["up"]) > 0 else np.nan
            for v in nodes_dict.values()
        ]
        downstream_bedlevel = [
            np.max(v["down"]) if len(v["down"]) > 0 else np.nan
            for v in nodes_dict.values()
        ]
    elif resolve_at_bifurcation_method == "mean":
        upstream_bedlevel = [
            np.average(v["up"]) if len(v["up"]) > 0 else np.nan
            for v in nodes_dict.values()
        ]
        downstream_bedlevel = [
            np.average(v["down"]) if len(v["down"]) > 0 else np.nan
            for v in nodes_dict.values()
        ]
    else:
        raise NotImplementedError

    self.nodes = gpd.GeoDataFrame(
        index=nodes_dict.keys(),
        data={
            "code": nodes_dict.keys(),
            "upstream_bedlevel": upstream_bedlevel,
            "downstream_bedlevel": downstream_bedlevel,
        },
        geometry=node_geom,
    )

    if return_reversed_branches:
        return list(np.unique(reversed_branches))
get_grouped_branches() -> None

Get grouped branch ids, for use in the set_branch_order function
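
A minimal usage sketch; network is an assumed, already-initialized instance of this class:

network.get_grouped_branches()
# The result is stored on the instance as a dict of group id -> list of branch ids
for group_id, grouped_branch_ids in network.branch_groups.items():
    print(group_id, grouped_branch_ids)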

Source code in hydrolib/dhydamo/core/hydamo.py
def get_grouped_branches(self) -> None:
    """
    Get grouped branch ids, for use in the set_branch_order function
    """
    # Get all network data
    branch_ids = self.mesh1d.description1d["network_branch_ids"]
    # node_ids = self.mesh1d.description1d["network_node_ids"]
    # branch_edge_nodes_idx = self.mesh1d.get_values("nedge_nodes", as_array=True)
    # Collect all node ids per branch and all branches per node id
    self.make_nodes_to_branch_map()
    self.make_branches_to_node_map()

    branch_ids_checked = []
    groups = {0: []}
    for branch_id in branch_ids:
        if branch_id in branch_ids_checked:
            continue

        connected_nodes = self.nodes_to_branch_map[branch_id]  # get connected nodes
        for n in connected_nodes:
            b = self.branches_to_node_map[n]  # get connected branches
            if len(b) > 2:
                continue  # in this case there's a bifurcation so skip
            elif len(b) == 1:
                groups[list(groups.keys())[-1] + 1] = b  # b is already a list
                branch_ids_checked.extend(b)
                continue  # in this case the branch is not connected to other branches. make separate group
            else:
                # remove branch from connected_branches because we are not interested in it
                b = [_b for _b in b if _b != branch_id][0]
                if b in branch_ids_checked:
                    # connected branch is already added to a group, so this means that branch should be added to
                    # that group
                    groups[[k for k, v in groups.items() if b in v][0]].append(
                        branch_id
                    )
                    branch_ids_checked.extend([branch_id])
                else:
                    groups[list(groups.keys())[-1] + 1] = [
                        branch_id
                    ]  # otherwise start a new group
                    branch_ids_checked.extend([branch_id])
            branch_ids_checked = list(np.unique(branch_ids_checked))
    groups.pop(0)  # remove the 0th group because empty

    # The branches are grouped but not necessarily fully connected (although the routine above should, in theory, do the trick).
    # Try merging groups if a branch occurs in multiple groups:
    # in that case we know those groups are connected to each other and in fact form one bigger group.
    for b in branch_ids:
        _groups = groups.copy()  # make copy to apply changes on
        _k = -1  # default index
        # Loop over groups
        for k, v in groups.items():
            if b in v:  # if branch in grouped branches
                if _k == -1:
                    _k = k  # set index to first group found
                else:
                    _groups[_k].extend(
                        v
                    )  # otherwise add group to first found group
                    _groups[_k] = list(
                        np.unique(_groups[_k])
                    )  # remove duplicates due to add
                    _groups.pop(k)  # and remove group from groups
        groups = _groups.copy()  # copy changed dict over original
    # One pass over all branches should be sufficient to group everything together. Otherwise raise error
    if (
        max(
            [
                sum([1 if b in v else 0 for k, v in groups.items()])
                for b in branch_ids
            ]
        )
        > 1
    ):
        raise ValueError(
            "Some branches are still contained in multiple groups. Maximum number of groups in "
            f"which this happens: {max([sum([1 if b in v else 0 for k, v in groups.items()]) for b in branch_ids])}"
        )

    # save
    self.branch_groups = groups.copy()
get_node_idx_offset(branch_id: str, pt: shapely.geometry.Point, nnodes: int = 1) -> tuple

Get the index and offset of a node on a 1d branch. The node(s) nearest to the given point are returned.
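
A minimal usage sketch; the branch id, coordinates and instance name are illustrative only:

from shapely.geometry import Point

# Find the two mesh nodes nearest to a point projected on branch "riv_001"
node_ids, offsets = network.get_node_idx_offset(
    "riv_001", Point(125300.0, 450120.0), nnodes=2
)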

Source code in hydrolib/dhydamo/core/hydamo.py
@validate_arguments(config=ConfigDict(arbitrary_types_allowed=True))
def get_node_idx_offset(
    self, branch_id: str, pt: shapely.geometry.Point, nnodes: int = 1
) -> tuple:
    """
    Get the index and offset of a node on a 1d branch.
    The node(s) nearest to the given point are returned.
    """

    # Project the point on the branch
    dist = self.schematised[branch_id].project(pt)

    # Get the branch data from the networkdata
    branchidx = (
        self.mesh1d.description1d["network_branch_ids"].index(
            self.str2chars(branch_id, self.idstrlength)
        )
        + 1
    )
    pt_branch_id = self.mesh1d.get_values("branchidx", as_array=True)
    idx = np.nonzero(pt_branch_id == branchidx)

    # Find nearest offset
    offsets = self.mesh1d.get_values("branchoffset", as_array=True)[idx]
    isorted = np.argsort(np.absolute(offsets - dist))
    isorted = isorted[: min(nnodes, len(isorted))]

    # Get the offset
    offset = [offsets[imin] for imin in isorted]
    # Get the id of the node
    node_id = [idx[0][imin] + 1 for imin in isorted]

    return node_id, offset
make_branches_to_node_map() -> None

Map branches connected to each node
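
A minimal usage sketch; the instance name and the node/branch ids are illustrative. This method calls make_nodes_to_branch_map internally, so both lookup tables are available afterwards:

network.make_branches_to_node_map()
branches_at_node = network.branches_to_node_map["node_0001"]  # all branches meeting at this node
up_node, down_node = network.nodes_to_branch_map["riv_001"]   # upstream and downstream node of this branch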

Source code in hydrolib/dhydamo/core/hydamo.py
def make_branches_to_node_map(self) -> None:
    """Map branches connected to each node"""
    self.make_nodes_to_branch_map()
    self.branches_to_node_map = {
        n: [k for k, v in self.nodes_to_branch_map.items() if n in v]
        for n in self.mesh1d.description1d["network_node_ids"]
    }
make_nodes_to_branch_map() -> None

Map nodes connected to each branch

Source code in hydrolib/dhydamo/core/hydamo.py
def make_nodes_to_branch_map(self) -> None:
    """Map nodes connected to each branch"""
    # Note: first node is upstream, second node is downstream
    self.nodes_to_branch_map = {
        b: [self.mesh1d.description1d["network_node_ids"][_idx - 1] for _idx in idx]
        for b, idx in zip(
            self.mesh1d.description1d["network_branch_ids"],
            self.mesh1d.get_values("nedge_nodes", as_array=True),
        )
    }
set_branch_interpolation_modelwide() -> None

Set cross-section interpolation over nodes on all branches model-wide (a usage sketch follows the note below).

Note
  • Interpolation will be set using branchorder property.
  • Branches are grouped between bifurcations where multiple branches meet.
  • No interpolation is applied over these type of bifurcations.
  • No branch order is set on branch groups consisting of 1 branch.
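
A minimal usage sketch; network is an assumed, already-initialized instance of this class:

# One call groups all branches and sets a branch order for every group with more than one branch
network.set_branch_interpolation_modelwide()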
Source code in hydrolib/dhydamo/core/hydamo.py
def set_branch_interpolation_modelwide(self) -> None:
    """
    Set cross-section interpolation over nodes on all branches model-wide.

    Note:
        - Interpolation will be set using branchorder property.
        - Branches are grouped between bifurcations where multiple branches meet.
        - No interpolation is applied over these type of bifurcations.
        - No branch order is set on branch groups consisting of 1 branch.
    """
    self.get_grouped_branches()
    for group in self.branch_groups.values():
        if len(group) > 1:
            self.set_branch_order(group)
set_branch_order(branchids: list, idx: int = None) -> None

Group branch ids so that the cross sections are interpolated along the branch.

Parameters

branchids : list
    List of branches to group
idx : int
    Order number with which to update a branch
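
A minimal usage sketch; branch ids and the instance name are illustrative:

# Let cross-sections interpolate along three connected branches (next free order number)
network.set_branch_order(["riv_001", "riv_002", "riv_003"])
# Or pin a group to an explicit order number
network.set_branch_order(["riv_004", "riv_005"], idx=7)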

Source code in hydrolib/dhydamo/core/hydamo.py
@validate_arguments
def set_branch_order(self, branchids: list, idx: int = None) -> None:
    """
    Group branch ids so that the cross sections are
    interpolated along the branch.

    Parameters
    ----------
    branchids : list
        List of branches to group
    idx : int
        Order number with which to update a branch
    """
    # Get a boolean mask for the branch names given by the user
    branchidx = np.isin(self.mesh1d.description1d["network_branch_ids"], branchids)
    # Get current order
    branchorder = self.mesh1d.get_values("nbranchorder", as_array=True)
    # Update
    if idx is None:
        branchorder[branchidx] = branchorder.max() + 1
    else:
        if not isinstance(idx, int):
            raise TypeError("Expected integer.")
        branchorder[branchidx] = idx
    # Save
    self.mesh1d.set_values("nbranchorder", branchorder)

ObservationPoints

Source code in hydrolib/dhydamo/core/hydamo.py
class ObservationPoints:
    def __init__(self, hydamo):
        self.hydamo = hydamo
        self.observation_points = gpd.GeoDataFrame().set_geometry([])

    @validate_arguments(config=ConfigDict(arbitrary_types_allowed=True))
    def add_points(
        self, crds: list, names: list, locationTypes=None, snap_distance: float = 5.0
    ) -> None:
        """
        Method to add observation points to the schematisation. Observation points can be of type '1d' or '2d'; 1d-points are snapped to the nearest branch.

        Parameters
        ----------
        crds : Nx2 list or array
            x and y coordinates of observation points
        names : str or list
            names of the observation points
        locationTypes : str or list
            type of the observation points: 1d or 2d
        snap_distance : float (default is 5 m)
            1d observation points within this distance of a branch will be snapped to it; otherwise they are discarded.
        """
        if isinstance(names, str):
            names = [names]
            crds = [crds]

        if locationTypes is not None:
            if isinstance(locationTypes, str):
                locationTypes = [locationTypes]

            # split 1d and 2d points, as the first ones need to be snapped to branches
            obs2d = gpd.GeoDataFrame()
            obs2d["name"] = [
                n for nn, n in enumerate(names) if locationTypes[nn] == "2d"
            ]
            obs2d["locationtype"] = "2d"
            obs2d = obs2d.set_geometry([
                Point(*pt) if not isinstance(pt, Point) else pt
                for ipt, pt in enumerate(crds)
                if (locationTypes[ipt] == "2d")
            ])
            obs2d["x"] = [pt.coords[0][0] for pt in obs2d["geometry"]]
            obs2d["y"] = [pt.coords[0][1] for pt in obs2d["geometry"]]
            names1d = [n for n_i, n in enumerate(names) if locationTypes[n_i] == "1d"]
            crds1d = [c for c_i, c in enumerate(crds) if locationTypes[c_i] == "1d"]
        else:
            names1d = names
            crds1d = crds

        obs1d = gpd.GeoDataFrame()
        obs1d["name"] = names1d
        obs1d = obs1d.set_geometry([
            Point(*pt) if not isinstance(pt, Point) else pt for pt in crds1d
        ])
        obs1d["locationtype"] = "1d"
        find_nearest_branch(
            self.hydamo.branches, obs1d, method="overal", maxdist=snap_distance
        )
        obs1d.rename(
            columns={"branch_id": "branchid", "branch_offset": "chainage"}, inplace=True
        )

        obs = pd.concat([obs1d, obs2d], sort=True) if locationTypes is not None else obs1d

        obs.dropna(how="all", axis=1, inplace=True)

        # Add to dataframe
        if self.observation_points.empty:
            self.observation_points = obs
        else:
            self.observation_points = pd.concat([self.observation_points, obs], ignore_index=True)
add_points(crds: list, names: list, locationTypes=None, snap_distance: float = 5.0) -> None

Method to add observation points to the schematisation. Observation points can be of type '1d' or '2d'; 1d-points are snapped to the nearest branch.

Parameters

crds : Nx2 list or array
    x and y coordinates of observation points
names : str or list
    names of the observation points
locationTypes : str or list
    type of the observation points: 1d or 2d
snap_distance : float (default is 5 m)
    1d observation points within this distance of a branch will be snapped to it; otherwise they are discarded.
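
A minimal usage sketch; coordinates and names are illustrative, and it is assumed here that the ObservationPoints instance is exposed on the HyDAMO object as hydamo.observationpoints:

hydamo.observationpoints.add_points(
    crds=[(125300.0, 450120.0), (125800.0, 450400.0)],
    names=["obs_weir_us", "obs_floodplain"],
    locationTypes=["1d", "2d"],  # the 1d point is snapped to the nearest branch
    snap_distance=10.0,
)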

Source code in hydrolib/dhydamo/core/hydamo.py
@validate_arguments(config=ConfigDict(arbitrary_types_allowed=True))
def add_points(
    self, crds: list, names: list, locationTypes=None, snap_distance: float = 5.0
) -> None:
    """
    Method to add observation points to the schematisation. Observation points can be of type '1d' or '2d'; 1d-points are snapped to the nearest branch.

    Parameters
    ----------
    crds : Nx2 list or array
        x and y coordinates of observation points
    names : str or list
        names of the observation points
    locationTypes : str or list
        type of the observation points: 1d or 2d
    snap_distance : float (default is 5 m)
        1d observation points within this distance of a branch will be snapped to it; otherwise they are discarded.
    """
    if isinstance(names, str):
        names = [names]
        crds = [crds]

    if locationTypes is not None:
        if isinstance(locationTypes, str):
            locationTypes = [locationTypes]

        # split 1d and 2d points, as the first ones need to be snapped to branches
        obs2d = gpd.GeoDataFrame()
        obs2d["name"] = [
            n for nn, n in enumerate(names) if locationTypes[nn] == "2d"
        ]
        obs2d["locationtype"] = "2d"
        obs2d = obs2d.set_geometry([
            Point(*pt) if not isinstance(pt, Point) else pt
            for ipt, pt in enumerate(crds)
            if (locationTypes[ipt] == "2d")
        ])
        obs2d["x"] = [pt.coords[0][0] for pt in obs2d["geometry"]]
        obs2d["y"] = [pt.coords[0][1] for pt in obs2d["geometry"]]
        names1d = [n for n_i, n in enumerate(names) if locationTypes[n_i] == "1d"]
        crds1d = [c for c_i, c in enumerate(crds) if locationTypes[c_i] == "1d"]
    else:
        names1d = names
        crds1d = crds

    obs1d = gpd.GeoDataFrame()
    obs1d["name"] = names1d
    obs1d = obs1d.set_geometry([
        Point(*pt) if not isinstance(pt, Point) else pt for pt in crds1d
    ])
    obs1d["locationtype"] = "1d"
    find_nearest_branch(
        self.hydamo.branches, obs1d, method="overal", maxdist=snap_distance
    )
    obs1d.rename(
        columns={"branch_id": "branchid", "branch_offset": "chainage"}, inplace=True
    )

    obs = pd.concat([obs1d, obs2d], sort=True) if locationTypes is not None else obs1d

    obs.dropna(how="all", axis=1, inplace=True)

    # Add to dataframe
    if self.observation_points.empty:
        self.observation_points = obs
    else:
        self.observation_points = pd.concat([self.observation_points, obs], ignore_index=True)

Structures

Source code in hydrolib/dhydamo/core/hydamo.py
class Structures:
    def __init__(self, hydamo):
        self.hydamo = hydamo
        self.generalstructures_df = pd.DataFrame()
        self.rweirs_df = pd.DataFrame()
        self.orifices_df = pd.DataFrame()
        self.uweirs_df = pd.DataFrame()
        self.culverts_df = pd.DataFrame()
        self.bridges_df = pd.DataFrame()
        self.pumps_df = pd.DataFrame()
        self.compounds_df = pd.DataFrame()

        self.convert = StructuresIO(self)

    def check_branchid_chainage(self, branchid, chainage):
        # Check if the ID exists
        if branchid not in self.hydamo.branches["code"]:
            raise ValueError(
                f"branchid {branchid} not present. Give an existing branch."
            )

        # Get the branch
        branch = self.hydamo.branches.at[branchid, "geometry"]

        # Check that the chainage lies on the branch
        if chainage < 0.0 or chainage > branch.length:
            raise ValueError(
                f"Chainage {chainage} is outside the branch range (0.0 - {branch.length})."
            )

    @validate_arguments
    def add_rweir(
        self,
        id: str = None,
        name: str | float | None = None,
        branchid: str = None,
        chainage: float = None,
        crestlevel: float = None,
        crestwidth: float = None,
        corrcoeff: float = None,
        usevelocityheight: str = "true",
        allowedflowdir: str = "both",
    ) -> None:
        """
        Function to add a regular weir. Arguments correspond to the required input of DFlowFM.
        """
        # Check branchid chainage
        self.check_branchid_chainage(branchid, chainage)

        dct = pd.DataFrame(
            {
                "id": id,
                "name": name,
                "branchid": branchid,
                "chainage": chainage,
                "crestlevel": crestlevel,
                "crestwidth": crestwidth,
                "corrcoeff": corrcoeff,
                "usevelocityheight": usevelocityheight,
                "allowedflowdir": allowedflowdir,
            },
            index=[id],
        )
        self.rweirs_df = pd.concat([self.rweirs_df, dct], ignore_index=True)

    @validate_arguments
    def add_orifice(
        self,
        id: str = None,
        name:  str | float | None = None,
        branchid: str = None,
        chainage: float = None,
        crestlevel: float = None,
        crestwidth: float = None,
        corrcoeff: float = None,
        usevelocityheight: str = "true",
        allowedflowdir: str = "both",
        gateloweredgelevel: float = None,
        uselimitflowpos: str = None,
        limitflowpos: float = None,
        uselimitflowneg: str = None,
        limitflowneg: float = None,
    ) -> None:
        """
        Function to add an orifice. Arguments correspond to the required input of DFlowFM.
        """
        # Check branchid chainage
        self.check_branchid_chainage(branchid, chainage)

        dct = pd.DataFrame(
            {
                "id": id,
                "name": name,
                "branchid": branchid,
                "chainage": chainage,
                "crestlevel": crestlevel,
                "crestwidth": crestwidth,
                "corrcoeff": corrcoeff,
                "usevelocityheight": usevelocityheight,
                "allowedflowdir": allowedflowdir,
                "gateloweredgelevel": gateloweredgelevel,
                "uselimitflowpos": uselimitflowpos,
                "limitflowpos": limitflowpos,
                "uselimitflowneg": uselimitflowneg,
                "limitflowneg": limitflowneg,
            },
            index=[id],
        )
        self.orifices_df = pd.concat([self.orifices_df, dct], ignore_index=True)

    @validate_arguments
    def add_uweir(
        self,
        id: str = None,
        name:  str | float | None = None,
        branchid: str = None,
        chainage: float = None,
        crestlevel: float = None,
        crestwidth: float = None,
        dischargecoeff: float = None,
        usevelocityheight: str = "true",
        allowedflowdir: str = "both",
        numlevels: float = None,
        yvalues: str = None,
        zvalues: str = None,
    ) -> None:
        """
        Function to add a universal weir. Arguments correspond to the required input of DFlowFM.
        """
        # Check branchid chainage
        self.check_branchid_chainage(branchid, chainage)

        dct = pd.DataFrame(
            {
                "id": id,
                "name": name,
                "branchid": branchid,
                "chainage": chainage,
                "crestlevel": crestlevel,
                "crestwidth": crestwidth,
                "dischargecoeff": dischargecoeff,
                "usevelocityheight": usevelocityheight,
                "numlevels": numlevels,
                "allowedflowdir": allowedflowdir,
                "yvalues": yvalues,
                "zvalues": zvalues,
            },
            index=[id],
        )
        self.uweirs_df = pd.concat([self.uweirs_df, dct], ignore_index=True)

    @validate_arguments
    def add_bridge(
        self,
        id: str = None,
        name:  str | float | None = None,
        branchid: str = None,
        chainage: float = None,
        length: float = None,
        inletlosscoeff: float = None,
        outletlosscoeff: float = None,
        csdefid: str = None,
        shift: float = None,
        allowedflowdir: str = "both",
        frictiontype: str = None,
        friction: float = None,
    ) -> None:
        # Check branchid chainage
        self.check_branchid_chainage(branchid, chainage)

        # map HyDAMO definition to D-Hydro definition
        frictiontype = self.hydamo.roughness_mapping[frictiontype]

        dct = pd.DataFrame(
            {
                "id": id,
                "name": name,
                "branchid": branchid,
                "chainage": chainage,
                "length": length,
                "inletlosscoeff": inletlosscoeff,
                "outletlosscoeff": outletlosscoeff,
                "csdefid": csdefid,
                "shift": shift,
                "allowedflowdir": allowedflowdir,
                "frictiontype": frictiontype,
                "friction": friction,
            },
            index=[id],
        )
        self.bridges_df = pd.concat([self.bridges_df, dct], ignore_index=True)

    @validate_arguments
    def add_culvert(
        self,
        id: str = None,
        name:  str | float | None = None,
        branchid: str = None,
        chainage: float = None,
        leftlevel: float = None,
        rightlevel: float = None,
        length: float = None,
        inletlosscoeff: float = None,
        outletlosscoeff: float = None,
        crosssection: dict = None,
        allowedflowdir: str = "both",
        valveonoff: int = 0,
        numlosscoeff: int = None,
        valveopeningheight: float = 0,
        relopening: list = None,
        losscoeff: list = None,
        bedfrictiontype: str = None,
        bedfriction: float = None,
    ) -> None:
        # Check branchid chainage
        self.check_branchid_chainage(branchid, chainage)

        if crosssection["shape"] == "circle":
            definition = self.hydamo.crosssections.add_circle_definition(
                crosssection["diameter"], bedfrictiontype, bedfriction, name=id
            )
        elif crosssection["shape"] == "rectangle":
            definition = self.hydamo.crosssections.add_rectangle_definition(
                crosssection["height"],
                crosssection["width"],
                crosssection["closed"],
                bedfrictiontype,
                bedfriction,
                name=id,
            )
        else:
            raise NotImplementedError(
                f'Cross section with shape "{crosssection["shape"]}" not implemented.'
            )

        bedfrictiontype = self.hydamo.roughness_mapping[bedfrictiontype]

        dct = pd.DataFrame(
            {
                "id": id,
                "name": name,
                "branchid": branchid,
                "chainage": chainage,
                "rightlevel": rightlevel,
                "leftlevel": leftlevel,
                "length": length,
                "inletlosscoeff": inletlosscoeff,
                "outletlosscoeff": outletlosscoeff,
                "csdefid": definition,
                "bedfrictiontype": bedfrictiontype,
                "bedfriction": bedfriction,
                "allowedflowdir": allowedflowdir,
                "valveonoff": valveonoff,
                "numlosscoeff": numlosscoeff,
                "valveopeningheight": valveopeningheight,
                "relopening": [relopening],
                "losscoeff": [losscoeff],
            },
            index=[id],
        )

        self.culverts_df = pd.concat([self.culverts_df, dct], ignore_index=True)

    @validate_arguments
    def add_pump(
        self,
        id: str = None,
        name:  str | float | None = None,
        branchid: str = None,
        chainage: float = None,
        orientation: str = "positive",
        numstages: int = 1,
        controlside: str = "suctionSide",
        capacity: float = None,
        startlevelsuctionside: list = None,
        stoplevelsuctionside: list = None,
        startleveldeliveryside: list = None,
        stopleveldeliveryside: list = None,
    ) -> None:
        # Check branchid chainage
        self.check_branchid_chainage(branchid, chainage)

        dct = pd.DataFrame(
            {
                "id": id,
                "name": name,
                "branchid": branchid,
                "chainage": chainage,
                "orientation": orientation,
                "numstages": numstages,
                "controlside": controlside,
                "capacity": capacity,
                "startlevelsuctionside": [startlevelsuctionside],
                "stoplevelsuctionside": [stoplevelsuctionside],
                "startleveldeliveryside": [startleveldeliveryside],
                "stopleveldeliveryside": [stopleveldeliveryside],
            },
            index=[id],
        )
        self.pumps_df = pd.concat([self.pumps_df, dct], ignore_index=True)

    @validate_arguments
    def add_compound(self, id:  str | float | None = None, structureids: list = None) -> None:
        structurestring = ";".join([f"{s}" for s in structureids])
        numstructures = len(structureids)
        dct = pd.DataFrame(
            {
                "id": id,
                "name": id,
                "numstructures": numstructures,
                "structureids": structurestring,
            },
            index=[id],
        )
        self.compounds_df = pd.concat([self.compounds_df, dct], ignore_index=True)

    @validate_arguments(config=ConfigDict(arbitrary_types_allowed=True))
    def as_dataframe(
        self,
        generalstructures: bool = False,
        pumps: bool = False,
        rweirs: bool = False,
        bridges: bool = False,
        culverts: bool = False,
        uweirs: bool = False,
        orifices: bool = False,
        compounds: bool = False,
    ) -> pd.DataFrame:
        """
        Returns a dataframe with the structures. Specify with the keyword arguments what structure types need to be returned.
        """
        dfs = []
        for df, descr, add in zip(
            [
                self.generalstructures_df,
                self.culverts_df,
                self.rweirs_df,
                self.bridges_df,
                self.pumps_df,
                self.uweirs_df,
                self.orifices_df,
                self.compounds_df,
            ],
            [
                "generalstructures",
                "culvert",
                "weir",
                "bridge",
                "pump",
                "uweir",
                "orifice",
                "compound",
            ],
            [
                generalstructures,
                culverts,
                rweirs,
                bridges,
                pumps,
                uweirs,
                orifices,
                compounds,
            ],
        ):
            if not df.empty and add:
                df = df.copy()
                df.insert(loc=0, column="structype", value=descr, allow_duplicates=True)
                dfs.append(df)

        if len(dfs) > 0:
            return pd.concat(dfs, sort=False, ignore_index=True)
add_orifice(id: str = None, name: str | float | None = None, branchid: str = None, chainage: float = None, crestlevel: float = None, crestwidth: float = None, corrcoeff: float = None, usevelocityheight: str = 'true', allowedflowdir: str = 'both', gateloweredgelevel: float = None, uselimitflowpos: str = None, limitflowpos: float = None, uselimitflowneg: str = None, limitflowneg: float = None) -> None

Function to add an orifice. Arguments correspond to the required input of DFlowFM.
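
A minimal usage sketch with illustrative values, assuming the Structures instance is exposed as hydamo.structures:

hydamo.structures.add_orifice(
    id="orifice_01",
    branchid="riv_001",
    chainage=120.0,
    crestlevel=1.20,
    crestwidth=2.0,
    corrcoeff=1.0,
    gateloweredgelevel=1.80,
    uselimitflowpos="false",
    uselimitflowneg="false",
)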

Source code in hydrolib/dhydamo/core/hydamo.py
@validate_arguments
def add_orifice(
    self,
    id: str = None,
    name:  str | float | None = None,
    branchid: str = None,
    chainage: float = None,
    crestlevel: float = None,
    crestwidth: float = None,
    corrcoeff: float = None,
    usevelocityheight: str = "true",
    allowedflowdir: str = "both",
    gateloweredgelevel: float = None,
    uselimitflowpos: str = None,
    limitflowpos: float = None,
    uselimitflowneg: str = None,
    limitflowneg: float = None,
) -> None:
    """
    Function to add an orifice. Arguments correspond to the required input of DFlowFM.
    """
    # Check branchid chainage
    self.check_branchid_chainage(branchid, chainage)

    dct = pd.DataFrame(
        {
            "id": id,
            "name": name,
            "branchid": branchid,
            "chainage": chainage,
            "crestlevel": crestlevel,
            "crestwidth": crestwidth,
            "corrcoeff": corrcoeff,
            "usevelocityheight": usevelocityheight,
            "allowedflowdir": allowedflowdir,
            "gateloweredgelevel": gateloweredgelevel,
            "uselimitflowpos": uselimitflowpos,
            "limitflowpos": limitflowpos,
            "uselimitflowneg": uselimitflowneg,
            "limitflowneg": limitflowneg,
        },
        index=[id],
    )
    self.orifices_df = pd.concat([self.orifices_df, dct], ignore_index=True)
add_rweir(id: str = None, name: str | float | None = None, branchid: str = None, chainage: float = None, crestlevel: float = None, crestwidth: float = None, corrcoeff: float = None, usevelocityheight: str = 'true', allowedflowdir: str = 'both') -> None

Function to add a regular weir. Arguments correspond to the required input of DFlowFM.
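
A minimal usage sketch with illustrative values, assuming the Structures instance is exposed as hydamo.structures:

hydamo.structures.add_rweir(
    id="weir_01",
    name="weir_01",
    branchid="riv_001",
    chainage=85.0,
    crestlevel=1.45,
    crestwidth=3.0,
    corrcoeff=1.0,
)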

Source code in hydrolib/dhydamo/core/hydamo.py
@validate_arguments
def add_rweir(
    self,
    id: str = None,
    name: str | float | None = None,
    branchid: str = None,
    chainage: float = None,
    crestlevel: float = None,
    crestwidth: float = None,
    corrcoeff: float = None,
    usevelocityheight: str = "true",
    allowedflowdir: str = "both",
) -> None:
    """
    Function to add a regular weir. Arguments correspond to the required input of DFlowFM.
    """
    # Check branchid chainage
    self.check_branchid_chainage(branchid, chainage)

    dct = pd.DataFrame(
        {
            "id": id,
            "name": name,
            "branchid": branchid,
            "chainage": chainage,
            "crestlevel": crestlevel,
            "crestwidth": crestwidth,
            "corrcoeff": corrcoeff,
            "usevelocityheight": usevelocityheight,
            "allowedflowdir": allowedflowdir,
        },
        index=[id],
    )
    self.rweirs_df = pd.concat([self.rweirs_df, dct], ignore_index=True)
add_uweir(id: str = None, name: str | float | None = None, branchid: str = None, chainage: float = None, crestlevel: float = None, crestwidth: float = None, dischargecoeff: float = None, usevelocityheight: str = 'true', allowedflowdir: str = 'both', numlevels: float = None, yvalues: str = None, zvalues: str = None) -> None

Function to add a universal weir. Arguments correspond to the required input of DFlowFM.
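
A minimal usage sketch with illustrative values, assuming the Structures instance is exposed as hydamo.structures; yvalues and zvalues are space-separated strings describing the yz-profile:

hydamo.structures.add_uweir(
    id="uweir_01",
    branchid="riv_002",
    chainage=40.0,
    crestlevel=1.30,
    crestwidth=3.0,
    dischargecoeff=0.9,
    numlevels=4,
    yvalues="0.0 1.0 3.0 4.0",
    zvalues="2.0 1.3 1.3 2.0",
)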

Source code in hydrolib/dhydamo/core/hydamo.py
@validate_arguments
def add_uweir(
    self,
    id: str = None,
    name:  str | float | None = None,
    branchid: str = None,
    chainage: float = None,
    crestlevel: float = None,
    crestwidth: float = None,
    dischargecoeff: float = None,
    usevelocityheight: str = "true",
    allowedflowdir: str = "both",
    numlevels: float = None,
    yvalues: str = None,
    zvalues: str = None,
) -> None:
    """
    Function to add a universal weir. Arguments correspond to the required input of DFlowFM.
    """
    # Check branchid chainage
    self.check_branchid_chainage(branchid, chainage)

    dct = pd.DataFrame(
        {
            "id": id,
            "name": name,
            "branchid": branchid,
            "chainage": chainage,
            "crestlevel": crestlevel,
            "crestwidth": crestwidth,
            "dischargecoeff": dischargecoeff,
            "usevelocityheight": usevelocityheight,
            "numlevels": numlevels,
            "allowedflowdir": allowedflowdir,
            "yvalues": yvalues,
            "zvalues": zvalues,
        },
        index=[id],
    )
    self.uweirs_df = pd.concat([self.uweirs_df, dct], ignore_index=True)
as_dataframe(generalstructures: bool = False, pumps: bool = False, rweirs: bool = False, bridges: bool = False, culverts: bool = False, uweirs: bool = False, orifices: bool = False, compounds: bool = False) -> pd.DataFrame

Returns a dataframe with the structures. Specify with the keyword arguments what structure types need to be returned.
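
A minimal usage sketch, assuming the Structures instance is exposed as hydamo.structures:

# Collect all regular weirs and culverts added so far in one dataframe;
# a 'structype' column is prepended to distinguish the types.
df = hydamo.structures.as_dataframe(rweirs=True, culverts=True)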

Source code in hydrolib/dhydamo/core/hydamo.py
@validate_arguments(config=ConfigDict(arbitrary_types_allowed=True))
def as_dataframe(
    self,
    generalstructures: bool = False,
    pumps: bool = False,
    rweirs: bool = False,
    bridges: bool = False,
    culverts: bool = False,
    uweirs: bool = False,
    orifices: bool = False,
    compounds: bool = False,
) -> pd.DataFrame:
    """
    Returns a dataframe with the structures. Specify with the keyword arguments what structure types need to be returned.
    """
    dfs = []
    for df, descr, add in zip(
        [
            self.generalstructures_df,
            self.culverts_df,
            self.rweirs_df,
            self.bridges_df,
            self.pumps_df,
            self.uweirs_df,
            self.orifices_df,
            self.compounds_df,
        ],
        [
            "generalstructures",
            "culvert",
            "weir",
            "bridge",
            "pump",
            "uweir",
            "orifice",
            "compound",
        ],
        [
            generalstructures,
            culverts,
            rweirs,
            bridges,
            pumps,
            uweirs,
            orifices,
            compounds,
        ],
    ):
        if not df.empty and add:
            df = df.copy()
            df.insert(loc=0, column="structype", value=descr, allow_duplicates=True)
            dfs.append(df)

    if len(dfs) > 0:
        return pd.concat(dfs, sort=False, ignore_index=True)

remove_nan_values(base)

Remove NaN values from an object

Parameters:

base : dict-like, required
    input data, possibly containing NaNs

Returns:

base_copy
    copy of the input data with NaN-valued entries removed
Source code in hydrolib/dhydamo/core/hydamo.py
def remove_nan_values(base):
    """Remove NaN values from object

    Args:
        base (dict): input data, possibly containing NaNs

    Returns:
        base_copy: copy of the input data with NaN-valued entries removed
    """
    base_copy = base.copy()
    for k, v in base.items():
        if isinstance(v, float):
            if np.isnan(v):
                base_copy.pop(k)
    return base_copy