1"""
2convert core data to epw (EnergyPlus) data
3"""
5import csv
6import datetime as dt
7import logging
9import pandas as pd
10import numpy as np
12from aixweather import definitions
13from aixweather.imports.utils_import import MetaData
14from aixweather.transformation_functions import auxiliary, time_observation_transformations, pass_through_handling
16logger = logging.getLogger(__name__)
19"""
20format_epw information:
21for links see readme
23Format info:
24key = output data point name
25core_name = corresponding name matching the format_core_data
26time_of_meas_shift = desired 30min shifting+interpolation to convert the value that is "at
27indicated time" to "average of preceding hour" (ind2prec).
28unit = unit of the output data following the naming convention of format_core_data
29nan = The default values stated from the format_epw information, those values are
30filled if nan.
32All changes here automatically change the calculations.
33Exception: unit conversions have to be added manually.
35Information for shifting:
36Hour: This is the hour of the data. (1 - 24). Hour 1 is 00:01 to 01:00. Cannot be missing.
37but e.g.:
38DryBulbTemp: This is the dry bulb temperature in C at the time indicated.
39and:
40GlobHorRad: received on a horizontal surface during the hour preceding the time indicated.
41----> Hence, we assume that hour 1 should show the DryBulbTemp from
420:30 to 1:30, i.e. the Temp at indicated time.
44time of measurement checked by Martin Rätz (07.08.2023)
45units checked by Martin Rätz (07.08.2023)
46"""
format_epw = {
    "Year": {"core_name": "", "unit": "year", "time_of_meas_shift": None, "nan": None},
    "Month": {"core_name": "", "unit": "month", "time_of_meas_shift": None, "nan": None},
    "Day": {"core_name": "", "unit": "day", "time_of_meas_shift": None, "nan": None},
    "Hour": {"core_name": "", "unit": "hour", "time_of_meas_shift": None, "nan": None},
    "Minute": {"core_name": "", "unit": "minute", "time_of_meas_shift": None, "nan": None},
    "Data Source and Uncertainty Flags": {"core_name": "", "unit": None, "time_of_meas_shift": None, "nan": "?"},
    "DryBulbTemp": {"core_name": "DryBulbTemp", "unit": "degC", "time_of_meas_shift": None, "nan": 99.9},
    "DewPointTemp": {"core_name": "DewPointTemp", "unit": "degC", "time_of_meas_shift": None, "nan": 99.9},
    "RelHum": {"core_name": "RelHum", "unit": "percent", "time_of_meas_shift": None, "nan": 999.0},
    "AtmPressure": {"core_name": "AtmPressure", "unit": "Pa", "time_of_meas_shift": None, "nan": 999999.0},
    "ExtHorRad": {"core_name": "ExtHorRad", "unit": "Wh/m2", "time_of_meas_shift": "ind2prec", "nan": 9999.0},
    "ExtDirNormRad": {"core_name": "ExtDirNormRad", "unit": "Wh/m2", "time_of_meas_shift": "ind2prec", "nan": 9999.0},
    "HorInfra": {"core_name": "HorInfra", "unit": "Wh/m2", "time_of_meas_shift": "ind2prec", "nan": 9999.0},
    "GlobHorRad": {"core_name": "GlobHorRad", "unit": "Wh/m2", "time_of_meas_shift": "ind2prec", "nan": 9999.0},
    "DirNormRad": {"core_name": "DirNormRad", "unit": "Wh/m2", "time_of_meas_shift": "ind2prec", "nan": 9999.0},
    "DiffHorRad": {"core_name": "DiffHorRad", "unit": "Wh/m2", "time_of_meas_shift": "ind2prec", "nan": 9999.0},
    "GlobHorIll": {"core_name": "GlobHorIll", "unit": "lux", "time_of_meas_shift": "ind2prec", "nan": 999999.0},
    "DirecNormIll": {"core_name": "DirecNormIll", "unit": "lux", "time_of_meas_shift": "ind2prec", "nan": 999999.0},
    "DiffuseHorIll": {"core_name": "DiffuseHorIll", "unit": "lux", "time_of_meas_shift": "ind2prec", "nan": 999999.0},
    "ZenithLum": {"core_name": "ZenithLum", "unit": "Cd/m2", "time_of_meas_shift": "ind2prec", "nan": 9999.0},
    "WindDir": {"core_name": "WindDir", "unit": "deg", "time_of_meas_shift": None, "nan": 999.0},
    "WindSpeed": {"core_name": "WindSpeed", "unit": "m/s", "time_of_meas_shift": None, "nan": 999.0},
    "TotalSkyCover": {"core_name": "TotalSkyCover", "unit": "1tenth", "time_of_meas_shift": None, "nan": 99},
    "OpaqueSkyCover": {"core_name": "OpaqueSkyCover", "unit": "1tenth", "time_of_meas_shift": None, "nan": 99},
    "Visibility": {"core_name": "Visibility", "unit": "km", "time_of_meas_shift": None, "nan": 9999.0},
    "CeilingH": {"core_name": "CeilingH", "unit": "m", "time_of_meas_shift": None, "nan": 99999},
    "WeatherObs": {"core_name": "", "unit": "None", "time_of_meas_shift": None, "nan": 9},
    "WeatherCode": {"core_name": "", "unit": "None", "time_of_meas_shift": None, "nan": 999999999},
    "PrecWater": {"core_name": "PrecWater", "unit": "mm", "time_of_meas_shift": None, "nan": 999.0},
    "Aerosol": {"core_name": "Aerosol", "unit": "1thousandth", "time_of_meas_shift": None, "nan": 0.999},
    "Snow": {"core_name": "", "unit": "cm", "time_of_meas_shift": None, "nan": 999.0},
    "DaysSinceSnow": {"core_name": "", "unit": "days", "time_of_meas_shift": None, "nan": 99},
    "Albedo": {"core_name": "", "unit": "None", "time_of_meas_shift": None, "nan": 999},
    "LiquidPrecD": {"core_name": "LiquidPrecD", "unit": "mm/h", "time_of_meas_shift": None, "nan": 999},
    "LiquidPrepQuant": {"core_name": "", "unit": "hours", "time_of_meas_shift": None, "nan": 99},
}


def to_epw(
    core_df: pd.DataFrame,
    meta: MetaData,
    start: dt.datetime,
    stop: dt.datetime,
    fillna: bool,
    result_folder: str = None,
    filename: str = None
) -> tuple[pd.DataFrame, str]:
95 """Create an EPW file from the core data.
97 Args:
98 core_df (pd.DataFrame): DataFrame containing core data.
99 meta (MetaData): Metadata associated with the weather data.
100 start (dt.datetime): Timestamp for the start of the EPW file.
101 stop (dt.datetime): Timestamp for the end of the EPW file.
102 fillna (bool): Boolean indicating whether NaN values should be filled.
103 result_folder (str):
104 Path to the folder where to save the file. Default will use
105 the `results_file_path` method.
106 filename (str): Name of the file to be saved. The default is constructed
107 based on the meta-data as well as start and stop time
109 Returns:
110 pd.DataFrame: DataFrame containing the weather data formatted for EPW export,
111 excluding metadata.
112 str: Path to the exported file.
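
    Example (illustrative only; `core_df` and `meta` would normally come from
    one of the aixweather importers):

        df_epw, path = to_epw(
            core_df=core_df,
            meta=meta,
            start=dt.datetime(2021, 1, 1),
            stop=dt.datetime(2021, 1, 8),
            fillna=True,
        )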
113 """

    ### create header lines
    def line1_location(
        meta: MetaData,
    ):
        """
        Get location metadata (station name, state, country, data_type,
        stationID, lat, lon, TZ, alt)

        return:
            location: list containing the first EPW line (LOCATION)
        """

        data_type = ""
        timezone = 0  # relative to UTC

        location = [
            "LOCATION",
            meta.station_name,
            "State",
            "country",
            data_type,
            meta.station_id,
            str(meta.latitude),
            str(meta.longitude),
            timezone,
            str(meta.altitude),
        ]

        return location

    def line2_design_cond():
        """
        Create the second line of the EPW file.

        return:
            design_cond: list containing the second EPW line (DESIGN CONDITIONS)
        """
        design_cond = [
            "DESIGN CONDITIONS",
            0,  # number of design conditions
        ]

        return design_cond

    def line3_typ_ext_period(df):
        """
        Parse the weather data to get typical and extreme periods.

        Typical periods are weeks whose temperature is closest to the average
        temperature of the season.
        Extreme periods are weeks whose temperature is closest to the
        maximum/minimum temperature of the season.

        return:
            typical_extreme_period: list containing the third EPW line
            (TYPICAL/EXTREME PERIODS)
        """

        typical_extreme_period = [
            "TYPICAL/EXTREME PERIODS",
        ]

        season_dict = {
            11: "Autumn",
            12: "Winter",
            1: "Winter",
            2: "Winter",
            3: "Spring",
            4: "Spring",
            5: "Spring",
            6: "Summer",
            7: "Summer",
            8: "Summer",
            9: "Autumn",
            10: "Autumn",
        }  # assign months to seasons

        def group_func(timestamp):
            """Grouping function for .groupby()"""
            return season_dict[timestamp.month]

        df_temp_ambient = df["DryBulbTemp"]  # get ambient temperature from the weather data
        number_of_periods = (
            df_temp_ambient.groupby(group_func).mean().shape[0]
        )  # the number of seasons present equals the number of periods
        typical_extreme_period.append(number_of_periods)

        # group by season
        try:
            summer_temp = df_temp_ambient.groupby(group_func).get_group("Summer")
        except KeyError:
            summer_temp = pd.DataFrame()
        try:
            spring_temp = df_temp_ambient.groupby(group_func).get_group("Spring")
        except KeyError:
            spring_temp = pd.DataFrame()
        try:
            autumn_temp = df_temp_ambient.groupby(group_func).get_group("Autumn")
        except KeyError:
            autumn_temp = pd.DataFrame()
        try:
            winter_temp = df_temp_ambient.groupby(group_func).get_group("Winter")
        except KeyError:
            winter_temp = pd.DataFrame()

        if not summer_temp.empty:
            typical_extreme_period[1] = (
                typical_extreme_period[1] + 1
            )  # summer and winter each have an extreme period
            max_temp_summer = summer_temp.max()
            typ_temp_summer = summer_temp.mean()
            summer_temp_w = summer_temp.resample(
                "W", label="left"
            ).mean()  # resample to weekly intervals

            # find the (typical and extreme) data points
            idx_mean_summer = summer_temp_w.sub(typ_temp_summer).abs().idxmin()
            idx_max_summer = summer_temp_w.sub(max_temp_summer).abs().idxmin()
            week_closest2mean_summer = summer_temp_w.loc[[idx_mean_summer]]  # start day
            week_closest2max_summer = summer_temp_w.loc[[idx_max_summer]]  # start day

            # compute the end day
            weekend_max_summer = week_closest2max_summer.index + dt.timedelta(days=6)
            weekend_mean_summer = week_closest2mean_summer.index + dt.timedelta(days=6)

            # create the list for this season
            summer = [
                "Summer - Week Nearest Max Temperature For Period",
                "Extreme",
                str(week_closest2max_summer.index.month[0])
                + "/"
                + str(week_closest2max_summer.index.day[0]),
                str(weekend_max_summer.month[0]) + "/" + str(weekend_max_summer.day[0]),
                "Summer - Week Nearest Average Temperature For Period",
                "Typical",
                str(week_closest2mean_summer.index.month[0])
                + "/"
                + str(week_closest2mean_summer.index.day[0]),
                str(weekend_mean_summer.month[0])
                + "/"
                + str(weekend_mean_summer.day[0]),
            ]

            typical_extreme_period = (
                typical_extreme_period + summer
            )  # assemble the list

        # repeat for all seasons
        if not winter_temp.empty:
            typical_extreme_period[1] = typical_extreme_period[1] + 1
            min_temp_winter = winter_temp.min()
            typ_temp_winter = winter_temp.mean()
            winter_temp_w = winter_temp.resample("W", label="left").mean()
            idx_mean_winter = winter_temp_w.sub(typ_temp_winter).abs().idxmin()
            idx_min_winter = winter_temp_w.sub(min_temp_winter).abs().idxmin()
            week_closest2mean_winter = winter_temp_w.loc[[idx_mean_winter]]
            week_closest2min_winter = winter_temp_w.loc[[idx_min_winter]]
            weekend_min_winter = week_closest2min_winter.index + dt.timedelta(days=6)
            weekend_mean_winter = week_closest2mean_winter.index + dt.timedelta(days=6)
            winter = [
                "Winter - Week Nearest Min Temperature For Period",
                "Extreme",
                str(week_closest2min_winter.index.month[0])
                + "/"
                + str(week_closest2min_winter.index.day[0]),
                str(weekend_min_winter.month[0]) + "/" + str(weekend_min_winter.day[0]),
                "Winter - Week Nearest Average Temperature For Period",
                "Typical",
                str(week_closest2mean_winter.index.month[0])
                + "/"
                + str(week_closest2mean_winter.index.day[0]),
                str(weekend_mean_winter.month[0])
                + "/"
                + str(weekend_mean_winter.day[0]),
            ]

            typical_extreme_period = typical_extreme_period + winter

        if not autumn_temp.empty:
            typ_temp_autumn = autumn_temp.mean()
            autumn_temp_w = autumn_temp.resample("W", label="left").mean()
            idx_mean_autumn = autumn_temp_w.sub(typ_temp_autumn).abs().idxmin()
            week_closest2mean_autumn = autumn_temp_w.loc[[idx_mean_autumn]]
            weekend_mean_autumn = week_closest2mean_autumn.index + dt.timedelta(days=6)
            autumn = [
                "Autumn - Week Nearest Average Temperature For Period",
                "Typical",
                str(week_closest2mean_autumn.index.month[0])
                + "/"
                + str(week_closest2mean_autumn.index.day[0]),
                str(weekend_mean_autumn.month[0])
                + "/"
                + str(weekend_mean_autumn.day[0]),
            ]

            typical_extreme_period = typical_extreme_period + autumn

        if not spring_temp.empty:
            typ_temp_spring = spring_temp.mean()
            spring_temp_w = spring_temp.resample("W", label="left").mean()
            idx_mean_spring = spring_temp_w.sub(typ_temp_spring).abs().idxmin()
            week_closest2mean_spring = spring_temp_w.loc[[idx_mean_spring]]
            weekend_mean_spring = week_closest2mean_spring.index + dt.timedelta(days=6)
            spring = [
                "Spring - Week Nearest Average Temperature For Period",
                "Typical",
                str(week_closest2mean_spring.index.month[0])
                + "/"
                + str(week_closest2mean_spring.index.day[0]),
                str(weekend_mean_spring.month[0])
                + "/"
                + str(weekend_mean_spring.day[0]),
            ]

            typical_extreme_period = typical_extreme_period + spring

        return typical_extreme_period

    def line4_ground_temp(df):
        """
        Parse the weather data to get ground temperatures.

        # TODO: not checked yet whether this calculation is correct

        return:
            ground_temp: list containing the fourth EPW line (GROUND TEMPERATURES)
        """

        ground_temp = [
            "GROUND TEMPERATURES",
        ]

        df_4_ground_temp = df.copy()

        df_w_ground = (
            df_4_ground_temp.resample("M").mean().round(decimals=1)
        )  # resample to monthly intervals
        try:
            ground_t = df_w_ground[
                [
                    "Soil_Temperature_5cm",
                    "Soil_Temperature_10cm",
                    "Soil_Temperature_20cm",
                    "Soil_Temperature_50cm",
                    "Soil_Temperature_1m",
                ]
            ].to_numpy()  # DataFrame to array
            # convert the array to a list and assemble
            ground_temp = (
                ground_temp
                + [5]  # ground layers
                + [0.05, None, None, None]
                + ground_t[:, 0].tolist()
                + [0.1, None, None, None]
                + ground_t[:, 1].tolist()
                + [0.2, None, None, None]
                + ground_t[:, 2].tolist()
                + [0.5, None, None, None]
                + ground_t[:, 3].tolist()
                + [1, None, None, None]
                + ground_t[:, 4].tolist()
            )
            return ground_temp
        except KeyError as err:
            logger.warning(
                "Error while adding the probably unnecessary ground temperature to the .epw file "
                "header. A placeholder will be used. Error: %s", err
            )
            ground_temp = ground_temp + [0]  # 0 ground layers

        return ground_temp

    def line5_holiday_dl_saving(df):
        """
        Create the fifth line of the EPW file.

        return:
            holiday_dl_saving: list containing the fifth EPW line
            (HOLIDAYS/DAYLIGHT SAVINGS)
        """

        if df.index.is_leap_year.any():
            is_leap = "Yes"
        else:
            is_leap = "No"
        holiday_dl_saving = [
            "HOLIDAYS/DAYLIGHT SAVINGS",
            is_leap,  # Leap Year Observed
            0,  # Daylight Saving Start Date
            0,  # Daylight Saving End Date
            0,  # Number of Holidays
        ]
        return holiday_dl_saving

    def line6_comment_1():
        """
        Create the sixth line of the EPW file.

        return:
            comment_1: list containing the sixth EPW line (COMMENTS 1)
        """
        return [
            "COMMENTS 1",
            "For data format information see the code or check: "
            "https://designbuilder.co.uk/cahelp/Content/EnergyPlusWeatherFileFormat.htm",
        ]

    def line7_comment_2(comment2=None):
        """
        Create the seventh line of the EPW file.

        return:
            comment_2: list containing the seventh EPW line (COMMENTS 2)
        """
        return ["COMMENTS 2", comment2]

    def line8_data_periods(df):
        """
        Parse the weather data to get the first and last data points.

        return:
            data_periods: list containing the eighth EPW line (DATA PERIODS)
        """
        start_dp = df.index[0]
        end_dp = df.index[-1]
        data_periods = [
            "DATA PERIODS",
            1,  # number of data periods
            1,  # number of records per hour
            "Data",  # data period name or description
            start_dp.strftime("%A"),  # data period start day of week
            start_dp.strftime("%m/%d"),  # data period start date
            end_dp.strftime("%m/%d"),  # data period end date
        ]
        return data_periods

    ### parse actual data
    def format_data(df, start, stop):
        """
        Parse the weather data for the export.

        return:
            data_list: list of EPW data records
        """

        ### measurement time conversion
        df = time_observation_transformations.shift_time_by_dict(format_epw, df)

        ### if possible avoid back and forth interpolating -> pass through
        ### variables without shifting
        df = pass_through_handling.pass_through_measurements_with_back_and_forth_interpolating(
            df, format_epw
        )

        ### select only desired period
        df = time_observation_transformations.truncate_data_from_start_to_stop(
            df, start, stop
        )

        ### select the desired columns
        df = auxiliary.force_data_variable_convention(df, format_epw)

        # fill newly created variables of desired output format
        # split the DataFrame index into its datetime components
        df["Year"] = pd.DatetimeIndex(df.index).year
        df["Month"] = pd.DatetimeIndex(df.index).month
        df["Day"] = pd.DatetimeIndex(df.index).day
        df["Hour"] = pd.DatetimeIndex(df.index).hour
        df["Minute"] = pd.DatetimeIndex(df.index).minute

        ### meet special epw requirements
        # convert hour 0 to hour 24 of the previous day
        df["Hour"] = df["Hour"].replace([0], 24)
        # if the day is not 1 -> subtract 1 from the day
        df.loc[(df["Hour"] == 24) & (df["Day"] != 1), "Day"] = df.loc[
            (df["Hour"] == 24) & (df["Day"] != 1), "Day"
        ].sub(1)
        # if the day is 1 -> clear year, month and day -> refill via ffill/bfill
        df.loc[
            (df["Hour"] == 24) & (df["Day"] == 1),
            ["Year", "Month", "Day"]
        ] = np.nan
        df["Year"] = (
            df["Year"].ffill().bfill().astype(int)
        )
        df["Month"] = (
            df["Month"].ffill().bfill().astype(int)
        )
        df["Day"] = df["Day"].ffill().bfill().astype(int)
        df.reset_index(drop=True, inplace=True)

        # data should always contain full days
        def fill_full_first_day(df):
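            # e.g. if the data starts at hour 5, rows for hours 1-4 of that day
            # are prepended; their data columns remain NaN and are filled later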
            # Identify the first hour and date of the DataFrame
            first_minute = df.iloc[0]["Minute"]
            first_hour = df.iloc[0]["Hour"]
            first_day = df.iloc[0]["Day"]
            first_month = df.iloc[0]["Month"]
            first_year = df.iloc[0]["Year"]
            rows_to_add = 0

            # If the first hour is not 1, add rows to start with hour 1
            if first_hour != 1:
                # If the first hour is 24, we don't want to add a full extra day;
                # just delete the line so that the data frame starts with hour 1
                if first_hour == 24:
                    df = df.drop(df.index[0])
                else:
                    # Calculate how many rows to add
                    rows_to_add = int(first_hour) - 1

                    # Generate new rows
                    for i in range(rows_to_add, 0, -1):
                        new_row = pd.DataFrame(
                            {
                                "Minute": [first_minute],
                                "Hour": [i],
                                "Day": [first_day],
                                "Month": [first_month],
                                "Year": [first_year],
                            }
                        )
                        df = pd.concat([new_row, df]).reset_index(drop=True)
            return df, rows_to_add

        def fill_full_last_day(df):
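            # e.g. if the data ends at hour 20, rows for hours 21-24 of that day
            # are appended; their data columns remain NaN and are filled later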
            # Identify the last hour and date of the DataFrame
            last_hour = df.iloc[-1]["Hour"]
            last_day = df.iloc[-1]["Day"]
            last_month = df.iloc[-1]["Month"]
            last_year = df.iloc[-1]["Year"]
            last_minute = df.iloc[-1]["Minute"]
            rows_to_add = 0

            # If the last hour is not 24, add rows to reach hour 24
            if last_hour != 24:
                # If the last hour is 0, we don't want to add a full extra day;
                # just delete the line so that the data frame ends with hour 24
                if last_hour == 0:
                    df = df.drop(df.index[-1])
                else:
                    # Calculate how many rows to add
                    rows_to_add = 24 - int(last_hour)

                    # Generate new rows
                    new_rows = []
                    for i in range(1, rows_to_add + 1):
                        new_row = {
                            "Minute": last_minute,
                            "Hour": last_hour + i,
                            "Day": last_day,
                            "Month": last_month,
                            "Year": last_year,
                        }
                        new_rows.append(new_row)

                    # Append new rows to DataFrame
                    df = pd.concat([df, pd.DataFrame(new_rows)], ignore_index=True)
            return df, rows_to_add

        df, first_day_added_rows = fill_full_first_day(df)
        df, last_day_added_rows = fill_full_last_day(df)

        # ensure data type where required
        columns_to_convert = ["Year", "Month", "Day", "Hour", "Minute"]
        for col in columns_to_convert:
            df[col] = df[col].astype(float).astype(int)

        ### fill NaNs
        if fillna:
            # Forward-fill added rows at end of df
            df.iloc[-last_day_added_rows:, :] = df.ffill().iloc[
                -last_day_added_rows:, :
            ]
            # fill added rows at beginning of df
            df.iloc[:first_day_added_rows, :] = df.bfill().iloc[
                :first_day_added_rows, :
            ]

            # fill first and last lines nans (possibly lost through shifting)
            df.iloc[0 + first_day_added_rows + 1, :] = df.bfill().iloc[
                0 + first_day_added_rows + 1, :
            ]
            df.iloc[-1 - last_day_added_rows, :] = df.ffill().iloc[
                -1 - last_day_added_rows, :
            ]

            # fill default nans to the rest
            df = auxiliary.fill_nan_from_format_dict(df, format_epw)

        # cut off float digits (required for EnergyPlus)
        df = df.applymap(lambda x: f"{x:.1f}" if isinstance(x, float) else x)
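        # e.g. 21.3456 -> "21.3"; integers and strings pass through unchanged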

        # again make sure correct order and variables are applied
        # (processing might have mixed it up)
        df = auxiliary.force_data_variable_convention(df, format_epw)

        ### format dataframe to list
        data_list = df[format_epw.keys()].to_numpy().tolist()

        return data_list, df

    ### evaluate correctness of format
    auxiliary.evaluate_transformations(
        core_format=definitions.format_core_data, other_format=format_epw
    )

    df = core_df.copy()

    # format data to epw
    df_epw_as_list, df_epw = format_data(df, start, stop)

    # get final start and stop time (differs from start, stop due to filling to full days)
    start_epw = pd.to_datetime(df_epw.iloc[[0]][["Year", "Month", "Day", "Hour"]]).iloc[0]
    stop_epw = pd.to_datetime(df_epw.iloc[[-1]][["Year", "Month", "Day", "Hour"]]).iloc[-1]
    # truncate core data for other calculations
    df_truncated = time_observation_transformations.truncate_data_from_start_to_stop(
        df, start_epw, stop_epw
    )

    # keep regular start stop in the filename for the unit tests
    if filename is None:
        filename = (
            f"{meta.station_id}_{start.strftime('%Y%m%d')}_{stop.strftime('%Y%m%d')}"
            f"_{meta.station_name}.epw"
        )
    # get the file path to save the data to
    file_path = definitions.results_file_path(filename, result_folder)

    ### merge all header lines and the data to be saved in a .epw file
    with open(file_path, "w", newline="", encoding="latin1") as file:
        writer = csv.writer(file)
        writer.writerows(
            [
                line1_location(meta),
                line2_design_cond(),
                line3_typ_ext_period(df_truncated),
                line4_ground_temp(df_truncated),
                line5_holiday_dl_saving(df_truncated),
                line6_comment_1(),
                line7_comment_2(),
                line8_data_periods(df_truncated),
            ]
        )
        writer.writerows(df_epw_as_list)

    logger.info("EPW file saved to %s.", file_path)

    return df, file_path