Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
8 changes: 4 additions & 4 deletions app/compose/production/app_postgres/Dockerfile
Original file line number Diff line number Diff line change
Expand Up @@ -10,16 +10,16 @@ RUN mkdir ${CONFIG_ROOT}
COPY app/requirements/base.txt ${CONFIG_ROOT}/base.txt
COPY app/requirements/production.txt ${CONFIG_ROOT}/production.txt

# Install gettext utilities for translations
RUN apt-get update && \
apt-get install -y --no-install-recommends gettext-base gettext git && \
rm -rf /var/lib/apt/lists/*

RUN pip install --upgrade pip \
&& pip install --no-cache-dir -r ${CONFIG_ROOT}/production.txt

WORKDIR ${APP_ROOT}

# Install gettext utilities for translations
RUN apt-get update && \
apt-get install -y --no-install-recommends gettext-base gettext && \
rm -rf /var/lib/apt/lists/*

ADD app/ ${APP_ROOT}

Expand Down
115 changes: 25 additions & 90 deletions app/projects/management/commands/datapackage.py
Original file line number Diff line number Diff line change
@@ -1,112 +1,47 @@
from django.core.management.base import BaseCommand, CommandError
from projects.models import (
Asset,
AssetType,
TopologyNode,
Scenario,
Timeseries,
ConnectionLink,
Bus,
)
import pandas as pd
from projects.models import Scenario
from pathlib import Path
import numpy as np
import shutil
from oemof.datapackage.datapackage import building
import datapackage as dp


class Command(BaseCommand):
help = "Convert the given scenarios to datapackages"

def add_arguments(self, parser):
parser.add_argument("scen_id", nargs="+", type=int)

parser.add_argument(
"--overwrite", action="store_true", help="Overwrite the datapackage"
)
parser.add_argument("-o", "--outfile", type=str, nargs="?", const="")

def handle(self, *args, **options):
overwrite = options["overwrite"]

for scen_id in options["scen_id"]:
try:
scenario = Scenario.objects.get(pk=scen_id)
except Scenario.DoesNotExist:
raise CommandError('Scenario "%s" does not exist' % scen_id)

destination_path = Path(__file__).resolve().parents[4]

# Create a folder with a datapackage structure
scenario_folder = destination_path / f"scenario_{scen_id}"
create_folder = True

if scenario_folder.exists():
if not overwrite:
create_folder = False
else:
shutil.rmtree(scenario_folder)

elements_folder = scenario_folder / "data" / "elements"
sequences_folder = scenario_folder / "data" / "sequences"

if create_folder:
# create subfolders
(scenario_folder / "scripts").mkdir(parents=True)
elements_folder.mkdir(parents=True)
sequences_folder.mkdir(parents=True)

# List all components of the scenario (except the busses)
qs_assets = Asset.objects.filter(scenario=scenario)
# List all distinct components' assettypes (or facade name)
facade_names = qs_assets.distinct().values_list(
"asset_type__asset_type", flat=True
destination_path = options["outfile"]
if destination_path == "":
destination_path = Path(__file__).resolve().parents[4]
else:
destination_path = Path(destination_path)

scenario_folder = destination_path / f"scenario_{scenario.name}".replace(
" ", "_"
)
if scenario_folder.exists():
shutil.rmtree(scenario_folder)

bus_resource_records = []
profile_resource_records = {}
for facade_name in facade_names:
resource_records = []
for i, asset in enumerate(
qs_assets.filter(asset_type__asset_type=facade_name)
):
resource_rec, bus_resource_rec, profile_resource_rec = (
asset.to_datapackage()
)
resource_records.append(resource_rec)
# those constitute the busses and sequences used by this asset
bus_resource_records.extend(bus_resource_rec)
profile_resource_records.update(profile_resource_rec)

if resource_records:
out_path = elements_folder / f"{facade_name}.csv"
Path(out_path).parent.mkdir(parents=True, exist_ok=True)
df = pd.DataFrame(resource_records)
df.to_csv(out_path, index=False)
dp_json = scenario_folder / "datapackage.json"

# Save all unique busses to a elements resource
if bus_resource_records:
out_path = elements_folder / f"bus.csv"
Path(out_path).parent.mkdir(parents=True, exist_ok=True)
df = pd.DataFrame(bus_resource_records)
df.drop_duplicates("name").to_csv(out_path, index=False)
if dp_json.exists():
print("Only inferring metadata")
p = dp.Package(str(dp_json))
building.infer_package_foreign_keys(p, fk_targets=["project"])
p.descriptor["resources"].sort(key=lambda x: (x["path"], x["name"]))
p.commit()
p.save(dp_json)

# Save all profiles to a sequences resource
if profile_resource_records:
out_path = sequences_folder / f"profiles.csv"
Path(out_path).parent.mkdir(parents=True, exist_ok=True)
# add timestamps to the profiles
profile_resource_records["timeindex"] = scenario.get_timestamps()
try:
df = pd.DataFrame(profile_resource_records)
except ValueError as e:
# If not all profiles have the same length we pad the shorter profiles with np.nan
max_len = max(len(v) for v in profile_resource_records.values())
profile_resource_records = {
k: v + [np.nan] * (max_len - len(v))
for k, v in profile_resource_records.items()
}
df = pd.DataFrame(profile_resource_records)
print(
f"Some profiles have more timesteps than other profiles in scenario {scenario.name}({scen_id}) --> the shorter profiles will be expanded with NaN values"
)
# TODO check if there are column duplicates
df.set_index("timeindex").to_csv(out_path, index=True)
else:
print("Creating datapackage.json")
scenario.to_datapackage(destination_path)
Loading