Skip to content

Commit

Permalink
Merge pull request #94 from intelligent-environments-lab/citylearn-challenge-2023-power-outage-env
Browse files Browse the repository at this point in the history

Citylearn challenge 2023 power outage env
  • Loading branch information
kingsleynweye authored Nov 12, 2023
2 parents 3c48731 + 049f31a commit 6e7be52
Show file tree
Hide file tree
Showing 5 changed files with 59 additions and 9 deletions.
Binary file modified assets/images/citylearn_systems.pdf
Binary file not shown.
Binary file modified assets/images/citylearn_systems.pptx
Binary file not shown.
52 changes: 46 additions & 6 deletions citylearn/building.py
Original file line number Diff line number Diff line change
Expand Up @@ -759,6 +759,10 @@ def observations(self, include_all: bool = None, normalize: bool = None, periodi
'cooling_electricity_consumption': self.cooling_electricity_consumption[self.time_step],
'heating_electricity_consumption': self.heating_electricity_consumption[self.time_step],
'dhw_electricity_consumption': self.dhw_electricity_consumption[self.time_step],
'cooling_storage_electricity_consumption': self.cooling_storage_electricity_consumption[self.time_step],
'heating_storage_electricity_consumption': self.heating_storage_electricity_consumption[self.time_step],
'dhw_storage_electricity_consumption': self.dhw_storage_electricity_consumption[self.time_step],
'electrical_storage_electricity_consumption': self.electrical_storage_electricity_consumption[self.time_step],
'cooling_device_cop': self.cooling_device.get_cop(self.weather.outdoor_dry_bulb_temperature[self.time_step], heating=False),
'heating_device_cop': self.heating_device.get_cop(
self.weather.outdoor_dry_bulb_temperature[self.time_step], heating=True
Expand All @@ -770,7 +774,7 @@ def observations(self, include_all: bool = None, normalize: bool = None, periodi
}

if include_all:
valid_observations = list(self.observation_metadata.keys())
valid_observations = list(data.keys())
else:
valid_observations = self.active_observations

Expand Down Expand Up @@ -1151,16 +1155,16 @@ def estimate_observation_space_limits(self, include_all: bool = None, periodic_n

# Use entire dataset length for space limit estimation
data = {
'solar_generation':np.array(self.pv.get_generation(self.energy_simulation.__getattr__(
'solar_generation',
start_time_step=self.episode_tracker.simulation_start_time_step,
end_time_step=self.episode_tracker.simulation_end_time_step
))),
**{k.lstrip('_'): self.energy_simulation.__getattr__(
k.lstrip('_'),
start_time_step=self.episode_tracker.simulation_start_time_step,
end_time_step=self.episode_tracker.simulation_end_time_step
) for k in vars(self.energy_simulation)},
'solar_generation':np.array(self.pv.get_generation(self.energy_simulation.__getattr__(
'solar_generation',
start_time_step=self.episode_tracker.simulation_start_time_step,
end_time_step=self.episode_tracker.simulation_end_time_step
))),
**{k.lstrip('_'): self.weather.__getattr__(
k.lstrip('_'),
start_time_step=self.episode_tracker.simulation_start_time_step,
Expand Down Expand Up @@ -1253,6 +1257,42 @@ def estimate_observation_space_limits(self, include_all: bool = None, periodic_n
low_limit[key] = 0.0
high_limit[key] = self.dhw_device.nominal_power

elif key == 'cooling_storage_electricity_consumption':
demand = self.energy_simulation.__getattr__(
f'cooling_demand',
start_time_step=self.episode_tracker.simulation_start_time_step,
end_time_step=self.episode_tracker.simulation_end_time_step
)
electricity_consumption = self.cooling_device.get_input_power(demand, data['outdoor_dry_bulb_temperature'], False)
low_limit[key] = -max(electricity_consumption)
high_limit[key] = self.cooling_device.nominal_power

elif key == 'heating_storage_electricity_consumption':
demand = self.energy_simulation.__getattr__(
f'heating_demand',
start_time_step=self.episode_tracker.simulation_start_time_step,
end_time_step=self.episode_tracker.simulation_end_time_step
)
electricity_consumption = self.heating_device.get_input_power(demand, data['outdoor_dry_bulb_temperature'], True)\
if isinstance(self.heating_device, HeatPump) else self.heating_device.get_input_power(demand)
low_limit[key] = -max(electricity_consumption)
high_limit[key] = self.heating_device.nominal_power

elif key == 'dhw_storage_electricity_consumption':
demand = self.energy_simulation.__getattr__(
f'dhw_demand',
start_time_step=self.episode_tracker.simulation_start_time_step,
end_time_step=self.episode_tracker.simulation_end_time_step
)
electricity_consumption = self.dhw_device.get_input_power(demand, data['outdoor_dry_bulb_temperature'], True)\
if isinstance(self.dhw_device, HeatPump) else self.dhw_device.get_input_power(demand)
low_limit[key] = -max(electricity_consumption)
high_limit[key] = self.dhw_device.nominal_power

elif key == 'electrical_storage_electricity_consumption':
low_limit[key] = -self.electrical_storage.nominal_power
high_limit[key] = self.electrical_storage.nominal_power

elif key == 'power_outage':
low_limit[key] = 0.0
high_limit[key] = 1.0
Expand Down
11 changes: 8 additions & 3 deletions citylearn/citylearn.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
from copy import deepcopy
from enum import Enum, unique
from enum import Enum
import importlib
import logging
import os
Expand Down Expand Up @@ -128,14 +128,16 @@ def __init__(self,
self.central_agent = central_agent
self.shared_observations = shared_observations

# set reward function
self.reward_function = reward_function

# reset environment and initializes episode time steps
self.reset()

# reset episode tracker to start after initializing episode time steps during reset
self.episode_tracker.reset_episode_index()

# set reward function
self.reward_function = reward_function
# set reward metadata
self.reward_function.env_metadata = self.get_metadata()

# reward history tracker
Expand Down Expand Up @@ -1141,6 +1143,9 @@ def reset(self) -> List[List[float]]:
for building in self.buildings:
building.reset()

# reset reward function (does nothing by default)
self.reward_function.reset()

# variable reset
self.__rewards = [[]]
self.__net_electricity_consumption = []
Expand Down
5 changes: 5 additions & 0 deletions citylearn/reward_function.py
Original file line number Diff line number Diff line change
Expand Up @@ -34,6 +34,11 @@ def central_agent(self) -> bool:
def env_metadata(self, env_metadata: Mapping[str, Any]):
self.__env_metadata = env_metadata

def reset(self):
    """Reset any internal episode state before a new episode starts.

    The base reward function keeps no per-episode state, so this is a
    deliberate no-op; subclasses that accumulate values across time steps
    override this hook to clear them at episode start.
    """

def calculate(self, observations: List[Mapping[str, Union[int, float]]]) -> List[float]:
r"""Calculates reward.
Expand Down

0 comments on commit 6e7be52

Please sign in to comment.