diff --git a/examples/VirtualCamera/Env.tscn b/examples/VirtualCamera/Env.tscn
index 269ee0d..bb161a0 100644
--- a/examples/VirtualCamera/Env.tscn
+++ b/examples/VirtualCamera/Env.tscn
@@ -56,6 +56,7 @@ transform = Transform3D(1, 0, 0, 0, 1, 0, 0, 0, 1, 63, 0, -50)
[node name="Sync" type="Node" parent="."]
process_priority = -1
script = ExtResource("2")
+onnx_model_path = "VirtualCamera.onnx"
[node name="Camera" type="Camera3D" parent="."]
transform = Transform3D(1, 0, 0, 0, 0.0220418, 0.999757, 0, -0.999757, 0.0220418, 25.3538, 75.4275, -10.0795)
diff --git a/examples/VirtualCamera/Player.gd b/examples/VirtualCamera/Player.gd
index 02c4586..144dd44 100644
--- a/examples/VirtualCamera/Player.gd
+++ b/examples/VirtualCamera/Player.gd
@@ -113,7 +113,8 @@ func reset():
func update_reward():
- ai_controller.reward -= 0.01 # step penalty
+ #ai_controller.reward -= 0.01 # step penalty
+ pass
func calculate_translation(other_pad_translation: Vector3) -> Vector3:
diff --git a/examples/VirtualCamera/Player.tscn b/examples/VirtualCamera/Player.tscn
index e49a276..fe82295 100644
--- a/examples/VirtualCamera/Player.tscn
+++ b/examples/VirtualCamera/Player.tscn
@@ -2,8 +2,8 @@
[ext_resource type="Script" path="res://Player.gd" id="1"]
[ext_resource type="PackedScene" uid="uid://b4hphc8dab5h" path="res://Robot.tscn" id="2"]
-[ext_resource type="PackedScene" uid="uid://b30vsuwotx0u2" path="res://VirtualCamera.tscn" id="3_tv0v5"]
[ext_resource type="Script" path="res://AIController3D.gd" id="4_rq7t7"]
+[ext_resource type="PackedScene" uid="uid://baaywi3arsl2m" path="res://addons/godot_rl_agents/sensors/sensors_3d/RGBCameraSensor3D.tscn" id="4_ybcln"]
[sub_resource type="CapsuleShape3D" id="1"]
radius = 1.6
@@ -27,8 +27,11 @@ mesh = SubResource("2")
[node name="Robot" parent="." instance=ExtResource("2")]
transform = Transform3D(1, 0, 0, 0, 1, 0, 0, 0, 1, 0, -1.43952, 0.0576344)
-[node name="RGBCameraSensor3D" parent="." instance=ExtResource("3_tv0v5")]
-
[node name="AIController3D" type="Node3D" parent="."]
script = ExtResource("4_rq7t7")
reset_after = 10000
+
+[node name="RGBCameraSensor3D" parent="." instance=ExtResource("4_ybcln")]
+training_mode = true
+render_image_resolution = Vector2(10, 10)
+displayed_image_scale_factor = Vector2(20, 20)
diff --git a/examples/VirtualCamera/VirtualCamera.csproj b/examples/VirtualCamera/VirtualCamera.csproj
new file mode 100644
index 0000000..9e0f6e5
--- /dev/null
+++ b/examples/VirtualCamera/VirtualCamera.csproj
@@ -0,0 +1,11 @@
+
+
+ net6.0
+ net7.0
+ net8.0
+ true
+
+
+
+
+
\ No newline at end of file
diff --git a/examples/VirtualCamera/VirtualCamera.onnx b/examples/VirtualCamera/VirtualCamera.onnx
new file mode 100644
index 0000000..6f20344
Binary files /dev/null and b/examples/VirtualCamera/VirtualCamera.onnx differ
diff --git a/examples/VirtualCamera/VirtualCamera.sln b/examples/VirtualCamera/VirtualCamera.sln
new file mode 100644
index 0000000..c37b802
--- /dev/null
+++ b/examples/VirtualCamera/VirtualCamera.sln
@@ -0,0 +1,19 @@
+Microsoft Visual Studio Solution File, Format Version 12.00
+# Visual Studio 2012
+Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "VirtualCamera", "VirtualCamera.csproj", "{716A4AE2-20D9-4B9A-BB9F-FA26A57B6249}"
+EndProject
+Global
+ GlobalSection(SolutionConfigurationPlatforms) = preSolution
+ Debug|Any CPU = Debug|Any CPU
+ ExportDebug|Any CPU = ExportDebug|Any CPU
+ ExportRelease|Any CPU = ExportRelease|Any CPU
+ EndGlobalSection
+ GlobalSection(ProjectConfigurationPlatforms) = postSolution
+ {716A4AE2-20D9-4B9A-BB9F-FA26A57B6249}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+ {716A4AE2-20D9-4B9A-BB9F-FA26A57B6249}.Debug|Any CPU.Build.0 = Debug|Any CPU
+ {716A4AE2-20D9-4B9A-BB9F-FA26A57B6249}.ExportDebug|Any CPU.ActiveCfg = ExportDebug|Any CPU
+ {716A4AE2-20D9-4B9A-BB9F-FA26A57B6249}.ExportDebug|Any CPU.Build.0 = ExportDebug|Any CPU
+ {716A4AE2-20D9-4B9A-BB9F-FA26A57B6249}.ExportRelease|Any CPU.ActiveCfg = ExportRelease|Any CPU
+ {716A4AE2-20D9-4B9A-BB9F-FA26A57B6249}.ExportRelease|Any CPU.Build.0 = ExportRelease|Any CPU
+ EndGlobalSection
+EndGlobal
diff --git a/examples/VirtualCamera/addons/godot_rl_agents/controller/ai_controller_2d.gd b/examples/VirtualCamera/addons/godot_rl_agents/controller/ai_controller_2d.gd
index e9080f0..6536956 100644
--- a/examples/VirtualCamera/addons/godot_rl_agents/controller/ai_controller_2d.gd
+++ b/examples/VirtualCamera/addons/godot_rl_agents/controller/ai_controller_2d.gd
@@ -1,8 +1,29 @@
extends Node2D
class_name AIController2D
+enum ControlModes { INHERIT_FROM_SYNC, HUMAN, TRAINING, ONNX_INFERENCE, RECORD_EXPERT_DEMOS }
+@export var control_mode: ControlModes = ControlModes.INHERIT_FROM_SYNC
+@export var onnx_model_path := ""
@export var reset_after := 1000
+@export_group("Record expert demos mode options")
+## Path where the demos will be saved. The file can later be used for imitation learning.
+@export var expert_demo_save_path: String
+## The action that erases the last recorded episode from the currently recorded data.
+@export var remove_last_episode_key: InputEvent
+## Action will be repeated for n frames. Will introduce control lag if larger than 1.
+## Can be used to ensure that action_repeat on inference and training matches
+## the recorded demonstrations.
+@export var action_repeat: int = 1
+
+@export_group("Multi-policy mode options")
+## Allows you to set certain agents to use different policies.
+## Changing has no effect with default SB3 training. Works with Rllib example.
+## Tutorial: https://github.com/edbeeching/godot_rl_agents/blob/main/docs/TRAINING_MULTIPLE_POLICIES.md
+@export var policy_name: String = "shared_policy"
+
+var onnx_model: ONNXModel
+
var heuristic := "human"
var done := false
var reward := 0.0
@@ -11,72 +32,88 @@ var needs_reset := false
var _player: Node2D
+
func _ready():
add_to_group("AGENT")
-
+
+
func init(player: Node2D):
_player = player
-
+
+
#-- Methods that need implementing using the "extend script" option in Godot --#
func get_obs() -> Dictionary:
- assert(false, "the get_obs method is not implemented when extending from ai_controller")
- return {"obs":[]}
+ assert(false, "the get_obs method is not implemented when extending from ai_controller")
+ return {"obs": []}
-func get_reward() -> float:
- assert(false, "the get_reward method is not implemented when extending from ai_controller")
+
+func get_reward() -> float:
+ assert(false, "the get_reward method is not implemented when extending from ai_controller")
return 0.0
-
+
+
func get_action_space() -> Dictionary:
- assert(false, "the get get_action_space method is not implemented when extending from ai_controller")
+ assert(
+ false,
+		"the get_action_space method is not implemented when extending from ai_controller"
+ )
return {
- "example_actions_continous" : {
- "size": 2,
- "action_type": "continuous"
- },
- "example_actions_discrete" : {
- "size": 2,
- "action_type": "discrete"
- },
- }
-
-func set_action(action) -> void:
- assert(false, "the get set_action method is not implemented when extending from ai_controller")
+ "example_actions_continous": {"size": 2, "action_type": "continuous"},
+ "example_actions_discrete": {"size": 2, "action_type": "discrete"},
+ }
+
+
+func set_action(action) -> void:
+ assert(false, "the set_action method is not implemented when extending from ai_controller")
+
+
+#-----------------------------------------------------------------------------#
+
+
+#-- Methods that sometimes need implementing using the "extend script" option in Godot --#
+# Only needed if you are recording expert demos with this AIController
+func get_action() -> Array:
+ assert(false, "the get_action method is not implemented in extended AIController but demo_recorder is used")
+ return []
+
# -----------------------------------------------------------------------------#
-
+
func _physics_process(delta):
n_steps += 1
if n_steps > reset_after:
needs_reset = true
-
+
+
func get_obs_space():
# may need overriding if the obs space is complex
var obs = get_obs()
return {
- "obs": {
- "size": [len(obs["obs"])],
- "space": "box"
- },
+ "obs": {"size": [len(obs["obs"])], "space": "box"},
}
+
func reset():
n_steps = 0
needs_reset = false
+
func reset_if_done():
if done:
reset()
-
+
+
func set_heuristic(h):
# sets the heuristic from "human" or "model" nothing to change here
heuristic = h
+
func get_done():
return done
-
+
+
func set_done_false():
done = false
+
func zero_reward():
reward = 0.0
-
-
diff --git a/examples/VirtualCamera/addons/godot_rl_agents/controller/ai_controller_3d.gd b/examples/VirtualCamera/addons/godot_rl_agents/controller/ai_controller_3d.gd
index d256b2a..c77d9e0 100644
--- a/examples/VirtualCamera/addons/godot_rl_agents/controller/ai_controller_3d.gd
+++ b/examples/VirtualCamera/addons/godot_rl_agents/controller/ai_controller_3d.gd
@@ -1,8 +1,29 @@
extends Node3D
class_name AIController3D
+enum ControlModes { INHERIT_FROM_SYNC, HUMAN, TRAINING, ONNX_INFERENCE, RECORD_EXPERT_DEMOS }
+@export var control_mode: ControlModes = ControlModes.INHERIT_FROM_SYNC
+@export var onnx_model_path := ""
@export var reset_after := 1000
+@export_group("Record expert demos mode options")
+## Path where the demos will be saved. The file can later be used for imitation learning.
+@export var expert_demo_save_path: String
+## The action that erases the last recorded episode from the currently recorded data.
+@export var remove_last_episode_key: InputEvent
+## Action will be repeated for n frames. Will introduce control lag if larger than 1.
+## Can be used to ensure that action_repeat on inference and training matches
+## the recorded demonstrations.
+@export var action_repeat: int = 1
+
+@export_group("Multi-policy mode options")
+## Allows you to set certain agents to use different policies.
+## Changing has no effect with default SB3 training. Works with Rllib example.
+## Tutorial: https://github.com/edbeeching/godot_rl_agents/blob/main/docs/TRAINING_MULTIPLE_POLICIES.md
+@export var policy_name: String = "shared_policy"
+
+var onnx_model: ONNXModel
+
var heuristic := "human"
var done := false
var reward := 0.0
@@ -11,70 +32,89 @@ var needs_reset := false
var _player: Node3D
+
func _ready():
add_to_group("AGENT")
-
+
+
func init(player: Node3D):
_player = player
-
+
+
#-- Methods that need implementing using the "extend script" option in Godot --#
func get_obs() -> Dictionary:
- assert(false, "the get_obs method is not implemented when extending from ai_controller")
- return {"obs":[]}
+ assert(false, "the get_obs method is not implemented when extending from ai_controller")
+ return {"obs": []}
-func get_reward() -> float:
- assert(false, "the get_reward method is not implemented when extending from ai_controller")
+
+func get_reward() -> float:
+ assert(false, "the get_reward method is not implemented when extending from ai_controller")
return 0.0
-
+
+
func get_action_space() -> Dictionary:
- assert(false, "the get get_action_space method is not implemented when extending from ai_controller")
+ assert(
+ false,
+ "the get_action_space method is not implemented when extending from ai_controller"
+ )
return {
- "example_actions_continous" : {
- "size": 2,
- "action_type": "continuous"
- },
- "example_actions_discrete" : {
- "size": 2,
- "action_type": "discrete"
- },
- }
-
-func set_action(action) -> void:
- assert(false, "the get set_action method is not implemented when extending from ai_controller")
+ "example_actions_continous": {"size": 2, "action_type": "continuous"},
+ "example_actions_discrete": {"size": 2, "action_type": "discrete"},
+ }
+
+
+func set_action(action) -> void:
+ assert(false, "the set_action method is not implemented when extending from ai_controller")
+
+
+#-----------------------------------------------------------------------------#
+
+
+#-- Methods that sometimes need implementing using the "extend script" option in Godot --#
+# Only needed if you are recording expert demos with this AIController
+func get_action() -> Array:
+ assert(false, "the get_action method is not implemented in extended AIController but demo_recorder is used")
+ return []
+
# -----------------------------------------------------------------------------#
-
+
+
func _physics_process(delta):
n_steps += 1
if n_steps > reset_after:
needs_reset = true
-
+
+
func get_obs_space():
# may need overriding if the obs space is complex
var obs = get_obs()
return {
- "obs": {
- "size": [len(obs["obs"])],
- "space": "box"
- },
+ "obs": {"size": [len(obs["obs"])], "space": "box"},
}
+
func reset():
n_steps = 0
needs_reset = false
+
func reset_if_done():
if done:
reset()
-
+
+
func set_heuristic(h):
# sets the heuristic from "human" or "model" nothing to change here
heuristic = h
+
func get_done():
return done
-
+
+
func set_done_false():
done = false
+
func zero_reward():
reward = 0.0
diff --git a/examples/VirtualCamera/addons/godot_rl_agents/onnx/csharp/ONNXInference.cs b/examples/VirtualCamera/addons/godot_rl_agents/onnx/csharp/ONNXInference.cs
index 0741f0f..6dcfa18 100644
--- a/examples/VirtualCamera/addons/godot_rl_agents/onnx/csharp/ONNXInference.cs
+++ b/examples/VirtualCamera/addons/godot_rl_agents/onnx/csharp/ONNXInference.cs
@@ -19,16 +19,22 @@ public partial class ONNXInference : GodotObject
private SessionOptions SessionOpt;
- //init function
- ///
- public void Initialize(string Path, int BatchSize)
+ ///
+ /// init function
+ ///
+ ///
+ ///
+ /// Returns the output size of the model
+ public int Initialize(string Path, int BatchSize)
{
modelPath = Path;
batchSize = BatchSize;
SessionOpt = SessionConfigurator.MakeConfiguredSessionOptions();
session = LoadModel(modelPath);
+ return session.OutputMetadata["output"].Dimensions[1];
+ }
+
- }
///
public Godot.Collections.Dictionary> RunInference(Godot.Collections.Array obs, int state_ins)
{
diff --git a/examples/VirtualCamera/addons/godot_rl_agents/onnx/wrapper/ONNX_wrapper.gd b/examples/VirtualCamera/addons/godot_rl_agents/onnx/wrapper/ONNX_wrapper.gd
index c7b14b3..e27f2c3 100644
--- a/examples/VirtualCamera/addons/godot_rl_agents/onnx/wrapper/ONNX_wrapper.gd
+++ b/examples/VirtualCamera/addons/godot_rl_agents/onnx/wrapper/ONNX_wrapper.gd
@@ -4,21 +4,48 @@ var inferencer_script = load("res://addons/godot_rl_agents/onnx/csharp/ONNXInfer
var inferencer = null
+## How many action values the model outputs
+var action_output_size: int
+
+## Used to differentiate models
+## that only output continuous action mean (e.g. sb3, cleanrl export)
+## versus models that output mean and logstd (e.g. rllib export)
+var action_means_only: bool
+
+## Whether action_means_only has been set already for this model
+var action_means_only_set: bool
+
# Must provide the path to the model and the batch size
func _init(model_path, batch_size):
inferencer = inferencer_script.new()
- inferencer.Initialize(model_path, batch_size)
+ action_output_size = inferencer.Initialize(model_path, batch_size)
-# This function is the one that will be called from the game,
+# This function is the one that will be called from the game,
# requires the observation as an array and the state_ins as an int
-# returns an Array containing the action the model takes.
-func run_inference(obs : Array, state_ins : int) -> Dictionary:
+# returns an Array containing the action the model takes.
+func run_inference(obs: Array, state_ins: int) -> Dictionary:
if inferencer == null:
printerr("Inferencer not initialized")
return {}
return inferencer.RunInference(obs, state_ins)
+
func _notification(what):
if what == NOTIFICATION_PREDELETE:
inferencer.FreeDisposables()
inferencer.free()
+
+# Check whether agent uses a continuous actions model with only action means or not
+func set_action_means_only(agent_action_space):
+ action_means_only_set = true
+ var continuous_only: bool = true
+ var continuous_actions: int
+ for action in agent_action_space:
+ if not agent_action_space[action]["action_type"] == "continuous":
+ continuous_only = false
+ break
+ else:
+ continuous_actions += agent_action_space[action]["size"]
+ if continuous_only:
+ if continuous_actions == action_output_size:
+ action_means_only = true
diff --git a/examples/VirtualCamera/addons/godot_rl_agents/sensors/sensors_2d/GridSensor2D.gd b/examples/VirtualCamera/addons/godot_rl_agents/sensors/sensors_2d/GridSensor2D.gd
index 12f2957..da170ba 100644
--- a/examples/VirtualCamera/addons/godot_rl_agents/sensors/sensors_2d/GridSensor2D.gd
+++ b/examples/VirtualCamera/addons/godot_rl_agents/sensors/sensors_2d/GridSensor2D.gd
@@ -3,49 +3,57 @@ extends ISensor2D
class_name GridSensor2D
@export var debug_view := false:
- get: return debug_view
+ get:
+ return debug_view
set(value):
debug_view = value
_update()
-
+
@export_flags_2d_physics var detection_mask := 0:
- get: return detection_mask
+ get:
+ return detection_mask
set(value):
detection_mask = value
_update()
@export var collide_with_areas := false:
- get: return collide_with_areas
+ get:
+ return collide_with_areas
set(value):
collide_with_areas = value
_update()
@export var collide_with_bodies := true:
- get: return collide_with_bodies
+ get:
+ return collide_with_bodies
set(value):
collide_with_bodies = value
_update()
@export_range(1, 200, 0.1) var cell_width := 20.0:
- get: return cell_width
+ get:
+ return cell_width
set(value):
cell_width = value
_update()
@export_range(1, 200, 0.1) var cell_height := 20.0:
- get: return cell_height
+ get:
+ return cell_height
set(value):
cell_height = value
- _update()
+ _update()
@export_range(1, 21, 2, "or_greater") var grid_size_x := 3:
- get: return grid_size_x
+ get:
+ return grid_size_x
set(value):
grid_size_x = value
_update()
@export_range(1, 21, 2, "or_greater") var grid_size_y := 3:
- get: return grid_size_y
+ get:
+ return grid_size_y
set(value):
grid_size_y = value
_update()
@@ -58,158 +66,169 @@ var _n_layers_per_cell: int
var _highlighted_cell_color: Color
var _standard_cell_color: Color
+
func get_observation():
return _obs_buffer
-
+
+
func _update():
if Engine.is_editor_hint():
if is_node_ready():
- _spawn_nodes()
+ _spawn_nodes()
+
func _ready() -> void:
_set_colors()
-
- if Engine.is_editor_hint():
+
+ if Engine.is_editor_hint():
if get_child_count() == 0:
_spawn_nodes()
else:
_spawn_nodes()
-
-
+
+
func _set_colors() -> void:
- _standard_cell_color = Color(100.0/255.0, 100.0/255.0, 100.0/255.0, 100.0/255.0)
- _highlighted_cell_color = Color(255.0/255.0, 100.0/255.0, 100.0/255.0, 100.0/255.0)
+ _standard_cell_color = Color(100.0 / 255.0, 100.0 / 255.0, 100.0 / 255.0, 100.0 / 255.0)
+ _highlighted_cell_color = Color(255.0 / 255.0, 100.0 / 255.0, 100.0 / 255.0, 100.0 / 255.0)
+
func _get_collision_mapping() -> Dictionary:
# defines which layer is mapped to which cell obs index
var total_bits = 0
- var collision_mapping = {}
+ var collision_mapping = {}
for i in 32:
- var bit_mask = 2**i
+ var bit_mask = 2 ** i
if (detection_mask & bit_mask) > 0:
collision_mapping[i] = total_bits
total_bits += 1
-
+
return collision_mapping
+
func _spawn_nodes():
for cell in get_children():
- cell.name = "_%s" % cell.name # Otherwise naming below will fail
+ cell.name = "_%s" % cell.name # Otherwise naming below will fail
cell.queue_free()
-
+
_collision_mapping = _get_collision_mapping()
#prints("collision_mapping", _collision_mapping, len(_collision_mapping))
# allocate memory for the observations
_n_layers_per_cell = len(_collision_mapping)
_obs_buffer = PackedFloat64Array()
- _obs_buffer.resize(grid_size_x*grid_size_y*_n_layers_per_cell)
+ _obs_buffer.resize(grid_size_x * grid_size_y * _n_layers_per_cell)
_obs_buffer.fill(0)
#prints(len(_obs_buffer), _obs_buffer )
-
+
_rectangle_shape = RectangleShape2D.new()
_rectangle_shape.set_size(Vector2(cell_width, cell_height))
-
+
var shift := Vector2(
- -(grid_size_x/2)*cell_width,
- -(grid_size_y/2)*cell_height,
+ -(grid_size_x / 2) * cell_width,
+ -(grid_size_y / 2) * cell_height,
)
-
+
for i in grid_size_x:
for j in grid_size_y:
- var cell_position = Vector2(i*cell_width, j*cell_height) + shift
+ var cell_position = Vector2(i * cell_width, j * cell_height) + shift
_create_cell(i, j, cell_position)
-
-func _create_cell(i:int, j:int, position: Vector2):
- var cell : = Area2D.new()
+
+func _create_cell(i: int, j: int, position: Vector2):
+ var cell := Area2D.new()
cell.position = position
- cell.name = "GridCell %s %s" %[i, j]
+ cell.name = "GridCell %s %s" % [i, j]
cell.modulate = _standard_cell_color
-
+
if collide_with_areas:
cell.area_entered.connect(_on_cell_area_entered.bind(i, j))
cell.area_exited.connect(_on_cell_area_exited.bind(i, j))
-
+
if collide_with_bodies:
cell.body_entered.connect(_on_cell_body_entered.bind(i, j))
cell.body_exited.connect(_on_cell_body_exited.bind(i, j))
-
+
cell.collision_layer = 0
cell.collision_mask = detection_mask
cell.monitorable = true
add_child(cell)
cell.set_owner(get_tree().edited_scene_root)
- var col_shape : = CollisionShape2D.new()
+ var col_shape := CollisionShape2D.new()
col_shape.shape = _rectangle_shape
col_shape.name = "CollisionShape2D"
cell.add_child(col_shape)
col_shape.set_owner(get_tree().edited_scene_root)
-
+
if debug_view:
var quad = MeshInstance2D.new()
quad.name = "MeshInstance2D"
var quad_mesh = QuadMesh.new()
-
+
quad_mesh.set_size(Vector2(cell_width, cell_height))
-
+
quad.mesh = quad_mesh
cell.add_child(quad)
quad.set_owner(get_tree().edited_scene_root)
-func _update_obs(cell_i:int, cell_j:int, collision_layer:int, entered: bool):
+
+func _update_obs(cell_i: int, cell_j: int, collision_layer: int, entered: bool):
for key in _collision_mapping:
- var bit_mask = 2**key
+ var bit_mask = 2 ** key
if (collision_layer & bit_mask) > 0:
var collison_map_index = _collision_mapping[key]
-
+
var obs_index = (
- (cell_i * grid_size_x * _n_layers_per_cell) +
- (cell_j * _n_layers_per_cell) +
- collison_map_index
- )
+ (cell_i * grid_size_x * _n_layers_per_cell)
+ + (cell_j * _n_layers_per_cell)
+ + collison_map_index
+ )
#prints(obs_index, cell_i, cell_j)
if entered:
_obs_buffer[obs_index] += 1
else:
_obs_buffer[obs_index] -= 1
-func _toggle_cell(cell_i:int, cell_j:int):
- var cell = get_node_or_null("GridCell %s %s" %[cell_i, cell_j])
-
+
+func _toggle_cell(cell_i: int, cell_j: int):
+ var cell = get_node_or_null("GridCell %s %s" % [cell_i, cell_j])
+
if cell == null:
print("cell not found, returning")
-
+
var n_hits = 0
var start_index = (cell_i * grid_size_x * _n_layers_per_cell) + (cell_j * _n_layers_per_cell)
for i in _n_layers_per_cell:
- n_hits += _obs_buffer[start_index+i]
-
+ n_hits += _obs_buffer[start_index + i]
+
if n_hits > 0:
cell.modulate = _highlighted_cell_color
else:
cell.modulate = _standard_cell_color
-
-func _on_cell_area_entered(area:Area2D, cell_i:int, cell_j:int):
+
+
+func _on_cell_area_entered(area: Area2D, cell_i: int, cell_j: int):
#prints("_on_cell_area_entered", cell_i, cell_j)
_update_obs(cell_i, cell_j, area.collision_layer, true)
if debug_view:
_toggle_cell(cell_i, cell_j)
#print(_obs_buffer)
-func _on_cell_area_exited(area:Area2D, cell_i:int, cell_j:int):
+
+func _on_cell_area_exited(area: Area2D, cell_i: int, cell_j: int):
#prints("_on_cell_area_exited", cell_i, cell_j)
_update_obs(cell_i, cell_j, area.collision_layer, false)
if debug_view:
_toggle_cell(cell_i, cell_j)
-func _on_cell_body_entered(body: Node2D, cell_i:int, cell_j:int):
+
+func _on_cell_body_entered(body: Node2D, cell_i: int, cell_j: int):
#prints("_on_cell_body_entered", cell_i, cell_j)
_update_obs(cell_i, cell_j, body.collision_layer, true)
if debug_view:
_toggle_cell(cell_i, cell_j)
-func _on_cell_body_exited(body: Node2D, cell_i:int, cell_j:int):
+
+func _on_cell_body_exited(body: Node2D, cell_i: int, cell_j: int):
#prints("_on_cell_body_exited", cell_i, cell_j)
_update_obs(cell_i, cell_j, body.collision_layer, false)
if debug_view:
diff --git a/examples/VirtualCamera/addons/godot_rl_agents/sensors/sensors_2d/ISensor2D.gd b/examples/VirtualCamera/addons/godot_rl_agents/sensors/sensors_2d/ISensor2D.gd
index ec20f08..67669a1 100644
--- a/examples/VirtualCamera/addons/godot_rl_agents/sensors/sensors_2d/ISensor2D.gd
+++ b/examples/VirtualCamera/addons/godot_rl_agents/sensors/sensors_2d/ISensor2D.gd
@@ -1,20 +1,25 @@
extends Node2D
class_name ISensor2D
-var _obs : Array = []
+var _obs: Array = []
var _active := false
+
func get_observation():
pass
-
+
+
func activate():
_active = true
-
+
+
func deactivate():
_active = false
+
func _update_observation():
pass
-
+
+
func reset():
pass
diff --git a/examples/VirtualCamera/addons/godot_rl_agents/sensors/sensors_2d/RaycastSensor2D.gd b/examples/VirtualCamera/addons/godot_rl_agents/sensors/sensors_2d/RaycastSensor2D.gd
index 09363c4..9bb54ed 100644
--- a/examples/VirtualCamera/addons/godot_rl_agents/sensors/sensors_2d/RaycastSensor2D.gd
+++ b/examples/VirtualCamera/addons/godot_rl_agents/sensors/sensors_2d/RaycastSensor2D.gd
@@ -3,50 +3,57 @@ extends ISensor2D
class_name RaycastSensor2D
@export_flags_2d_physics var collision_mask := 1:
- get: return collision_mask
+ get:
+ return collision_mask
set(value):
collision_mask = value
_update()
@export var collide_with_areas := false:
- get: return collide_with_areas
+ get:
+ return collide_with_areas
set(value):
collide_with_areas = value
_update()
@export var collide_with_bodies := true:
- get: return collide_with_bodies
+ get:
+ return collide_with_bodies
set(value):
collide_with_bodies = value
_update()
@export var n_rays := 16.0:
- get: return n_rays
+ get:
+ return n_rays
set(value):
n_rays = value
_update()
-
-@export_range(5,200,5.0) var ray_length := 200:
- get: return ray_length
+
+@export_range(5, 3000, 5.0) var ray_length := 200:
+ get:
+ return ray_length
set(value):
ray_length = value
_update()
-@export_range(5,360,5.0) var cone_width := 360.0:
- get: return cone_width
+@export_range(5, 360, 5.0) var cone_width := 360.0:
+ get:
+ return cone_width
set(value):
cone_width = value
_update()
-
-@export var debug_draw := true :
- get: return debug_draw
+
+@export var debug_draw := true:
+ get:
+ return debug_draw
set(value):
debug_draw = value
- _update()
-
+ _update()
var _angles = []
var rays := []
+
func _update():
if Engine.is_editor_hint():
if debug_draw:
@@ -56,63 +63,56 @@ func _update():
if ray is RayCast2D:
remove_child(ray)
+
func _ready() -> void:
_spawn_nodes()
+
func _spawn_nodes():
for ray in rays:
ray.queue_free()
rays = []
-
+
_angles = []
var step = cone_width / (n_rays)
- var start = step/2 - cone_width/2
-
+ var start = step / 2 - cone_width / 2
+
for i in n_rays:
var angle = start + i * step
var ray = RayCast2D.new()
- ray.set_target_position(Vector2(
- ray_length*cos(deg_to_rad(angle)),
- ray_length*sin(deg_to_rad(angle))
- ))
- ray.set_name("node_"+str(i))
- ray.enabled = true
+ ray.set_target_position(
+ Vector2(ray_length * cos(deg_to_rad(angle)), ray_length * sin(deg_to_rad(angle)))
+ )
+ ray.set_name("node_" + str(i))
+ ray.enabled = false
ray.collide_with_areas = collide_with_areas
ray.collide_with_bodies = collide_with_bodies
ray.collision_mask = collision_mask
add_child(ray)
rays.append(ray)
-
-
+
_angles.append(start + i * step)
-
-func _physics_process(delta: float) -> void:
- if self._active:
- self._obs = calculate_raycasts()
-
+
func get_observation() -> Array:
- if len(self._obs) == 0:
- print("obs was null, forcing raycast update")
- return self.calculate_raycasts()
- return self._obs
-
+ return self.calculate_raycasts()
+
func calculate_raycasts() -> Array:
var result = []
for ray in rays:
+ ray.enabled = true
ray.force_raycast_update()
var distance = _get_raycast_distance(ray)
result.append(distance)
+ ray.enabled = false
return result
-func _get_raycast_distance(ray : RayCast2D) -> float :
+
+func _get_raycast_distance(ray: RayCast2D) -> float:
if !ray.is_colliding():
return 0.0
-
+
var distance = (global_position - ray.get_collision_point()).length()
distance = clamp(distance, 0.0, ray_length)
return (ray_length - distance) / ray_length
-
-
-
diff --git a/examples/VirtualCamera/addons/godot_rl_agents/sensors/sensors_3d/GridSensor3D.gd b/examples/VirtualCamera/addons/godot_rl_agents/sensors/sensors_3d/GridSensor3D.gd
index cfce8a8..03593cc 100644
--- a/examples/VirtualCamera/addons/godot_rl_agents/sensors/sensors_3d/GridSensor3D.gd
+++ b/examples/VirtualCamera/addons/godot_rl_agents/sensors/sensors_3d/GridSensor3D.gd
@@ -3,50 +3,58 @@ extends ISensor3D
class_name GridSensor3D
@export var debug_view := false:
- get: return debug_view
+ get:
+ return debug_view
set(value):
debug_view = value
_update()
-
+
@export_flags_3d_physics var detection_mask := 0:
- get: return detection_mask
+ get:
+ return detection_mask
set(value):
detection_mask = value
_update()
@export var collide_with_areas := false:
- get: return collide_with_areas
+ get:
+ return collide_with_areas
set(value):
collide_with_areas = value
_update()
@export var collide_with_bodies := false:
# NOTE! The sensor will not detect StaticBody3D, add an area to static bodies to detect them
- get: return collide_with_bodies
+ get:
+ return collide_with_bodies
set(value):
collide_with_bodies = value
_update()
@export_range(0.1, 2, 0.1) var cell_width := 1.0:
- get: return cell_width
+ get:
+ return cell_width
set(value):
cell_width = value
_update()
@export_range(0.1, 2, 0.1) var cell_height := 1.0:
- get: return cell_height
+ get:
+ return cell_height
set(value):
cell_height = value
- _update()
+ _update()
@export_range(1, 21, 2, "or_greater") var grid_size_x := 3:
- get: return grid_size_x
+ get:
+ return grid_size_x
set(value):
grid_size_x = value
_update()
@export_range(1, 21, 2, "or_greater") var grid_size_z := 3:
- get: return grid_size_z
+ get:
+ return grid_size_z
set(value):
grid_size_z = value
_update()
@@ -59,95 +67,106 @@ var _n_layers_per_cell: int
var _highlighted_box_material: StandardMaterial3D
var _standard_box_material: StandardMaterial3D
+
func get_observation():
return _obs_buffer
+
func reset():
_obs_buffer.fill(0)
+
func _update():
if Engine.is_editor_hint():
if is_node_ready():
- _spawn_nodes()
+ _spawn_nodes()
+
func _ready() -> void:
_make_materials()
-
- if Engine.is_editor_hint():
+
+ if Engine.is_editor_hint():
if get_child_count() == 0:
_spawn_nodes()
else:
_spawn_nodes()
-
+
+
func _make_materials() -> void:
if _highlighted_box_material != null and _standard_box_material != null:
return
-
+
_standard_box_material = StandardMaterial3D.new()
- _standard_box_material.set_transparency(1) # ALPHA
- _standard_box_material.albedo_color = Color(100.0/255.0, 100.0/255.0, 100.0/255.0, 100.0/255.0)
-
+ _standard_box_material.set_transparency(1) # ALPHA
+ _standard_box_material.albedo_color = Color(
+ 100.0 / 255.0, 100.0 / 255.0, 100.0 / 255.0, 100.0 / 255.0
+ )
+
_highlighted_box_material = StandardMaterial3D.new()
- _highlighted_box_material.set_transparency(1) # ALPHA
- _highlighted_box_material.albedo_color = Color(255.0/255.0, 100.0/255.0, 100.0/255.0, 100.0/255.0)
+ _highlighted_box_material.set_transparency(1) # ALPHA
+ _highlighted_box_material.albedo_color = Color(
+ 255.0 / 255.0, 100.0 / 255.0, 100.0 / 255.0, 100.0 / 255.0
+ )
+
func _get_collision_mapping() -> Dictionary:
# defines which layer is mapped to which cell obs index
var total_bits = 0
- var collision_mapping = {}
+ var collision_mapping = {}
for i in 32:
- var bit_mask = 2**i
+ var bit_mask = 2 ** i
if (detection_mask & bit_mask) > 0:
collision_mapping[i] = total_bits
total_bits += 1
-
+
return collision_mapping
+
func _spawn_nodes():
for cell in get_children():
- cell.name = "_%s" % cell.name # Otherwise naming below will fail
+ cell.name = "_%s" % cell.name # Otherwise naming below will fail
cell.queue_free()
-
+
_collision_mapping = _get_collision_mapping()
#prints("collision_mapping", _collision_mapping, len(_collision_mapping))
# allocate memory for the observations
_n_layers_per_cell = len(_collision_mapping)
_obs_buffer = PackedFloat64Array()
- _obs_buffer.resize(grid_size_x*grid_size_z*_n_layers_per_cell)
+ _obs_buffer.resize(grid_size_x * grid_size_z * _n_layers_per_cell)
_obs_buffer.fill(0)
#prints(len(_obs_buffer), _obs_buffer )
-
+
_box_shape = BoxShape3D.new()
_box_shape.set_size(Vector3(cell_width, cell_height, cell_width))
-
+
var shift := Vector3(
- -(grid_size_x/2)*cell_width,
+ -(grid_size_x / 2) * cell_width,
0,
- -(grid_size_z/2)*cell_width,
+ -(grid_size_z / 2) * cell_width,
)
-
+
for i in grid_size_x:
for j in grid_size_z:
- var cell_position = Vector3(i*cell_width, 0.0, j*cell_width) + shift
+ var cell_position = Vector3(i * cell_width, 0.0, j * cell_width) + shift
_create_cell(i, j, cell_position)
-
-func _create_cell(i:int, j:int, position: Vector3):
- var cell : = Area3D.new()
+
+func _create_cell(i: int, j: int, position: Vector3):
+ var cell := Area3D.new()
cell.position = position
- cell.name = "GridCell %s %s" %[i, j]
-
+ cell.name = "GridCell %s %s" % [i, j]
+
if collide_with_areas:
cell.area_entered.connect(_on_cell_area_entered.bind(i, j))
cell.area_exited.connect(_on_cell_area_exited.bind(i, j))
-
+
if collide_with_bodies:
cell.body_entered.connect(_on_cell_body_entered.bind(i, j))
cell.body_exited.connect(_on_cell_body_exited.bind(i, j))
-
+
# cell.body_shape_entered.connect(_on_cell_body_shape_entered.bind(i, j))
# cell.body_shape_exited.connect(_on_cell_body_shape_exited.bind(i, j))
-
+
cell.collision_layer = 0
cell.collision_mask = detection_mask
cell.monitorable = true
@@ -155,78 +174,84 @@ func _create_cell(i:int, j:int, position: Vector3):
add_child(cell)
cell.set_owner(get_tree().edited_scene_root)
- var col_shape : = CollisionShape3D.new()
+ var col_shape := CollisionShape3D.new()
col_shape.shape = _box_shape
col_shape.name = "CollisionShape3D"
cell.add_child(col_shape)
col_shape.set_owner(get_tree().edited_scene_root)
-
+
if debug_view:
var box = MeshInstance3D.new()
box.name = "MeshInstance3D"
var box_mesh = BoxMesh.new()
-
+
box_mesh.set_size(Vector3(cell_width, cell_height, cell_width))
box_mesh.material = _standard_box_material
-
+
box.mesh = box_mesh
cell.add_child(box)
box.set_owner(get_tree().edited_scene_root)
-func _update_obs(cell_i:int, cell_j:int, collision_layer:int, entered: bool):
+
+func _update_obs(cell_i: int, cell_j: int, collision_layer: int, entered: bool):
for key in _collision_mapping:
- var bit_mask = 2**key
+ var bit_mask = 2 ** key
if (collision_layer & bit_mask) > 0:
var collison_map_index = _collision_mapping[key]
-
+
var obs_index = (
- (cell_i * grid_size_x * _n_layers_per_cell) +
- (cell_j * _n_layers_per_cell) +
- collison_map_index
- )
+ (cell_i * grid_size_x * _n_layers_per_cell)
+ + (cell_j * _n_layers_per_cell)
+ + collison_map_index
+ )
#prints(obs_index, cell_i, cell_j)
if entered:
_obs_buffer[obs_index] += 1
else:
_obs_buffer[obs_index] -= 1
-func _toggle_cell(cell_i:int, cell_j:int):
- var cell = get_node_or_null("GridCell %s %s" %[cell_i, cell_j])
-
+
+func _toggle_cell(cell_i: int, cell_j: int):
+ var cell = get_node_or_null("GridCell %s %s" % [cell_i, cell_j])
+
if cell == null:
print("cell not found, returning")
-
+
var n_hits = 0
var start_index = (cell_i * grid_size_x * _n_layers_per_cell) + (cell_j * _n_layers_per_cell)
for i in _n_layers_per_cell:
- n_hits += _obs_buffer[start_index+i]
-
+ n_hits += _obs_buffer[start_index + i]
+
var cell_mesh = cell.get_node_or_null("MeshInstance3D")
if n_hits > 0:
cell_mesh.mesh.material = _highlighted_box_material
else:
cell_mesh.mesh.material = _standard_box_material
-
-func _on_cell_area_entered(area:Area3D, cell_i:int, cell_j:int):
+
+
+func _on_cell_area_entered(area: Area3D, cell_i: int, cell_j: int):
#prints("_on_cell_area_entered", cell_i, cell_j)
_update_obs(cell_i, cell_j, area.collision_layer, true)
if debug_view:
_toggle_cell(cell_i, cell_j)
#print(_obs_buffer)
-func _on_cell_area_exited(area:Area3D, cell_i:int, cell_j:int):
+
+func _on_cell_area_exited(area: Area3D, cell_i: int, cell_j: int):
#prints("_on_cell_area_exited", cell_i, cell_j)
_update_obs(cell_i, cell_j, area.collision_layer, false)
if debug_view:
_toggle_cell(cell_i, cell_j)
-func _on_cell_body_entered(body: Node3D, cell_i:int, cell_j:int):
+
+func _on_cell_body_entered(body: Node3D, cell_i: int, cell_j: int):
#prints("_on_cell_body_entered", cell_i, cell_j)
_update_obs(cell_i, cell_j, body.collision_layer, true)
if debug_view:
_toggle_cell(cell_i, cell_j)
-func _on_cell_body_exited(body: Node3D, cell_i:int, cell_j:int):
+
+func _on_cell_body_exited(body: Node3D, cell_i: int, cell_j: int):
#prints("_on_cell_body_exited", cell_i, cell_j)
_update_obs(cell_i, cell_j, body.collision_layer, false)
if debug_view:
diff --git a/examples/VirtualCamera/addons/godot_rl_agents/sensors/sensors_3d/ISensor3D.gd b/examples/VirtualCamera/addons/godot_rl_agents/sensors/sensors_3d/ISensor3D.gd
index d57503b..aca3c2d 100644
--- a/examples/VirtualCamera/addons/godot_rl_agents/sensors/sensors_3d/ISensor3D.gd
+++ b/examples/VirtualCamera/addons/godot_rl_agents/sensors/sensors_3d/ISensor3D.gd
@@ -1,20 +1,25 @@
extends Node3D
class_name ISensor3D
-var _obs : Array = []
+var _obs: Array = []
var _active := false
+
func get_observation():
pass
-
+
+
func activate():
_active = true
-
+
+
func deactivate():
_active = false
+
func _update_observation():
pass
-
+
+
func reset():
pass
diff --git a/examples/VirtualCamera/addons/godot_rl_agents/sensors/sensors_3d/RGBCameraSensor3D.gd b/examples/VirtualCamera/addons/godot_rl_agents/sensors/sensors_3d/RGBCameraSensor3D.gd
index 1037e97..78bcbf8 100644
--- a/examples/VirtualCamera/addons/godot_rl_agents/sensors/sensors_3d/RGBCameraSensor3D.gd
+++ b/examples/VirtualCamera/addons/godot_rl_agents/sensors/sensors_3d/RGBCameraSensor3D.gd
@@ -2,20 +2,66 @@ extends Node3D
class_name RGBCameraSensor3D
var camera_pixels = null
-@onready var camera_texture := $Control/TextureRect/CameraTexture as Sprite2D
+@onready var camera_texture := $Control/CameraTexture as Sprite2D
+@onready var processed_texture := $Control/ProcessedTexture as Sprite2D
@onready var sub_viewport := $SubViewport as SubViewport
+@onready var displayed_image: ImageTexture
+
+## We need to encode the image differently when training or running inference
+@export var training_mode: bool
+
+@export var render_image_resolution := Vector2(36, 36)
+## Display size does not affect rendered or sent image resolution.
+## Scale is relative to either render image or downscale image resolution
+## depending on which mode is set.
+@export var displayed_image_scale_factor := Vector2(8, 8)
+
+@export_group("Downscale image options")
+## Enable to downscale the rendered image before sending the obs.
+@export var downscale_image: bool = false
+## If downscale_image is true, will display the downscaled image instead of rendered image.
+@export var display_downscaled_image: bool = true
+## This is the resolution of the image that will be sent after downscaling
+@export var resized_image_resolution := Vector2(36, 36)
+
+
+func _ready():
+ sub_viewport.size = render_image_resolution
+ camera_texture.scale = displayed_image_scale_factor
+
+ if downscale_image and display_downscaled_image:
+ camera_texture.visible = false
+ processed_texture.scale = displayed_image_scale_factor
+ else:
+ processed_texture.visible = false
func get_camera_pixel_encoding():
- return camera_texture.get_texture().get_image().get_data().hex_encode()
+ var image := camera_texture.get_texture().get_image() as Image
+
+ if downscale_image:
+ image.resize(
+ resized_image_resolution.x, resized_image_resolution.y, Image.INTERPOLATE_NEAREST
+ )
+ if display_downscaled_image:
+ if not processed_texture.texture:
+ displayed_image = ImageTexture.create_from_image(image)
+ processed_texture.texture = displayed_image
+ else:
+ displayed_image.update(image)
+
+ var results = image.get_data().hex_encode() if training_mode else image.get_data()
+ return results
func get_camera_shape() -> Array:
- assert(
- sub_viewport.size.x >= 36 and sub_viewport.size.y >= 36,
- "SubViewport size must be 36x36 or larger."
- )
+ var size = resized_image_resolution if downscale_image else render_image_resolution
+
+ #assert(
+ #size.x >= 36 and size.y >= 36,
+ #"Camera sensor sent image resolution must be 36x36 or larger."
+ #)
if sub_viewport.transparent_bg:
- return [4, sub_viewport.size.y, sub_viewport.size.x]
+ return [size.y, size.x, 4]
else:
- return [3, sub_viewport.size.y, sub_viewport.size.x]
+ return [size.y, size.x, 3]
diff --git a/examples/VirtualCamera/addons/godot_rl_agents/sensors/sensors_3d/RGBCameraSensor3D.tscn b/examples/VirtualCamera/addons/godot_rl_agents/sensors/sensors_3d/RGBCameraSensor3D.tscn
index 052b557..d58649c 100644
--- a/examples/VirtualCamera/addons/godot_rl_agents/sensors/sensors_3d/RGBCameraSensor3D.tscn
+++ b/examples/VirtualCamera/addons/godot_rl_agents/sensors/sensors_3d/RGBCameraSensor3D.tscn
@@ -2,20 +2,20 @@
[ext_resource type="Script" path="res://addons/godot_rl_agents/sensors/sensors_3d/RGBCameraSensor3D.gd" id="1"]
-[sub_resource type="ViewportTexture" id="1"]
+[sub_resource type="ViewportTexture" id="ViewportTexture_y72s3"]
viewport_path = NodePath("SubViewport")
[node name="RGBCameraSensor3D" type="Node3D"]
script = ExtResource("1")
-[node name="RemoteTransform3D" type="RemoteTransform3D" parent="."]
-remote_path = NodePath("../SubViewport/Camera3D")
+[node name="RemoteTransform" type="RemoteTransform3D" parent="."]
+remote_path = NodePath("../SubViewport/Camera")
[node name="SubViewport" type="SubViewport" parent="."]
-size = Vector2i(32, 32)
+size = Vector2i(36, 36)
render_target_update_mode = 3
-[node name="Camera3D" type="Camera3D" parent="SubViewport"]
+[node name="Camera" type="Camera3D" parent="SubViewport"]
near = 0.5
[node name="Control" type="Control" parent="."]
@@ -25,17 +25,11 @@ anchor_right = 1.0
anchor_bottom = 1.0
grow_horizontal = 2
grow_vertical = 2
+metadata/_edit_use_anchors_ = true
-[node name="TextureRect" type="ColorRect" parent="Control"]
-layout_mode = 0
-offset_left = 1096.0
-offset_top = 534.0
-offset_right = 1114.0
-offset_bottom = 552.0
-scale = Vector2(10, 10)
-color = Color(0.00784314, 0.00784314, 0.00784314, 1)
-
-[node name="CameraTexture" type="Sprite2D" parent="Control/TextureRect"]
-texture = SubResource("1")
-offset = Vector2(9, 9)
-flip_v = true
+[node name="CameraTexture" type="Sprite2D" parent="Control"]
+texture = SubResource("ViewportTexture_y72s3")
+centered = false
+
+[node name="ProcessedTexture" type="Sprite2D" parent="Control"]
+centered = false
diff --git a/examples/VirtualCamera/addons/godot_rl_agents/sensors/sensors_3d/RaycastSensor3D.gd b/examples/VirtualCamera/addons/godot_rl_agents/sensors/sensors_3d/RaycastSensor3D.gd
index 1f36193..1357529 100644
--- a/examples/VirtualCamera/addons/godot_rl_agents/sensors/sensors_3d/RaycastSensor3D.gd
+++ b/examples/VirtualCamera/addons/godot_rl_agents/sensors/sensors_3d/RaycastSensor3D.gd
@@ -2,70 +2,86 @@
extends ISensor3D
class_name RayCastSensor3D
@export_flags_3d_physics var collision_mask = 1:
- get: return collision_mask
+ get:
+ return collision_mask
set(value):
collision_mask = value
_update()
@export_flags_3d_physics var boolean_class_mask = 1:
- get: return boolean_class_mask
+ get:
+ return boolean_class_mask
set(value):
boolean_class_mask = value
_update()
@export var n_rays_width := 6.0:
- get: return n_rays_width
+ get:
+ return n_rays_width
set(value):
n_rays_width = value
_update()
-
+
@export var n_rays_height := 6.0:
- get: return n_rays_height
+ get:
+ return n_rays_height
set(value):
n_rays_height = value
_update()
@export var ray_length := 10.0:
- get: return ray_length
+ get:
+ return ray_length
set(value):
ray_length = value
_update()
-
+
@export var cone_width := 60.0:
- get: return cone_width
+ get:
+ return cone_width
set(value):
cone_width = value
_update()
-
+
@export var cone_height := 60.0:
- get: return cone_height
+ get:
+ return cone_height
set(value):
cone_height = value
_update()
@export var collide_with_areas := false:
- get: return collide_with_areas
+ get:
+ return collide_with_areas
set(value):
collide_with_areas = value
_update()
-
+
@export var collide_with_bodies := true:
- get: return collide_with_bodies
+ get:
+ return collide_with_bodies
set(value):
collide_with_bodies = value
_update()
@export var class_sensor := false
-
+
var rays := []
var geo = null
+
func _update():
if Engine.is_editor_hint():
- _spawn_nodes()
+ if is_node_ready():
+ _spawn_nodes()
func _ready() -> void:
- _spawn_nodes()
+ if Engine.is_editor_hint():
+ if get_child_count() == 0:
+ _spawn_nodes()
+ else:
+ _spawn_nodes()
+
func _spawn_nodes():
print("spawning nodes")
@@ -75,15 +91,15 @@ func _spawn_nodes():
geo.clear()
#$Lines.remove_points()
rays = []
-
+
var horizontal_step = cone_width / (n_rays_width)
var vertical_step = cone_height / (n_rays_height)
-
- var horizontal_start = horizontal_step/2 - cone_width/2
- var vertical_start = vertical_step/2 - cone_height/2
+
+ var horizontal_start = horizontal_step / 2 - cone_width / 2
+ var vertical_start = vertical_step / 2 - cone_height / 2
var points = []
-
+
for i in n_rays_width:
for j in n_rays_height:
var angle_w = horizontal_start + i * horizontal_step
@@ -94,9 +110,9 @@ func _spawn_nodes():
ray.set_target_position(cast_to)
points.append(cast_to)
-
- ray.set_name("node_"+str(i)+" "+str(j))
- ray.enabled = true
+
+ ray.set_name("node_" + str(i) + " " + str(j))
+ ray.enabled = true
ray.collide_with_bodies = collide_with_bodies
ray.collide_with_areas = collide_with_areas
ray.collision_mask = collision_mask
@@ -104,15 +120,17 @@ func _spawn_nodes():
ray.set_owner(get_tree().edited_scene_root)
rays.append(ray)
ray.force_raycast_update()
-
+
+
# if Engine.editor_hint:
# _create_debug_lines(points)
-
+
+
func _create_debug_lines(points):
- if not geo:
+ if not geo:
geo = ImmediateMesh.new()
add_child(geo)
-
+
geo.clear()
geo.begin(Mesh.PRIMITIVE_LINES)
for point in points:
@@ -121,20 +139,24 @@ func _create_debug_lines(points):
geo.add_vertex(point)
geo.end()
+
func display():
if geo:
geo.display()
-
+
+
func to_spherical_coords(r, inc, azimuth) -> Vector3:
return Vector3(
- r*sin(deg_to_rad(inc))*cos(deg_to_rad(azimuth)),
- r*sin(deg_to_rad(azimuth)),
- r*cos(deg_to_rad(inc))*cos(deg_to_rad(azimuth))
+ r * sin(deg_to_rad(inc)) * cos(deg_to_rad(azimuth)),
+ r * sin(deg_to_rad(azimuth)),
+ r * cos(deg_to_rad(inc)) * cos(deg_to_rad(azimuth))
)
-
+
+
func get_observation() -> Array:
return self.calculate_raycasts()
+
func calculate_raycasts() -> Array:
var result = []
for ray in rays:
@@ -144,19 +166,20 @@ func calculate_raycasts() -> Array:
result.append(distance)
if class_sensor:
- var hit_class = 0
+ var hit_class: float = 0
if ray.get_collider():
var hit_collision_layer = ray.get_collider().collision_layer
hit_collision_layer = hit_collision_layer & collision_mask
hit_class = (hit_collision_layer & boolean_class_mask) > 0
- result.append(hit_class)
+ result.append(float(hit_class))
ray.set_enabled(false)
return result
-func _get_raycast_distance(ray : RayCast3D) -> float :
+
+func _get_raycast_distance(ray: RayCast3D) -> float:
if !ray.is_colliding():
return 0.0
-
+
var distance = (global_transform.origin - ray.get_collision_point()).length()
distance = clamp(distance, 0.0, ray_length)
return (ray_length - distance) / ray_length
diff --git a/examples/VirtualCamera/addons/godot_rl_agents/sync.gd b/examples/VirtualCamera/addons/godot_rl_agents/sync.gd
index 884e4e4..8e43039 100644
--- a/examples/VirtualCamera/addons/godot_rl_agents/sync.gd
+++ b/examples/VirtualCamera/addons/godot_rl_agents/sync.gd
@@ -1,20 +1,43 @@
extends Node
+
# --fixed-fps 2000 --disable-render-loop
+
+enum ControlModes { HUMAN, TRAINING, ONNX_INFERENCE }
+@export var control_mode: ControlModes = ControlModes.TRAINING
@export_range(1, 10, 1, "or_greater") var action_repeat := 8
-@export_range(1, 10, 1, "or_greater") var speed_up = 1
+@export_range(0, 10, 0.1, "or_greater") var speed_up := 1.0
@export var onnx_model_path := ""
+# Onnx model stored for each requested path
+var onnx_models: Dictionary
+
@onready var start_time = Time.get_ticks_msec()
const MAJOR_VERSION := "0"
-const MINOR_VERSION := "3"
+const MINOR_VERSION := "7"
const DEFAULT_PORT := "11008"
const DEFAULT_SEED := "1"
-var stream : StreamPeerTCP = null
+var stream: StreamPeerTCP = null
var connected = false
var message_center
var should_connect = true
-var agents
+
+var all_agents: Array
+var agents_training: Array
+## Policy name of each agent, for use with multi-policy multi-agent RL cases
+var agents_training_policy_names: Array[String] = ["shared_policy"]
+var agents_inference: Array
+var agents_heuristic: Array
+
+## For recording expert demos
+var agent_demo_record: Node
+## File path for writing recorded trajectories
+var expert_demo_save_path: String
+## Stores recorded trajectories
+var demo_trajectories: Array
+## A trajectory includes obs: Array, acts: Array, terminal (set in Python env instead)
+var current_demo_trajectory: Array
+
var need_to_send_obs = false
var args = null
var initialized = false
@@ -22,141 +45,329 @@ var just_reset = false
var onnx_model = null
var n_action_steps = 0
-var _action_space : Dictionary
-var _obs_space : Dictionary
+var _action_space_training: Array[Dictionary] = []
+var _action_space_inference: Array[Dictionary] = []
+var _obs_space_training: Array[Dictionary] = []
# Called when the node enters the scene tree for the first time.
-
func _ready():
await get_tree().root.ready
- get_tree().set_pause(true)
+ get_tree().set_pause(true)
_initialize()
await get_tree().create_timer(1.0).timeout
- get_tree().set_pause(false)
-
+ get_tree().set_pause(false)
+
+
func _initialize():
_get_agents()
- _obs_space = agents[0].get_obs_space()
- _action_space = agents[0].get_action_space()
args = _get_args()
- Engine.physics_ticks_per_second = _get_speedup() * 60 # Replace with function body.
+ Engine.physics_ticks_per_second = _get_speedup() * 60 # Replace with function body.
Engine.time_scale = _get_speedup() * 1.0
- prints("physics ticks", Engine.physics_ticks_per_second, Engine.time_scale, _get_speedup(), speed_up)
-
- # Run inference if onnx model path is set, otherwise wait for server connection
- var run_onnx_model_inference : bool = onnx_model_path != ""
- if run_onnx_model_inference:
- assert(FileAccess.file_exists(onnx_model_path), "Onnx Model Path set on Sync node does not exist: " + onnx_model_path)
- onnx_model = ONNXModel.new(onnx_model_path, 1)
- _set_heuristic("model")
- else:
+ prints(
+ "physics ticks",
+ Engine.physics_ticks_per_second,
+ Engine.time_scale,
+ _get_speedup(),
+ speed_up
+ )
+
+ _set_heuristic("human", all_agents)
+
+ _initialize_training_agents()
+ _initialize_inference_agents()
+ _initialize_demo_recording()
+
+ _set_seed()
+ _set_action_repeat()
+ initialized = true
+
+
+func _initialize_training_agents():
+ if agents_training.size() > 0:
+ _obs_space_training.resize(agents_training.size())
+ _action_space_training.resize(agents_training.size())
+ for agent_idx in range(0, agents_training.size()):
+ _obs_space_training[agent_idx] = agents_training[agent_idx].get_obs_space()
+ _action_space_training[agent_idx] = agents_training[agent_idx].get_action_space()
connected = connect_to_server()
if connected:
- _set_heuristic("model")
+ _set_heuristic("model", agents_training)
_handshake()
_send_env_info()
else:
- _set_heuristic("human")
-
- _set_seed()
- _set_action_repeat()
- initialized = true
-
-func _physics_process(delta):
+ push_warning(
+ "Couldn't connect to Python server, using human controls instead. ",
+ "Did you start the training server using e.g. `gdrl` from the console?"
+ )
+
+
+func _initialize_inference_agents():
+ if agents_inference.size() > 0:
+ if control_mode == ControlModes.ONNX_INFERENCE:
+ assert(
+ FileAccess.file_exists(onnx_model_path),
+ "Onnx Model Path set on Sync node does not exist: %s" % onnx_model_path
+ )
+ onnx_models[onnx_model_path] = ONNXModel.new(onnx_model_path, 1)
+
+ for agent in agents_inference:
+ var action_space = agent.get_action_space()
+ _action_space_inference.append(action_space)
+
+ var agent_onnx_model: ONNXModel
+ if agent.onnx_model_path.is_empty():
+ assert(
+ onnx_models.has(onnx_model_path),
+ (
+ "Node %s has no onnx model path set " % agent.get_path()
+ + "and sync node's control mode is not set to OnnxInference. "
+ + "Either add the path to the AIController, "
+ + "or if you want to use the path set on sync node instead, "
+ + "set control mode to OnnxInference."
+ )
+ )
+ prints(
+ "Info: AIController %s" % agent.get_path(),
+ "has no onnx model path set.",
+ "Using path set on the sync node instead."
+ )
+ agent_onnx_model = onnx_models[onnx_model_path]
+ else:
+ if not onnx_models.has(agent.onnx_model_path):
+ assert(
+ FileAccess.file_exists(agent.onnx_model_path),
+ (
+ "Onnx Model Path set on %s node does not exist: %s"
+ % [agent.get_path(), agent.onnx_model_path]
+ )
+ )
+ onnx_models[agent.onnx_model_path] = ONNXModel.new(agent.onnx_model_path, 1)
+ agent_onnx_model = onnx_models[agent.onnx_model_path]
+
+ agent.onnx_model = agent_onnx_model
+ if not agent_onnx_model.action_means_only_set:
+ agent_onnx_model.set_action_means_only(action_space)
+
+ _set_heuristic("model", agents_inference)
+
+
+func _initialize_demo_recording():
+ if agent_demo_record:
+ expert_demo_save_path = agent_demo_record.expert_demo_save_path
+ assert(
+ not expert_demo_save_path.is_empty(),
+ "Expert demo save path set in %s is empty." % agent_demo_record.get_path()
+ )
+
+ InputMap.add_action("RemoveLastDemoEpisode")
+ InputMap.action_add_event(
+ "RemoveLastDemoEpisode", agent_demo_record.remove_last_episode_key
+ )
+ current_demo_trajectory.resize(2)
+ current_demo_trajectory[0] = []
+ current_demo_trajectory[1] = []
+ agent_demo_record.heuristic = "demo_record"
+
+
+func _physics_process(_delta):
# two modes, human control, agent control
# pause tree, send obs, get actions, set actions, unpause tree
+
+ _demo_record_process()
+
if n_action_steps % action_repeat != 0:
n_action_steps += 1
return
n_action_steps += 1
-
+
+ _training_process()
+ _inference_process()
+ _heuristic_process()
+
+
+func _training_process():
if connected:
- get_tree().set_pause(true)
-
+ get_tree().set_pause(true)
+
if just_reset:
just_reset = false
- var obs = _get_obs_from_agents()
-
- var reply = {
- "type": "reset",
- "obs": obs
- }
+ var obs = _get_obs_from_agents(agents_training)
+
+ var reply = {"type": "reset", "obs": obs}
_send_dict_as_json_message(reply)
# this should go straight to getting the action and setting it checked the agent, no need to perform one phyics tick
- get_tree().set_pause(false)
+ get_tree().set_pause(false)
return
-
+
if need_to_send_obs:
need_to_send_obs = false
var reward = _get_reward_from_agents()
var done = _get_done_from_agents()
#_reset_agents_if_done() # this ensures the new observation is from the next env instance : NEEDS REFACTOR
-
- var obs = _get_obs_from_agents()
-
- var reply = {
- "type": "step",
- "obs": obs,
- "reward": reward,
- "done": done
- }
+
+ var obs = _get_obs_from_agents(agents_training)
+
+ var reply = {"type": "step", "obs": obs, "reward": reward, "done": done}
_send_dict_as_json_message(reply)
-
+
var handled = handle_message()
-
- elif onnx_model != null:
- var obs : Array = _get_obs_from_agents()
-
+
+
+func _inference_process():
+ if agents_inference.size() > 0:
+ var obs: Array = _get_obs_from_agents(agents_inference)
var actions = []
- for o in obs:
- var action = onnx_model.run_inference(o["obs"], 1.0)
- action["output"] = clamp_array(action["output"], -1.0, 1.0)
- var action_dict = _extract_action_dict(action["output"])
+
+ for agent_id in range(0, agents_inference.size()):
+ var model: ONNXModel = agents_inference[agent_id].onnx_model
+ var action = model.run_inference(
+ obs[agent_id]["camera_2d"], 1.0
+ )
+ var action_dict = _extract_action_dict(
+ action["output"], _action_space_inference[agent_id], model.action_means_only
+ )
actions.append(action_dict)
-
- _set_agent_actions(actions)
- need_to_send_obs = true
- get_tree().set_pause(false)
- _reset_agents_if_done()
-
+
+ _set_agent_actions(actions, agents_inference)
+ _reset_agents_if_done(agents_inference)
+ get_tree().set_pause(false)
+
+
+func _demo_record_process():
+ if not agent_demo_record:
+ return
+
+ if Input.is_action_just_pressed("RemoveLastDemoEpisode"):
+ print("[Sync script][Demo recorder] Removing last recorded episode.")
+ demo_trajectories.remove_at(demo_trajectories.size() - 1)
+ print("Remaining episode count: %d" % demo_trajectories.size())
+
+ if n_action_steps % agent_demo_record.action_repeat != 0:
+ return
+
+ var obs_dict: Dictionary = agent_demo_record.get_obs()
+
+ # Get the current obs from the agent
+ assert(
+ obs_dict.has("obs"),
+ "Demo recorder needs an 'obs' key in get_obs() returned dictionary to record obs from."
+ )
+ current_demo_trajectory[0].append(obs_dict.obs)
+
+ # Get the action applied for the current obs from the agent
+ agent_demo_record.set_action()
+ var acts = agent_demo_record.get_action()
+
+ var terminal = agent_demo_record.get_done()
+ # Record actions only for non-terminal states
+ if terminal:
+ agent_demo_record.set_done_false()
else:
- _reset_agents_if_done()
+ current_demo_trajectory[1].append(acts)
-func _extract_action_dict(action_array: Array):
+ if terminal:
+ #current_demo_trajectory[2].append(true)
+ demo_trajectories.append(current_demo_trajectory.duplicate(true))
+ print("[Sync script][Demo recorder] Recorded episode count: %d" % demo_trajectories.size())
+ current_demo_trajectory[0].clear()
+ current_demo_trajectory[1].clear()
+
+
+func _heuristic_process():
+ for agent in agents_heuristic:
+ _reset_agents_if_done(agents_heuristic)
+
+
+func _extract_action_dict(action_array: Array, action_space: Dictionary, action_means_only: bool):
var index = 0
var result = {}
- for key in _action_space.keys():
- var size = _action_space[key]["size"]
- if _action_space[key]["action_type"] == "discrete":
- result[key] = round(action_array[index])
+ for key in action_space.keys():
+ var size = action_space[key]["size"]
+ var action_type = action_space[key]["action_type"]
+ if action_type == "discrete":
+ var largest_logit: float # Value of the largest logit for this action in the actions array
+ var largest_logit_idx: int # Index of the largest logit for this action in the actions array
+ for logit_idx in range(0, size):
+ var logit_value = action_array[index + logit_idx]
+ if logit_value > largest_logit:
+ largest_logit = logit_value
+ largest_logit_idx = logit_idx
+ result[key] = largest_logit_idx # Index of the largest logit is the discrete action value
+ index += size
+ elif action_type == "continuous":
+			# For continuous actions, we only take the action mean values
+ result[key] = clamp_array(action_array.slice(index, index + size), -1.0, 1.0)
+ if action_means_only:
+ index += size # model only outputs action means, so we move index by size
+ else:
+ index += size * 2 # model outputs logstd after action mean, we skip the logstd part
+
else:
- result[key] = action_array.slice(index,index+size)
- index += size
+ assert(false, 'Only "discrete" and "continuous" action types supported. Found: %s action type set.' % action_type)
+
return result
+
+## For AIControllers that inherit mode from sync, sets the correct mode.
+func _set_agent_mode(agent: Node):
+ var agent_inherits_mode: bool = agent.control_mode == agent.ControlModes.INHERIT_FROM_SYNC
+
+ if agent_inherits_mode:
+ match control_mode:
+ ControlModes.HUMAN:
+ agent.control_mode = agent.ControlModes.HUMAN
+ ControlModes.TRAINING:
+ agent.control_mode = agent.ControlModes.TRAINING
+ ControlModes.ONNX_INFERENCE:
+ agent.control_mode = agent.ControlModes.ONNX_INFERENCE
+
+
func _get_agents():
- agents = get_tree().get_nodes_in_group("AGENT")
+ all_agents = get_tree().get_nodes_in_group("AGENT")
+ for agent in all_agents:
+ _set_agent_mode(agent)
+
+ if agent.control_mode == agent.ControlModes.TRAINING:
+ agents_training.append(agent)
+ elif agent.control_mode == agent.ControlModes.ONNX_INFERENCE:
+ agents_inference.append(agent)
+ elif agent.control_mode == agent.ControlModes.HUMAN:
+ agents_heuristic.append(agent)
+ elif agent.control_mode == agent.ControlModes.RECORD_EXPERT_DEMOS:
+ assert(
+ not agent_demo_record,
+ "Currently only a single AIController can be used for recording expert demos."
+ )
+ agent_demo_record = agent
+
+ var training_agent_count = agents_training.size()
+ agents_training_policy_names.resize(training_agent_count)
+ for i in range(0, training_agent_count):
+ agents_training_policy_names[i] = agents_training[i].policy_name
+
-func _set_heuristic(heuristic):
+func _set_heuristic(heuristic, agents: Array):
for agent in agents:
agent.set_heuristic(heuristic)
+
func _handshake():
print("performing handshake")
-
+
var json_dict = _get_dict_json_message()
assert(json_dict["type"] == "handshake")
var major_version = json_dict["major_version"]
var minor_version = json_dict["minor_version"]
if major_version != MAJOR_VERSION:
- print("WARNING: major verison mismatch ", major_version, " ", MAJOR_VERSION)
+ print("WARNING: major verison mismatch ", major_version, " ", MAJOR_VERSION)
if minor_version != MINOR_VERSION:
print("WARNING: minor verison mismatch ", minor_version, " ", MINOR_VERSION)
-
+
print("handshake complete")
+
func _get_dict_json_message():
# returns a dictionary from of the most recent message
# this is not waiting
@@ -168,45 +379,49 @@ func _get_dict_json_message():
return null
OS.delay_usec(10)
-
+
var message = stream.get_string()
var json_data = JSON.parse_string(message)
-
+
return json_data
+
func _send_dict_as_json_message(dict):
- stream.put_string(JSON.stringify(dict))
+ stream.put_string(JSON.stringify(dict, "", false))
+
func _send_env_info():
var json_dict = _get_dict_json_message()
assert(json_dict["type"] == "env_info")
-
var message = {
- "type" : "env_info",
- "observation_space": _obs_space,
- "action_space":_action_space,
- "n_agents": len(agents)
- }
+ "type": "env_info",
+ "observation_space": _obs_space_training,
+ "action_space": _action_space_training,
+ "n_agents": len(agents_training),
+ "agent_policy_names": agents_training_policy_names
+ }
_send_dict_as_json_message(message)
+
func connect_to_server():
print("Waiting for one second to allow server to start")
OS.delay_msec(1000)
print("trying to connect to server")
stream = StreamPeerTCP.new()
-
+
# "localhost" was not working on windows VM, had to use the IP
var ip = "127.0.0.1"
var port = _get_port()
var connect = stream.connect_to_host(ip, port)
- stream.set_no_delay(true) # TODO check if this improves performance or not
+ stream.set_no_delay(true) # TODO check if this improves performance or not
stream.poll()
# Fetch the status until it is either connected (2) or failed to connect (3)
while stream.get_status() < 2:
stream.poll()
return stream.get_status() == 2
+
func _get_args():
print("getting command line arguments")
var arguments = {}
@@ -220,41 +435,45 @@ func _get_args():
# with the value set to an empty string.
arguments[argument.lstrip("--")] = ""
- return arguments
+ return arguments
+
func _get_speedup():
print(args)
- return args.get("speedup", str(speed_up)).to_int()
+ return args.get("speedup", str(speed_up)).to_float()
-func _get_port():
+
+func _get_port():
return args.get("port", DEFAULT_PORT).to_int()
+
func _set_seed():
var _seed = args.get("env_seed", DEFAULT_SEED).to_int()
seed(_seed)
+
func _set_action_repeat():
action_repeat = args.get("action_repeat", str(action_repeat)).to_int()
-
+
+
func disconnect_from_server():
stream.disconnect_from_host()
-
func handle_message() -> bool:
# get json message: reset, step, close
var message = _get_dict_json_message()
if message["type"] == "close":
print("received close message, closing game")
get_tree().quit()
- get_tree().set_pause(false)
+ get_tree().set_pause(false)
return true
-
+
if message["type"] == "reset":
print("resetting all agents")
- _reset_all_agents()
+ _reset_agents()
just_reset = true
- get_tree().set_pause(false)
+ get_tree().set_pause(false)
#print("resetting forcing draw")
# RenderingServer.force_draw()
# var obs = _get_obs_from_agents()
@@ -263,76 +482,98 @@ func handle_message() -> bool:
# "type": "reset",
# "obs": obs
# }
-# _send_dict_as_json_message(reply)
+# _send_dict_as_json_message(reply)
return true
-
+
if message["type"] == "call":
var method = message["method"]
var returns = _call_method_on_agents(method)
- var reply = {
- "type": "call",
- "returns": returns
- }
+ var reply = {"type": "call", "returns": returns}
print("calling method from Python")
- _send_dict_as_json_message(reply)
+ _send_dict_as_json_message(reply)
return handle_message()
-
+
if message["type"] == "action":
var action = message["action"]
- _set_agent_actions(action)
+ _set_agent_actions(action, agents_training)
need_to_send_obs = true
- get_tree().set_pause(false)
+ get_tree().set_pause(false)
return true
-
+
print("message was not handled")
return false
+
func _call_method_on_agents(method):
var returns = []
- for agent in agents:
+ for agent in all_agents:
returns.append(agent.call(method))
-
+
return returns
-func _reset_agents_if_done():
+func _reset_agents_if_done(agents = all_agents):
for agent in agents:
- if agent.get_done():
+ if agent.get_done():
agent.set_done_false()
-func _reset_all_agents():
+
+func _reset_agents(agents = all_agents):
for agent in agents:
agent.needs_reset = true
- #agent.reset()
+ #agent.reset()
-func _get_obs_from_agents():
+
+func _get_obs_from_agents(agents: Array = all_agents):
var obs = []
for agent in agents:
obs.append(agent.get_obs())
-
return obs
-
-func _get_reward_from_agents():
- var rewards = []
+
+
+func _get_reward_from_agents(agents: Array = agents_training):
+ var rewards = []
for agent in agents:
rewards.append(agent.get_reward())
agent.zero_reward()
- return rewards
-
-func _get_done_from_agents():
- var dones = []
+ return rewards
+
+
+func _get_done_from_agents(agents: Array = agents_training):
+ var dones = []
for agent in agents:
var done = agent.get_done()
- if done: agent.set_done_false()
+ if done:
+ agent.set_done_false()
dones.append(done)
- return dones
-
-func _set_agent_actions(actions):
+ return dones
+
+
+func _set_agent_actions(actions, agents: Array = all_agents):
for i in range(len(actions)):
agents[i].set_action(actions[i])
-
-func clamp_array(arr : Array, min:float, max:float):
- var output : Array = []
+
+
+func clamp_array(arr: Array, min: float, max: float):
+ var output: Array = []
for a in arr:
output.append(clamp(a, min, max))
return output
+
+
+## Save recorded export demos on window exit (Close game window instead of "Stop" button in Godot Editor)
+func _notification(what):
+ if demo_trajectories.size() == 0 or expert_demo_save_path.is_empty():
+ return
+
+ if what == NOTIFICATION_PREDELETE:
+ var json_string = JSON.stringify(demo_trajectories, "", false)
+ var file = FileAccess.open(expert_demo_save_path, FileAccess.WRITE)
+
+ if not file:
+ var error: Error = FileAccess.get_open_error()
+ assert(not error, "There was an error opening the file: %d" % error)
+
+ file.store_line(json_string)
+ var error = file.get_error()
+ assert(not error, "There was an error after trying to write to the file: %d" % error)
diff --git a/examples/VirtualCamera/project.godot b/examples/VirtualCamera/project.godot
index fbcff6e..2c53044 100644
--- a/examples/VirtualCamera/project.godot
+++ b/examples/VirtualCamera/project.godot
@@ -12,7 +12,7 @@ config_version=5
config/name="VirtualCamera"
run/main_scene="res://Env.tscn"
-config/features=PackedStringArray("4.2")
+config/features=PackedStringArray("4.3", "C#")
config/icon="res://icon.png"
[display]
@@ -32,27 +32,27 @@ enabled=PackedStringArray("res://addons/godot_rl_agents/plugin.cfg")
turn_left={
"deadzone": 0.5,
-"events": [Object(InputEventKey,"resource_local_to_scene":false,"resource_name":"","device":-1,"window_id":0,"alt_pressed":false,"shift_pressed":false,"ctrl_pressed":false,"meta_pressed":false,"pressed":false,"keycode":65,"physical_keycode":0,"key_label":0,"unicode":97,"echo":false,"script":null)
+"events": [Object(InputEventKey,"resource_local_to_scene":false,"resource_name":"","device":-1,"window_id":0,"alt_pressed":false,"shift_pressed":false,"ctrl_pressed":false,"meta_pressed":false,"pressed":false,"keycode":65,"physical_keycode":0,"key_label":0,"unicode":97,"location":0,"echo":false,"script":null)
]
}
turn_right={
"deadzone": 0.5,
-"events": [Object(InputEventKey,"resource_local_to_scene":false,"resource_name":"","device":-1,"window_id":0,"alt_pressed":false,"shift_pressed":false,"ctrl_pressed":false,"meta_pressed":false,"pressed":false,"keycode":68,"physical_keycode":0,"key_label":0,"unicode":100,"echo":false,"script":null)
+"events": [Object(InputEventKey,"resource_local_to_scene":false,"resource_name":"","device":-1,"window_id":0,"alt_pressed":false,"shift_pressed":false,"ctrl_pressed":false,"meta_pressed":false,"pressed":false,"keycode":68,"physical_keycode":0,"key_label":0,"unicode":100,"location":0,"echo":false,"script":null)
]
}
move_forwards={
"deadzone": 0.5,
-"events": [Object(InputEventKey,"resource_local_to_scene":false,"resource_name":"","device":-1,"window_id":0,"alt_pressed":false,"shift_pressed":false,"ctrl_pressed":false,"meta_pressed":false,"pressed":false,"keycode":87,"physical_keycode":0,"key_label":0,"unicode":119,"echo":false,"script":null)
+"events": [Object(InputEventKey,"resource_local_to_scene":false,"resource_name":"","device":-1,"window_id":0,"alt_pressed":false,"shift_pressed":false,"ctrl_pressed":false,"meta_pressed":false,"pressed":false,"keycode":87,"physical_keycode":0,"key_label":0,"unicode":119,"location":0,"echo":false,"script":null)
]
}
move_backwards={
"deadzone": 0.5,
-"events": [Object(InputEventKey,"resource_local_to_scene":false,"resource_name":"","device":-1,"window_id":0,"alt_pressed":false,"shift_pressed":false,"ctrl_pressed":false,"meta_pressed":false,"pressed":false,"keycode":83,"physical_keycode":0,"key_label":0,"unicode":115,"echo":false,"script":null)
+"events": [Object(InputEventKey,"resource_local_to_scene":false,"resource_name":"","device":-1,"window_id":0,"alt_pressed":false,"shift_pressed":false,"ctrl_pressed":false,"meta_pressed":false,"pressed":false,"keycode":83,"physical_keycode":0,"key_label":0,"unicode":115,"location":0,"echo":false,"script":null)
]
}
r_key={
"deadzone": 0.5,
-"events": [Object(InputEventKey,"resource_local_to_scene":false,"resource_name":"","device":-1,"window_id":0,"alt_pressed":false,"shift_pressed":false,"ctrl_pressed":false,"meta_pressed":false,"pressed":false,"keycode":82,"physical_keycode":0,"key_label":0,"unicode":114,"echo":false,"script":null)
+"events": [Object(InputEventKey,"resource_local_to_scene":false,"resource_name":"","device":-1,"window_id":0,"alt_pressed":false,"shift_pressed":false,"ctrl_pressed":false,"meta_pressed":false,"pressed":false,"keycode":82,"physical_keycode":0,"key_label":0,"unicode":114,"location":0,"echo":false,"script":null)
]
}