imitation_learning.py (forked from stepjam/RLBench)
import numpy as np

from rlbench.action_modes.action_mode import MoveArmThenGripper
from rlbench.action_modes.arm_action_modes import JointVelocity
from rlbench.action_modes.gripper_action_modes import Discrete
from rlbench.environment import Environment
from rlbench.observation_config import ObservationConfig
from rlbench.tasks import ReachTarget


class ImitationLearning(object):

    def predict_action(self, batch):
        # Placeholder policy: returns a random 7-DoF arm action for each
        # observation in the batch.
        return np.random.uniform(size=(len(batch), 7))

    def behaviour_cloning_loss(self, ground_truth_actions, predicted_actions):
        # Placeholder loss: always returns 1; a real implementation would
        # compare the predicted actions against the demo actions.
        return 1
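

# A minimal sketch of what a real behaviour-cloning loss could look like (an
# assumption for illustration, not part of the original example): mean-squared
# error between the demo joint velocities and the predicted actions. The
# subclass is hypothetical and is not used in the script below.
class MSEImitationLearning(ImitationLearning):

    def behaviour_cloning_loss(self, ground_truth_actions, predicted_actions):
        gt = np.asarray(ground_truth_actions, dtype=np.float32)
        pred = np.asarray(predicted_actions, dtype=np.float32)
        # Mean squared error over the batch and the 7 action dimensions.
        return float(np.mean((gt - pred) ** 2))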


# To use 'saved' demos, set the path below, and set live_demos=False
live_demos = True
DATASET = '' if live_demos else 'PATH/TO/YOUR/DATASET'

obs_config = ObservationConfig()
obs_config.set_all(True)
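# Only the left-shoulder RGB images and joint velocities are used below, so a
# lighter alternative to set_all(True) would be to enable just those fields
# (shown commented out so the example's behaviour is unchanged):
# obs_config.set_all(False)
# obs_config.left_shoulder_camera.rgb = True
# obs_config.joint_velocities = True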

env = Environment(
    action_mode=MoveArmThenGripper(
        arm_action_mode=JointVelocity(), gripper_action_mode=Discrete()),
    dataset_root=DATASET,   # only used when loading saved demos
    obs_config=obs_config,  # use the config created above, not a fresh one
    headless=False)
env.launch()

task = env.get_task(ReachTarget)

il = ImitationLearning()

demos = task.get_demos(2, live_demos=live_demos)  # -> List[List[Observation]]
# Flatten the per-episode lists into one list of Observations.
demos = [obs for demo in demos for obs in demo]

# An example of using the demos to 'train' using behaviour cloning loss.
for i in range(100):
    print("'training' iteration %d" % i)
    # Sample a small batch of observations (the batch size is arbitrary).
    batch = np.random.choice(demos, size=16, replace=False)
    batch_images = [obs.left_shoulder_rgb for obs in batch]
    predicted_actions = il.predict_action(batch_images)
    ground_truth_actions = [obs.joint_velocities for obs in batch]
    loss = il.behaviour_cloning_loss(ground_truth_actions, predicted_actions)
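
# Optional: a short evaluation rollout (not part of the original example),
# showing the RLBench interaction loop: task.reset() starts an episode and
# task.step() returns (Observation, reward, terminate). The 40-step horizon
# and the appended gripper value (1.0 = open) are assumptions for
# illustration; the random policy above is not expected to reach the target.
descriptions, obs = task.reset()
for _ in range(40):
    arm_action = il.predict_action([obs.left_shoulder_rgb])[0]
    # MoveArmThenGripper expects the arm action followed by one gripper value.
    action = np.concatenate([arm_action, [1.0]])
    obs, reward, terminate = task.step(action)
    if terminate:
        break
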
print('Done')
env.shutdown()