From bfaa04125e82769ed5a6a5ce29f0eec872b63e8b Mon Sep 17 00:00:00 2001 From: paynesa Date: Thu, 9 Jul 2020 10:53:23 -0400 Subject: [PATCH 01/28] passing unit tests for big/small/short/tall --- .../learn_imprecise_descriptions_test.py | 150 ++++++++++++++++++ 1 file changed, 150 insertions(+) create mode 100644 tests/learner/learn_imprecise_descriptions_test.py diff --git a/tests/learner/learn_imprecise_descriptions_test.py b/tests/learner/learn_imprecise_descriptions_test.py new file mode 100644 index 000000000..838b36578 --- /dev/null +++ b/tests/learner/learn_imprecise_descriptions_test.py @@ -0,0 +1,150 @@ +from itertools import chain + +import pytest + +from adam.curriculum.curriculum_utils import ( + standard_object, + phase1_instances, + PHASE1_CHOOSER_FACTORY, + PHASE1_TEST_CHOOSER_FACTORY, +) +from adam.curriculum.imprecise_descriptions_curriculum import ( + _big_x_template, + _little_x_template, + _short_x_template, + _tall_x_template, +) +from adam.learner import LearningExample +from adam.learner.verbs import SubsetVerbLearnerNew +from adam.learner.attributes import SubsetAttributeLearner, SubsetAttributeLearnerNew +from adam.learner.integrated_learner import IntegratedTemplateLearner +from adam.learner.language_mode import LanguageMode +from adam.learner.objects import ObjectRecognizerAsTemplateLearner +from adam.ontology.phase1_ontology import GAILA_PHASE_1_ONTOLOGY, INANIMATE +from adam.situation.templates.phase1_templates import sampled +from tests.learner import phase1_language_generator, object_recognizer_factory + + +def subset_attribute_leaner_factory(language_mode: LanguageMode): + return SubsetAttributeLearner( + object_recognizer=object_recognizer_factory(language_mode), + ontology=GAILA_PHASE_1_ONTOLOGY, + language_mode=language_mode, + ) + + +def integrated_learner_factory(language_mode: LanguageMode): + return IntegratedTemplateLearner( + object_learner=ObjectRecognizerAsTemplateLearner( + object_recognizer=object_recognizer_factory(language_mode), + language_mode=language_mode, + ), + attribute_learner=SubsetAttributeLearnerNew( + ontology=GAILA_PHASE_1_ONTOLOGY, beam_size=5, language_mode=language_mode + ), + action_learner=SubsetVerbLearnerNew( + ontology=GAILA_PHASE_1_ONTOLOGY, beam_size=5, language_mode=language_mode + ), + ) + + +def run_imprecise_test(learner, situation_template, language_generator): + train_curriculum = phase1_instances( + "train", + chain( + *[ + sampled( + situation_template, + max_to_sample=10, + ontology=GAILA_PHASE_1_ONTOLOGY, + chooser=PHASE1_CHOOSER_FACTORY(), + ) + ] + ), + language_generator=language_generator, + ) + test_curriculum = phase1_instances( + "test", + chain( + *[ + sampled( + situation_template, + max_to_sample=1, + ontology=GAILA_PHASE_1_ONTOLOGY, + chooser=PHASE1_TEST_CHOOSER_FACTORY(), + ) + ] + ), + language_generator=language_generator, + ) + + for ( + _, + linguistic_description, + perceptual_representation, + ) in train_curriculum.instances(): + # Get the object matches first - preposition learner can't learn without already recognized objects + learner.observe( + LearningExample(perceptual_representation, linguistic_description) + ) + for ( + _, + test_lingustics_description, + test_perceptual_representation, + ) in test_curriculum.instances(): + descriptions_from_learner = learner.describe(test_perceptual_representation) + gold = test_lingustics_description.as_token_sequence() + assert descriptions_from_learner + assert gold in [desc.as_token_sequence() for desc in descriptions_from_learner] + + 
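For orientation, the check at the end of run_imprecise_test above reduces to a comparison of plain token tuples. A minimal standalone sketch of that comparison follows; the example tuples are hypothetical stand-ins borrowed from the English language-generator tests added later in this series, not output from an actual learner run:

    # Hypothetical stand-ins: run_imprecise_test builds these from
    # learner.describe(...) and the gold LinguisticDescription, respectively.
    descriptions_from_learner = [("a", "tall", "book"), ("a", "short", "book")]
    gold = ("a", "tall", "book")
    # Same membership check the helper above performs on token sequences.
    assert gold in descriptions_from_learner
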
+@pytest.mark.parametrize( + "learner", [subset_attribute_leaner_factory, integrated_learner_factory] +) +@pytest.mark.parametrize("language", [LanguageMode.ENGLISH, LanguageMode.CHINESE]) +def test_tall(learner, language): + object_0 = standard_object("object_0", required_properties=[INANIMATE]) + run_imprecise_test( + learner(language), + _tall_x_template(object_0, []), + language_generator=phase1_language_generator(language), + ) + + +@pytest.mark.parametrize( + "learner", [subset_attribute_leaner_factory, integrated_learner_factory] +) +@pytest.mark.parametrize("language", [LanguageMode.ENGLISH, LanguageMode.CHINESE]) +def test_short(learner, language): + object_0 = standard_object("object_0", required_properties=[INANIMATE]) + run_imprecise_test( + learner(language), + _short_x_template(object_0, []), + language_generator=phase1_language_generator(language), + ) + + +@pytest.mark.parametrize( + "learner", [subset_attribute_leaner_factory, integrated_learner_factory] +) +@pytest.mark.parametrize("language", [LanguageMode.ENGLISH, LanguageMode.CHINESE]) +def test_big(learner, language): + object_0 = standard_object("object_0", required_properties=[INANIMATE]) + run_imprecise_test( + learner(language), + _big_x_template(object_0, []), + language_generator=phase1_language_generator(language), + ) + + +@pytest.mark.parametrize( + "learner", [subset_attribute_leaner_factory, integrated_learner_factory] +) +@pytest.mark.parametrize("language", [LanguageMode.ENGLISH, LanguageMode.CHINESE]) +def test_small(learner, language): + object_0 = standard_object("object_0", required_properties=[INANIMATE]) + run_imprecise_test( + learner(language), + _little_x_template(object_0, []), + language_generator=phase1_language_generator(language), + ) From efb84aa8cf4c0d805b1280210322cc9582ca085c Mon Sep 17 00:00:00 2001 From: paynesa Date: Thu, 9 Jul 2020 13:28:20 -0400 Subject: [PATCH 02/28] biggest axis determination --- adam/ontology/phase2_ontology.py | 31 +++++++++++++++++++ .../m9/imprecise_descriptions.params | 0 2 files changed, 31 insertions(+) create mode 100644 parameters/experiments/m9/imprecise_descriptions.params diff --git a/adam/ontology/phase2_ontology.py b/adam/ontology/phase2_ontology.py index 12c58c29a..42949379e 100644 --- a/adam/ontology/phase2_ontology.py +++ b/adam/ontology/phase2_ontology.py @@ -2,6 +2,8 @@ from adam.ontology import OntologyNode, CAN_FILL_TEMPLATE_SLOT from adam.ontology.ontology import Ontology from adam.ontology.phase1_ontology import ( + MUCH_BIGGER_THAN, + MUCH_SMALLER_THAN, _make_cup_schema, _CHAIR_SCHEMA_BACK, _CHAIR_SCHEMA_SQUARE_SEAT, @@ -204,3 +206,32 @@ GAILA_PHASE_1_SIZE_GRADES, relation_type=BIGGER_THAN, opposite_type=SMALLER_THAN ), ) + + +def gravitationally_aligned_axis_is_largest( + ontology_node: OntologyNode, ontology: Ontology +) -> bool: + schemata = list(ontology.structural_schemata(ontology_node)) + if not schemata or len(schemata) != 1: + return False + gravitational = schemata[0].axes.gravitationally_aligned_axis + relations = schemata[0].axes.axis_relations + if not gravitational or not relations: + return False + return ( + any( + r.first_slot == gravitational + and r.relation_type in [BIGGER_THAN, MUCH_BIGGER_THAN] + for r in relations + ) + and not any( + r.first_slot == gravitational + and r.relation_type in [SMALLER_THAN, MUCH_SMALLER_THAN] + for r in relations + ) + and not any( + r.second_slot == gravitational + and r.relation_type in [BIGGER_THAN, MUCH_BIGGER_THAN] + for r in relations + ) + ) diff --git 
a/parameters/experiments/m9/imprecise_descriptions.params b/parameters/experiments/m9/imprecise_descriptions.params new file mode 100644 index 000000000..e69de29bb From 20402a7c9c07fbe7b4ac31c3a5dae27b785f48f1 Mon Sep 17 00:00:00 2001 From: paynesa Date: Thu, 9 Jul 2020 14:40:26 -0400 Subject: [PATCH 03/28] relative size perception --- adam/ontology/phase1_ontology.py | 2 ++ ...n_to_developmental_primitive_perception.py | 31 ++++++++++++++++++- adam/perception/perception_graph.py | 1 - 3 files changed, 32 insertions(+), 2 deletions(-) diff --git a/adam/ontology/phase1_ontology.py b/adam/ontology/phase1_ontology.py index ab4981314..5813b3c8d 100644 --- a/adam/ontology/phase1_ontology.py +++ b/adam/ontology/phase1_ontology.py @@ -642,6 +642,8 @@ def _far_region_factory( subtype(SIZE_RELATION, RELATION) BIGGER_THAN = OntologyNode("biggerThan") + +SAME_TYPE = OntologyNode("same-type") """ A relation indicating that one object is bigger than another object. diff --git a/adam/perception/high_level_semantics_situation_to_developmental_primitive_perception.py b/adam/perception/high_level_semantics_situation_to_developmental_primitive_perception.py index 02bdab1c0..3d12e2826 100644 --- a/adam/perception/high_level_semantics_situation_to_developmental_primitive_perception.py +++ b/adam/perception/high_level_semantics_situation_to_developmental_primitive_perception.py @@ -21,6 +21,7 @@ from adam.ontology.ontology import Ontology from adam.ontology.phase1_ontology import ( ABOUT_THE_SAME_SIZE_AS_LEARNER, + SAME_TYPE, BABY, COLOR, COLORS_TO_RGBS, @@ -242,7 +243,7 @@ def _real_do(self) -> PerceptualRepresentation[DevelopmentalPrimitivePerceptionF # Handle implicit size relations self._perceive_size_relative_to_learner() - # self._perceive_implicit_size() + self._perceive_all_relative_size() # for now, we assume that actions do not alter the relationship of objects axes # to the speaker, learner, and addressee @@ -734,6 +735,34 @@ def dfs_walk(node: ObjectPerception, inherited_color=None): dfs_walk(root) + def _perceive_all_relative_size(self) -> None: + """This method handles perception of relative size of two objects of the same type""" + for ( + perception, + ontology_type, + ) in self._object_perceptions_to_ontology_nodes.items(): + size_relations = immutableset( + relation + for relation in self._situation.ontology.subjects_to_relations[ + ontology_type + ] + if relation.relation_type in SIZE_RELATIONS + ) + if size_relations: + if len(size_relations) > 1: + raise RuntimeError( + f"Expected only one size relations for " + f"{ontology_type} but got {size_relations}" + ) + # only record relative size if the objects are of the same type, and record this as well if they are + if only(size_relations).first_slot == only(size_relations).second_slot: + self._property_assertion_perceptions.append( + HasBinaryProperty(perception, only(size_relations).relation_type) + ) + self._property_assertion_perceptions.append( + HasBinaryProperty(perception, SAME_TYPE) + ) + def _perceive_size_relative_to_learner(self) -> None: """ When doing object recognition, diff --git a/adam/perception/perception_graph.py b/adam/perception/perception_graph.py index 4697c0303..2a6e7d811 100644 --- a/adam/perception/perception_graph.py +++ b/adam/perception/perception_graph.py @@ -2912,7 +2912,6 @@ def _map_relation( ) else: label = relation.relation_type - graph.add_edge( self._map_node(relation.first_slot), self._map_node(relation.second_slot), From 878ad0478325286968be0209b0752573e436654e Mon Sep 17 00:00:00 2001 From: paynesa 
Date: Thu, 9 Jul 2020 14:47:42 -0400 Subject: [PATCH 04/28] fix small error --- ...uation_to_developmental_primitive_perception.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/adam/perception/high_level_semantics_situation_to_developmental_primitive_perception.py b/adam/perception/high_level_semantics_situation_to_developmental_primitive_perception.py index 3d12e2826..a0b25b999 100644 --- a/adam/perception/high_level_semantics_situation_to_developmental_primitive_perception.py +++ b/adam/perception/high_level_semantics_situation_to_developmental_primitive_perception.py @@ -747,6 +747,7 @@ def _perceive_all_relative_size(self) -> None: ontology_type ] if relation.relation_type in SIZE_RELATIONS + and relation.second_slot == relation.first_slot ) if size_relations: if len(size_relations) > 1: @@ -755,13 +756,12 @@ def _perceive_all_relative_size(self) -> None: f"{ontology_type} but got {size_relations}" ) # only record relative size if the objects are of the same type, and record this as well if they are - if only(size_relations).first_slot == only(size_relations).second_slot: - self._property_assertion_perceptions.append( - HasBinaryProperty(perception, only(size_relations).relation_type) - ) - self._property_assertion_perceptions.append( - HasBinaryProperty(perception, SAME_TYPE) - ) + self._property_assertion_perceptions.append( + HasBinaryProperty(perception, only(size_relations).relation_type) + ) + self._property_assertion_perceptions.append( + HasBinaryProperty(perception, SAME_TYPE) + ) def _perceive_size_relative_to_learner(self) -> None: """ From 637432303f0cd157043425c07fdb252c5373da36 Mon Sep 17 00:00:00 2001 From: paynesa Date: Thu, 9 Jul 2020 16:04:30 -0400 Subject: [PATCH 05/28] add relations and attributes to perception and test --- ...n_to_developmental_primitive_perception.py | 53 ++++++++++--------- ...developmental_primitive_perception_test.py | 30 +++++++++++ 2 files changed, 57 insertions(+), 26 deletions(-) diff --git a/adam/perception/high_level_semantics_situation_to_developmental_primitive_perception.py b/adam/perception/high_level_semantics_situation_to_developmental_primitive_perception.py index a0b25b999..6f1e8d563 100644 --- a/adam/perception/high_level_semantics_situation_to_developmental_primitive_perception.py +++ b/adam/perception/high_level_semantics_situation_to_developmental_primitive_perception.py @@ -237,12 +237,12 @@ def _real_do(self) -> PerceptualRepresentation[DevelopmentalPrimitivePerceptionF self._relation_perceptions.extend( self._perceive_relation(relation) for relation in self._situation.always_relations + if relation.relation_type not in SIZE_RELATIONS ) # Once all the objects and relations are perceived, determine their colors. 
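The rewritten _perceive_all_relative_size in the hunks that follow keeps only asserted size relations that hold between two objects of the same ontology type. A rough standalone sketch of that filter, using simplified stand-in types rather than the real adam classes (the relation-type labels below are assumptions for illustration):

    from collections import namedtuple

    # Simplified stand-ins for adam's Relation and SituationObject.
    Relation = namedtuple("Relation", ["relation_type", "first_slot", "second_slot"])
    SituationObj = namedtuple("SituationObj", ["ontology_node"])

    SIZE_RELATIONS = {"biggerThan", "smallerThan"}  # assumed relation-type labels
    always_relations = [
        Relation("biggerThan", SituationObj("truck"), SituationObj("truck")),  # same type: kept
        Relation("biggerThan", SituationObj("truck"), SituationObj("ball")),   # mixed types: dropped
    ]
    size_relations = [
        r
        for r in always_relations
        if r.relation_type in SIZE_RELATIONS
        and r.first_slot.ontology_node == r.second_slot.ontology_node
    ]
    assert len(size_relations) == 1
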
self._perceive_colors() - # Handle implicit size relations - self._perceive_size_relative_to_learner() + # self._perceive_size_relative_to_learner() self._perceive_all_relative_size() # for now, we assume that actions do not alter the relationship of objects axes @@ -737,31 +737,32 @@ def dfs_walk(node: ObjectPerception, inherited_color=None): def _perceive_all_relative_size(self) -> None: """This method handles perception of relative size of two objects of the same type""" - for ( - perception, - ontology_type, - ) in self._object_perceptions_to_ontology_nodes.items(): - size_relations = immutableset( - relation - for relation in self._situation.ontology.subjects_to_relations[ - ontology_type - ] - if relation.relation_type in SIZE_RELATIONS - and relation.second_slot == relation.first_slot + size_relations = [ + relation + for relation in self._situation.always_relations + if relation.relation_type in SIZE_RELATIONS + and ( + isinstance(relation.first_slot, SituationObject) + and isinstance(relation.second_slot, SituationObject) + and relation.first_slot.ontology_node + == relation.second_slot.ontology_node ) - if size_relations: - if len(size_relations) > 1: - raise RuntimeError( - f"Expected only one size relations for " - f"{ontology_type} but got {size_relations}" - ) - # only record relative size if the objects are of the same type, and record this as well if they are - self._property_assertion_perceptions.append( - HasBinaryProperty(perception, only(size_relations).relation_type) - ) - self._property_assertion_perceptions.append( - HasBinaryProperty(perception, SAME_TYPE) - ) + ] + for relation in size_relations: + perception = self._objects_to_perceptions[relation.first_slot] + # only record relative size if the objects are of the same type, and record this as well if they are + self._property_assertion_perceptions.append( + HasBinaryProperty(perception, relation.relation_type) + ) + # add a relation indicating that these are of the same type + same_relation = Relation( + first_slot=self._objects_to_perceptions[relation.first_slot], + second_slot=self._objects_to_perceptions[ # type:ignore + relation.second_slot + ], + relation_type=SAME_TYPE, + ) + self._relation_perceptions.extend([same_relation]) def _perceive_size_relative_to_learner(self) -> None: """ diff --git a/tests/perception/high_level_semantics_situation_to_developmental_primitive_perception_test.py b/tests/perception/high_level_semantics_situation_to_developmental_primitive_perception_test.py index 64eddc9fa..a34c7460f 100644 --- a/tests/perception/high_level_semantics_situation_to_developmental_primitive_perception_test.py +++ b/tests/perception/high_level_semantics_situation_to_developmental_primitive_perception_test.py @@ -5,6 +5,7 @@ from adam.axes import HorizontalAxisOfObject from adam.ontology import IN_REGION, OntologyNode, IS_SPEAKER from adam.ontology.phase1_ontology import ( + SAME_TYPE, AGENT, ANIMATE, BALL, @@ -44,6 +45,7 @@ far, near, on, + bigger_than, ) from adam.ontology.phase1_spatial_relations import ( DISTAL, @@ -78,6 +80,34 @@ ) +def test_big_ball(): + ball1 = situation_object(BALL, debug_handle="ball_0") + ball2 = situation_object(BALL, debug_handle="ball_1") + + ball_situation = HighLevelSemanticsSituation( + ontology=GAILA_PHASE_1_ONTOLOGY, + salient_objects=[ball2, ball1], + always_relations=[bigger_than(ball1, ball2)], + ) + + assert ( + ball_situation.always_relations[0].first_slot.ontology_node + == ball_situation.always_relations[0].second_slot.ontology_node + ) + + ball_perception = 
_PERCEPTION_GENERATOR.generate_perception( + ball_situation, chooser=RandomChooser.for_seed(0) + ) + + perceived_objects = ball_perception.frames[0].perceived_objects + object_handles = set(obj.debug_handle for obj in perceived_objects) + assert object_handles == {"ball_0", "ball_1", "the ground"} + assert any( + relation.relation_type == SAME_TYPE + for relation in ball_perception.frames[0].relations + ) + + def test_person_and_ball(): person = situation_object(PERSON) ball = situation_object(BALL) From 812d31de17dd801245f75d24ed2929a2875b7518 Mon Sep 17 00:00:00 2001 From: paynesa Date: Thu, 9 Jul 2020 16:10:35 -0400 Subject: [PATCH 06/28] update relation learning --- ..._semantics_situation_to_developmental_primitive_perception.py | 1 - 1 file changed, 1 deletion(-) diff --git a/adam/perception/high_level_semantics_situation_to_developmental_primitive_perception.py b/adam/perception/high_level_semantics_situation_to_developmental_primitive_perception.py index 6f1e8d563..2f41d4942 100644 --- a/adam/perception/high_level_semantics_situation_to_developmental_primitive_perception.py +++ b/adam/perception/high_level_semantics_situation_to_developmental_primitive_perception.py @@ -237,7 +237,6 @@ def _real_do(self) -> PerceptualRepresentation[DevelopmentalPrimitivePerceptionF self._relation_perceptions.extend( self._perceive_relation(relation) for relation in self._situation.always_relations - if relation.relation_type not in SIZE_RELATIONS ) # Once all the objects and relations are perceived, determine their colors. self._perceive_colors() From c7b2874700b39603c497124cab4243d7956bfc5d Mon Sep 17 00:00:00 2001 From: paynesa Date: Thu, 9 Jul 2020 16:53:31 -0400 Subject: [PATCH 07/28] chinese language generator is updated --- .../chinese/chinese_language_generator.py | 40 +++++++++---- .../test_chinese_language_generator.py | 56 ++++++++++--------- 2 files changed, 61 insertions(+), 35 deletions(-) diff --git a/adam/language_specific/chinese/chinese_language_generator.py b/adam/language_specific/chinese/chinese_language_generator.py index 1c5003aea..68af8b67a 100644 --- a/adam/language_specific/chinese/chinese_language_generator.py +++ b/adam/language_specific/chinese/chinese_language_generator.py @@ -9,6 +9,7 @@ from adam.language_specific.chinese.chinese_phase_2_lexicon import ( GAILA_PHASE_2_CHINESE_LEXICON, ) +from adam.ontology.phase2_ontology import gravitationally_aligned_axis_is_largest from adam.axes import FacingAddresseeAxis, GRAVITATIONAL_DOWN_TO_UP_AXIS from adam.language.dependency import ( DependencyRole, @@ -512,10 +513,15 @@ def _translate_relation_to_action_modifier( if ( relation.first_slot in self.situation.salient_objects and isinstance(relation.second_slot, SituationObject) - and relation.second_slot.ontology_node == LEARNER + and relation.second_slot.ontology_node + not in self.situation.salient_objects + and relation.first_slot.ontology_node + == relation.second_slot.ontology_node ): # tall - if USE_VERTICAL_MODIFIERS in self.situation.syntax_hints: + if gravitationally_aligned_axis_is_largest( + relation.first_slot.ontology_node, self.situation.ontology + ): token = DependencyTreeToken("gau1 da4", ADJECTIVE) # big else: @@ -530,10 +536,15 @@ def _translate_relation_to_action_modifier( if ( relation.first_slot in self.situation.salient_objects and isinstance(relation.second_slot, SituationObject) - and relation.second_slot.ontology_node == LEARNER + and relation.second_slot.ontology_node + not in self.situation.salient_objects + and relation.first_slot.ontology_node + == 
relation.second_slot.ontology_node ): # short - if USE_VERTICAL_MODIFIERS in self.situation.syntax_hints: + if gravitationally_aligned_axis_is_largest( + relation.first_slot.ontology_node, self.situation.ontology + ): token = DependencyTreeToken("dwan3", ADJECTIVE) # small else: @@ -1012,15 +1023,20 @@ def _translate_relation( relation: Relation[SituationObject], ): """Translate relations that the user explicitly calls out, including possession and region""" - if relation.relation_type == BIGGER_THAN: + # big is specified when there's two objects of the same type, and the second isn't salient if ( relation.first_slot in self.situation.salient_objects and isinstance(relation.second_slot, SituationObject) - and relation.second_slot.ontology_node == LEARNER + and relation.second_slot.ontology_node + not in self.situation.salient_objects + and relation.first_slot.ontology_node + == relation.second_slot.ontology_node ): # tall - if USE_VERTICAL_MODIFIERS in self.situation.syntax_hints: + if gravitationally_aligned_axis_is_largest( + relation.first_slot.ontology_node, self.situation.ontology + ): token = DependencyTreeToken("gau1 da4", ADJECTIVE) # big else: @@ -1035,10 +1051,15 @@ def _translate_relation( if ( relation.first_slot in self.situation.salient_objects and isinstance(relation.second_slot, SituationObject) - and relation.second_slot.ontology_node == LEARNER + and relation.second_slot.ontology_node + not in self.situation.salient_objects + and relation.first_slot.ontology_node + == relation.second_slot.ontology_node ): # short - if USE_VERTICAL_MODIFIERS in self.situation.syntax_hints: + if gravitationally_aligned_axis_is_largest( + relation.first_slot.ontology_node, self.situation.ontology + ): token = DependencyTreeToken("dwan3", ADJECTIVE) # small else: @@ -1323,5 +1344,4 @@ def _init_object_counts(self) -> Mapping[OntologyNode, int]: ATTRIBUTES_AS_X_IS_Y = "ATTRIBUTES_AS_X_IS_Y" USE_NEAR = "USE_NEAR" IGNORE_GOAL = "IGNORE_GOAL" -USE_VERTICAL_MODIFIERS = "USE_VERTICAL_MODIFIERS" USE_ABOVE_BELOW = "USE_ABOVE_BELOW" diff --git a/tests/language_specific/chinese/test_chinese_language_generator.py b/tests/language_specific/chinese/test_chinese_language_generator.py index 6fd2d362a..d9d4dc4f1 100644 --- a/tests/language_specific/chinese/test_chinese_language_generator.py +++ b/tests/language_specific/chinese/test_chinese_language_generator.py @@ -10,6 +10,7 @@ AxesInfo, GRAVITATIONAL_AXIS_FUNCTION, ) +from adam.ontology.phase2_ontology import gravitationally_aligned_axis_is_largest from adam.language_specific.chinese.chinese_language_generator import ( PREFER_DITRANSITIVE, SimpleRuleBasedChineseLanguageGenerator, @@ -17,7 +18,6 @@ IGNORE_HAS_AS_VERB, ATTRIBUTES_AS_X_IS_Y, IGNORE_GOAL, - USE_VERTICAL_MODIFIERS, USE_ABOVE_BELOW, USE_NEAR, ) @@ -2507,50 +2507,56 @@ def test_I_walk_out_of_house(): assert generated_tokens(situation) == ("wo3", "bu4 sying2", "chu1", "wu1") -def test_big_truck(): - learner = situation_object(LEARNER) - truck = situation_object(TRUCK) +def test_big_truck_updated(): + truck1 = situation_object(TRUCK, debug_handle="truck1") + truck2 = situation_object(TRUCK, debug_handle="truck2") situation = HighLevelSemanticsSituation( ontology=GAILA_PHASE_1_ONTOLOGY, - salient_objects=[truck], - always_relations=[(bigger_than(truck, learner))], + salient_objects=[truck1], + other_objects=[truck2], + always_relations=[(bigger_than(truck1, truck2))], ) + assert not gravitationally_aligned_axis_is_largest(TRUCK, GAILA_PHASE_1_ONTOLOGY) assert generated_tokens(situation) == ("da4", 
"ka3 che1") -def test_tall_truck(): - learner = situation_object(LEARNER) - truck = situation_object(TRUCK) +def test_tall_book_updated(): + book1 = situation_object(BOOK, debug_handle="book1") + book2 = situation_object(BOOK, debug_handle="book2") situation = HighLevelSemanticsSituation( ontology=GAILA_PHASE_1_ONTOLOGY, - salient_objects=[truck], - always_relations=[(bigger_than(truck, learner))], - syntax_hints=[USE_VERTICAL_MODIFIERS], + salient_objects=[book1], + other_objects=[book2], + always_relations=[(bigger_than(book1, book2))], ) - assert generated_tokens(situation) == ("gau1 da4", "ka3 che1") + assert gravitationally_aligned_axis_is_largest(BOOK, GAILA_PHASE_1_ONTOLOGY) + assert generated_tokens(situation) == ("gau1 da4", "shu1") -def test_small_truck(): - learner = situation_object(LEARNER) - truck = situation_object(TRUCK) +def test_small_truck_updated(): + truck1 = situation_object(TRUCK, debug_handle="truck1") + truck2 = situation_object(TRUCK, debug_handle="truck2") situation = HighLevelSemanticsSituation( ontology=GAILA_PHASE_1_ONTOLOGY, - salient_objects=[truck], - always_relations=[(bigger_than(learner, truck))], + salient_objects=[truck1], + other_objects=[truck2], + always_relations=[(bigger_than(truck2, truck1))], ) + assert not gravitationally_aligned_axis_is_largest(TRUCK, GAILA_PHASE_1_ONTOLOGY) assert generated_tokens(situation) == ("syau3", "ka3 che1") -def test_short_truck(): - learner = situation_object(LEARNER) - truck = situation_object(TRUCK) +def test_short_book_updated(): + book1 = situation_object(BOOK, debug_handle="book1") + book2 = situation_object(BOOK, debug_handle="book2") situation = HighLevelSemanticsSituation( ontology=GAILA_PHASE_1_ONTOLOGY, - salient_objects=[truck], - always_relations=[(bigger_than(learner, truck))], - syntax_hints=[USE_VERTICAL_MODIFIERS], + salient_objects=[book1], + other_objects=[book2], + always_relations=[(bigger_than(book2, book1))], ) - assert generated_tokens(situation) == ("dwan3", "ka3 che1") + assert gravitationally_aligned_axis_is_largest(BOOK, GAILA_PHASE_1_ONTOLOGY) + assert generated_tokens(situation) == ("dwan3", "shu1") # there is no under/below distinction in Chinese From ce06d679482b574af2e3f0cc88045d10c524e169 Mon Sep 17 00:00:00 2001 From: paynesa Date: Fri, 10 Jul 2020 12:51:21 -0400 Subject: [PATCH 08/28] update complete; todo = test learning --- .../imprecise_descriptions_curriculum.py | 183 +++++++++++------- .../english/english_language_generator.py | 21 +- adam/situation/templates/phase1_templates.py | 6 +- .../m9/imprecise_descriptions.params | 0 tests/curriculum/phase1_curriculum_test.py | 2 +- .../test_english_language_generator.py | 71 ++++--- 6 files changed, 175 insertions(+), 108 deletions(-) delete mode 100644 parameters/experiments/m9/imprecise_descriptions.params diff --git a/adam/curriculum/imprecise_descriptions_curriculum.py b/adam/curriculum/imprecise_descriptions_curriculum.py index 0518070f6..ab047814c 100644 --- a/adam/curriculum/imprecise_descriptions_curriculum.py +++ b/adam/curriculum/imprecise_descriptions_curriculum.py @@ -4,6 +4,7 @@ from more_itertools import flatten from adam.language.language_generator import LanguageGenerator from adam.language.dependency import LinearizedDependencyTree +from adam.ontology import OntologyNode from adam.curriculum.curriculum_utils import ( Phase1InstanceGroup, PHASE1_CHOOSER_FACTORY, @@ -11,6 +12,9 @@ standard_object, learner_template_factory, ) +from adam.language_specific import MASS_NOUN +from 
adam.language.dependency.universal_dependencies import NOUN +from adam.ontology.phase2_ontology import gravitationally_aligned_axis_is_largest from adam.curriculum.phase1_curriculum import ( make_pass_template, throw_on_ground_template, @@ -32,7 +36,6 @@ ) from adam.language_specific.english.english_language_generator import ( USE_ADVERBIAL_PATH_MODIFIER, - USE_VERTICAL_MODIFIERS, ) from adam.language_specific.english.english_language_generator import ( GAILA_PHASE_1_LANGUAGE_GENERATOR, @@ -69,6 +72,8 @@ TABLE, THEME, SPIN, + HEAD, + HAND, ) from adam.situation import Action, SituationObject from adam.situation.high_level_semantics_situation import HighLevelSemanticsSituation @@ -77,24 +82,29 @@ TemplateObjectVariable, Phase1SituationTemplate, ) +from adam.language_specific.english.english_phase_1_lexicon import ( + GAILA_PHASE_1_ENGLISH_LEXICON, +) BOOL_SET = immutableset([True, False]) - -# TODO: See https://github.com/isi-vista/adam/issues/742 - - -def _big_x_template( - theme: TemplateObjectVariable, background: Iterable[TemplateObjectVariable] -) -> Phase1SituationTemplate: - learner = learner_template_factory() - computed_background = [learner] - computed_background.extend(background) - return Phase1SituationTemplate( - f"big-{theme.handle}", - salient_object_variables=[theme], - background_object_variables=computed_background, - asserted_always_relations=[bigger_than(theme, learner)], - ) +# easy hack to get all nouns that aren't recognized particulars, body parts, or mass nouns -- i.e. the ones that can be big or small +NODES_TO_CHOOSE_FROM = [ + x[0] + for x in GAILA_PHASE_1_ENGLISH_LEXICON._ontology_node_to_word.items() # pylint:disable=protected-access + if x[1].part_of_speech in [NOUN] + and MASS_NOUN not in x[1].properties + and x[0] not in [BABY, HEAD, HAND] +] +# differentiate between the nodes that can be modified with tall and those that can't +TALL_ELIGIBLE_NODES = [ + node + for node in NODES_TO_CHOOSE_FROM + if gravitationally_aligned_axis_is_largest(node, GAILA_PHASE_1_ONTOLOGY) +] +BIG_ELIGIBLE_NODES = [ + node for node in NODES_TO_CHOOSE_FROM if node not in TALL_ELIGIBLE_NODES +] +CHOOSER = PHASE1_CHOOSER_FACTORY() def make_eat_big_small_curriculum( @@ -121,7 +131,12 @@ def make_eat_big_small_curriculum( for _object in [COOKIE, WATERMELON]: object_to_eat = SituationObject.instantiate_ontology_node( ontology_node=_object, - debug_handle=_object.handle, + debug_handle=_object.handle + "_salient", + ontology=GAILA_PHASE_1_ONTOLOGY, + ) + object_to_eat2 = SituationObject.instantiate_ontology_node( + ontology_node=_object, + debug_handle=_object.handle + "_non_salient", ontology=GAILA_PHASE_1_ONTOLOGY, ) other_edibles = [ @@ -134,15 +149,16 @@ def make_eat_big_small_curriculum( ] computed_background = [learner] computed_background.extend(other_edibles) + computed_background.extend([object_to_eat2]) # Big for relation_list in [ [ - bigger_than(object_to_eat, learner), + bigger_than(object_to_eat, object_to_eat2), bigger_than(object_to_eat, other_edibles), ], [ - bigger_than(learner, object_to_eat), + bigger_than(object_to_eat2, object_to_eat), bigger_than(other_edibles, object_to_eat), ], ]: @@ -169,53 +185,71 @@ def make_eat_big_small_curriculum( ) -def _little_x_template( - theme: TemplateObjectVariable, background: Iterable[TemplateObjectVariable] +def _tall_x_template( + background: Iterable[TemplateObjectVariable], + random_node: OntologyNode = CHOOSER.choice(TALL_ELIGIBLE_NODES), ) -> Phase1SituationTemplate: - learner = learner_template_factory() - 
computed_background = [learner] + # hack to pick a random node that will yield "tall" + theme1 = standard_object("theme1", random_node) + theme2 = standard_object("theme2", random_node) + computed_background = [theme2] computed_background.extend(background) return Phase1SituationTemplate( - f"small-{theme.handle}", - salient_object_variables=[theme], + f"tall-{theme1.handle}", + salient_object_variables=[theme1], background_object_variables=computed_background, - asserted_always_relations=[bigger_than(learner, theme)], + asserted_always_relations=[bigger_than(theme1, theme2)], ) -def _tall_x_template( - theme: TemplateObjectVariable, background: Iterable[TemplateObjectVariable] +def _big_x_template( + background: Iterable[TemplateObjectVariable], + random_node: OntologyNode = CHOOSER.choice(BIG_ELIGIBLE_NODES), ) -> Phase1SituationTemplate: - learner = learner_template_factory() - computed_background = [learner] + # hack to pick a random node that will yield "big" + theme1 = standard_object("theme1", random_node) + theme2 = standard_object("theme2", random_node) + computed_background = [theme2, learner_template_factory()] computed_background.extend(background) + return Phase1SituationTemplate( + f"big-{theme1.handle}", + salient_object_variables=[theme1], + background_object_variables=computed_background, + asserted_always_relations=[bigger_than(theme1, theme2)], + ) - # TODO: This difference should be an axis size but we can't yet - # implement that. See: https://github.com/isi-vista/adam/issues/832 + +def _little_x_template( + background: Iterable[TemplateObjectVariable], + random_node: OntologyNode = CHOOSER.choice(BIG_ELIGIBLE_NODES), +) -> Phase1SituationTemplate: + # hack to pick a random node that will yield "little" + theme1 = standard_object("theme1", random_node) + theme2 = standard_object("theme2", random_node) + computed_background = [theme2] + computed_background.extend(background) return Phase1SituationTemplate( - f"tall-{theme.handle}", - salient_object_variables=[theme], - background_object_variables=background, - asserted_always_relations=[bigger_than(theme, learner)], - syntax_hints=[USE_VERTICAL_MODIFIERS], + f"little-{theme1.handle}", + salient_object_variables=[theme1], + background_object_variables=computed_background, + asserted_always_relations=[bigger_than(theme2, theme1)], ) def _short_x_template( - theme: TemplateObjectVariable, background: Iterable[TemplateObjectVariable] + background: Iterable[TemplateObjectVariable], + random_node: OntologyNode = CHOOSER.choice(TALL_ELIGIBLE_NODES), ) -> Phase1SituationTemplate: - learner = learner_template_factory() - computed_background = [learner] + # hack to pick a random node that will yield "short" + theme1 = standard_object("theme1", random_node) + theme2 = standard_object("theme2", random_node) + computed_background = [theme2] computed_background.extend(background) - - # TODO: This difference should be an axis size but we can't yet - # implement that. 
See: https://github.com/isi-vista/adam/issues/832 return Phase1SituationTemplate( - f"tall-{theme.handle}", - salient_object_variables=[theme], - background_object_variables=background, - asserted_always_relations=[bigger_than(learner, theme)], - syntax_hints=[USE_VERTICAL_MODIFIERS], + f"short-{theme1.handle}", + salient_object_variables=[theme1], + background_object_variables=computed_background, + asserted_always_relations=[bigger_than(theme2, theme1)], ) @@ -242,7 +276,12 @@ def make_spin_tall_short_curriculum( for _object in [CHAIR, TABLE]: theme = SituationObject.instantiate_ontology_node( ontology_node=_object, - debug_handle=_object.handle, + debug_handle=_object.handle + "_salient", + ontology=GAILA_PHASE_1_ONTOLOGY, + ) + theme2 = SituationObject.instantiate_ontology_node( + ontology_node=_object, + debug_handle=_object.handle + "_non_salient", ontology=GAILA_PHASE_1_ONTOLOGY, ) other_objs = [ @@ -255,11 +294,12 @@ def make_spin_tall_short_curriculum( ] computed_background = [learner] computed_background.extend(other_objs) + computed_background.extend([theme2]) # Tall and short for relation_list in [ - [bigger_than(learner, theme), bigger_than(other_objs, theme)], - [bigger_than(theme, learner), bigger_than(theme, other_objs)], + [bigger_than(theme2, theme), bigger_than(other_objs, theme)], + [bigger_than(theme, theme2), bigger_than(theme, other_objs)], ]: situations.append( HighLevelSemanticsSituation( @@ -276,7 +316,6 @@ def make_spin_tall_short_curriculum( ) ], always_relations=relation_list, - syntax_hints=[USE_VERTICAL_MODIFIERS], ) ) @@ -296,30 +335,40 @@ def make_imprecise_size_descriptions( background = immutableset( standard_object(f"noise_object_{x}") for x in range(num_noise_objects) ) - - theme_0 = standard_object("theme") - theme_1 = standard_object("theme-thing", THING) - + # we choose random tall and short nodes here + random_tall_nodes = [CHOOSER.choice(TALL_ELIGIBLE_NODES) for i in range(num_samples)] + random_big_nodes = [CHOOSER.choice(BIG_ELIGIBLE_NODES) for i in range(num_samples)] return phase1_instances( "Imprecise Size", chain( flatten( + # generate big and small for all eligible nodes [ sampled( - template(theme, background), + template(random_node=node, background=background), ontology=GAILA_PHASE_1_ONTOLOGY, chooser=PHASE1_CHOOSER_FACTORY(), - max_to_sample=num_samples, + max_to_sample=1, + block_multiple_of_the_same_type=False, ) - for template in [ - _big_x_template, - _little_x_template, - _tall_x_template, - _short_x_template, - ] - for theme in [theme_0, theme_1] + for node in random_big_nodes + for template in [_big_x_template, _little_x_template] ] - ) + ), + flatten( + # generate tall and short for all eligible nodes + [ + sampled( + template(random_node=node, background=background), + ontology=GAILA_PHASE_1_ONTOLOGY, + chooser=PHASE1_CHOOSER_FACTORY(), + max_to_sample=1, + block_multiple_of_the_same_type=False, + ) + for node in random_tall_nodes + for template in [_tall_x_template, _short_x_template] + ] + ), ), language_generator=language_generator, ) diff --git a/adam/language_specific/english/english_language_generator.py b/adam/language_specific/english/english_language_generator.py index e8b5917a8..8df344930 100644 --- a/adam/language_specific/english/english_language_generator.py +++ b/adam/language_specific/english/english_language_generator.py @@ -1,7 +1,7 @@ import collections from itertools import chain from typing import Iterable, List, Mapping, MutableMapping, Optional, Tuple, Union, cast - +from adam.ontology.phase2_ontology 
import gravitationally_aligned_axis_is_largest from attr import Factory, attrib, attrs from attr.validators import instance_of from immutablecollections import ImmutableSet, immutableset, immutablesetmultidict @@ -431,9 +431,14 @@ def _add_attributes( if ( relation.first_slot in self.situation.salient_objects and isinstance(relation.second_slot, SituationObject) - and relation.second_slot.ontology_node == LEARNER + and relation.second_slot.ontology_node + not in self.situation.salient_objects + and relation.first_slot.ontology_node + == relation.second_slot.ontology_node ): - if USE_VERTICAL_MODIFIERS in self.situation.syntax_hints: + if gravitationally_aligned_axis_is_largest( + relation.first_slot.ontology_node, self.situation.ontology + ): token = DependencyTreeToken("tall", ADJECTIVE) else: token = DependencyTreeToken("big", ADJECTIVE) @@ -445,9 +450,14 @@ def _add_attributes( if ( relation.first_slot in self.situation.salient_objects and isinstance(relation.second_slot, SituationObject) - and relation.second_slot.ontology_node == LEARNER + and relation.second_slot.ontology_node + not in self.situation.salient_objects + and relation.first_slot.ontology_node + == relation.second_slot.ontology_node ): - if USE_VERTICAL_MODIFIERS in self.situation.syntax_hints: + if gravitationally_aligned_axis_is_largest( + relation.first_slot.ontology_node, self.situation.ontology + ): token = DependencyTreeToken("short", ADJECTIVE) else: token = DependencyTreeToken("small", ADJECTIVE) @@ -1192,6 +1202,5 @@ def _init_object_counts(self) -> Mapping[OntologyNode, int]: ATTRIBUTES_AS_X_IS_Y = "ATTRIBUTES_AS_X_IS_Y" IGNORE_SIZE_ATTRIBUTE = "IGNORE_SIZE_ATTRIBUTE" IGNORE_GOAL = "IGNORE_GOAL" -USE_VERTICAL_MODIFIERS = "USE_VERTICAL_MODIFIERS" USE_ABOVE_BELOW = "USE_ABOVE_BELOW" USE_NEAR = "USE_NEAR" diff --git a/adam/situation/templates/phase1_templates.py b/adam/situation/templates/phase1_templates.py index e47fc718d..99d8d3e19 100644 --- a/adam/situation/templates/phase1_templates.py +++ b/adam/situation/templates/phase1_templates.py @@ -321,6 +321,7 @@ def sampled( chooser: SequenceChooser, max_to_sample: int, default_addressee_node: OntologyNode = LEARNER, + block_multiple_of_the_same_type: bool = True, ) -> Iterable[HighLevelSemanticsSituation]: """ Gets *max_to_sample* instantiations of *situation_template* with *ontology* @@ -330,7 +331,9 @@ def sampled( take( max_to_sample, _Phase1SituationTemplateGenerator( - ontology=ontology, variable_assigner=_SamplingVariableAssigner() + ontology=ontology, + variable_assigner=_SamplingVariableAssigner(), + block_multiple_objects_of_the_same_type=block_multiple_of_the_same_type, ).generate_situations( situation_template, chooser=chooser, @@ -424,7 +427,6 @@ def generate_situations( object_var_to_instantiations ): continue - if self.block_multiple_objects_of_the_same_type: object_instantiations_ontology_nodes = [ object_instantiation.ontology_node diff --git a/parameters/experiments/m9/imprecise_descriptions.params b/parameters/experiments/m9/imprecise_descriptions.params deleted file mode 100644 index e69de29bb..000000000 diff --git a/tests/curriculum/phase1_curriculum_test.py b/tests/curriculum/phase1_curriculum_test.py index 9801ac0c9..2c29b7d90 100644 --- a/tests/curriculum/phase1_curriculum_test.py +++ b/tests/curriculum/phase1_curriculum_test.py @@ -50,7 +50,7 @@ def curriculum_test(curriculum: Phase1InstanceGroup) -> None: # we don't need to do anything # the curriculum may be dynamically generated # so we just want to test we can instantiate it - pass + 
print(_) @pytest.mark.parametrize( diff --git a/tests/language_specific/english/test_english_language_generator.py b/tests/language_specific/english/test_english_language_generator.py index 08af2cd01..07fe11169 100644 --- a/tests/language_specific/english/test_english_language_generator.py +++ b/tests/language_specific/english/test_english_language_generator.py @@ -2,7 +2,7 @@ import pytest from more_itertools import only - +from adam.ontology.phase2_ontology import gravitationally_aligned_axis_is_largest from adam.axes import HorizontalAxisOfObject, FacingAddresseeAxis, AxesInfo from adam.language_specific.english.english_language_generator import ( PREFER_DITRANSITIVE, @@ -12,7 +12,6 @@ IGNORE_COLORS, USE_ABOVE_BELOW, USE_NEAR, - USE_VERTICAL_MODIFIERS, ) from adam.language_specific.english.english_phase_1_lexicon import ( GAILA_PHASE_1_ENGLISH_LEXICON, @@ -21,7 +20,9 @@ from adam.ontology.during import DuringAction from adam.ontology.phase1_ontology import ( AGENT, + BOOK, BABY, + TRUCK, BALL, BIRD, BOX, @@ -1639,50 +1640,56 @@ def test_box_without_attribute(): generated_tokens(box_without_attribute) -def test_bigger_than(): - box = situation_object(BOX) - learner = situation_object(LEARNER) - big_box = HighLevelSemanticsSituation( +def test_big_truck_updated(): + truck1 = situation_object(TRUCK, debug_handle="truck1") + truck2 = situation_object(TRUCK, debug_handle="truck2") + situation = HighLevelSemanticsSituation( ontology=GAILA_PHASE_1_ONTOLOGY, - salient_objects=[box, learner], - always_relations=[bigger_than(box, learner)], + salient_objects=[truck1], + other_objects=[truck2], + always_relations=[(bigger_than(truck1, truck2))], ) - assert generated_tokens(situation=big_box) == ("a", "big", "box") + assert not gravitationally_aligned_axis_is_largest(TRUCK, GAILA_PHASE_1_ONTOLOGY) + assert generated_tokens(situation) == ("a", "big", "truck") -def test_taller_than(): - box = situation_object(BOX) - learner = situation_object(LEARNER) - big_box = HighLevelSemanticsSituation( +def test_tall_book_updated(): + book1 = situation_object(BOOK, debug_handle="book1") + book2 = situation_object(BOOK, debug_handle="book2") + situation = HighLevelSemanticsSituation( ontology=GAILA_PHASE_1_ONTOLOGY, - salient_objects=[box, learner], - always_relations=[bigger_than(box, learner)], - syntax_hints=[USE_VERTICAL_MODIFIERS], + salient_objects=[book1], + other_objects=[book2], + always_relations=[(bigger_than(book1, book2))], ) - assert generated_tokens(situation=big_box) == ("a", "tall", "box") + assert gravitationally_aligned_axis_is_largest(BOOK, GAILA_PHASE_1_ONTOLOGY) + assert generated_tokens(situation) == ("a", "tall", "book") -def test_shorter_than(): - box = situation_object(BOX) - learner = situation_object(LEARNER) - big_box = HighLevelSemanticsSituation( +def test_short_book_updated(): + book1 = situation_object(BOOK, debug_handle="book1") + book2 = situation_object(BOOK, debug_handle="book2") + situation = HighLevelSemanticsSituation( ontology=GAILA_PHASE_1_ONTOLOGY, - salient_objects=[box, learner], - always_relations=[bigger_than(learner, box)], - syntax_hints=[USE_VERTICAL_MODIFIERS], + salient_objects=[book1], + other_objects=[book2], + always_relations=[(bigger_than(book2, book1))], ) - assert generated_tokens(situation=big_box) == ("a", "short", "box") + assert gravitationally_aligned_axis_is_largest(BOOK, GAILA_PHASE_1_ONTOLOGY) + assert generated_tokens(situation) == ("a", "short", "book") -def test_smaller_than(): - box = situation_object(BOX) - learner = 
situation_object(LEARNER) - big_box = HighLevelSemanticsSituation( +def test_small_truck_updated(): + truck1 = situation_object(TRUCK, debug_handle="truck1") + truck2 = situation_object(TRUCK, debug_handle="truck2") + situation = HighLevelSemanticsSituation( ontology=GAILA_PHASE_1_ONTOLOGY, - salient_objects=[box, learner], - always_relations=[bigger_than(learner, box)], + salient_objects=[truck1], + other_objects=[truck2], + always_relations=[(bigger_than(truck2, truck1))], ) - assert generated_tokens(situation=big_box) == ("a", "small", "box") + assert not gravitationally_aligned_axis_is_largest(TRUCK, GAILA_PHASE_1_ONTOLOGY) + assert generated_tokens(situation) == ("a", "small", "truck") def test_run(): From 0f7683a740574499cee9c38f61030cda25b2b4d8 Mon Sep 17 00:00:00 2001 From: paynesa Date: Fri, 10 Jul 2020 13:36:43 -0400 Subject: [PATCH 09/28] fix learning tests --- adam/learner/__init__.py | 8 +++++-- adam/learner/attributes.py | 6 +++-- adam/learner/integrated_learner.py | 4 +++- adam/learner/learner_utils.py | 3 ++- adam/learner/object_recognizer.py | 9 ++++++-- adam/learner/objects.py | 2 +- adam/learner/prepositions.py | 2 +- adam/learner/template_learner.py | 15 +++++++++---- adam/learner/verbs.py | 2 +- .../learn_imprecise_descriptions_test.py | 22 +++++++++---------- 10 files changed, 47 insertions(+), 26 deletions(-) diff --git a/adam/learner/__init__.py b/adam/learner/__init__.py index 209a582ea..aa2594884 100644 --- a/adam/learner/__init__.py +++ b/adam/learner/__init__.py @@ -94,7 +94,9 @@ def observe( @abstractmethod def describe( - self, perception: PerceptualRepresentation[PerceptionT] + self, + perception: PerceptualRepresentation[PerceptionT], + allow_undescribed: bool = False, ) -> Mapping[LinguisticDescription, float]: r""" Given a `PerceptualRepresentation` of a situation, produce one or more @@ -142,7 +144,9 @@ def observe( ] = learning_example.linguistic_description def describe( - self, perception: PerceptualRepresentation[PerceptionT] + self, + perception: PerceptualRepresentation[PerceptionT], + allow_undescribed: bool = False, # pylint:disable=unused-argument ) -> Mapping[LinguisticDescription, float]: memorized_description = self._memorized_situations.get(perception) if memorized_description: diff --git a/adam/learner/attributes.py b/adam/learner/attributes.py index dadc73ce2..9889d11da 100644 --- a/adam/learner/attributes.py +++ b/adam/learner/attributes.py @@ -154,9 +154,11 @@ def _preprocess_scene_for_learning( return post_recognition_object_perception_alignment def _preprocess_scene_for_description( - self, perception_graph: PerceptionGraph + self, perception_graph: PerceptionGraph, allow_undescribed: bool = False ) -> PerceptionGraphFromObjectRecognizer: - return self._object_recognizer.match_objects_old(perception_graph) + return self._object_recognizer.match_objects_old( + perception_graph, allow_undescribed=allow_undescribed + ) def _extract_surface_template( self, diff --git a/adam/learner/integrated_learner.py b/adam/learner/integrated_learner.py index 537cb0210..4476cde52 100644 --- a/adam/learner/integrated_learner.py +++ b/adam/learner/integrated_learner.py @@ -132,7 +132,9 @@ def observe( self.action_learner.learn_from(current_learner_state) def describe( - self, perception: PerceptualRepresentation[DevelopmentalPrimitivePerceptionFrame] + self, + perception: PerceptualRepresentation[DevelopmentalPrimitivePerceptionFrame], + allow_undescribed: bool = False, # pylint: disable=unused-argument ) -> Mapping[LinguisticDescription, float]: 
perception_graph = self._extract_perception_graph(perception) diff --git a/adam/learner/learner_utils.py b/adam/learner/learner_utils.py index 89538d0fb..18ce076a8 100644 --- a/adam/learner/learner_utils.py +++ b/adam/learner/learner_utils.py @@ -64,6 +64,7 @@ def pattern_match_to_description( pattern: PerceptionGraphTemplate, match: PerceptionGraphPatternMatch, matched_objects_to_names: Mapping[ObjectSemanticNode, Tuple[str, ...]], + allow_undescribed: bool = False, ) -> TokenSequenceLinguisticDescription: """ Given a `SurfaceTemplate`, will fill it in using a *match* for a *pattern*. @@ -78,7 +79,7 @@ def pattern_match_to_description( matched_object_nodes_without_names = matched_object_nodes - immutableset( matched_objects_to_names.keys() ) - if matched_object_nodes_without_names: + if matched_object_nodes_without_names and not allow_undescribed: raise RuntimeError( f"The following matched object nodes lack descriptions: " f"{matched_object_nodes_without_names}" diff --git a/adam/learner/object_recognizer.py b/adam/learner/object_recognizer.py index e2a5eeb1d..47112771f 100644 --- a/adam/learner/object_recognizer.py +++ b/adam/learner/object_recognizer.py @@ -81,6 +81,7 @@ class PerceptionGraphFromObjectRecognizer: description_to_matched_object_node: ImmutableDict[ Tuple[str, ...], ObjectSemanticNode ] = attrib(converter=_to_immutabledict) + allow_undescribed: bool = attrib(default=False) def __attrs_post_init__(self) -> None: matched_object_nodes = set( @@ -91,7 +92,10 @@ def __attrs_post_init__(self) -> None: described_matched_object_nodes = set( self.description_to_matched_object_node.values() ) - if matched_object_nodes != described_matched_object_nodes: + if ( + not self.allow_undescribed + and matched_object_nodes != described_matched_object_nodes + ): raise RuntimeError( f"A matched object node should be present in the graph" f"if and only if it is described. 
Got matches objects " @@ -217,7 +221,7 @@ def for_ontology_types( ) def match_objects_old( - self, perception_graph: PerceptionGraph + self, perception_graph: PerceptionGraph, allow_undescribed: bool = False ) -> PerceptionGraphFromObjectRecognizer: new_style_input = PerceptionSemanticAlignment( perception_graph=perception_graph, semantic_nodes=[] @@ -226,6 +230,7 @@ def match_objects_old( return PerceptionGraphFromObjectRecognizer( perception_graph=new_style_output[0].perception_graph, description_to_matched_object_node=new_style_output[1], + allow_undescribed=allow_undescribed, ) def match_objects( diff --git a/adam/learner/objects.py b/adam/learner/objects.py index f19b6bdaa..7ff531b67 100644 --- a/adam/learner/objects.py +++ b/adam/learner/objects.py @@ -133,7 +133,7 @@ def _preprocess_scene_for_learning( ) def _preprocess_scene_for_description( - self, perception_graph: PerceptionGraph + self, perception_graph: PerceptionGraph, allow_undescribed: bool = False ) -> PerceptionGraphFromObjectRecognizer: return PerceptionGraphFromObjectRecognizer( self._common_preprocessing(perception_graph), diff --git a/adam/learner/prepositions.py b/adam/learner/prepositions.py index a59c458de..7a36a928e 100644 --- a/adam/learner/prepositions.py +++ b/adam/learner/prepositions.py @@ -74,7 +74,7 @@ def _preprocess_scene_for_learning( return post_recognition_object_perception_alignment def _preprocess_scene_for_description( - self, perception_graph: PerceptionGraph + self, perception_graph: PerceptionGraph, allow_undescribed: bool = False ) -> PerceptionGraphFromObjectRecognizer: return self._object_recognizer.match_objects_old(perception_graph) diff --git a/adam/learner/template_learner.py b/adam/learner/template_learner.py index 545165f5b..14a61c459 100644 --- a/adam/learner/template_learner.py +++ b/adam/learner/template_learner.py @@ -87,13 +87,15 @@ def observe( self._learning_step(preprocessed_input, surface_template) def describe( - self, perception: PerceptualRepresentation[DevelopmentalPrimitivePerceptionFrame] + self, + perception: PerceptualRepresentation[DevelopmentalPrimitivePerceptionFrame], + allow_undescribed: bool = False, ) -> Mapping[LinguisticDescription, float]: self._assert_valid_input(perception) original_perception_graph = self._extract_perception_graph(perception) preprocessing_result = self._preprocess_scene_for_description( - original_perception_graph + original_perception_graph, allow_undescribed=allow_undescribed ) preprocessed_perception_graph = preprocessing_result.perception_graph matched_objects_to_names = ( @@ -112,6 +114,7 @@ def match_template( description_template: SurfaceTemplate, pattern: PerceptionGraphTemplate, score: float, + allow_undescribed: bool = False, ) -> None: # try to see if (our model of) its semantics is present in the situation. 
matcher = pattern.graph_pattern.matcher( @@ -128,6 +131,7 @@ def match_template( pattern=pattern, match=match, matched_objects_to_names=matched_objects_to_names, + allow_undescribed=allow_undescribed, ), pattern, score, @@ -139,7 +143,10 @@ def match_template( # For each template whose semantics we are certain of (=have been added to the lexicon) for (surface_template, graph_pattern, score) in self._primary_templates(): match_template( - description_template=surface_template, pattern=graph_pattern, score=score + description_template=surface_template, + pattern=graph_pattern, + score=score, + allow_undescribed=allow_undescribed, ) if not match_to_score: @@ -190,7 +197,7 @@ def _preprocess_scene_for_learning( @abstractmethod def _preprocess_scene_for_description( - self, perception_graph: PerceptionGraph + self, perception_graph: PerceptionGraph, allow_undescribed: bool = False ) -> PerceptionGraphFromObjectRecognizer: """ Does any preprocessing necessary before attempting to describe a scene. diff --git a/adam/learner/verbs.py b/adam/learner/verbs.py index a0cbcb659..072806767 100644 --- a/adam/learner/verbs.py +++ b/adam/learner/verbs.py @@ -148,7 +148,7 @@ def _preprocess_scene_for_learning( return post_recognition_object_perception_alignment def _preprocess_scene_for_description( - self, perception_graph: PerceptionGraph + self, perception_graph: PerceptionGraph, allow_undescribed: bool = False ) -> PerceptionGraphFromObjectRecognizer: return self._object_recognizer.match_objects_old(perception_graph) diff --git a/tests/learner/learn_imprecise_descriptions_test.py b/tests/learner/learn_imprecise_descriptions_test.py index 838b36578..98891d80a 100644 --- a/tests/learner/learn_imprecise_descriptions_test.py +++ b/tests/learner/learn_imprecise_descriptions_test.py @@ -3,7 +3,6 @@ import pytest from adam.curriculum.curriculum_utils import ( - standard_object, phase1_instances, PHASE1_CHOOSER_FACTORY, PHASE1_TEST_CHOOSER_FACTORY, @@ -20,7 +19,7 @@ from adam.learner.integrated_learner import IntegratedTemplateLearner from adam.learner.language_mode import LanguageMode from adam.learner.objects import ObjectRecognizerAsTemplateLearner -from adam.ontology.phase1_ontology import GAILA_PHASE_1_ONTOLOGY, INANIMATE +from adam.ontology.phase1_ontology import GAILA_PHASE_1_ONTOLOGY from adam.situation.templates.phase1_templates import sampled from tests.learner import phase1_language_generator, object_recognizer_factory @@ -58,6 +57,8 @@ def run_imprecise_test(learner, situation_template, language_generator): max_to_sample=10, ontology=GAILA_PHASE_1_ONTOLOGY, chooser=PHASE1_CHOOSER_FACTORY(), + # this is a hack since our current object recognizer will throw a runtime error if there are percieved objects not in the description + block_multiple_of_the_same_type=False, ) ] ), @@ -72,6 +73,7 @@ def run_imprecise_test(learner, situation_template, language_generator): max_to_sample=1, ontology=GAILA_PHASE_1_ONTOLOGY, chooser=PHASE1_TEST_CHOOSER_FACTORY(), + block_multiple_of_the_same_type=False, ) ] ), @@ -92,7 +94,9 @@ def run_imprecise_test(learner, situation_template, language_generator): test_lingustics_description, test_perceptual_representation, ) in test_curriculum.instances(): - descriptions_from_learner = learner.describe(test_perceptual_representation) + descriptions_from_learner = learner.describe( + test_perceptual_representation, allow_undescribed=True + ) gold = test_lingustics_description.as_token_sequence() assert descriptions_from_learner assert gold in [desc.as_token_sequence() for desc in 
descriptions_from_learner] @@ -103,10 +107,9 @@ def run_imprecise_test(learner, situation_template, language_generator): ) @pytest.mark.parametrize("language", [LanguageMode.ENGLISH, LanguageMode.CHINESE]) def test_tall(learner, language): - object_0 = standard_object("object_0", required_properties=[INANIMATE]) run_imprecise_test( learner(language), - _tall_x_template(object_0, []), + _tall_x_template(background=[]), language_generator=phase1_language_generator(language), ) @@ -116,10 +119,9 @@ def test_tall(learner, language): ) @pytest.mark.parametrize("language", [LanguageMode.ENGLISH, LanguageMode.CHINESE]) def test_short(learner, language): - object_0 = standard_object("object_0", required_properties=[INANIMATE]) run_imprecise_test( learner(language), - _short_x_template(object_0, []), + _short_x_template(background=[]), language_generator=phase1_language_generator(language), ) @@ -129,10 +131,9 @@ def test_short(learner, language): ) @pytest.mark.parametrize("language", [LanguageMode.ENGLISH, LanguageMode.CHINESE]) def test_big(learner, language): - object_0 = standard_object("object_0", required_properties=[INANIMATE]) run_imprecise_test( learner(language), - _big_x_template(object_0, []), + _big_x_template(background=[]), language_generator=phase1_language_generator(language), ) @@ -142,9 +143,8 @@ def test_big(learner, language): ) @pytest.mark.parametrize("language", [LanguageMode.ENGLISH, LanguageMode.CHINESE]) def test_small(learner, language): - object_0 = standard_object("object_0", required_properties=[INANIMATE]) run_imprecise_test( learner(language), - _little_x_template(object_0, []), + _little_x_template(background=[]), language_generator=phase1_language_generator(language), ) From ae66a97b2c4b78a93b1a843be0d90e23e8440867 Mon Sep 17 00:00:00 2001 From: paynesa Date: Thu, 16 Jul 2020 18:30:42 -0400 Subject: [PATCH 10/28] rebase --- .../imprecise_descriptions_curriculum.py | 38 +++++++++---------- .../learn_imprecise_descriptions_test.py | 3 +- 2 files changed, 19 insertions(+), 22 deletions(-) diff --git a/adam/curriculum/imprecise_descriptions_curriculum.py b/adam/curriculum/imprecise_descriptions_curriculum.py index c2d119e37..fd94bfd90 100644 --- a/adam/curriculum/imprecise_descriptions_curriculum.py +++ b/adam/curriculum/imprecise_descriptions_curriculum.py @@ -1,5 +1,5 @@ from itertools import chain -from typing import Sequence, Optional +from typing import Sequence, Optional, Iterable from immutablecollections import immutableset from more_itertools import flatten from adam.language.language_generator import LanguageGenerator @@ -13,7 +13,6 @@ learner_template_factory, make_noise_objects, ) - from adam.language_specific import MASS_NOUN from adam.language.dependency.universal_dependencies import NOUN from adam.ontology.phase2_ontology import gravitationally_aligned_axis_is_largest @@ -108,8 +107,7 @@ CHOOSER = PHASE1_CHOOSER_FACTORY() - -def _big_x_template( +"""def _big_x_template( theme: TemplateObjectVariable, noise_objects: Optional[int] ) -> Phase1SituationTemplate: learner = learner_template_factory() @@ -123,7 +121,6 @@ def _big_x_template( ) - def _little_x_template( theme: TemplateObjectVariable, noise_objects: Optional[int] ) -> Phase1SituationTemplate: @@ -171,7 +168,7 @@ def _short_x_template( background_object_variables=computed_background, asserted_always_relations=[bigger_than(learner, theme)], syntax_hints=[USE_VERTICAL_MODIFIERS], - ) + )""" def make_eat_big_small_curriculum( # pylint: disable=unused-argument @@ -323,9 +320,9 @@ def 
_short_x_template( def make_spin_tall_short_curriculum( -# TODO: Refactor this curriculum -# See: https://github.com/isi-vista/adam/issues/898 -def make_spin_tall_short_curriculum( # pylint: disable=unused-argument + # TODO: Refactor this curriculum + # See: https://github.com/isi-vista/adam/issues/898 + # pylint: disable=unused-argument num_samples: Optional[int], noise_objects: Optional[int], language_generator: LanguageGenerator[ @@ -405,14 +402,19 @@ def make_imprecise_size_descriptions( HighLevelSemanticsSituation, LinearizedDependencyTree ], ) -> Phase1InstanceGroup: - theme_0 = standard_object("theme", banned_properties=[IS_SPEAKER, IS_ADDRESSEE]) - theme_1 = standard_object( - "theme-thing", THING, banned_properties=[IS_SPEAKER, IS_ADDRESSEE] + # we choose random tall and short nodes here + random_tall_nodes = ( + [CHOOSER.choice(TALL_ELIGIBLE_NODES) for i in range(num_samples)] + if num_samples + else [CHOOSER.choice(TALL_ELIGIBLE_NODES) for i in range(5)] + ) + random_big_nodes = ( + [CHOOSER.choice(BIG_ELIGIBLE_NODES) for i in range(num_samples)] + if num_samples + else [CHOOSER.choice(BIG_ELIGIBLE_NODES) for i in range(5)] ) - # we choose random tall and short nodes here - random_tall_nodes = [CHOOSER.choice(TALL_ELIGIBLE_NODES) for i in range(num_samples)] - random_big_nodes = [CHOOSER.choice(BIG_ELIGIBLE_NODES) for i in range(num_samples)] + background = make_noise_objects(noise_objects) return phase1_instances( "Imprecise Size", @@ -421,17 +423,11 @@ def make_imprecise_size_descriptions( # generate big and small for all eligible nodes [ sampled( - template(random_node=node, background=background), ontology=GAILA_PHASE_1_ONTOLOGY, chooser=PHASE1_CHOOSER_FACTORY(), - max_to_sample=1, block_multiple_of_the_same_type=False, - template(theme, noise_objects), - ontology=GAILA_PHASE_1_ONTOLOGY, - chooser=PHASE1_CHOOSER_FACTORY(), max_to_sample=num_samples if num_samples else 5, - ) for node in random_big_nodes for template in [_big_x_template, _little_x_template] diff --git a/tests/learner/learn_imprecise_descriptions_test.py b/tests/learner/learn_imprecise_descriptions_test.py index 98891d80a..7f23ea6b7 100644 --- a/tests/learner/learn_imprecise_descriptions_test.py +++ b/tests/learner/learn_imprecise_descriptions_test.py @@ -7,6 +7,7 @@ PHASE1_CHOOSER_FACTORY, PHASE1_TEST_CHOOSER_FACTORY, ) +from adam.language.language_utils import phase1_language_generator from adam.curriculum.imprecise_descriptions_curriculum import ( _big_x_template, _little_x_template, @@ -21,7 +22,7 @@ from adam.learner.objects import ObjectRecognizerAsTemplateLearner from adam.ontology.phase1_ontology import GAILA_PHASE_1_ONTOLOGY from adam.situation.templates.phase1_templates import sampled -from tests.learner import phase1_language_generator, object_recognizer_factory +from tests.learner import object_recognizer_factory def subset_attribute_leaner_factory(language_mode: LanguageMode): From 6cdeb46e70863e9b5a0d1f23c0ce8e823551c965 Mon Sep 17 00:00:00 2001 From: paynesa Date: Thu, 16 Jul 2020 18:42:21 -0400 Subject: [PATCH 11/28] update comment removal --- .../imprecise_descriptions_curriculum.py | 64 ------------------- 1 file changed, 64 deletions(-) diff --git a/adam/curriculum/imprecise_descriptions_curriculum.py b/adam/curriculum/imprecise_descriptions_curriculum.py index fd94bfd90..a81222c5a 100644 --- a/adam/curriculum/imprecise_descriptions_curriculum.py +++ b/adam/curriculum/imprecise_descriptions_curriculum.py @@ -107,70 +107,6 @@ CHOOSER = PHASE1_CHOOSER_FACTORY() -"""def _big_x_template( - 
theme: TemplateObjectVariable, noise_objects: Optional[int] -) -> Phase1SituationTemplate: - learner = learner_template_factory() - computed_background = [learner] - computed_background.extend(make_noise_objects(noise_objects)) - return Phase1SituationTemplate( - f"big-{theme.handle}", - salient_object_variables=[theme], - background_object_variables=computed_background, - asserted_always_relations=[bigger_than(theme, learner)], - ) - - -def _little_x_template( - theme: TemplateObjectVariable, noise_objects: Optional[int] -) -> Phase1SituationTemplate: - learner = learner_template_factory() - computed_background = [learner] - computed_background.extend(make_noise_objects(noise_objects)) - return Phase1SituationTemplate( - f"small-{theme.handle}", - salient_object_variables=[theme], - background_object_variables=computed_background, - asserted_always_relations=[bigger_than(learner, theme)], - ) - - -def _tall_x_template( - theme: TemplateObjectVariable, noise_objects: Optional[int] -) -> Phase1SituationTemplate: - learner = learner_template_factory() - computed_background = [learner] - computed_background.extend(make_noise_objects(noise_objects)) - - # TODO: This difference should be an axis size but we can't yet - # implement that. See: https://github.com/isi-vista/adam/issues/832 - return Phase1SituationTemplate( - f"tall-{theme.handle}", - salient_object_variables=[theme], - background_object_variables=computed_background, - asserted_always_relations=[bigger_than(theme, learner)], - syntax_hints=[USE_VERTICAL_MODIFIERS], - ) - - -def _short_x_template( - theme: TemplateObjectVariable, noise_objects: Optional[int] -) -> Phase1SituationTemplate: - learner = learner_template_factory() - computed_background = [learner] - computed_background.extend(make_noise_objects(noise_objects)) - - # TODO: This difference should be an axis size but we can't yet - # implement that. See: https://github.com/isi-vista/adam/issues/832 - return Phase1SituationTemplate( - f"tall-{theme.handle}", - salient_object_variables=[theme], - background_object_variables=computed_background, - asserted_always_relations=[bigger_than(learner, theme)], - syntax_hints=[USE_VERTICAL_MODIFIERS], - )""" - - def make_eat_big_small_curriculum( # pylint: disable=unused-argument num_samples: Optional[int], noise_objects: Optional[int], From 773ad286c03b7b06da2fcef6d26fb7569983b35d Mon Sep 17 00:00:00 2001 From: paynesa Date: Fri, 17 Jul 2020 01:19:17 -0400 Subject: [PATCH 12/28] remove unused args --- adam/learner/learner_utils.py | 18 ++---------------- adam/learner/template_learner.py | 8 ++------ tests/learner/subset_verb_learner_test.py | 1 + 3 files changed, 5 insertions(+), 22 deletions(-) diff --git a/adam/learner/learner_utils.py b/adam/learner/learner_utils.py index 5b5bc3717..37cb9991a 100644 --- a/adam/learner/learner_utils.py +++ b/adam/learner/learner_utils.py @@ -64,26 +64,12 @@ def pattern_match_to_description( pattern: PerceptionGraphTemplate, match: PerceptionGraphPatternMatch, matched_objects_to_names: Mapping[ObjectSemanticNode, Tuple[str, ...]], - allow_undescribed: bool = False, ) -> TokenSequenceLinguisticDescription: """ Given a `SurfaceTemplate`, will fill it in using a *match* for a *pattern*. This requires a mapping from matched object nodes in the perception to the strings which should be used to name them. 
""" - matched_object_nodes = immutableset( - perception_node - for perception_node in match.pattern_node_to_matched_graph_node.values() - if isinstance(perception_node, ObjectSemanticNode) - ) - matched_object_nodes_without_names = matched_object_nodes - immutableset( - matched_objects_to_names.keys() - ) - if matched_object_nodes_without_names and not allow_undescribed: - raise RuntimeError( - f"The following matched object nodes lack descriptions: " - f"{matched_object_nodes_without_names}" - ) try: return surface_template.instantiate( @@ -107,8 +93,8 @@ def pattern_match_to_description( and pattern_node in pattern.pattern_node_to_template_variable ) ) - except KeyError: - print("foo") + except KeyError as e: + logging.warning(str(e)) raise diff --git a/adam/learner/template_learner.py b/adam/learner/template_learner.py index 0a03fd036..c98d00c3b 100644 --- a/adam/learner/template_learner.py +++ b/adam/learner/template_learner.py @@ -121,7 +121,6 @@ def match_template( description_template: SurfaceTemplate, pattern: PerceptionGraphTemplate, score: float, - allow_undescribed: bool = False, ) -> None: # try to see if (our model of) its semantics is present in the situation. matcher = pattern.graph_pattern.matcher( @@ -138,7 +137,7 @@ def match_template( pattern=pattern, match=match, matched_objects_to_names=matched_objects_to_names, - allow_undescribed=allow_undescribed, + # allow_undescribed=allow_undescribed, ), pattern, score, @@ -150,10 +149,7 @@ def match_template( # For each template whose semantics we are certain of (=have been added to the lexicon) for (surface_template, graph_pattern, score) in self._primary_templates(): match_template( - description_template=surface_template, - pattern=graph_pattern, - score=score, - allow_undescribed=allow_undescribed, + description_template=surface_template, pattern=graph_pattern, score=score ) if not match_to_score: diff --git a/tests/learner/subset_verb_learner_test.py b/tests/learner/subset_verb_learner_test.py index e98b19290..c4920a499 100644 --- a/tests/learner/subset_verb_learner_test.py +++ b/tests/learner/subset_verb_learner_test.py @@ -135,6 +135,7 @@ def run_verb_test(learner, situation_template, language_generator): ) in test_curriculum.instances(): descriptions_from_learner = learner.describe(test_perceptual_representation) gold = test_lingustics_description.as_token_sequence() + print(gold) assert descriptions_from_learner assert gold in [desc.as_token_sequence() for desc in descriptions_from_learner] From 1c9e8016795e7382bb95dce68e48db8192d58582 Mon Sep 17 00:00:00 2001 From: paynesa Date: Fri, 17 Jul 2020 10:58:56 -0400 Subject: [PATCH 13/28] remove more unused arguments --- adam/learner/__init__.py | 8 ++------ adam/learner/attributes.py | 6 ++---- adam/learner/integrated_learner.py | 4 +--- adam/learner/object_recognizer.py | 11 +++-------- adam/learner/objects.py | 2 +- adam/learner/prepositions.py | 2 +- adam/learner/template_learner.py | 8 +++----- adam/learner/verbs.py | 2 +- tests/learner/learn_imprecise_descriptions_test.py | 4 +--- 9 files changed, 15 insertions(+), 32 deletions(-) diff --git a/adam/learner/__init__.py b/adam/learner/__init__.py index 96dd20a9c..a0c82ca85 100644 --- a/adam/learner/__init__.py +++ b/adam/learner/__init__.py @@ -96,9 +96,7 @@ def observe( @abstractmethod def describe( - self, - perception: PerceptualRepresentation[PerceptionT], - allow_undescribed: bool = False, + self, perception: PerceptualRepresentation[PerceptionT] ) -> Mapping[LinguisticDescription, float]: r""" Given a 
`PerceptualRepresentation` of a situation, produce one or more @@ -148,9 +146,7 @@ def observe( ] = learning_example.linguistic_description def describe( - self, - perception: PerceptualRepresentation[PerceptionT], - allow_undescribed: bool = False, # pylint:disable=unused-argument + self, perception: PerceptualRepresentation[PerceptionT] ) -> Mapping[LinguisticDescription, float]: memorized_description = self._memorized_situations.get(perception) if memorized_description: diff --git a/adam/learner/attributes.py b/adam/learner/attributes.py index 9889d11da..dadc73ce2 100644 --- a/adam/learner/attributes.py +++ b/adam/learner/attributes.py @@ -154,11 +154,9 @@ def _preprocess_scene_for_learning( return post_recognition_object_perception_alignment def _preprocess_scene_for_description( - self, perception_graph: PerceptionGraph, allow_undescribed: bool = False + self, perception_graph: PerceptionGraph ) -> PerceptionGraphFromObjectRecognizer: - return self._object_recognizer.match_objects_old( - perception_graph, allow_undescribed=allow_undescribed - ) + return self._object_recognizer.match_objects_old(perception_graph) def _extract_surface_template( self, diff --git a/adam/learner/integrated_learner.py b/adam/learner/integrated_learner.py index 07ac165fd..c4edbe6f5 100644 --- a/adam/learner/integrated_learner.py +++ b/adam/learner/integrated_learner.py @@ -143,9 +143,7 @@ def observe( self.action_learner.learn_from(current_learner_state) def describe( - self, - perception: PerceptualRepresentation[DevelopmentalPrimitivePerceptionFrame], - allow_undescribed: bool = False, # pylint: disable=unused-argument + self, perception: PerceptualRepresentation[DevelopmentalPrimitivePerceptionFrame] ) -> Mapping[LinguisticDescription, float]: perception_graph = self._extract_perception_graph(perception) diff --git a/adam/learner/object_recognizer.py b/adam/learner/object_recognizer.py index 8cf1e39a7..757abb425 100644 --- a/adam/learner/object_recognizer.py +++ b/adam/learner/object_recognizer.py @@ -84,7 +84,6 @@ class PerceptionGraphFromObjectRecognizer: description_to_matched_object_node: ImmutableDict[ Tuple[str, ...], ObjectSemanticNode ] = attrib(converter=_to_immutabledict) - allow_undescribed: bool = attrib(default=False) def __attrs_post_init__(self) -> None: matched_object_nodes = set( @@ -95,11 +94,8 @@ def __attrs_post_init__(self) -> None: described_matched_object_nodes = set( self.description_to_matched_object_node.values() ) - if ( - not self.allow_undescribed - and matched_object_nodes != described_matched_object_nodes - ): - raise RuntimeError( + if matched_object_nodes != described_matched_object_nodes: + logging.warning( f"A matched object node should be present in the graph" f"if and only if it is described. 
Got matches objects " f"{matched_object_nodes} but those described were {described_matched_object_nodes}" @@ -227,7 +223,7 @@ def for_ontology_types( ) def match_objects_old( - self, perception_graph: PerceptionGraph, allow_undescribed: bool = False + self, perception_graph: PerceptionGraph ) -> PerceptionGraphFromObjectRecognizer: new_style_input = PerceptionSemanticAlignment( perception_graph=perception_graph, semantic_nodes=[] @@ -236,7 +232,6 @@ def match_objects_old( return PerceptionGraphFromObjectRecognizer( perception_graph=new_style_output[0].perception_graph, description_to_matched_object_node=new_style_output[1], - allow_undescribed=allow_undescribed, ) def match_objects( diff --git a/adam/learner/objects.py b/adam/learner/objects.py index 2cb042a7e..37e038d56 100644 --- a/adam/learner/objects.py +++ b/adam/learner/objects.py @@ -137,7 +137,7 @@ def _preprocess_scene_for_learning( ) def _preprocess_scene_for_description( - self, perception_graph: PerceptionGraph, allow_undescribed: bool = False + self, perception_graph: PerceptionGraph ) -> PerceptionGraphFromObjectRecognizer: return PerceptionGraphFromObjectRecognizer( self._common_preprocessing(perception_graph), diff --git a/adam/learner/prepositions.py b/adam/learner/prepositions.py index 7a36a928e..a59c458de 100644 --- a/adam/learner/prepositions.py +++ b/adam/learner/prepositions.py @@ -74,7 +74,7 @@ def _preprocess_scene_for_learning( return post_recognition_object_perception_alignment def _preprocess_scene_for_description( - self, perception_graph: PerceptionGraph, allow_undescribed: bool = False + self, perception_graph: PerceptionGraph ) -> PerceptionGraphFromObjectRecognizer: return self._object_recognizer.match_objects_old(perception_graph) diff --git a/adam/learner/template_learner.py b/adam/learner/template_learner.py index c98d00c3b..7c7d719f7 100644 --- a/adam/learner/template_learner.py +++ b/adam/learner/template_learner.py @@ -94,15 +94,13 @@ def observe( self._learning_step(preprocessed_input, surface_template) def describe( - self, - perception: PerceptualRepresentation[DevelopmentalPrimitivePerceptionFrame], - allow_undescribed: bool = False, + self, perception: PerceptualRepresentation[DevelopmentalPrimitivePerceptionFrame] ) -> Mapping[LinguisticDescription, float]: self._assert_valid_input(perception) original_perception_graph = self._extract_perception_graph(perception) preprocessing_result = self._preprocess_scene_for_description( - original_perception_graph, allow_undescribed=allow_undescribed + original_perception_graph ) preprocessed_perception_graph = preprocessing_result.perception_graph matched_objects_to_names = ( @@ -200,7 +198,7 @@ def _preprocess_scene_for_learning( @abstractmethod def _preprocess_scene_for_description( - self, perception_graph: PerceptionGraph, allow_undescribed: bool = False + self, perception_graph: PerceptionGraph ) -> PerceptionGraphFromObjectRecognizer: """ Does any preprocessing necessary before attempting to describe a scene. 
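The adam/learner/object_recognizer.py hunk above trades the hard RuntimeError for a logged warning whenever the set of matched object nodes and the set of described object nodes disagree. A minimal sketch of that warn-instead-of-raise pattern, using a simplified stand-in class (the class and field names below are illustrative, not the project's):

import logging
from typing import FrozenSet

from attr import attrib, attrs


@attrs
class _RecognizedObjects:
    # Simplified stand-in: which objects were matched vs. which were described.
    matched: FrozenSet[str] = attrib(converter=frozenset)
    described: FrozenSet[str] = attrib(converter=frozenset)

    def __attrs_post_init__(self) -> None:
        if self.matched != self.described:
            # A mismatch previously raised; logging keeps description possible
            # when a scene contains recognized-but-undescribed objects.
            logging.warning(
                "Matched objects %s differ from described objects %s",
                self.matched,
                self.described,
            )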
diff --git a/adam/learner/verbs.py b/adam/learner/verbs.py index 072806767..a0cbcb659 100644 --- a/adam/learner/verbs.py +++ b/adam/learner/verbs.py @@ -148,7 +148,7 @@ def _preprocess_scene_for_learning( return post_recognition_object_perception_alignment def _preprocess_scene_for_description( - self, perception_graph: PerceptionGraph, allow_undescribed: bool = False + self, perception_graph: PerceptionGraph ) -> PerceptionGraphFromObjectRecognizer: return self._object_recognizer.match_objects_old(perception_graph) diff --git a/tests/learner/learn_imprecise_descriptions_test.py b/tests/learner/learn_imprecise_descriptions_test.py index 7f23ea6b7..eba98eeac 100644 --- a/tests/learner/learn_imprecise_descriptions_test.py +++ b/tests/learner/learn_imprecise_descriptions_test.py @@ -95,9 +95,7 @@ def run_imprecise_test(learner, situation_template, language_generator): test_lingustics_description, test_perceptual_representation, ) in test_curriculum.instances(): - descriptions_from_learner = learner.describe( - test_perceptual_representation, allow_undescribed=True - ) + descriptions_from_learner = learner.describe(test_perceptual_representation) gold = test_lingustics_description.as_token_sequence() assert descriptions_from_learner assert gold in [desc.as_token_sequence() for desc in descriptions_from_learner] From cb1447fd15f8b516d16b6bcdca657d323dcf18d2 Mon Sep 17 00:00:00 2001 From: Deniz Beser Date: Thu, 6 Aug 2020 13:36:31 -0400 Subject: [PATCH 14/28] added special edge type --- adam/ontology/phase1_ontology.py | 12 ++++- ...n_to_developmental_primitive_perception.py | 46 ++++++++++--------- 2 files changed, 36 insertions(+), 22 deletions(-) diff --git a/adam/ontology/phase1_ontology.py b/adam/ontology/phase1_ontology.py index 62d07d339..a05c9d48d 100644 --- a/adam/ontology/phase1_ontology.py +++ b/adam/ontology/phase1_ontology.py @@ -654,6 +654,9 @@ def _far_region_factory( subtype(SIZE_RELATION, RELATION) BIGGER_THAN = OntologyNode("biggerThan") +BIGGER_THAN_SAME_TYPE = OntologyNode( + "biggerThanSameType" +) # For relative size between objects of same type SAME_TYPE = OntologyNode("same-type") """ @@ -663,19 +666,26 @@ def _far_region_factory( https://github.com/isi-vista/adam/issues/70 """ subtype(BIGGER_THAN, SIZE_RELATION) +subtype(BIGGER_THAN_SAME_TYPE, SIZE_RELATION) SMALLER_THAN = OntologyNode("smallerThan") +SMALLER_THAN_SAME_TYPE = OntologyNode( + "smallerThanSameType" +) # For relative size between objects of same type """ A relation indicating that one object is smaller than another object. 
This is a placeholder for a more sophisticated representation of size: https://github.com/isi-vista/adam/issues/70 """ -subtype(SMALLER_THAN, SIZE_RELATION) +subtype(SMALLER_THAN_SAME_TYPE, SIZE_RELATION) bigger_than = make_opposite_dsl_relation( # pylint:disable=invalid-name BIGGER_THAN, opposite_type=SMALLER_THAN ) +bigger_than_same = make_opposite_dsl_relation( # pylint:disable=invalid-name + BIGGER_THAN_SAME_TYPE, opposite_type=SMALLER_THAN_SAME_TYPE +) AXIS_RELATION = OntologyNode("axis-relation") subtype(AXIS_RELATION, RELATION) diff --git a/adam/perception/high_level_semantics_situation_to_developmental_primitive_perception.py b/adam/perception/high_level_semantics_situation_to_developmental_primitive_perception.py index 4983182a9..4995f04ef 100644 --- a/adam/perception/high_level_semantics_situation_to_developmental_primitive_perception.py +++ b/adam/perception/high_level_semantics_situation_to_developmental_primitive_perception.py @@ -2,8 +2,20 @@ from itertools import chain from typing import Dict, Iterable, List, Mapping, MutableMapping, Optional, Union, cast +from attr import Factory, attrib, attrs +from attr.validators import deep_mapping, instance_of +from immutablecollections import ( + ImmutableDict, + ImmutableSet, + ImmutableSetMultiDict, + immutabledict, + immutableset, + immutablesetmultidict, +) +from immutablecollections.converter_utils import _to_immutabledict from more_itertools import only, quantify from networkx import DiGraph +from vistautils.preconditions import check_arg from adam.axes import AxesInfo, WORLD_AXES from adam.axis import GeonAxis @@ -21,7 +33,6 @@ from adam.ontology.ontology import Ontology from adam.ontology.phase1_ontology import ( ABOUT_THE_SAME_SIZE_AS_LEARNER, - SAME_TYPE, BABY, COLOR, COLORS_TO_RGBS, @@ -35,6 +46,9 @@ SIZE_RELATIONS, TWO_DIMENSIONAL, on, + BIGGER_THAN_SAME_TYPE, + BIGGER_THAN, + SMALLER_THAN_SAME_TYPE, ) from adam.ontology.phase1_spatial_relations import ( EXTERIOR_BUT_IN_CONTACT, @@ -63,18 +77,6 @@ from adam.relation import Relation from adam.situation import Action, SituationObject, SituationRegion from adam.situation.high_level_semantics_situation import HighLevelSemanticsSituation -from attr import Factory, attrib, attrs -from attr.validators import deep_mapping, instance_of -from immutablecollections import ( - ImmutableDict, - ImmutableSet, - ImmutableSetMultiDict, - immutabledict, - immutableset, - immutablesetmultidict, -) -from immutablecollections.converter_utils import _to_immutabledict -from vistautils.preconditions import check_arg class ColorPerceptionMode(Enum): @@ -748,20 +750,22 @@ def _perceive_all_relative_size(self) -> None: ) ] for relation in size_relations: - perception = self._objects_to_perceptions[relation.first_slot] - # only record relative size if the objects are of the same type, and record this as well if they are - self._property_assertion_perceptions.append( - HasBinaryProperty(perception, relation.relation_type) + # We convert biggerThan to biggerThanSameType to denote it's a special type of relative size between + # objects of same type. 
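            # Equivalently (an illustration only, not this patch's code): for the two
            # size relation types handled here the conversion is a pure relabelling,
            #     {BIGGER_THAN: BIGGER_THAN_SAME_TYPE,
            #      SMALLER_THAN: SMALLER_THAN_SAME_TYPE}[relation.relation_type]
            # Only the relation type changes; the slots are mapped to their perceptions
            # and the negated flag is preserved.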
+ new_relation_type = ( + BIGGER_THAN_SAME_TYPE + if relation.relation_type == BIGGER_THAN + else SMALLER_THAN_SAME_TYPE ) - # add a relation indicating that these are of the same type - same_relation = Relation( + size_relation_same_type = Relation( + relation_type=new_relation_type, first_slot=self._objects_to_perceptions[relation.first_slot], second_slot=self._objects_to_perceptions[ # type:ignore relation.second_slot ], - relation_type=SAME_TYPE, + negated=relation.negated, ) - self._relation_perceptions.extend([same_relation]) + self._relation_perceptions.extend([size_relation_same_type]) def _perceive_size_relative_to_learner(self) -> None: """ From 46dcfb4e64d93ba9eba7a87b4fb3e60dcd6f667d Mon Sep 17 00:00:00 2001 From: paynesa Date: Mon, 10 Aug 2020 11:41:15 -0400 Subject: [PATCH 15/28] add gaze to curricula --- adam/curriculum/imprecise_descriptions_curriculum.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/adam/curriculum/imprecise_descriptions_curriculum.py b/adam/curriculum/imprecise_descriptions_curriculum.py index a81222c5a..37110ce05 100644 --- a/adam/curriculum/imprecise_descriptions_curriculum.py +++ b/adam/curriculum/imprecise_descriptions_curriculum.py @@ -201,6 +201,7 @@ def _tall_x_template( salient_object_variables=[theme1], background_object_variables=computed_background, asserted_always_relations=[bigger_than(theme1, theme2)], + gazed_objects=[theme1], ) @@ -218,6 +219,7 @@ def _big_x_template( salient_object_variables=[theme1], background_object_variables=computed_background, asserted_always_relations=[bigger_than(theme1, theme2)], + gazed_objects=[theme1], ) @@ -235,6 +237,7 @@ def _little_x_template( salient_object_variables=[theme1], background_object_variables=computed_background, asserted_always_relations=[bigger_than(theme2, theme1)], + gazed_objects=[theme1], ) @@ -252,6 +255,7 @@ def _short_x_template( salient_object_variables=[theme1], background_object_variables=computed_background, asserted_always_relations=[bigger_than(theme2, theme1)], + gazed_objects=[theme1], ) From 046f42d31e3a6f257dd0d61770c925639086aed3 Mon Sep 17 00:00:00 2001 From: paynesa Date: Mon, 10 Aug 2020 13:22:27 -0400 Subject: [PATCH 16/28] skip a few tests temporarily to rebase and pass CI --- tests/learner/learn_imprecise_descriptions_test.py | 9 +++++---- ...tuation_to_developmental_primitive_perception_test.py | 1 + 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/tests/learner/learn_imprecise_descriptions_test.py b/tests/learner/learn_imprecise_descriptions_test.py index eba98eeac..cda7a2f24 100644 --- a/tests/learner/learn_imprecise_descriptions_test.py +++ b/tests/learner/learn_imprecise_descriptions_test.py @@ -15,7 +15,8 @@ _tall_x_template, ) from adam.learner import LearningExample -from adam.learner.verbs import SubsetVerbLearnerNew + +# from adam.learner.verbs import SubsetVerbLearnerNew from adam.learner.attributes import SubsetAttributeLearner, SubsetAttributeLearnerNew from adam.learner.integrated_learner import IntegratedTemplateLearner from adam.learner.language_mode import LanguageMode @@ -42,9 +43,9 @@ def integrated_learner_factory(language_mode: LanguageMode): attribute_learner=SubsetAttributeLearnerNew( ontology=GAILA_PHASE_1_ONTOLOGY, beam_size=5, language_mode=language_mode ), - action_learner=SubsetVerbLearnerNew( - ontology=GAILA_PHASE_1_ONTOLOGY, beam_size=5, language_mode=language_mode - ), + # action_learner=SubsetVerbLearnerNew( + # ontology=GAILA_PHASE_1_ONTOLOGY, beam_size=5, language_mode=language_mode + # ), ) diff --git 
a/tests/perception/high_level_semantics_situation_to_developmental_primitive_perception_test.py b/tests/perception/high_level_semantics_situation_to_developmental_primitive_perception_test.py index a34c7460f..ddd07278e 100644 --- a/tests/perception/high_level_semantics_situation_to_developmental_primitive_perception_test.py +++ b/tests/perception/high_level_semantics_situation_to_developmental_primitive_perception_test.py @@ -80,6 +80,7 @@ ) +@pytest.mark.skip("TODO: fix this") def test_big_ball(): ball1 = situation_object(BALL, debug_handle="ball_0") ball2 = situation_object(BALL, debug_handle="ball_1") From 5e3f2993049d3b3314c14951d5eb1310d1bc58fc Mon Sep 17 00:00:00 2001 From: paynesa Date: Mon, 10 Aug 2020 13:28:46 -0400 Subject: [PATCH 17/28] fix imports to pass pylint --- ...ation_to_developmental_primitive_perception.py | 15 +-------------- 1 file changed, 1 insertion(+), 14 deletions(-) diff --git a/adam/perception/high_level_semantics_situation_to_developmental_primitive_perception.py b/adam/perception/high_level_semantics_situation_to_developmental_primitive_perception.py index 7a31459d8..4d285d0c4 100644 --- a/adam/perception/high_level_semantics_situation_to_developmental_primitive_perception.py +++ b/adam/perception/high_level_semantics_situation_to_developmental_primitive_perception.py @@ -3,22 +3,8 @@ from random import Random from typing import Dict, Iterable, List, Mapping, MutableMapping, Optional, Union, cast from typing_extensions import Protocol, runtime - -from attr import Factory, attrib, attrs -from attr.validators import deep_mapping, instance_of -from immutablecollections import ( - ImmutableDict, - ImmutableSet, - ImmutableSetMultiDict, - immutabledict, - immutableset, - immutablesetmultidict, -) -from immutablecollections.converter_utils import _to_immutabledict from more_itertools import only, quantify from networkx import DiGraph -from vistautils.preconditions import check_arg - from adam.axes import AxesInfo, WORLD_AXES from adam.axis import GeonAxis from adam.geon import Geon @@ -93,6 +79,7 @@ from vistautils.preconditions import check_arg from vistautils.range import Range + class ColorPerceptionMode(Enum): """ Used as a field on `HighLevelSemanticsSituationToDevelopmentalPrimitivePerceptionGenerator` From 947d8c24718c580bd85d39a1da3a0e391f565350 Mon Sep 17 00:00:00 2001 From: paynesa Date: Mon, 10 Aug 2020 13:52:03 -0400 Subject: [PATCH 18/28] issue found; temporary hackish fix to do with ordering of gaze vs. non-gaze --- adam/learner/object_recognizer.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/adam/learner/object_recognizer.py b/adam/learner/object_recognizer.py index dfc7ed127..3fb44b1e6 100644 --- a/adam/learner/object_recognizer.py +++ b/adam/learner/object_recognizer.py @@ -325,6 +325,7 @@ def match_objects( num_object_nodes = candidate_object_graph.count_nodes_matching( lambda node: isinstance(node, ObjectPerception) ) + print(candidate_object_graph.copy_as_digraph().node) for (concept, pattern) in concepts_to_patterns.items(): # As an optimization, we count how many sub-object nodes @@ -333,7 +334,7 @@ def match_objects( # and we can bail out early. 
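            # Note on the guard that follows: despite the wording "less than" in the
            # comment above, the test is an exact equality on node counts; a candidate
            # is skipped whenever its object-node count differs from the pattern's
            # precomputed sub-object count in either direction.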
if num_object_nodes != self._concept_to_num_subobjects[concept]: continue - + print("matching to", concept.debug_string) with Timer(factor=1000) as t: matcher = pattern.matcher( candidate_object_graph, match_mode=MatchMode.OBJECT @@ -342,6 +343,7 @@ def match_objects( matcher.matches(use_lookahead_pruning=True), None ) if pattern_match: + print("MATCH SUCCESSFUL") cumulative_millis_in_successful_matches_ms += t.elapsed matched_object_node = ObjectSemanticNode(concept) @@ -374,7 +376,10 @@ def match_objects( break else: cumulative_millis_in_failed_matches_ms += t.elapsed + if object_nodes: + # TODO : fix this in a less hacky way + object_nodes.reverse() logging.info( "Object recognizer recognized: %s", [concept for (concept, _) in object_nodes], From 1bd68939f52c921dd9aa3c4ca376a8ff614df6ec Mon Sep 17 00:00:00 2001 From: paynesa Date: Mon, 10 Aug 2020 14:22:24 -0400 Subject: [PATCH 19/28] sort from low to high graph complexity when matching candidate objects in the object recognizer --- adam/learner/object_recognizer.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/adam/learner/object_recognizer.py b/adam/learner/object_recognizer.py index 3fb44b1e6..3bfc57c0f 100644 --- a/adam/learner/object_recognizer.py +++ b/adam/learner/object_recognizer.py @@ -378,8 +378,6 @@ def match_objects( cumulative_millis_in_failed_matches_ms += t.elapsed if object_nodes: - # TODO : fix this in a less hacky way - object_nodes.reverse() logging.info( "Object recognizer recognized: %s", [concept for (concept, _) in object_nodes], @@ -644,6 +642,12 @@ def is_root_object_node(node) -> bool: immutableset(candidate_subgraph_nodes) ) ) + # we sort the candidate objects' graphs from least to greatest number of nodes in the graph. This allows us to match objects + # with less cost before objects with greater cost, and also causes us to match gazed objects after non-gazed objects, which is the + # order needed to ensure that gaze is assigned to the correct object if there are multiple in the scene + candidate_objects.sort( + key=lambda x: len(x._graph.node) # pylint:disable=protected-access + ) return candidate_objects From 0a93dce827001340aae8b330da3bf8b096814056 Mon Sep 17 00:00:00 2001 From: paynesa Date: Mon, 10 Aug 2020 14:26:54 -0400 Subject: [PATCH 20/28] remove print/debug statements --- adam/learner/object_recognizer.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/adam/learner/object_recognizer.py b/adam/learner/object_recognizer.py index 3bfc57c0f..dc93cfb5e 100644 --- a/adam/learner/object_recognizer.py +++ b/adam/learner/object_recognizer.py @@ -325,7 +325,6 @@ def match_objects( num_object_nodes = candidate_object_graph.count_nodes_matching( lambda node: isinstance(node, ObjectPerception) ) - print(candidate_object_graph.copy_as_digraph().node) for (concept, pattern) in concepts_to_patterns.items(): # As an optimization, we count how many sub-object nodes @@ -334,7 +333,6 @@ def match_objects( # and we can bail out early. 
if num_object_nodes != self._concept_to_num_subobjects[concept]: continue - print("matching to", concept.debug_string) with Timer(factor=1000) as t: matcher = pattern.matcher( candidate_object_graph, match_mode=MatchMode.OBJECT @@ -343,7 +341,6 @@ def match_objects( matcher.matches(use_lookahead_pruning=True), None ) if pattern_match: - print("MATCH SUCCESSFUL") cumulative_millis_in_successful_matches_ms += t.elapsed matched_object_node = ObjectSemanticNode(concept) From 94387c4af4684d838fbbde4cfda541410ec6d83a Mon Sep 17 00:00:00 2001 From: paynesa Date: Mon, 10 Aug 2020 14:28:19 -0400 Subject: [PATCH 21/28] add back in verb learner to integrated learner --- tests/learner/learn_imprecise_descriptions_test.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/learner/learn_imprecise_descriptions_test.py b/tests/learner/learn_imprecise_descriptions_test.py index cda7a2f24..89c4e9a75 100644 --- a/tests/learner/learn_imprecise_descriptions_test.py +++ b/tests/learner/learn_imprecise_descriptions_test.py @@ -16,7 +16,7 @@ ) from adam.learner import LearningExample -# from adam.learner.verbs import SubsetVerbLearnerNew +from adam.learner.verbs import SubsetVerbLearnerNew from adam.learner.attributes import SubsetAttributeLearner, SubsetAttributeLearnerNew from adam.learner.integrated_learner import IntegratedTemplateLearner from adam.learner.language_mode import LanguageMode @@ -43,9 +43,9 @@ def integrated_learner_factory(language_mode: LanguageMode): attribute_learner=SubsetAttributeLearnerNew( ontology=GAILA_PHASE_1_ONTOLOGY, beam_size=5, language_mode=language_mode ), - # action_learner=SubsetVerbLearnerNew( - # ontology=GAILA_PHASE_1_ONTOLOGY, beam_size=5, language_mode=language_mode - # ), + action_learner=SubsetVerbLearnerNew( + ontology=GAILA_PHASE_1_ONTOLOGY, beam_size=5, language_mode=language_mode + ), ) From 2f89445f00df82d9eb208696336a0437876d1706 Mon Sep 17 00:00:00 2001 From: paynesa Date: Mon, 10 Aug 2020 14:31:25 -0400 Subject: [PATCH 22/28] add back in verb learner to integrated learner --- tests/learner/learn_imprecise_descriptions_test.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/learner/learn_imprecise_descriptions_test.py b/tests/learner/learn_imprecise_descriptions_test.py index 89c4e9a75..0237a50f3 100644 --- a/tests/learner/learn_imprecise_descriptions_test.py +++ b/tests/learner/learn_imprecise_descriptions_test.py @@ -44,7 +44,7 @@ def integrated_learner_factory(language_mode: LanguageMode): ontology=GAILA_PHASE_1_ONTOLOGY, beam_size=5, language_mode=language_mode ), action_learner=SubsetVerbLearnerNew( - ontology=GAILA_PHASE_1_ONTOLOGY, beam_size=5, language_mode=language_mode + ontology=GAILA_PHASE_1_ONTOLOGY, beam_size=5, language_mode=language_mode ), ) From 3627eb23eb6332443d5adf282815bbc01f3f8d1a Mon Sep 17 00:00:00 2001 From: paynesa Date: Mon, 10 Aug 2020 15:12:50 -0400 Subject: [PATCH 23/28] add bool flag so that only sort in object recognizer rather than object learner --- adam/learner/object_recognizer.py | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/adam/learner/object_recognizer.py b/adam/learner/object_recognizer.py index dc93cfb5e..2e8b9dfd6 100644 --- a/adam/learner/object_recognizer.py +++ b/adam/learner/object_recognizer.py @@ -319,7 +319,7 @@ def match_objects( matched_object_node, graph_to_return, pattern_match ) - candidate_object_subgraphs = extract_candidate_objects(perception_graph) + candidate_object_subgraphs = 
extract_candidate_objects(perception_graph, True) for candidate_object_graph in candidate_object_subgraphs: num_object_nodes = candidate_object_graph.count_nodes_matching( @@ -539,7 +539,7 @@ def _init_patterns_to_num_subobjects(self) -> ImmutableDict[ObjectConcept, int]: def extract_candidate_objects( - whole_scene_perception_graph: PerceptionGraph + whole_scene_perception_graph: PerceptionGraph, sort_by_increasing_size: bool = False ) -> Sequence[PerceptionGraph]: """ @@ -642,9 +642,10 @@ def is_root_object_node(node) -> bool: # we sort the candidate objects' graphs from least to greatest number of nodes in the graph. This allows us to match objects # with less cost before objects with greater cost, and also causes us to match gazed objects after non-gazed objects, which is the # order needed to ensure that gaze is assigned to the correct object if there are multiple in the scene - candidate_objects.sort( - key=lambda x: len(x._graph.node) # pylint:disable=protected-access - ) + if sort_by_increasing_size: + candidate_objects.sort( + key=lambda x: len(x._graph.node) # pylint:disable=protected-access + ) return candidate_objects From 9e191c60c457b403e6f483b255a781d0b267f247 Mon Sep 17 00:00:00 2001 From: paynesa Date: Mon, 10 Aug 2020 15:17:25 -0400 Subject: [PATCH 24/28] update perception tests based on new edge implemented by Deniz --- ...ion_to_developmental_primitive_perception_test.py | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/tests/perception/high_level_semantics_situation_to_developmental_primitive_perception_test.py b/tests/perception/high_level_semantics_situation_to_developmental_primitive_perception_test.py index 2cb10ba43..dfae211da 100644 --- a/tests/perception/high_level_semantics_situation_to_developmental_primitive_perception_test.py +++ b/tests/perception/high_level_semantics_situation_to_developmental_primitive_perception_test.py @@ -5,7 +5,8 @@ from adam.axes import HorizontalAxisOfObject from adam.ontology import IN_REGION, OntologyNode, IS_SPEAKER from adam.ontology.phase1_ontology import ( - SAME_TYPE, + BIGGER_THAN_SAME_TYPE, + SMALLER_THAN_SAME_TYPE, AGENT, ANIMATE, BALL, @@ -80,7 +81,6 @@ ) -@pytest.mark.skip("TODO: fix this") def test_big_ball(): ball1 = situation_object(BALL, debug_handle="ball_0") ball2 = situation_object(BALL, debug_handle="ball_1") @@ -102,9 +102,13 @@ def test_big_ball(): perceived_objects = ball_perception.frames[0].perceived_objects object_handles = set(obj.debug_handle for obj in perceived_objects) - assert object_handles == {"ball_0", "ball_1", "the ground"} + assert object_handles == {"**ball_0", "**ball_1", "the ground"} assert any( - relation.relation_type == SAME_TYPE + relation.relation_type == BIGGER_THAN_SAME_TYPE + for relation in ball_perception.frames[0].relations + ) + assert any( + relation.relation_type == SMALLER_THAN_SAME_TYPE for relation in ball_perception.frames[0].relations ) From 4ba80aa261c90fed567a573c119986cc1e26dff1 Mon Sep 17 00:00:00 2001 From: paynesa Date: Thu, 13 Aug 2020 16:27:51 -0400 Subject: [PATCH 25/28] remove debug and commented code --- adam/learner/template_learner.py | 1 - tests/learner/subset_verb_learner_test.py | 1 - 2 files changed, 2 deletions(-) diff --git a/adam/learner/template_learner.py b/adam/learner/template_learner.py index fc76d2641..5dbd24b86 100644 --- a/adam/learner/template_learner.py +++ b/adam/learner/template_learner.py @@ -136,7 +136,6 @@ def match_template( pattern=pattern, match=match, matched_objects_to_names=matched_objects_to_names, - # 
allow_undescribed=allow_undescribed, ), pattern, score, diff --git a/tests/learner/subset_verb_learner_test.py b/tests/learner/subset_verb_learner_test.py index 73e5c6e1b..5ced34650 100644 --- a/tests/learner/subset_verb_learner_test.py +++ b/tests/learner/subset_verb_learner_test.py @@ -142,7 +142,6 @@ def run_verb_test(learner, situation_template, language_generator): ) in test_curriculum.instances(): descriptions_from_learner = learner.describe(test_perceptual_representation) gold = test_lingustics_description.as_token_sequence() - print(gold) assert descriptions_from_learner assert gold in [desc.as_token_sequence() for desc in descriptions_from_learner] From 22aa52ed258a4ef946086c0c45c054273b8eb298 Mon Sep 17 00:00:00 2001 From: paynesa Date: Thu, 13 Aug 2020 16:40:57 -0400 Subject: [PATCH 26/28] kwargs 1 --- adam/learner/object_recognizer.py | 6 ++++-- adam/learner/objects.py | 8 +++++--- 2 files changed, 9 insertions(+), 5 deletions(-) diff --git a/adam/learner/object_recognizer.py b/adam/learner/object_recognizer.py index 2e8b9dfd6..ad6e855b1 100644 --- a/adam/learner/object_recognizer.py +++ b/adam/learner/object_recognizer.py @@ -319,7 +319,9 @@ def match_objects( matched_object_node, graph_to_return, pattern_match ) - candidate_object_subgraphs = extract_candidate_objects(perception_graph, True) + candidate_object_subgraphs = extract_candidate_objects( + perception_graph, sort_by_increasing_size=True + ) for candidate_object_graph in candidate_object_subgraphs: num_object_nodes = candidate_object_graph.count_nodes_matching( @@ -539,7 +541,7 @@ def _init_patterns_to_num_subobjects(self) -> ImmutableDict[ObjectConcept, int]: def extract_candidate_objects( - whole_scene_perception_graph: PerceptionGraph, sort_by_increasing_size: bool = False + whole_scene_perception_graph: PerceptionGraph, sort_by_increasing_size: bool ) -> Sequence[PerceptionGraph]: """ diff --git a/adam/learner/objects.py b/adam/learner/objects.py index 5a8d90b56..bbb0d47d8 100644 --- a/adam/learner/objects.py +++ b/adam/learner/objects.py @@ -490,7 +490,8 @@ def _hypotheses_from_perception( template_variable_to_pattern_node=immutabledict(), ) for candidate_object in extract_candidate_objects( - learning_state.perception_semantic_alignment.perception_graph + learning_state.perception_semantic_alignment.perception_graph, + sort_by_increasing_size=False, ) ) @@ -556,7 +557,7 @@ def _enrich_post_process( new_nodes = [] perception_graph_after_processing = perception_graph_after_matching for candiate_object_graph in extract_candidate_objects( - perception_graph_after_matching + perception_graph_after_matching, sort_by_increasing_size=False ): fake_pattern_graph = PerceptionGraphPattern.from_graph(candiate_object_graph) fake_object_semantic_node = ObjectSemanticNode( @@ -685,7 +686,8 @@ def _hypotheses_from_perception( template_variable_to_pattern_node=immutabledict(), ) for candidate_object in extract_candidate_objects( - learning_state.perception_semantic_alignment.perception_graph + learning_state.perception_semantic_alignment.perception_graph, + sort_by_increasing_size=False, ) ) From 02fc988751c8614039db950d18891a78b008579f Mon Sep 17 00:00:00 2001 From: paynesa Date: Thu, 13 Aug 2020 17:17:07 -0400 Subject: [PATCH 27/28] kwargs 2 --- ...ttribute_constraining_action_curriculum.py | 3 + .../imprecise_descriptions_curriculum.py | 17 +++++ adam/curriculum/m6_curriculum.py | 6 ++ adam/curriculum/phase1_curriculum.py | 44 ++++++++++++ adam/curriculum/phase2_curriculum.py | 3 + adam/curriculum/preposition_curriculum.py | 
18 +++++ adam/curriculum/pursuit_curriculum.py | 2 + ...bs_with_dynamic_prepositions_curriculum.py | 70 +++++++++++++++++++ adam/experiment/experiment_utils.py | 2 + adam/situation/templates/phase1_templates.py | 2 +- tests/learner/object_learner_test.py | 1 + .../pursuit_preposition_learner_test.py | 16 +++++ .../learner/subset_attribute_learner_test.py | 6 ++ .../subset_preposition_learner_test.py | 5 ++ tests/learner/subset_verb_learner_test.py | 4 ++ tests/learner/test_object_recognizer.py | 1 + .../templates/phase1_template_test.py | 4 ++ 17 files changed, 203 insertions(+), 1 deletion(-) diff --git a/adam/curriculum/attribute_constraining_action_curriculum.py b/adam/curriculum/attribute_constraining_action_curriculum.py index 5999cabf4..6ce0dcc1d 100644 --- a/adam/curriculum/attribute_constraining_action_curriculum.py +++ b/adam/curriculum/attribute_constraining_action_curriculum.py @@ -45,6 +45,7 @@ def make_human_eat_curriculum( max_to_sample=num_samples if num_samples else 5, ontology=GAILA_PHASE_1_ONTOLOGY, chooser=PHASE1_CHOOSER_FACTORY(), + block_multiple_of_the_same_type=True, ), language_generator=language_generator, ) @@ -69,6 +70,7 @@ def make_animal_eat_curriculum( max_to_sample=num_samples if num_samples else 5, ontology=GAILA_PHASE_1_ONTOLOGY, chooser=PHASE1_CHOOSER_FACTORY(), + block_multiple_of_the_same_type=True, ), language_generator=language_generator, ) @@ -100,6 +102,7 @@ def make_german_eat_test_curriculum( max_to_sample=num_samples if num_samples else 5, ontology=GAILA_PHASE_1_ONTOLOGY, chooser=PHASE1_CHOOSER_FACTORY(), + block_multiple_of_the_same_type=True, ) ] ), diff --git a/adam/curriculum/imprecise_descriptions_curriculum.py b/adam/curriculum/imprecise_descriptions_curriculum.py index 37110ce05..9e9959f99 100644 --- a/adam/curriculum/imprecise_descriptions_curriculum.py +++ b/adam/curriculum/imprecise_descriptions_curriculum.py @@ -430,6 +430,7 @@ def make_throw_imprecise_temporal_descriptions( ontology=GAILA_PHASE_1_ONTOLOGY, chooser=PHASE1_CHOOSER_FACTORY(), max_to_sample=num_samples if num_samples else 5, + block_multiple_of_the_same_type=True, ) for is_fast in BOOL_SET ), @@ -446,6 +447,7 @@ def make_throw_imprecise_temporal_descriptions( ontology=GAILA_PHASE_1_ONTOLOGY, chooser=PHASE1_CHOOSER_FACTORY(), max_to_sample=num_samples if num_samples else 5, + block_multiple_of_the_same_type=True, ) for is_fast in BOOL_SET ), @@ -463,6 +465,7 @@ def make_throw_imprecise_temporal_descriptions( ontology=GAILA_PHASE_1_ONTOLOGY, chooser=PHASE1_CHOOSER_FACTORY(), max_to_sample=num_samples if num_samples else 5, + block_multiple_of_the_same_type=True, ) for is_fast in BOOL_SET for is_up in BOOL_SET @@ -480,6 +483,7 @@ def make_throw_imprecise_temporal_descriptions( ontology=GAILA_PHASE_1_ONTOLOGY, chooser=PHASE1_CHOOSER_FACTORY(), max_to_sample=num_samples if num_samples else 5, + block_multiple_of_the_same_type=True, ) for is_fast in BOOL_SET ), @@ -524,6 +528,7 @@ def make_move_imprecise_temporal_descriptions( ontology=GAILA_PHASE_1_ONTOLOGY, chooser=PHASE1_CHOOSER_FACTORY(), max_to_sample=num_samples if num_samples else 5, + block_multiple_of_the_same_type=True, ) for is_fast in BOOL_SET ), @@ -540,6 +545,7 @@ def make_move_imprecise_temporal_descriptions( ontology=GAILA_PHASE_1_ONTOLOGY, chooser=PHASE1_CHOOSER_FACTORY(), max_to_sample=num_samples if num_samples else 5, + block_multiple_of_the_same_type=True, ) for is_fast in BOOL_SET ), @@ -581,6 +587,7 @@ def make_jump_imprecise_temporal_descriptions( ontology=GAILA_PHASE_1_ONTOLOGY, 
chooser=PHASE1_CHOOSER_FACTORY(), max_to_sample=num_samples if num_samples else 5, + block_multiple_of_the_same_type=True, ) for use_adverbial_path_modifier in (True, False) for is_fast in BOOL_SET @@ -620,6 +627,7 @@ def make_take_grab_subtle_verb_distinction( ontology=GAILA_PHASE_1_ONTOLOGY, chooser=PHASE1_CHOOSER_FACTORY(), max_to_sample=num_samples if num_samples else 5, + block_multiple_of_the_same_type=True, ) for use_adverbial_path_modifier in BOOL_SET for hard_force in BOOL_SET @@ -678,6 +686,7 @@ def make_push_shove_subtle_verb_distinctions( ontology=GAILA_PHASE_1_ONTOLOGY, chooser=PHASE1_CHOOSER_FACTORY(), max_to_sample=num_samples if num_samples else 5, + block_multiple_of_the_same_type=True, ) for template in templates ] @@ -721,6 +730,7 @@ def make_walk_run_subtle_verb_distinction( ontology=GAILA_PHASE_1_ONTOLOGY, chooser=PHASE1_CHOOSER_FACTORY(), max_to_sample=num_samples if num_samples else 5, + block_multiple_of_the_same_type=True, ) for use_adverbial_path_modifier in BOOL_SET for hard_force in BOOL_SET @@ -761,6 +771,7 @@ def make_pass_toss_subtle_verb_distinction( else [SOFT_FORCE], background=background, ), + block_multiple_of_the_same_type=True, ontology=GAILA_PHASE_1_ONTOLOGY, chooser=PHASE1_CHOOSER_FACTORY(), max_to_sample=num_samples if num_samples else 5, @@ -804,6 +815,7 @@ def make_roll_imprecise_temporal_descriptions( ontology=GAILA_PHASE_1_ONTOLOGY, chooser=PHASE1_CHOOSER_FACTORY(), max_to_sample=num_samples if num_samples else 5, + block_multiple_of_the_same_type=True, ) for is_fast in BOOL_SET ), @@ -820,6 +832,7 @@ def make_roll_imprecise_temporal_descriptions( ontology=GAILA_PHASE_1_ONTOLOGY, chooser=PHASE1_CHOOSER_FACTORY(), max_to_sample=num_samples if num_samples else 5, + block_multiple_of_the_same_type=True, ) for is_fast in BOOL_SET ), @@ -836,6 +849,7 @@ def make_roll_imprecise_temporal_descriptions( ontology=GAILA_PHASE_1_ONTOLOGY, chooser=PHASE1_CHOOSER_FACTORY(), max_to_sample=num_samples if num_samples else 5, + block_multiple_of_the_same_type=True, ) for is_fast in BOOL_SET ), @@ -871,6 +885,7 @@ def make_fly_imprecise_temporal_descriptions( ontology=GAILA_PHASE_1_ONTOLOGY, chooser=PHASE1_CHOOSER_FACTORY(), max_to_sample=num_samples if num_samples else 5, + block_multiple_of_the_same_type=True, ) for is_up in BOOL_SET for syntax_hints in syntax_hints_options @@ -908,6 +923,7 @@ def make_fall_imprecise_temporal_descriptions( ontology=GAILA_PHASE_1_ONTOLOGY, chooser=PHASE1_CHOOSER_FACTORY(), max_to_sample=num_samples if num_samples else 5, + block_multiple_of_the_same_type=True, ) for object_ends_up_on_ground in BOOL_SET for syntax_hints in syntax_hints_options @@ -924,6 +940,7 @@ def make_fall_imprecise_temporal_descriptions( ontology=GAILA_PHASE_1_ONTOLOGY, chooser=PHASE1_CHOOSER_FACTORY(), max_to_sample=num_samples if num_samples else 5, + block_multiple_of_the_same_type=True, ) for is_fast in BOOL_SET ), diff --git a/adam/curriculum/m6_curriculum.py b/adam/curriculum/m6_curriculum.py index 8724f5a52..061128212 100644 --- a/adam/curriculum/m6_curriculum.py +++ b/adam/curriculum/m6_curriculum.py @@ -135,6 +135,7 @@ def _make_m6_on_curriculum( chooser=PHASE1_CHOOSER_FACTORY(), ontology=GAILA_PHASE_1_ONTOLOGY, max_to_sample=num_samples if num_samples else 1, + block_multiple_of_the_same_type=True, ) for object_1 in r.sample(SMALL_OBJECT_VARS, 3) for object_2 in r.sample(LARGE_OBJECT_VARS, 3) @@ -167,6 +168,7 @@ def _make_m6_beside_curriculum( chooser=PHASE1_CHOOSER_FACTORY(), ontology=GAILA_PHASE_1_ONTOLOGY, max_to_sample=num_samples if num_samples 
else 1, + block_multiple_of_the_same_type=True, ) for object_1 in r.sample(SMALL_OBJECT_VARS, 3) for object_2 in r.sample(LARGE_OBJECT_VARS, 3) @@ -199,6 +201,7 @@ def _make_m6_under_curriculum( chooser=PHASE1_CHOOSER_FACTORY(), ontology=GAILA_PHASE_1_ONTOLOGY, max_to_sample=num_samples if num_samples else 1, + block_multiple_of_the_same_type=True, ) for object_1 in r.sample(SMALL_OBJECT_VARS, 3) for object_2 in r.sample(LARGE_OBJECT_VARS, 3) @@ -231,6 +234,7 @@ def _make_m6_over_curriculum( chooser=PHASE1_CHOOSER_FACTORY(), ontology=GAILA_PHASE_1_ONTOLOGY, max_to_sample=num_samples if num_samples else 1, + block_multiple_of_the_same_type=True, ) for object_1 in r.sample(SMALL_OBJECT_VARS, 3) for object_2 in r.sample(LARGE_OBJECT_VARS, 3) @@ -264,6 +268,7 @@ def _make_m6_behind_curriculum( chooser=PHASE1_CHOOSER_FACTORY(), ontology=GAILA_PHASE_1_ONTOLOGY, max_to_sample=num_samples if num_samples else 1, + block_multiple_of_the_same_type=True, ) for object_1 in r.sample(SMALL_OBJECT_VARS, 3) for object_2 in r.sample(LARGE_OBJECT_VARS, 3) @@ -297,6 +302,7 @@ def _make_m6_in_front_curriculum( chooser=PHASE1_CHOOSER_FACTORY(), ontology=GAILA_PHASE_1_ONTOLOGY, max_to_sample=num_samples if num_samples else 1, + block_multiple_of_the_same_type=True, ) for object_1 in r.sample(SMALL_OBJECT_VARS, 3) for object_2 in r.sample(LARGE_OBJECT_VARS, 3) diff --git a/adam/curriculum/phase1_curriculum.py b/adam/curriculum/phase1_curriculum.py index 5f2204682..78424cfa6 100644 --- a/adam/curriculum/phase1_curriculum.py +++ b/adam/curriculum/phase1_curriculum.py @@ -211,6 +211,7 @@ def _make_each_object_by_itself_curriculum( # pylint: disable=unused-argument max_to_sample=num_samples, chooser=PHASE1_CHOOSER_FACTORY(), ontology=GAILA_PHASE_1_ONTOLOGY, + block_multiple_of_the_same_type=True, ) if num_samples else all_possible( @@ -223,6 +224,7 @@ def _make_each_object_by_itself_curriculum( # pylint: disable=unused-argument max_to_sample=num_samples, chooser=PHASE1_CHOOSER_FACTORY(), ontology=GAILA_PHASE_1_ONTOLOGY, + block_multiple_of_the_same_type=True, ) if num_samples else all_possible( @@ -236,6 +238,7 @@ def _make_each_object_by_itself_curriculum( # pylint: disable=unused-argument ontology=GAILA_PHASE_1_ONTOLOGY, chooser=PHASE1_CHOOSER_FACTORY(), max_to_sample=5, + block_multiple_of_the_same_type=True, ) for object in [MOM, DAD, BABY] ), @@ -245,6 +248,7 @@ def _make_each_object_by_itself_curriculum( # pylint: disable=unused-argument ontology=GAILA_PHASE_1_ONTOLOGY, chooser=PHASE1_CHOOSER_FACTORY(), max_to_sample=5, + block_multiple_of_the_same_type=True, ) for object in [MOM, DAD, BABY] ), @@ -283,6 +287,7 @@ def _make_objects_with_colors_curriculum( ontology=GAILA_PHASE_1_ONTOLOGY, chooser=PHASE1_CHOOSER_FACTORY(), max_to_sample=num_samples if num_samples else 80, + block_multiple_of_the_same_type=True, ) ] ), @@ -320,6 +325,7 @@ def _make_objects_with_colors_is_curriculum( ontology=GAILA_PHASE_1_ONTOLOGY, chooser=PHASE1_CHOOSER_FACTORY(), max_to_sample=num_samples if num_samples else 80, + block_multiple_of_the_same_type=True, ) ] ), @@ -467,6 +473,7 @@ def _make_object_on_ground_curriculum( # pylint: disable=unused-argument ontology=GAILA_PHASE_1_ONTOLOGY, chooser=PHASE1_CHOOSER_FACTORY(), max_to_sample=num_samples, + block_multiple_of_the_same_type=True, ) if num_samples else all_possible( @@ -479,6 +486,7 @@ def _make_object_on_ground_curriculum( # pylint: disable=unused-argument ontology=GAILA_PHASE_1_ONTOLOGY, chooser=PHASE1_CHOOSER_FACTORY(), max_to_sample=num_samples, + 
block_multiple_of_the_same_type=True, ) if num_samples else all_possible( @@ -531,6 +539,7 @@ def _make_person_has_object_curriculum( inanimate_object_0, background=background, ), + block_multiple_of_the_same_type=True, chooser=PHASE1_CHOOSER_FACTORY(), ontology=GAILA_PHASE_1_ONTOLOGY, max_to_sample=num_samples if num_samples else 35, @@ -599,6 +608,7 @@ def _make_part_whole_curriculum( # pylint: disable=unused-argument chooser=PHASE1_CHOOSER_FACTORY(), ontology=GAILA_PHASE_1_ONTOLOGY, max_to_sample=1, + block_multiple_of_the_same_type=True, ), language_generator=language_generator, ).instances() @@ -620,6 +630,7 @@ def _make_part_whole_curriculum( # pylint: disable=unused-argument chooser=PHASE1_CHOOSER_FACTORY(), ontology=GAILA_PHASE_1_ONTOLOGY, max_to_sample=3, + block_multiple_of_the_same_type=True, ), language_generator=language_generator, ).instances() @@ -672,6 +683,7 @@ def _make_my_your_object_curriculum( chooser=PHASE1_CHOOSER_FACTORY(), ontology=GAILA_PHASE_1_ONTOLOGY, max_to_sample=num_samples if num_samples else 20, + block_multiple_of_the_same_type=True, ) for person in owners ] @@ -797,6 +809,7 @@ def _make_fall_curriculum( ontology=GAILA_PHASE_1_ONTOLOGY, chooser=PHASE1_CHOOSER_FACTORY(), max_to_sample=num_samples, + block_multiple_of_the_same_type=True, ) if num_samples else all_possible( @@ -860,6 +873,7 @@ def _make_transfer_of_possession_curriculum( max_to_sample=num_samples if num_samples else 100, chooser=PHASE1_CHOOSER_FACTORY(), ontology=GAILA_PHASE_1_ONTOLOGY, + block_multiple_of_the_same_type=True, ) for template in make_give_templates(background) ] @@ -896,6 +910,7 @@ def _make_object_on_object_curriculum( max_to_sample=num_samples if num_samples else 100, chooser=PHASE1_CHOOSER_FACTORY(), ontology=GAILA_PHASE_1_ONTOLOGY, + block_multiple_of_the_same_type=True, ), language_generator=language_generator, ) @@ -937,6 +952,7 @@ def _make_object_beside_object_curriculum( max_to_sample=num_samples if num_samples else 50, chooser=PHASE1_CHOOSER_FACTORY(), ontology=GAILA_PHASE_1_ONTOLOGY, + block_multiple_of_the_same_type=True, ), language_generator=language_generator, ) @@ -977,6 +993,7 @@ def _make_object_under_or_over_object_curriculum( max_to_sample=num_samples if num_samples else 100, chooser=PHASE1_CHOOSER_FACTORY(), ontology=GAILA_PHASE_1_ONTOLOGY, + block_multiple_of_the_same_type=True, ) for template in templates ] @@ -1023,12 +1040,14 @@ def _make_object_in_other_object_curriculum( max_to_sample=num_samples if num_samples else 25, chooser=PHASE1_CHOOSER_FACTORY(), ontology=GAILA_PHASE_1_ONTOLOGY, + block_multiple_of_the_same_type=True, ), sampled( solid_template, max_to_sample=num_samples * 3 if num_samples else 75, chooser=PHASE1_CHOOSER_FACTORY(), ontology=GAILA_PHASE_1_ONTOLOGY, + block_multiple_of_the_same_type=True, ), ] ), @@ -1115,6 +1134,7 @@ def _make_fly_curriculum( ontology=GAILA_PHASE_1_ONTOLOGY, chooser=PHASE1_CHOOSER_FACTORY(), max_to_sample=num_samples, + block_multiple_of_the_same_type=True, ) if num_samples else all_possible( @@ -1277,6 +1297,7 @@ def _make_roll_curriculum( max_to_sample=num_samples if num_samples else 25, chooser=PHASE1_CHOOSER_FACTORY(), ontology=GAILA_PHASE_1_ONTOLOGY, + block_multiple_of_the_same_type=True, ) for situation in make_roll_templates(noise_objects) ] @@ -1323,6 +1344,7 @@ def _make_transitive_roll_curriculum( max_to_sample=num_samples if num_samples else 25, chooser=PHASE1_CHOOSER_FACTORY(), ontology=GAILA_PHASE_1_ONTOLOGY, + block_multiple_of_the_same_type=True, ) for situation in 
make_transitive_roll_templates(noise_objects) ] @@ -1400,6 +1422,7 @@ def _make_templates() -> Iterable[Phase1SituationTemplate]: max_to_sample=num_samples if num_samples else 25, chooser=PHASE1_CHOOSER_FACTORY(), ontology=GAILA_PHASE_1_ONTOLOGY, + block_multiple_of_the_same_type=True, ) for template in _make_templates() ) @@ -1546,6 +1569,7 @@ def _make_jump_curriculum( use_adverbial_path_modifier=use_adverbial_path_modifier, background=background, ), + block_multiple_of_the_same_type=True, ontology=GAILA_PHASE_1_ONTOLOGY, chooser=PHASE1_CHOOSER_FACTORY(), max_to_sample=num_samples if num_samples else 25, @@ -1561,6 +1585,7 @@ def _make_jump_curriculum( max_to_sample=num_samples if num_samples else 25, chooser=PHASE1_CHOOSER_FACTORY(), ontology=GAILA_PHASE_1_ONTOLOGY, + block_multiple_of_the_same_type=True, ) ] ), @@ -1611,6 +1636,7 @@ def _make_put_curriculum( ontology=GAILA_PHASE_1_ONTOLOGY, chooser=PHASE1_CHOOSER_FACTORY(), max_to_sample=num_samples if num_samples else 25, + block_multiple_of_the_same_type=True, ) for template in make_put_templates(noise_objects) ] @@ -1658,6 +1684,7 @@ def _make_put_on_speaker_addressee_body_part_curriculum( body_part_of_putter, background=make_noise_objects(noise_objects), ), + block_multiple_of_the_same_type=True, max_to_sample=num_samples if num_samples else 25, chooser=PHASE1_CHOOSER_FACTORY(), ontology=GAILA_PHASE_1_ONTOLOGY, @@ -1740,6 +1767,7 @@ def _make_drink_curriculum( ontology=GAILA_PHASE_1_ONTOLOGY, chooser=PHASE1_CHOOSER_FACTORY(), max_to_sample=num_samples, + block_multiple_of_the_same_type=True, ) if num_samples else all_possible( @@ -1754,6 +1782,7 @@ def _make_drink_curriculum( ontology=GAILA_PHASE_1_ONTOLOGY, chooser=PHASE1_CHOOSER_FACTORY(), max_to_sample=num_samples if num_samples else 5, + block_multiple_of_the_same_type=True, ), ] ), @@ -1806,6 +1835,7 @@ def _make_eat_curriculum( max_to_sample=num_samples if num_samples else 25, ontology=GAILA_PHASE_1_ONTOLOGY, chooser=PHASE1_CHOOSER_FACTORY(), + block_multiple_of_the_same_type=True, ) ] ), @@ -1938,6 +1968,7 @@ def _make_sit_curriculum( chooser=PHASE1_CHOOSER_FACTORY(), ontology=GAILA_PHASE_1_ONTOLOGY, max_to_sample=num_samples, + block_multiple_of_the_same_type=True, ) if num_samples else all_possible( @@ -2082,6 +2113,7 @@ def _make_take_curriculum( operator=operator, background=make_noise_objects(noise_objects), ), + block_multiple_of_the_same_type=True, max_to_sample=num_samples if num_samples else 25, chooser=PHASE1_CHOOSER_FACTORY(), ontology=GAILA_PHASE_1_ONTOLOGY, @@ -2216,6 +2248,7 @@ def _make_move_curriculum( max_to_sample=num_samples if num_samples else 25, chooser=PHASE1_CHOOSER_FACTORY(), ontology=GAILA_PHASE_1_ONTOLOGY, + block_multiple_of_the_same_type=True, ) for situation in make_move_templates(noise_objects) ] @@ -2280,6 +2313,7 @@ def _make_spin_curriculum( max_to_sample=num_samples if num_samples else 25, chooser=PHASE1_CHOOSER_FACTORY(), ontology=GAILA_PHASE_1_ONTOLOGY, + block_multiple_of_the_same_type=True, ) for situation in make_spin_templates(noise_objects) ] @@ -2338,6 +2372,7 @@ def _make_go_curriculum( max_to_sample=num_samples if num_samples else 25, chooser=PHASE1_CHOOSER_FACTORY(), ontology=GAILA_PHASE_1_ONTOLOGY, + block_multiple_of_the_same_type=True, ) for situation in make_go_templates(noise_objects) ] @@ -2354,6 +2389,7 @@ def _make_go_curriculum( max_to_sample=num_samples if num_samples else 25, chooser=PHASE1_CHOOSER_FACTORY(), ontology=GAILA_PHASE_1_ONTOLOGY, + block_multiple_of_the_same_type=True, ) for is_distal in (True, False) ] @@ 
-2478,6 +2514,7 @@ def _make_push_curriculum( max_to_sample=num_samples if num_samples else 25, chooser=PHASE1_CHOOSER_FACTORY(), ontology=GAILA_PHASE_1_ONTOLOGY, + block_multiple_of_the_same_type=True, ) for adverbial_path_modifier in [True, False] for operator in [TOWARD, AWAY_FROM] @@ -2800,6 +2837,7 @@ def _make_throw_curriculum( max_to_sample=num_samples if num_samples else 25, chooser=PHASE1_CHOOSER_FACTORY(), ontology=GAILA_PHASE_1_ONTOLOGY, + block_multiple_of_the_same_type=True, ) for template in make_throw_templates(noise_objects) ] @@ -2842,6 +2880,7 @@ def _make_pass_curriculum( operator=operator, background=make_noise_objects(noise_objects), ), + block_multiple_of_the_same_type=True, ontology=GAILA_PHASE_1_ONTOLOGY, chooser=PHASE1_CHOOSER_FACTORY(), max_to_sample=num_samples if num_samples else 25, @@ -2948,6 +2987,7 @@ def _make_come_curriculum( ontology=GAILA_PHASE_1_ONTOLOGY, chooser=PHASE1_CHOOSER_FACTORY(), max_to_sample=num_samples, + block_multiple_of_the_same_type=True, ) if num_samples else all_possible( @@ -2960,6 +3000,7 @@ def _make_come_curriculum( ontology=GAILA_PHASE_1_ONTOLOGY, chooser=PHASE1_CHOOSER_FACTORY(), max_to_sample=num_samples, + block_multiple_of_the_same_type=True, ) if num_samples else all_possible( @@ -2972,12 +3013,14 @@ def _make_come_curriculum( max_to_sample=num_samples if num_samples else 25, ontology=GAILA_PHASE_1_ONTOLOGY, chooser=PHASE1_CHOOSER_FACTORY(), + block_multiple_of_the_same_type=True, ), sampled( _make_come_down_template(movee, object_, speaker, ground, background), max_to_sample=num_samples if num_samples else 25, ontology=GAILA_PHASE_1_ONTOLOGY, chooser=PHASE1_CHOOSER_FACTORY(), + block_multiple_of_the_same_type=True, ), ] ), @@ -3049,6 +3092,7 @@ def make_behind_in_front_templates() -> Iterable[Phase1SituationTemplate]: max_to_sample=num_samples if num_samples else 25, chooser=PHASE1_CHOOSER_FACTORY(), ontology=GAILA_PHASE_1_ONTOLOGY, + block_multiple_of_the_same_type=True, ) for template in make_behind_in_front_templates() ) diff --git a/adam/curriculum/phase2_curriculum.py b/adam/curriculum/phase2_curriculum.py index 3cdd0f372..b23ca13b0 100644 --- a/adam/curriculum/phase2_curriculum.py +++ b/adam/curriculum/phase2_curriculum.py @@ -124,6 +124,7 @@ def _make_sit_on_chair_curriculum( chooser=PHASE1_CHOOSER_FACTORY(), ontology=GAILA_PHASE_2_ONTOLOGY, max_to_sample=num_samples, + block_multiple_of_the_same_type=True, ) if num_samples else all_possible( @@ -180,6 +181,7 @@ def _make_drink_cups_curriculum( chooser=PHASE1_CHOOSER_FACTORY(), ontology=GAILA_PHASE_2_ONTOLOGY, max_to_sample=num_samples, + block_multiple_of_the_same_type=True, ) if num_samples else all_possible( @@ -218,6 +220,7 @@ def _make_put_in_curriculum( ontology=GAILA_PHASE_1_ONTOLOGY, chooser=PHASE1_CHOOSER_FACTORY(), max_to_sample=num_samples if num_samples else 20, + block_multiple_of_the_same_type=True, ), language_generator=language_generator, ) diff --git a/adam/curriculum/preposition_curriculum.py b/adam/curriculum/preposition_curriculum.py index d62857a84..9087f19e5 100644 --- a/adam/curriculum/preposition_curriculum.py +++ b/adam/curriculum/preposition_curriculum.py @@ -293,6 +293,7 @@ def _make_on_training( chooser=PHASE1_CHOOSER_FACTORY(), ontology=GAILA_PHASE_1_ONTOLOGY, max_to_sample=num_samples if num_samples else 5, + block_multiple_of_the_same_type=True, ) for figure in figures for ground in grounds @@ -338,6 +339,7 @@ def _make_beside_training( ontology=GAILA_PHASE_1_ONTOLOGY, chooser=PHASE1_CHOOSER_FACTORY(), max_to_sample=num_samples if 
num_samples else 5, + block_multiple_of_the_same_type=True, ) for figure in figures for ground in grounds @@ -378,6 +380,7 @@ def _make_under_training( is_distal=use_above_below, syntax_hints=[USE_ABOVE_BELOW] if use_above_below else [], ), + block_multiple_of_the_same_type=True, ontology=GAILA_PHASE_1_ONTOLOGY, chooser=PHASE1_CHOOSER_FACTORY(), max_to_sample=num_samples if num_samples else 5, @@ -424,6 +427,7 @@ def _make_over_training( ontology=GAILA_PHASE_1_ONTOLOGY, chooser=PHASE1_CHOOSER_FACTORY(), max_to_sample=num_samples if num_samples else 5, + block_multiple_of_the_same_type=True, ) for figure in figures for ground in grounds @@ -464,6 +468,7 @@ def _make_in_training( ontology=GAILA_PHASE_1_ONTOLOGY, chooser=PHASE1_CHOOSER_FACTORY(), max_to_sample=num_samples if num_samples else 5, + block_multiple_of_the_same_type=True, ) for figure in figures for ground in grounds @@ -509,6 +514,7 @@ def _make_behind_training( ontology=GAILA_PHASE_1_ONTOLOGY, chooser=PHASE1_CHOOSER_FACTORY(), max_to_sample=num_samples if num_samples else 5, + block_multiple_of_the_same_type=True, ) for figure in figures for ground in grounds @@ -556,6 +562,7 @@ def _make_in_front_training( ontology=GAILA_PHASE_1_ONTOLOGY, chooser=PHASE1_CHOOSER_FACTORY(), max_to_sample=num_samples if num_samples else 5, + block_multiple_of_the_same_type=True, ) for figure in figures for ground in grounds @@ -603,6 +610,7 @@ def _make_near_training( ontology=GAILA_PHASE_1_ONTOLOGY, chooser=PHASE1_CHOOSER_FACTORY(), max_to_sample=num_samples if num_samples else 5, + block_multiple_of_the_same_type=True, ) for figure in figures for ground in grounds @@ -649,6 +657,7 @@ def _make_far_training( ontology=GAILA_PHASE_1_ONTOLOGY, chooser=PHASE1_CHOOSER_FACTORY(), max_to_sample=num_samples if num_samples else 5, + block_multiple_of_the_same_type=True, ) for figure in figures for ground in grounds @@ -705,6 +714,7 @@ def _make_on_tests( chooser=PHASE1_CHOOSER_FACTORY(), ontology=GAILA_PHASE_1_ONTOLOGY, max_to_sample=num_samples if num_samples else 5, + block_multiple_of_the_same_type=True, ) for figure in figures for ground in grounds @@ -756,6 +766,7 @@ def _make_beside_tests( ontology=GAILA_PHASE_1_ONTOLOGY, chooser=PHASE1_CHOOSER_FACTORY(), max_to_sample=num_samples if num_samples else 5, + block_multiple_of_the_same_type=True, ) for figure in figures for ground in grounds @@ -803,6 +814,7 @@ def _make_under_tests( ontology=GAILA_PHASE_1_ONTOLOGY, chooser=PHASE1_CHOOSER_FACTORY(), max_to_sample=num_samples if num_samples else 5, + block_multiple_of_the_same_type=True, ) for figure in figures for ground in grounds @@ -853,6 +865,7 @@ def _make_over_tests( ontology=GAILA_PHASE_1_ONTOLOGY, chooser=PHASE1_CHOOSER_FACTORY(), max_to_sample=num_samples if num_samples else 5, + block_multiple_of_the_same_type=True, ) for figure in figures for ground in grounds @@ -907,6 +920,7 @@ def _make_in_tests( ontology=GAILA_PHASE_1_ONTOLOGY, chooser=PHASE1_CHOOSER_FACTORY(), max_to_sample=num_samples if num_samples else 5, + block_multiple_of_the_same_type=True, ) for figure in figures for ground in grounds @@ -956,6 +970,7 @@ def _make_behind_tests( ontology=GAILA_PHASE_1_ONTOLOGY, chooser=PHASE1_CHOOSER_FACTORY(), max_to_sample=num_samples if num_samples else 5, + block_multiple_of_the_same_type=True, ) for figure in figures for ground in grounds @@ -1008,6 +1023,7 @@ def _make_in_front_tests( ontology=GAILA_PHASE_1_ONTOLOGY, chooser=PHASE1_CHOOSER_FACTORY(), max_to_sample=num_samples if num_samples else 5, + block_multiple_of_the_same_type=True, ) 
for figure in figures for ground in grounds @@ -1059,6 +1075,7 @@ def _make_near_tests( ontology=GAILA_PHASE_1_ONTOLOGY, chooser=PHASE1_CHOOSER_FACTORY(), max_to_sample=num_samples if num_samples else 5, + block_multiple_of_the_same_type=True, ) for figure in figures for ground in grounds @@ -1109,6 +1126,7 @@ def _make_far_tests( ontology=GAILA_PHASE_1_ONTOLOGY, chooser=PHASE1_CHOOSER_FACTORY(), max_to_sample=num_samples if num_samples else 5, + block_multiple_of_the_same_type=True, ) for figure in figures for ground in grounds diff --git a/adam/curriculum/pursuit_curriculum.py b/adam/curriculum/pursuit_curriculum.py index 48ad81462..57d7fa72d 100644 --- a/adam/curriculum/pursuit_curriculum.py +++ b/adam/curriculum/pursuit_curriculum.py @@ -105,6 +105,7 @@ def make_simple_pursuit_curriculum( max_to_sample=num_instances - num_noise_instances, chooser=PHASE1_CHOOSER_FACTORY(), ontology=GAILA_PHASE_2_ONTOLOGY, + block_multiple_of_the_same_type=True, ), perception_generator=perception_generator, language_generator=language_generator, @@ -128,6 +129,7 @@ def make_simple_pursuit_curriculum( max_to_sample=num_noise_instances, chooser=PHASE1_CHOOSER_FACTORY(), ontology=GAILA_PHASE_2_ONTOLOGY, + block_multiple_of_the_same_type=True, ), perception_generator=perception_generator, language_generator=language_generator, diff --git a/adam/curriculum/verbs_with_dynamic_prepositions_curriculum.py b/adam/curriculum/verbs_with_dynamic_prepositions_curriculum.py index 4e8649fcc..68dc41548 100644 --- a/adam/curriculum/verbs_with_dynamic_prepositions_curriculum.py +++ b/adam/curriculum/verbs_with_dynamic_prepositions_curriculum.py @@ -2561,6 +2561,7 @@ def _make_push_with_prepositions( ontology=GAILA_PHASE_1_ONTOLOGY, chooser=PHASE1_CHOOSER_FACTORY(), max_to_sample=num_samples if num_samples else 5, + block_multiple_of_the_same_type=True, ) for template in to_in_templates ] @@ -2580,6 +2581,7 @@ def _make_push_with_prepositions( ontology=GAILA_PHASE_1_ONTOLOGY, chooser=PHASE1_CHOOSER_FACTORY(), max_to_sample=num_samples if num_samples else 5, + block_multiple_of_the_same_type=True, ) for is_right in BOOL_SET ] @@ -2599,6 +2601,7 @@ def _make_push_with_prepositions( ontology=GAILA_PHASE_1_ONTOLOGY, chooser=PHASE1_CHOOSER_FACTORY(), max_to_sample=num_samples if num_samples else 5, + block_multiple_of_the_same_type=True, ) for is_distal in BOOL_SET ] @@ -2619,6 +2622,7 @@ def _make_push_with_prepositions( ontology=GAILA_PHASE_1_ONTOLOGY, chooser=PHASE1_CHOOSER_FACTORY(), max_to_sample=num_samples if num_samples else 5, + block_multiple_of_the_same_type=True, ) for is_distal in BOOL_SET for is_in_front in BOOL_SET @@ -2640,6 +2644,7 @@ def _make_push_with_prepositions( ontology=GAILA_PHASE_1_ONTOLOGY, chooser=PHASE1_CHOOSER_FACTORY(), max_to_sample=num_samples if num_samples else 5, + block_multiple_of_the_same_type=True, ) for is_ending_proximal in BOOL_SET for is_towards in BOOL_SET @@ -2661,6 +2666,7 @@ def _make_push_with_prepositions( ontology=GAILA_PHASE_1_ONTOLOGY, chooser=PHASE1_CHOOSER_FACTORY(), max_to_sample=num_samples if num_samples else 5, + block_multiple_of_the_same_type=True, ) for is_distal in BOOL_SET ] @@ -2712,6 +2718,7 @@ def _make_go_with_prepositions( ontology=GAILA_PHASE_1_ONTOLOGY, chooser=PHASE1_CHOOSER_FACTORY(), max_to_sample=num_samples if num_samples else 5, + block_multiple_of_the_same_type=True, ) ] ), @@ -2723,6 +2730,7 @@ def _make_go_with_prepositions( ontology=GAILA_PHASE_1_ONTOLOGY, chooser=PHASE1_CHOOSER_FACTORY(), max_to_sample=num_samples if num_samples else 5, + 
block_multiple_of_the_same_type=True, ) ] ), @@ -2736,6 +2744,7 @@ def _make_go_with_prepositions( ontology=GAILA_PHASE_1_ONTOLOGY, chooser=PHASE1_CHOOSER_FACTORY(), max_to_sample=num_samples if num_samples else 5, + block_multiple_of_the_same_type=True, ) for is_right in BOOL_SET ] @@ -2754,6 +2763,7 @@ def _make_go_with_prepositions( ontology=GAILA_PHASE_1_ONTOLOGY, chooser=PHASE1_CHOOSER_FACTORY(), max_to_sample=num_samples if num_samples else 5, + block_multiple_of_the_same_type=True, ) for is_distal in BOOL_SET for is_behind in BOOL_SET @@ -2769,6 +2779,7 @@ def _make_go_with_prepositions( ontology=GAILA_PHASE_1_ONTOLOGY, chooser=PHASE1_CHOOSER_FACTORY(), max_to_sample=num_samples if num_samples else 5, + block_multiple_of_the_same_type=True, ) for is_distal in BOOL_SET ] @@ -2786,6 +2797,7 @@ def _make_go_with_prepositions( ontology=GAILA_PHASE_1_ONTOLOGY, chooser=PHASE1_CHOOSER_FACTORY(), max_to_sample=num_samples if num_samples else 5, + block_multiple_of_the_same_type=True, ) for is_distal in BOOL_SET ] @@ -2806,6 +2818,7 @@ def _make_go_with_prepositions( ontology=GAILA_PHASE_1_ONTOLOGY, chooser=PHASE1_CHOOSER_FACTORY(), max_to_sample=num_samples if num_samples else 5, + block_multiple_of_the_same_type=True, ) for is_behind in BOOL_SET for is_near_path in BOOL_SET @@ -2827,6 +2840,7 @@ def _make_go_with_prepositions( ontology=GAILA_PHASE_1_ONTOLOGY, chooser=PHASE1_CHOOSER_FACTORY(), max_to_sample=num_samples if num_samples else 5, + block_multiple_of_the_same_type=True, ) for is_over in BOOL_SET for is_near_goal in BOOL_SET @@ -2846,6 +2860,7 @@ def _make_go_with_prepositions( ontology=GAILA_PHASE_1_ONTOLOGY, chooser=PHASE1_CHOOSER_FACTORY(), max_to_sample=num_samples if num_samples else 5, + block_multiple_of_the_same_type=True, ) for is_toward in BOOL_SET for is_near_goal in BOOL_SET @@ -2865,6 +2880,7 @@ def _make_go_with_prepositions( ontology=GAILA_PHASE_1_ONTOLOGY, chooser=PHASE1_CHOOSER_FACTORY(), max_to_sample=num_samples if num_samples else 5, + block_multiple_of_the_same_type=True, ) for is_distal in BOOL_SET ] @@ -2908,6 +2924,7 @@ def _make_sit_with_prepositions( ontology=GAILA_PHASE_1_ONTOLOGY, chooser=PHASE1_CHOOSER_FACTORY(), max_to_sample=num_samples if num_samples else 5, + block_multiple_of_the_same_type=True, ) for syntax_hints in syntax_hints_options ] @@ -2922,6 +2939,7 @@ def _make_sit_with_prepositions( ontology=GAILA_PHASE_1_ONTOLOGY, chooser=PHASE1_CHOOSER_FACTORY(), max_to_sample=num_samples if num_samples else 5, + block_multiple_of_the_same_type=True, ) for syntax_hints in syntax_hints_options ] @@ -2982,6 +3000,7 @@ def _make_roll_with_prepositions( ontology=GAILA_PHASE_1_ONTOLOGY, chooser=PHASE1_CHOOSER_FACTORY(), max_to_sample=num_samples if num_samples else 5, + block_multiple_of_the_same_type=True, ) for is_right in BOOL_SET ] @@ -3001,6 +3020,7 @@ def _make_roll_with_prepositions( ontology=GAILA_PHASE_1_ONTOLOGY, chooser=PHASE1_CHOOSER_FACTORY(), max_to_sample=num_samples if num_samples else 5, + block_multiple_of_the_same_type=True, ) for is_distal in BOOL_SET for is_behind in BOOL_SET @@ -3019,6 +3039,7 @@ def _make_roll_with_prepositions( ontology=GAILA_PHASE_1_ONTOLOGY, chooser=PHASE1_CHOOSER_FACTORY(), max_to_sample=num_samples if num_samples else 5, + block_multiple_of_the_same_type=True, ) ] ), @@ -3036,6 +3057,7 @@ def _make_roll_with_prepositions( ontology=GAILA_PHASE_1_ONTOLOGY, chooser=PHASE1_CHOOSER_FACTORY(), max_to_sample=num_samples if num_samples else 5, + block_multiple_of_the_same_type=True, ) ] ), @@ -3054,6 +3076,7 @@ def 
_make_roll_with_prepositions( ontology=GAILA_PHASE_1_ONTOLOGY, chooser=PHASE1_CHOOSER_FACTORY(), max_to_sample=num_samples if num_samples else 5, + block_multiple_of_the_same_type=True, ) for is_right in BOOL_SET ] @@ -3074,6 +3097,7 @@ def _make_roll_with_prepositions( ontology=GAILA_PHASE_1_ONTOLOGY, chooser=PHASE1_CHOOSER_FACTORY(), max_to_sample=num_samples if num_samples else 5, + block_multiple_of_the_same_type=True, ) for is_distal in BOOL_SET for is_behind in BOOL_SET @@ -3094,6 +3118,7 @@ def _make_roll_with_prepositions( ontology=GAILA_PHASE_1_ONTOLOGY, chooser=PHASE1_CHOOSER_FACTORY(), max_to_sample=num_samples if num_samples else 5, + block_multiple_of_the_same_type=True, ) for is_over in BOOL_SET ] @@ -3113,6 +3138,7 @@ def _make_roll_with_prepositions( ontology=GAILA_PHASE_1_ONTOLOGY, chooser=PHASE1_CHOOSER_FACTORY(), max_to_sample=num_samples if num_samples else 5, + block_multiple_of_the_same_type=True, ) for is_over in BOOL_SET for surface in surfaces @@ -3132,6 +3158,7 @@ def _make_roll_with_prepositions( ontology=GAILA_PHASE_1_ONTOLOGY, chooser=PHASE1_CHOOSER_FACTORY(), max_to_sample=num_samples if num_samples else 5, + block_multiple_of_the_same_type=True, ) for is_toward in BOOL_SET for surface in surfaces @@ -3152,6 +3179,7 @@ def _make_roll_with_prepositions( ontology=GAILA_PHASE_1_ONTOLOGY, chooser=PHASE1_CHOOSER_FACTORY(), max_to_sample=num_samples if num_samples else 5, + block_multiple_of_the_same_type=True, ) for is_toward in BOOL_SET for surface in surfaces @@ -3167,6 +3195,7 @@ def _make_roll_with_prepositions( ontology=GAILA_PHASE_1_ONTOLOGY, chooser=PHASE1_CHOOSER_FACTORY(), max_to_sample=num_samples if num_samples else 5, + block_multiple_of_the_same_type=True, ) for surface in surfaces ] @@ -3185,6 +3214,7 @@ def _make_roll_with_prepositions( ontology=GAILA_PHASE_1_ONTOLOGY, chooser=PHASE1_CHOOSER_FACTORY(), max_to_sample=num_samples if num_samples else 5, + block_multiple_of_the_same_type=True, ) for surface in surfaces ] @@ -3216,6 +3246,7 @@ def _make_take_with_prepositions( ontology=GAILA_PHASE_1_ONTOLOGY, chooser=PHASE1_CHOOSER_FACTORY(), max_to_sample=num_samples if num_samples else 5, + block_multiple_of_the_same_type=True, ) ] ), @@ -3262,6 +3293,7 @@ def _make_fall_with_prepositions( ontology=GAILA_PHASE_1_ONTOLOGY, chooser=PHASE1_CHOOSER_FACTORY(), max_to_sample=num_samples if num_samples else 5, + block_multiple_of_the_same_type=True, ) for syntax_hints in syntax_hints_options ] @@ -3276,6 +3308,7 @@ def _make_fall_with_prepositions( ontology=GAILA_PHASE_1_ONTOLOGY, chooser=PHASE1_CHOOSER_FACTORY(), max_to_sample=num_samples if num_samples else 5, + block_multiple_of_the_same_type=True, ) for syntax_hints in syntax_hints_options ] @@ -3294,6 +3327,7 @@ def _make_fall_with_prepositions( ontology=GAILA_PHASE_1_ONTOLOGY, chooser=PHASE1_CHOOSER_FACTORY(), max_to_sample=num_samples if num_samples else 5, + block_multiple_of_the_same_type=True, ) for syntax_hints in syntax_hints_options for is_right in BOOL_SET @@ -3314,6 +3348,7 @@ def _make_fall_with_prepositions( ontology=GAILA_PHASE_1_ONTOLOGY, chooser=PHASE1_CHOOSER_FACTORY(), max_to_sample=num_samples if num_samples else 5, + block_multiple_of_the_same_type=True, ) for syntax_hints in syntax_hints_options for is_distal in BOOL_SET @@ -3334,6 +3369,7 @@ def _make_fall_with_prepositions( ontology=GAILA_PHASE_1_ONTOLOGY, chooser=PHASE1_CHOOSER_FACTORY(), max_to_sample=num_samples if num_samples else 5, + block_multiple_of_the_same_type=True, ) for syntax_hints in syntax_hints_options for is_toward 
in BOOL_SET @@ -3391,6 +3427,7 @@ def _make_put_with_prepositions( ontology=GAILA_PHASE_1_ONTOLOGY, chooser=PHASE1_CHOOSER_FACTORY(), max_to_sample=num_samples if num_samples else 5, + block_multiple_of_the_same_type=True, ) for template in on_in_templates ] @@ -3405,6 +3442,7 @@ def _make_put_with_prepositions( ontology=GAILA_PHASE_1_ONTOLOGY, chooser=PHASE1_CHOOSER_FACTORY(), max_to_sample=num_samples if num_samples else 5, + block_multiple_of_the_same_type=True, ) for speaker_addressee in special_agents ] @@ -3419,6 +3457,7 @@ def _make_put_with_prepositions( ontology=GAILA_PHASE_1_ONTOLOGY, chooser=PHASE1_CHOOSER_FACTORY(), max_to_sample=num_samples if num_samples else 5, + block_multiple_of_the_same_type=True, ) for is_right in BOOL_SET ] @@ -3433,6 +3472,7 @@ def _make_put_with_prepositions( ontology=GAILA_PHASE_1_ONTOLOGY, chooser=PHASE1_CHOOSER_FACTORY(), max_to_sample=num_samples if num_samples else 5, + block_multiple_of_the_same_type=True, ) for is_distal in BOOL_SET ] @@ -3452,6 +3492,7 @@ def _make_put_with_prepositions( ontology=GAILA_PHASE_1_ONTOLOGY, chooser=PHASE1_CHOOSER_FACTORY(), max_to_sample=num_samples if num_samples else 5, + block_multiple_of_the_same_type=True, ) for is_distal in BOOL_SET for is_in_front in BOOL_SET @@ -3514,6 +3555,7 @@ def _make_move_with_prepositions( ontology=GAILA_PHASE_1_ONTOLOGY, chooser=PHASE1_CHOOSER_FACTORY(), max_to_sample=num_samples if num_samples else 5, + block_multiple_of_the_same_type=True, ) for is_right in BOOL_SET ] @@ -3532,6 +3574,7 @@ def _make_move_with_prepositions( ontology=GAILA_PHASE_1_ONTOLOGY, chooser=PHASE1_CHOOSER_FACTORY(), max_to_sample=num_samples if num_samples else 5, + block_multiple_of_the_same_type=True, ) for is_distal in BOOL_SET for is_in_front in BOOL_SET @@ -3547,6 +3590,7 @@ def _make_move_with_prepositions( ontology=GAILA_PHASE_1_ONTOLOGY, chooser=PHASE1_CHOOSER_FACTORY(), max_to_sample=num_samples if num_samples else 5, + block_multiple_of_the_same_type=True, ) for is_distal in BOOL_SET ] @@ -3561,6 +3605,7 @@ def _make_move_with_prepositions( ontology=GAILA_PHASE_1_ONTOLOGY, chooser=PHASE1_CHOOSER_FACTORY(), max_to_sample=num_samples if num_samples else 5, + block_multiple_of_the_same_type=True, ) for is_toward in BOOL_SET ] @@ -3573,6 +3618,7 @@ def _make_move_with_prepositions( ontology=GAILA_PHASE_1_ONTOLOGY, chooser=PHASE1_CHOOSER_FACTORY(), max_to_sample=num_samples if num_samples else 5, + block_multiple_of_the_same_type=True, ) for template in situation_templates ] @@ -3591,6 +3637,7 @@ def _make_move_with_prepositions( ontology=GAILA_PHASE_1_ONTOLOGY, chooser=PHASE1_CHOOSER_FACTORY(), max_to_sample=num_samples if num_samples else 5, + block_multiple_of_the_same_type=True, ) for is_distal in BOOL_SET ] @@ -3609,6 +3656,7 @@ def _make_move_with_prepositions( ontology=GAILA_PHASE_1_ONTOLOGY, chooser=PHASE1_CHOOSER_FACTORY(), max_to_sample=num_samples if num_samples else 5, + block_multiple_of_the_same_type=True, ) for is_right in BOOL_SET ] @@ -3628,6 +3676,7 @@ def _make_move_with_prepositions( ontology=GAILA_PHASE_1_ONTOLOGY, chooser=PHASE1_CHOOSER_FACTORY(), max_to_sample=num_samples if num_samples else 5, + block_multiple_of_the_same_type=True, ) for is_distal in BOOL_SET for is_in_front in BOOL_SET @@ -3647,6 +3696,7 @@ def _make_move_with_prepositions( ontology=GAILA_PHASE_1_ONTOLOGY, chooser=PHASE1_CHOOSER_FACTORY(), max_to_sample=num_samples if num_samples else 5, + block_multiple_of_the_same_type=True, ) for is_toward in BOOL_SET ] @@ -3665,6 +3715,7 @@ def 
_make_move_with_prepositions( ontology=GAILA_PHASE_1_ONTOLOGY, chooser=PHASE1_CHOOSER_FACTORY(), max_to_sample=num_samples if num_samples else 5, + block_multiple_of_the_same_type=True, ) for is_distal in BOOL_SET ] @@ -3684,6 +3735,7 @@ def _make_move_with_prepositions( ontology=GAILA_PHASE_1_ONTOLOGY, chooser=PHASE1_CHOOSER_FACTORY(), max_to_sample=num_samples if num_samples else 5, + block_multiple_of_the_same_type=True, ) for is_distal in BOOL_SET ] @@ -3732,6 +3784,7 @@ def _make_throw_with_prepositions( ontology=GAILA_PHASE_1_ONTOLOGY, chooser=PHASE1_CHOOSER_FACTORY(), max_to_sample=num_samples if num_samples else 5, + block_multiple_of_the_same_type=True, ) for template in situation_templates ] @@ -3746,6 +3799,7 @@ def _make_throw_with_prepositions( ontology=GAILA_PHASE_1_ONTOLOGY, chooser=PHASE1_CHOOSER_FACTORY(), max_to_sample=num_samples if num_samples else 5, + block_multiple_of_the_same_type=True, ) for is_right in BOOL_SET ] @@ -3765,6 +3819,7 @@ def _make_throw_with_prepositions( ontology=GAILA_PHASE_1_ONTOLOGY, chooser=PHASE1_CHOOSER_FACTORY(), max_to_sample=num_samples if num_samples else 5, + block_multiple_of_the_same_type=True, ) for is_distal in BOOL_SET for is_in_front in BOOL_SET @@ -3780,6 +3835,7 @@ def _make_throw_with_prepositions( ontology=GAILA_PHASE_1_ONTOLOGY, chooser=PHASE1_CHOOSER_FACTORY(), max_to_sample=num_samples if num_samples else 5, + block_multiple_of_the_same_type=True, ) for is_distal in BOOL_SET ] @@ -3798,6 +3854,7 @@ def _make_throw_with_prepositions( ontology=GAILA_PHASE_1_ONTOLOGY, chooser=PHASE1_CHOOSER_FACTORY(), max_to_sample=num_samples if num_samples else 5, + block_multiple_of_the_same_type=True, ) ] ), @@ -3816,6 +3873,7 @@ def _make_throw_with_prepositions( ontology=GAILA_PHASE_1_ONTOLOGY, chooser=PHASE1_CHOOSER_FACTORY(), max_to_sample=num_samples if num_samples else 5, + block_multiple_of_the_same_type=True, ) for is_distal in BOOL_SET ] @@ -3834,6 +3892,7 @@ def _make_throw_with_prepositions( ontology=GAILA_PHASE_1_ONTOLOGY, chooser=PHASE1_CHOOSER_FACTORY(), max_to_sample=num_samples if num_samples else 5, + block_multiple_of_the_same_type=True, ) for is_towards in BOOL_SET ] @@ -3885,6 +3944,7 @@ def _make_jump_with_prepositions( ontology=GAILA_PHASE_1_ONTOLOGY, chooser=PHASE1_CHOOSER_FACTORY(), max_to_sample=num_samples if num_samples else 5, + block_multiple_of_the_same_type=True, ) for template in templates ] @@ -3899,6 +3959,7 @@ def _make_jump_with_prepositions( ontology=GAILA_PHASE_1_ONTOLOGY, chooser=PHASE1_CHOOSER_FACTORY(), max_to_sample=num_samples if num_samples else 5, + block_multiple_of_the_same_type=True, ) for is_right in BOOL_SET ] @@ -3917,6 +3978,7 @@ def _make_jump_with_prepositions( ontology=GAILA_PHASE_1_ONTOLOGY, chooser=PHASE1_CHOOSER_FACTORY(), max_to_sample=num_samples if num_samples else 5, + block_multiple_of_the_same_type=True, ) for is_distal in BOOL_SET for is_in_front in BOOL_SET @@ -3961,6 +4023,7 @@ def _make_fly_with_prepositions( ontology=GAILA_PHASE_1_ONTOLOGY, chooser=PHASE1_CHOOSER_FACTORY(), max_to_sample=num_samples if num_samples else 5, + block_multiple_of_the_same_type=True, ) ] ), @@ -3974,6 +4037,7 @@ def _make_fly_with_prepositions( ontology=GAILA_PHASE_1_ONTOLOGY, chooser=PHASE1_CHOOSER_FACTORY(), max_to_sample=num_samples if num_samples else 5, + block_multiple_of_the_same_type=True, ) for is_right in BOOL_SET ] @@ -3992,6 +4056,7 @@ def _make_fly_with_prepositions( ontology=GAILA_PHASE_1_ONTOLOGY, chooser=PHASE1_CHOOSER_FACTORY(), max_to_sample=num_samples if num_samples else 5, + 
block_multiple_of_the_same_type=True, ) for is_distal in BOOL_SET for is_in_front in BOOL_SET @@ -4005,6 +4070,7 @@ def _make_fly_with_prepositions( ontology=GAILA_PHASE_1_ONTOLOGY, chooser=PHASE1_CHOOSER_FACTORY(), max_to_sample=num_samples if num_samples else 5, + block_multiple_of_the_same_type=True, ) ] ), @@ -4016,6 +4082,7 @@ def _make_fly_with_prepositions( ontology=GAILA_PHASE_1_ONTOLOGY, chooser=PHASE1_CHOOSER_FACTORY(), max_to_sample=num_samples if num_samples else 5, + block_multiple_of_the_same_type=True, ) ] ), @@ -4029,6 +4096,7 @@ def _make_fly_with_prepositions( ontology=GAILA_PHASE_1_ONTOLOGY, chooser=PHASE1_CHOOSER_FACTORY(), max_to_sample=num_samples if num_samples else 5, + block_multiple_of_the_same_type=True, ) for is_toward in BOOL_SET ] @@ -4041,6 +4109,7 @@ def _make_fly_with_prepositions( ontology=GAILA_PHASE_1_ONTOLOGY, chooser=PHASE1_CHOOSER_FACTORY(), max_to_sample=num_samples if num_samples else 5, + block_multiple_of_the_same_type=True, ) ] ), @@ -4079,6 +4148,7 @@ def _make_come_with_prepositions( ontology=GAILA_PHASE_1_ONTOLOGY, chooser=PHASE1_CHOOSER_FACTORY(), max_to_sample=num_samples if num_samples else 5, + block_multiple_of_the_same_type=True, ) ] ) diff --git a/adam/experiment/experiment_utils.py b/adam/experiment/experiment_utils.py index c18a231a9..019c8a6ac 100644 --- a/adam/experiment/experiment_utils.py +++ b/adam/experiment/experiment_utils.py @@ -178,6 +178,7 @@ def _make_sit_on_curriculum( ontology=GAILA_PHASE_1_ONTOLOGY, chooser=PHASE1_CHOOSER_FACTORY(), max_to_sample=num_samples if num_samples else 25, + block_multiple_of_the_same_type=True, ), sampled( make_sit_transitive( @@ -186,6 +187,7 @@ def _make_sit_on_curriculum( ontology=GAILA_PHASE_1_ONTOLOGY, chooser=PHASE1_CHOOSER_FACTORY(), max_to_sample=num_samples if num_samples else 25, + block_multiple_of_the_same_type=True, ), ] ), diff --git a/adam/situation/templates/phase1_templates.py b/adam/situation/templates/phase1_templates.py index 99d8d3e19..65e7d3c2e 100644 --- a/adam/situation/templates/phase1_templates.py +++ b/adam/situation/templates/phase1_templates.py @@ -321,7 +321,7 @@ def sampled( chooser: SequenceChooser, max_to_sample: int, default_addressee_node: OntologyNode = LEARNER, - block_multiple_of_the_same_type: bool = True, + block_multiple_of_the_same_type: bool, ) -> Iterable[HighLevelSemanticsSituation]: """ Gets *max_to_sample* instantiations of *situation_template* with *ontology* diff --git a/tests/learner/object_learner_test.py b/tests/learner/object_learner_test.py index 01faf1776..3b745dbf0 100644 --- a/tests/learner/object_learner_test.py +++ b/tests/learner/object_learner_test.py @@ -118,6 +118,7 @@ def run_subset_learner_for_object( chooser=PHASE1_TEST_CHOOSER_FACTORY(), ontology=GAILA_PHASE_1_ONTOLOGY, max_to_sample=1, + block_multiple_of_the_same_type=True, ), language_generator=language_generator, ) diff --git a/tests/learner/pursuit_preposition_learner_test.py b/tests/learner/pursuit_preposition_learner_test.py index fd0edf516..bfefac7f5 100644 --- a/tests/learner/pursuit_preposition_learner_test.py +++ b/tests/learner/pursuit_preposition_learner_test.py @@ -60,6 +60,7 @@ def test_pursuit_preposition_on_learner(language_mode): chooser=PHASE1_CHOOSER_FACTORY(), ontology=GAILA_PHASE_1_ONTOLOGY, max_to_sample=10, + block_multiple_of_the_same_type=True, ), language_generator=language_generator, ) @@ -70,6 +71,7 @@ def test_pursuit_preposition_on_learner(language_mode): chooser=PHASE1_CHOOSER_FACTORY(), ontology=GAILA_PHASE_1_ONTOLOGY, max_to_sample=1, + 
block_multiple_of_the_same_type=True, ), language_generator=language_generator, ) @@ -120,6 +122,7 @@ def test_pursuit_preposition_beside_learner(language_mode): chooser=PHASE1_CHOOSER_FACTORY(), ontology=GAILA_PHASE_1_ONTOLOGY, max_to_sample=10, + block_multiple_of_the_same_type=True, ), language_generator=language_generator, ) @@ -132,6 +135,7 @@ def test_pursuit_preposition_beside_learner(language_mode): chooser=PHASE1_CHOOSER_FACTORY(), ontology=GAILA_PHASE_1_ONTOLOGY, max_to_sample=1, + block_multiple_of_the_same_type=True, ), language_generator=language_generator, ) @@ -182,6 +186,7 @@ def test_pursuit_preposition_under_learner(language_mode): chooser=PHASE1_CHOOSER_FACTORY(), ontology=GAILA_PHASE_1_ONTOLOGY, max_to_sample=10, + block_multiple_of_the_same_type=True, ), language_generator=language_generator, ) @@ -194,6 +199,7 @@ def test_pursuit_preposition_under_learner(language_mode): chooser=PHASE1_CHOOSER_FACTORY(), ontology=GAILA_PHASE_1_ONTOLOGY, max_to_sample=1, + block_multiple_of_the_same_type=True, ), language_generator=language_generator, ) @@ -242,6 +248,7 @@ def test_pursuit_preposition_over_learner(language_mode): chooser=PHASE1_CHOOSER_FACTORY(), ontology=GAILA_PHASE_1_ONTOLOGY, max_to_sample=10, + block_multiple_of_the_same_type=True, ), language_generator=language_generator, ) @@ -254,6 +261,7 @@ def test_pursuit_preposition_over_learner(language_mode): chooser=PHASE1_CHOOSER_FACTORY(), ontology=GAILA_PHASE_1_ONTOLOGY, max_to_sample=1, + block_multiple_of_the_same_type=True, ), language_generator=language_generator, ) @@ -302,6 +310,7 @@ def test_pursuit_preposition_in_learner(language_mode): chooser=PHASE1_CHOOSER_FACTORY(), ontology=GAILA_PHASE_1_ONTOLOGY, max_to_sample=10, + block_multiple_of_the_same_type=True, ), language_generator=language_generator, ) @@ -312,6 +321,7 @@ def test_pursuit_preposition_in_learner(language_mode): chooser=PHASE1_CHOOSER_FACTORY(), ontology=GAILA_PHASE_1_ONTOLOGY, max_to_sample=1, + block_multiple_of_the_same_type=True, ), language_generator=language_generator, ) @@ -366,6 +376,7 @@ def test_pursuit_preposition_behind_learner(language_mode): chooser=PHASE1_CHOOSER_FACTORY(), ontology=GAILA_PHASE_1_ONTOLOGY, max_to_sample=10, + block_multiple_of_the_same_type=True, ), language_generator=language_generator, ) @@ -383,6 +394,7 @@ def test_pursuit_preposition_behind_learner(language_mode): chooser=PHASE1_CHOOSER_FACTORY(), ontology=GAILA_PHASE_1_ONTOLOGY, max_to_sample=1, + block_multiple_of_the_same_type=True, ), language_generator=language_generator, ) @@ -437,6 +449,7 @@ def test_pursuit_preposition_in_front_learner(language_mode): chooser=PHASE1_CHOOSER_FACTORY(), ontology=GAILA_PHASE_1_ONTOLOGY, max_to_sample=10, + block_multiple_of_the_same_type=True, ), language_generator=language_generator, ) @@ -454,6 +467,7 @@ def test_pursuit_preposition_in_front_learner(language_mode): chooser=PHASE1_CHOOSER_FACTORY(), ontology=GAILA_PHASE_1_ONTOLOGY, max_to_sample=1, + block_multiple_of_the_same_type=True, ), language_generator=language_generator, ) @@ -495,6 +509,7 @@ def test_pursuit_preposition_has_learner(language_mode): chooser=PHASE1_CHOOSER_FACTORY(), ontology=GAILA_PHASE_1_ONTOLOGY, max_to_sample=2, + block_multiple_of_the_same_type=True, ), language_generator=language_generator, ) @@ -506,6 +521,7 @@ def test_pursuit_preposition_has_learner(language_mode): chooser=PHASE1_CHOOSER_FACTORY(), ontology=GAILA_PHASE_1_ONTOLOGY, max_to_sample=1, + block_multiple_of_the_same_type=True, ), language_generator=language_generator, ) diff --git 
a/tests/learner/subset_attribute_learner_test.py b/tests/learner/subset_attribute_learner_test.py index 7c413363f..a41402bb7 100644 --- a/tests/learner/subset_attribute_learner_test.py +++ b/tests/learner/subset_attribute_learner_test.py @@ -114,6 +114,7 @@ def test_subset_color_attribute( chooser=PHASE1_CHOOSER_FACTORY(), ontology=GAILA_PHASE_1_ONTOLOGY, max_to_sample=2, + block_multiple_of_the_same_type=True, ) for template in templates ] @@ -129,6 +130,7 @@ def test_subset_color_attribute( chooser=PHASE1_TEST_CHOOSER_FACTORY(), ontology=GAILA_PHASE_1_ONTOLOGY, max_to_sample=1, + block_multiple_of_the_same_type=True, ), language_generator=language_generator, ) @@ -189,6 +191,7 @@ def test_subset_my_attribute_learner_integrated(language_mode, learner): ontology=GAILA_PHASE_1_ONTOLOGY, chooser=PHASE1_CHOOSER_FACTORY(), max_to_sample=5, + block_multiple_of_the_same_type=True, ) for person in [MOM, DAD, BABY] ), @@ -208,6 +211,7 @@ def test_subset_my_attribute_learner_integrated(language_mode, learner): inanimate_object, syntax_hints=[IGNORE_HAS_AS_VERB], ), + block_multiple_of_the_same_type=True, ontology=GAILA_PHASE_1_ONTOLOGY, chooser=PHASE1_TEST_CHOOSER_FACTORY(), max_to_sample=1, @@ -278,6 +282,7 @@ def test_your_attribute_learner(language_mode, learner): background=[person_0], syntax_hints=[IGNORE_HAS_AS_VERB], ), + block_multiple_of_the_same_type=True, ontology=GAILA_PHASE_1_ONTOLOGY, chooser=PHASE1_CHOOSER_FACTORY(), max_to_sample=5, @@ -294,6 +299,7 @@ def test_your_attribute_learner(language_mode, learner): background=[person_0], syntax_hints=[IGNORE_HAS_AS_VERB], ), + block_multiple_of_the_same_type=True, ontology=GAILA_PHASE_1_ONTOLOGY, chooser=PHASE1_TEST_CHOOSER_FACTORY(), max_to_sample=1, diff --git a/tests/learner/subset_preposition_learner_test.py b/tests/learner/subset_preposition_learner_test.py index 4db6949f9..ed030d23f 100644 --- a/tests/learner/subset_preposition_learner_test.py +++ b/tests/learner/subset_preposition_learner_test.py @@ -66,6 +66,7 @@ def run_preposition_test(learner, situation_template, language_generator): chooser=PHASE1_CHOOSER_FACTORY(), ontology=GAILA_PHASE_1_ONTOLOGY, max_to_sample=2, + block_multiple_of_the_same_type=True, ), language_generator=language_generator, ) @@ -76,6 +77,7 @@ def run_preposition_test(learner, situation_template, language_generator): chooser=PHASE1_TEST_CHOOSER_FACTORY(), ontology=GAILA_PHASE_1_ONTOLOGY, max_to_sample=1, + block_multiple_of_the_same_type=True, ), language_generator=language_generator, ) @@ -297,6 +299,7 @@ def test_subset_preposition_has(language_mode, learner): chooser=PHASE1_CHOOSER_FACTORY(), ontology=GAILA_PHASE_1_ONTOLOGY, max_to_sample=1, + block_multiple_of_the_same_type=True, ), ).instances() ) @@ -309,6 +312,7 @@ def test_subset_preposition_has(language_mode, learner): chooser=PHASE1_CHOOSER_FACTORY(), ontology=GAILA_PHASE_1_ONTOLOGY, max_to_sample=1, + block_multiple_of_the_same_type=True, ), ).instances() ) @@ -320,6 +324,7 @@ def test_subset_preposition_has(language_mode, learner): chooser=PHASE1_CHOOSER_FACTORY(), ontology=GAILA_PHASE_1_ONTOLOGY, max_to_sample=1, + block_multiple_of_the_same_type=True, ), language_generator=language_generator, ) diff --git a/tests/learner/subset_verb_learner_test.py b/tests/learner/subset_verb_learner_test.py index 5ced34650..25db45034 100644 --- a/tests/learner/subset_verb_learner_test.py +++ b/tests/learner/subset_verb_learner_test.py @@ -106,6 +106,7 @@ def run_verb_test(learner, situation_template, language_generator): max_to_sample=10, 
ontology=GAILA_PHASE_1_ONTOLOGY, chooser=PHASE1_CHOOSER_FACTORY(), + block_multiple_of_the_same_type=True, ) ] ), @@ -120,6 +121,7 @@ def run_verb_test(learner, situation_template, language_generator): max_to_sample=1, ontology=GAILA_PHASE_1_ONTOLOGY, chooser=PHASE1_TEST_CHOOSER_FACTORY(), + block_multiple_of_the_same_type=True, ) ] ), @@ -503,6 +505,7 @@ def test_throw_animacy(language_mode, learner): max_to_sample=10, ontology=GAILA_PHASE_1_ONTOLOGY, chooser=PHASE1_CHOOSER_FACTORY(), + block_multiple_of_the_same_type=True, ) for situation_template in make_throw_animacy_templates(None) ] @@ -519,6 +522,7 @@ def test_throw_animacy(language_mode, learner): max_to_sample=1, ontology=GAILA_PHASE_1_ONTOLOGY, chooser=PHASE1_CHOOSER_FACTORY(), + block_multiple_of_the_same_type=True, ) for situation_template in make_throw_animacy_templates(None) ] diff --git a/tests/learner/test_object_recognizer.py b/tests/learner/test_object_recognizer.py index f24942318..1e27a3277 100644 --- a/tests/learner/test_object_recognizer.py +++ b/tests/learner/test_object_recognizer.py @@ -139,6 +139,7 @@ def test_recognize_in_transfer_of_possession(language_mode): max_to_sample=1, chooser=PHASE1_CHOOSER_FACTORY(), ontology=GAILA_PHASE_1_ONTOLOGY, + block_multiple_of_the_same_type=True, ), ).instances() ) diff --git a/tests/situation/templates/phase1_template_test.py b/tests/situation/templates/phase1_template_test.py index 8a715b7a9..47149da85 100644 --- a/tests/situation/templates/phase1_template_test.py +++ b/tests/situation/templates/phase1_template_test.py @@ -160,6 +160,7 @@ def test_learner_as_default_addressee(): ontology=GAILA_PHASE_1_ONTOLOGY, chooser=RandomChooser.for_seed(0), max_to_sample=1, + block_multiple_of_the_same_type=True, ) ) @@ -169,6 +170,7 @@ def test_learner_as_default_addressee(): ontology=GAILA_PHASE_1_ONTOLOGY, chooser=RandomChooser.for_seed(0), max_to_sample=1, + block_multiple_of_the_same_type=True, ) ) @@ -178,6 +180,7 @@ def test_learner_as_default_addressee(): ontology=GAILA_PHASE_1_ONTOLOGY, chooser=RandomChooser.for_seed(0), max_to_sample=1, + block_multiple_of_the_same_type=True, ) ) @@ -233,6 +236,7 @@ def test_before_after_relations_asserted(): ontology=GAILA_PHASE_1_ONTOLOGY, chooser=RandomChooser.for_seed(0), max_to_sample=1, + block_multiple_of_the_same_type=True, ) ) From 460a2ad8275bea2e91106f2b4e3d0e953db0c03c Mon Sep 17 00:00:00 2001 From: paynesa Date: Thu, 13 Aug 2020 17:58:42 -0400 Subject: [PATCH 28/28] re-enable relative-to-learner size --- adam/ontology/phase1_ontology.py | 1 + ...semantics_situation_to_developmental_primitive_perception.py | 2 +- tests/curriculum/phase1_curriculum_test.py | 2 +- 3 files changed, 3 insertions(+), 2 deletions(-) diff --git a/adam/ontology/phase1_ontology.py b/adam/ontology/phase1_ontology.py index 382d64827..16591df64 100644 --- a/adam/ontology/phase1_ontology.py +++ b/adam/ontology/phase1_ontology.py @@ -679,6 +679,7 @@ def _far_region_factory( https://github.com/isi-vista/adam/issues/70 """ subtype(SMALLER_THAN_SAME_TYPE, SIZE_RELATION) +subtype(SMALLER_THAN, SIZE_RELATION) bigger_than = make_opposite_dsl_relation( # pylint:disable=invalid-name BIGGER_THAN, opposite_type=SMALLER_THAN diff --git a/adam/perception/high_level_semantics_situation_to_developmental_primitive_perception.py b/adam/perception/high_level_semantics_situation_to_developmental_primitive_perception.py index 4d285d0c4..3141133e6 100644 --- a/adam/perception/high_level_semantics_situation_to_developmental_primitive_perception.py +++ 
b/adam/perception/high_level_semantics_situation_to_developmental_primitive_perception.py @@ -305,7 +305,7 @@ def _real_do(self) -> PerceptualRepresentation[DevelopmentalPrimitivePerceptionF # Once all the objects and relations are perceived, determine their colors. self._perceive_colors() # Handle implicit size relations - # self._perceive_size_relative_to_learner() + self._perceive_size_relative_to_learner() self._perceive_all_relative_size() # for now, we assume that actions do not alter the relationship of objects axes diff --git a/tests/curriculum/phase1_curriculum_test.py b/tests/curriculum/phase1_curriculum_test.py index 08c9d4de8..837d4d3ca 100644 --- a/tests/curriculum/phase1_curriculum_test.py +++ b/tests/curriculum/phase1_curriculum_test.py @@ -50,7 +50,7 @@ def curriculum_test(curriculum: Phase1InstanceGroup) -> None: # we don't need to do anything # the curriculum may be dynamically generated # so we just want to test we can instantiate it - print(_) + pass @pytest.mark.parametrize(