% bibtex.bib
@article{music2017control,
abstract = {The interaction between humans and robot teams is highly relevant in many application domains, for example in collaborative manufacturing, search and rescue, and logistics. It is well-known that humans and robots have complementary capabilities: Humans are excellent in reasoning and planning in unstructured environments, while robots are very good in performing tasks repetitively and precisely. In consequence, one of the key research questions is how to combine human and robot team decision making and task execution},
author = {Musi{\'c}, Selma and Hirche, Sandra},
journal = {Annual Reviews in Control},
pages = {342--354},
year = {2017},
publisher = {Elsevier},
title = {Control sharing in human-robot team interaction},
volume = {44}
}
@inproceedings{cencen2015characterizing,
abstract = {The industry is working towards manufacturing systems consisting of a blend of humans and robots. We look at the development of these systems in the context of Small and Medium Enterprises (SME). Also, it is believed that industrial robots with collaboration capabilities with humans will play a crucial role in the change towards reconfigurable and flexible manufacturing systems. Collaboration and teaming are natural social skills of humans. However, little is known about robots and their capabilities in working efficiently with these},
author = {Cencen, Argun and Verlinden, Jouke and Geraedts, Jo},
booktitle = {International Conference on Social Robotics},
organization = {Springer},
pages = {135--144},
year = {2015},
title = {Characterizing the state of the art of human-robot coproduction},
}
@inproceedings{pfrommer2013pprs,
abstract = {To model increasingly adaptive production systems, skills are used to describe generic capabilities of the system components. In this paper, the authors extend the well-known division of production entities into product, process, and resource (PPR) with a skill definition. There are two main advantages for this approach: First, using PPR for the skill definition allows easy integration into existing models and tools. Second, there is a natural tendency to define very generic skills to capture all possible use cases. But at some point},
author = {Pfrommer, Julius and Schleipen, Miriam and Beyerer, J{\"u}rgen},
booktitle = {2013 IEEE 18th Conference on Emerging Technologies \& Factory Automation (ETFA)},
organization = {IEEE},
pages = {1--4},
year = {2013},
title = {PPRS: Production skills and their relation to product, process, and resource},
}
@article{backhaus2017digital,
abstract = {The ability to enable a fast modification and system-change, in order to fulfil quickly changing market needs, is one of the essential requirements of future production systems. Against this background, the central objective of this paper is the discussion of a new concept to simplify the application of task-oriented programming for assembly systems. For this purpose, a generic and comprehensible concept is used for the modeling of resources, processes and products. The core aspect is a method for the definition of multi-vendor skills},
author = {Backhaus, Julian and Reinhart, Gunther},
journal = {Journal of Intelligent Manufacturing},
number = {8},
pages = {1787--1800},
year = {2017},
publisher = {Springer},
title = {Digital description of products, processes and resources for task-oriented programming of assembly systems},
volume = {28}
}
@article{backhaus2015adaptive,
abstract = {The ability to enable a fast modification and system-change, in order to fulfil quickly changing market needs, is one of the essential requirements of future production systems. Two basic steps for an adaption of an automated production system to a new task are the reconfiguration and the reprogramming. Against this background, the central objective of this paper is the discussion of a new concept to simplify the application of task-oriented programming for assembly systems. Only abstract tasks are described in the task-oriented},
author = {Backhaus, J and Reinhart, G},
journal = {Procedia CIRP},
pages = {544--549},
year = {2015},
publisher = {Elsevier},
title = {Adaptive and device independent planning module for task-oriented programming of assembly systems},
volume = {33}
}
@article{pedersen2016robot,
abstract = {Due to a general shift in manufacturing paradigm from mass production towards mass customization, reconfigurable automation technologies, such as robots, are required. However, current industrial robot solutions are notoriously difficult to program, leading to high changeover times when new products are introduced by manufacturers. In order to compete on global markets, the factories of tomorrow need complete production lines, including automation technologies that can effortlessly be reconfigured or repurposed, when},
author = {Pedersen, Mikkel Rath and Nalpantidis, Lazaros and Andersen, Rasmus Skovgaard and Schou, Casper and B{\o}gh, Simon and Kr{\"u}ger, Volker and Madsen, Ole},
journal = {Robotics and Computer-Integrated Manufacturing},
pages = {282--291},
year = {2016},
publisher = {Elsevier},
title = {Robot skills for manufacturing: From concept to industrial deployment},
volume = {37}
}
@inproceedings{andersen2014definition,
abstract = {We propose a hierarchical action framework which facilitates easy and intuitive robot instruction, allowing non-experts to instruct and use industrial robots. The framework is based on flexible, generic and hardware-independent robot Skills, which are executed through the use of a Robot Virtual Machine. We demonstrate the feasibility of our approach through case studies of real industrial tasks which are not automated today, due to the high cost of reconfiguration.},
author = {Andersen, Rasmus Hasle and Solund, Thomas and Hallam, John},
booktitle = {ISR/Robotik 2014; 41st International Symposium on Robotics},
organization = {VDE},
pages = {1--7},
year = {2014},
title = {Definition and initial case-based evaluation of hardware-independent robot skills for industrial robotic co-workers},
}
@article{fechter2018integrated,
abstract = {The design of human-robot-collaborative workplaces is a challenging task, whose outcome is highly dependent on the assembly planners knowledge and experience, as usually only a small fraction of the design space is considered. This often results in unappropriated workplace designs with process-related and economical drawbacks. This paper outlines an approach to a collaborative workplace design tool-chain considering different strengths of robot and human, starting from assembly group CAD model data input to an ontology based},
author = {Fechter, Manuel and Seeber, Carsten and Chen, Shengjian},
journal = {Procedia CIRP},
pages = {39--44},
year = {2018},
publisher = {Elsevier},
title = {Integrated process planning and resource allocation for collaborative robot workplace design},
volume = {72}
}
@inproceedings{schleipen2014automationml,
title={AutomationML to describe skills of production plants based on the PPR concept},
author={Schleipen, Miriam and Pfrommer, J and Aleksandrov, K and Stogl, D and Escaida, S and Beyerer, J and Hein, B},
booktitle={3rd AutomationML user conference},
year={2014}
}
@article{stenmark2015knowledge,
abstract = {When robots are working in dynamic environments, close to humans lacking extensive knowledge of robotics, there is a strong need to simplify the user interaction and make the system execute as autonomously as possible, as long as it is feasible. For industrial robots working side-by-side with humans in manufacturing industry, AI systems are necessary to lower the demand on programming time and system integration expertise. Only by building a system with appropriate knowledge and reasoning services can one simplify the robot},
author = {Stenmark, Maj and Malec, Jacek},
journal = {Robotics and Computer-Integrated Manufacturing},
pages = {56--67},
year = {2015},
publisher = {Elsevier},
title = {Knowledge-based instruction of manipulation tasks for industrial robotics},
volume = {33}
}
@inproceedings{zimmermann2019skill,
abstract = {The configuration of current automated production systems is complex and therefore time consuming while the market demands an easy setup and adaptability due to smaller batch sizes and volatile markets. While there are different concepts in research on how to simplify the engineering process by using generic skills or capabilities of devices, run-time control is still achieved with proprietary communication protocols and commands. The concept in this paper uses skills not only in the phase of engineering but also consequently for direct and},
author = {Zimmermann, Patrick and Axmann, Etienne and Brandenbourger, Benjamin and Dorofeev, Kirill and Mankowski, Andr{\'e} and Zanini, Paulo},
booktitle = {2019 24th IEEE International Conference on Emerging Technologies and Factory Automation (ETFA)},
organization = {IEEE},
pages = {1101--1108},
year = {2019},
title = {Skill-based engineering and control on field-device-level with {OPC UA}},
}
@inproceedings{zhang2009autonomous,
abstract = {General purpose service robots are expected to deal with many different tasks in unknown environments. The number of possible tasks and changing situations prevent developers from writing control programs for all tasks and possible situations. Complex robot tasks are thus accomplished by sequential execution of less complex robot actions that are triggered and configured by a task planner. The question of the appropriate abstraction level of robot actions is still being researched and not discussed conclusively. In this paper, we address},
author = {Zhang, Jianwei and others},
booktitle = {2009 IEEE/RSJ International Conference on Intelligent Robots and Systems},
organization = {IEEE},
pages = {1999--2004},
year = {2009},
title = {Autonomous planning for mobile manipulation services based on multi-level robot skills},
}
@inproceedings{harcuba2015ontologies,
abstract = {This paper presents a design of ontology aimed at the optimization of production ramp-up of higly complex and customized products, developed within the European project ARUM. The ARUM solution consists of a large number of software components including schedulers, planners, legacy data transformation services and end-user client interfaces, which are integrated via the Enterprice Service Bus. The semantic interoperability is achieved by providing the common knowledge-based model defined as OWL ontology, which describes},
author = {Harcuba, Ond{\v{r}}ej and Vrba, Pavel},
booktitle = {2015 IEEE 20th Conference on Emerging Technologies \& Factory Automation (ETFA)},
organization = {IEEE},
pages = {1--8},
year = {2015},
title = {Ontologies for flexible production systems},
}
@inproceedings{hildebrandt2017semantic,
abstract = {Cooperation and collaboration of technical systems in different application contexts is one of the main targets of the vision of “Industrie 4.0”(I40). In order to bring this vision to industrial practice, it is necessary to define a semantic basis for information exchange between systems. This paper provides the results of a project that focused on semantic modeling based on standards relevant in the I40 context. Particularly, a meta model for modeling mechatronic systems as well as a meta model for modeling the systems characteristics is},
author = {Hildebrandt, Constantin and Scholz, Andr{\'e} and Fay, Alexander and Schr{\"o}der, Tizian and Hadlich, Thomas and Diedrich, Christian and Dubovy, Martin and Eck, Christian and Wiegand, Ralf},
booktitle = {2017 22nd IEEE International Conference on Emerging Technologies and Factory Automation (ETFA)},
organization = {IEEE},
pages = {1--8},
year = {2017},
title = {Semantic modeling for collaboration and cooperation of systems in the production domain},
}
@inproceedings{kattepur2018knowledge,
abstract = {Robotic automation has made significant inroads into industrial manufacturing and supply chains. With Industry 4.0 requirements proposing further autonomy to robotic participants, it is necessary to reason about robotic tasks within a knowledge dependent software framework. In this work, we model robotic automation tasks using hierarchical decomposition models, that are used to extract action plans to satisfy end goals. By abstracting components as intelligent agents that have perception, action, goal and},
author = {Kattepur, Ajay and Dey, Sounak and Balamuralidhar, P},
booktitle = {IECON 2018-44th Annual Conference of the IEEE Industrial Electronics Society},
organization = {IEEE},
pages = {3665--3672},
year = {2018},
title = {Knowledge based hierarchical decomposition of Industry 4.0 robotic automation tasks},
}
@article{csahinel2021human,
abstract = {This study focuses on the question of how humans can be inherently integrated into cyber-physical systems (CPS) to reinforce their involvement in the increasingly automated industrial processes. After a use-case oriented review of the related research literature, a human-integration framework and associated data models are presented as part of a multi-agent IoT middleware called CHARIOT. The framework enables human actors to be semantically represented and registered, together with other IoT entities, in a common},
author = {{\c{S}}ahinel, Doruk and Akpolat, Cem and G{\"o}r{\"u}r, O Can and Sivrikaya, Fikret and Albayrak, Sahin},
journal = {Journal of Manufacturing Systems},
pages = {367--385},
year = {2021},
publisher = {Elsevier},
title = {Human modeling and interaction in cyber-physical systems: A reference framework},
volume = {59}
}
@article{ramasubramanian2021using,
abstract = {The operation of mobile manipulators in a collaborative environment needs to be adapted to the characteristics and skills of human operators. Human activity recognition, utilizing wearable sensors and vision systems, could be used to fine tune the performance of the mobile manipulator so that human operators be better assisted. The goal is to develop a sense of safety and trust between the human and the manipulator in order to improve the ergonomics of the operator within the collaborative workspace. This paper reviews the},
author = {Ramasubramanian, Aswin K and Aiman, Syed M and Papakostas, Nikolaos},
journal = {Procedia CIRP},
pages = {211--216},
year = {2021},
publisher = {Elsevier},
title = {On using human activity recognition sensors to improve the performance of collaborative mobile manipulators: review and outlook},
volume = {97}
}
@article{barbosa2019knowledge,
abstract = {This paper aims to propose a novel knowledge tailor-made management method (KTMM) for industrial automation, to assist managers, project leaders and engineers interested on robotization of manufacturing. It looks for integrating the phases of analyzes, prospecting, development, deployment and evaluation of a robotized process. Specifically, this research focuses on the extraction of valuable knowledge, background and practical experience of the author in the field of industrial automation linked to literature review and motivated by the},
author = {Barbosa, Gustavo Franco and de Andrade Bezerra, Willian},
journal = {Journal of the Brazilian Society of Mechanical Sciences and Engineering},
number = {4},
pages = {1--14},
year = {2019},
publisher = {Springer},
title = {A knowledge tailor-made method oriented to robotization of process: a case study of aeronautical materials’ drilling},
volume = {41}
}
@inproceedings{hasnan2018short,
abstract = {The technological revolution or known as Industry 4.0, is a paradigm that envisages the use of sensors, machines, workpieces and IT system that is connected to the value chain beyond a single organization. As in other industries, the food processing sector is expected to embrace Industry 4.0 progressively. This paper presents the nine technological advancements that drive Industry 4.0 namely Big Data and analytics, autonomous robots, simulation, horizontal and vertical integration, cybersecurity, the Industrial Internet of Things},
author = {Hasnan, Noor Zafira Noor and Yusoff, Yuzainee Md},
booktitle = {2018 IEEE Student Conference on Research and Development (SCOReD)},
organization = {IEEE},
pages = {1--6},
year = {2018},
title = {Short review: Application areas of {Industry 4.0} technologies in food processing sector},
}
@book{baldassarre2013computational,
author = {Baldassarre, Gianluca and Mirolli, Marco},
year = {2013},
publisher = {Springer},
title = {Computational and robotic models of the hierarchical organization of behavior},
}
@inproceedings{krushinitskiy2013review,
abstract = {This paper presents the analysis of automation hardware market, choosing and implementation of an optimal device to create remote supervision of workshops. An idea to create a remotely controllable workshop is not new, but the challenges with interaction scenarios between machines (CNC machines, industrial robots) and humans became more and more complex. Nowadays, industrial automation is mainly organized by microcontroller systems and the main disadvantages of such systems is focusing on specific tasks, needs of},
author = {Krushinitskiy, Pavel and Sziebig, Gabor},
booktitle = {2013 IEEE 4th International Conference on Cognitive Infocommunications (CogInfoCom)},
organization = {IEEE},
pages = {677--682},
year = {2013},
title = {Review of open source computing devices for iSpace in production workshops},
}
@article{chen2009review,
abstract = {Purpose–Paint path planning for industrial robots is critical for uniform paint distribution, process cycle time and material waste, etc. However, paint path planning is still a costly and time‐consuming process. Currently paint path planning has always caused a bottle‐neck for manufacturing automation because typical manual teaching methods are tedious, error‐prone and skill‐dependent. Hence, it is essential to develop automated tool path‐planning methods to replace manual paint path planning. The purpose of this paper is to review the},
author = {Chen, Heping and Fuhlbrigge, Thomas and Li, Xiongzi},
journal = {Industrial Robot: An International Journal},
year = {2009},
publisher = {Emerald Group Publishing Limited},
title = {A review of CAD-based robot path planning for spray painting},
}
@article{rangraz2021workplace,
abstract = {Small manufacturing plants nowadays need to consider Industry 4.0 to stay competitive in the market. Among the challenges regarding the transformation towards Industry 4.0 are requirements to re-skill the staff for the new work environment. The staff have to either adapt to the workplace transformation brought by digitalisation, automation and robotics or face layoffs. This paper reports on a transformation process towards Industry 4.0 which was conducted in a small manufacturing enterprise where automated assembly line, industrial},
author = {Rangraz, Masood and Pareto, Lena},
journal = {International Journal of Lifelong Education},
number = {1},
pages = {5--22},
year = {2021},
publisher = {Taylor \& Francis},
title = {Workplace work-integrated learning: supporting industry 4.0 transformation for small manufacturing plants by reskilling staff},
volume = {40}
}
@inproceedings{isnaini2019robotics,
title={Robotics-based learning to support computational thinking skills in early childhood},
author={Isnaini, Ratih and Budiyanto, Cucuk and Widiastuti, Indah},
booktitle={AIP Conference Proceedings},
volume={2194},
number={1},
pages={020044},
year={2019},
organization={AIP Publishing LLC}
}
@inproceedings{huckaby2012taxonomic,
title={A taxonomic framework for task modeling and knowledge transfer in manufacturing robotics},
author={Huckaby, Jacob O'Donnal and Christensen, Henrik I},
booktitle={Workshops at the Twenty-Sixth AAAI Conference on Artificial Intelligence},
year={2012}
}
@inproceedings{aertbelien2014etasl,
title={eTaSL/eTC: A constraint-based task specification language and robot controller using expression graphs},
author={Aertbeli{\"e}n, Erwin and De Schutter, Joris},
booktitle={2014 IEEE/RSJ International Conference on Intelligent Robots and Systems},
pages={1540--1546},
year={2014},
organization={IEEE}
}
@inproceedings{pane2020skill,
abstract = {This paper introduces a constraint-based skill framework for programming robot applications. Existing skill frameworks allow application developers to reuse skills and compose them sequentially or in parallel. However, they typically assume that the skills are running independently and in a nominal condition. This limitation hinders their applications for more involved and realistic scenarios eg when the skills need to run synchronously and in the presence of disturbances. This paper addresses this problem in two steps. First, we},
author = {Pane, Yudha and Aertbeli{\"e}n, Erwin and De Schutter, Joris and Decr{\'e}, Wilm},
booktitle = {2020 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS)},
organization = {IEEE},
pages = {7087--7094},
year = {2020},
title = {Skill-based programming framework for composable reactive robot behaviors},
}
@inproceedings{thomas2013new,
title={A new skill based robot programming language using uml/p statecharts},
author={Thomas, Ulrike and Hirzinger, Gerd and Rumpe, Bernhard and Schulze, Christoph and Wortmann, Andreas},
booktitle={2013 IEEE International Conference on Robotics and Automation},
pages={461--466},
year={2013},
organization={IEEE}
}
@inproceedings{stenmark2016demonstrations,
title={From demonstrations to skills for high-level programming of industrial robots},
author={Stenmark, Maj and Topp, Elin A},
booktitle={2016 AAAI fall symposium series},
year={2016}
}
@inproceedings{nagele2018prototype,
abstract = {In recent years, a number of publications described approaches for model-based manipulation skills and their applicability to a variety of robot tasks-be it assembly, industrial robotics in general, or service robotics. These approaches roughly follow the same pattern: They model robot task description based on the Task Frame Formalism, the Task Function Approach, or iTaSC. They model coordination mechanisms in form of statecharts or Petri nets. And almost all models are accompanied by domain-specific languages (DSLs) that},
author = {N{\"a}gele, Frank and Halt, Lorenz and Tenbrock, Philipp and Pott, Andreas},
booktitle = {2018 IEEE International Conference on Robotics and Automation (ICRA)},
organization = {IEEE},
pages = {558--565},
year = {2018},
title = {A prototype-based skill model for specifying robotic assembly tasks},
}
@incollection{kroger2010manipulation,
abstract = {This paper introduces a generic framework for sensor-based robot motion control. The key contribution is the introduction of an adaptive selection matrix for sensor-based hybrid switched-system control. The overall control system consists of multiple sensors and open-and closed-loop controllers, in-between which the adaptive selection matrix can switch discretely in order to supply command variables for low-level controllers of robotic manipulators. How control signals are chosen, is specified by Manipulation Primitives, which},
author = {Kr{\"o}ger, Torsten and Finkemeyer, Bernd and Wahl, Friedrich M},
booktitle = {Robotic systems for handling and assembly},
pages = {293--313},
year = {2010},
publisher = {Springer},
title = {Manipulation primitives—A universal interface between sensor-based motion control and robot programming},
}
@article{zander2016model,
title={A model-driven engineering approach for ros using ontological semantics},
author={Zander, Stefan and Heppner, Georg and Neugschwandtner, Georg and Awad, Ramez and Essinger, Marc and Ahmed, Nadia},
journal={arXiv preprint arXiv:1601.03998},
year={2016}
}
@article{pane2021autonomous,
abstract = {Constraint-based robot programming allows for implementing sensor-based skills that react to disturbances on the one hand, and composable skills that dynamically create and reconfigure complex robot behaviors on the other hand. In this letter, we address a class of problems where composing appropriate skills prior to execution is inconvenient due to unpredictable events of disturbances. To this end, we propose an autonomous replanning and acting framework that computes and executes reactive composed skills at runtime. We},
author = {Pane, Yudha and Mokhtari, Vahid and Aertbeli{\"e}n, Erwin and De Schutter, Joris and Decr{\'e}, Wilm},
journal = {IEEE Robotics and Automation Letters},
number = {4},
pages = {6481--6488},
year = {2021},
publisher = {IEEE},
title = {Autonomous Runtime Composition of Sensor-Based Skills Using Concurrent Task Planning},
volume = {6}
}
@article{zhang2021obstacle,
abstract = {Obstacle avoidance is a significant skill not only for mobile robots but also for robot manipulators working in unstructured environments. Various algorithms have been proposed to solve off-line planning and on-line adaption problems. However, it is still not able to ensure safety and flexibility in complex scenarios. In this paper, a novel obstacle avoidance algorithm is proposed to improve the robustness and flexibility. The method contains three components: A closed-loop control system is used to filter the preplanned},
author = {Zhang, Wei and Cheng, Hongtai and Hao, Lina and Li, Xingchen and Liu, Mingfang and Gao, Xifeng},
journal = {Robotics and Computer-Integrated Manufacturing},
pages = {102114},
year = {2021},
publisher = {Elsevier},
title = {An obstacle avoidance algorithm for robot manipulators based on decision-making force},
volume = {71}
}
@inproceedings{he2021research,
abstract = {In view of the traditional teaching mode on industrial for assembly tasks involved in the attitude control, poor adaptability of assembly process problem such as open-loop control, designing an Improved Deep Deterministic Policy Gradient (DDPG) reinforcement learning algorithm to realize the axle-hole assembly task. This method obtains multi-dimensional information, including the end-effector's posture parameters and the robot joints, adopts the hierarchical reward mechanism function, to train robot searching, positioning and attitude},
author = {He, Fujun and Wang, Xiaozheng and Liu, Kai},
booktitle = {2021 5th International Conference on Robotics and Automation Sciences (ICRAS)},
organization = {IEEE},
pages = {182--186},
year = {2021},
title = {Research on Axle-Hole Assembly Method Based on Improved DDPG Algorithm},
}
@article{ji2021learning,
abstract = {For smart manufacturing, an automated robotic assembly system built upon an autoprogramming environment is necessary to reduce setup time and cost for robots that are engaged in frequent task reassignment. This article presents an approach to the autoprogramming of robotic assembly tasks with minimal human assistance. The approach integrates “robotic learning of assembly tasks from observation” and “robotic embodiment of learned assembly tasks in the form of skills.” In the former, robots observe human assembly},
author = {Ji, Sanghoon and Lee, Sukhan and Yoo, Sujeong and Suh, Ilhong and Kwon, Inso and Park, Frank C and Lee, Sanghyoung and Kim, Hongseok},
journal = {Proceedings of the IEEE},
number = {4},
pages = {423--440},
year = {2021},
publisher = {IEEE},
title = {Learning-based automation of robotic assembly for smart manufacturing},
volume = {109}
}
@inproceedings{pang2021synthesized,
abstract = {Human multi-robot system (MRS) collaboration is demonstrating potentials in wide application scenarios due to the integration of human cognitive skills and a robot team's powerful capability introduced by its multi-member structure. However, due to limited human cognitive capability, a human cannot simultaneously monitor multiple robots and identify the abnormal ones, largely limiting the efficiency of the human-MRS collaboration. There is an urgent need to proactively reduce unnecessary human engagements and further reduce},
author = {Pang, Yijiang and Huang, Chao and Liu, Rui},
booktitle = {2021 30th IEEE International Conference on Robot \& Human Interactive Communication (RO-MAN)},
organization = {IEEE},
pages = {778--783},
year = {2021},
title = {Synthesized Trust Learning from Limited Human Feedback for Human-Load-Reduced Multi-Robot Deployments},
}
@inproceedings{rahman2021performance,
abstract = {A human-robot collaborative system in the form of a power and skill assist robotic system was developed where a human and a robot could collaborate to perform object manipulation for targeted assembly tasks in automotive manufacturing. We assumed such assembly tasks as the representative assembly tasks in automotive manufacturing. We reflected human's weight perception in the dynamics and control of the power and skill assist system following a psychophysical method using a reinforcement learning scheme. We},
author = {Rahman, SM Mizanoor},
booktitle = {2021 IEEE International Workshop on Metrology for Automotive (MetroAutomotive)},
organization = {IEEE},
pages = {260--265},
year = {2021},
title = {Performance metrics for human-robot collaboration: An automotive manufacturing case},
}
@article{su2021toward,
abstract = {Learning manipulation skills from open surgery provides more flexible access to the organ targets in the abdomen cavity and this could make the surgical robot working in a highly intelligent and friendly manner. Teaching by demonstration (TbD) is capable of transferring the manipulation skills from human to humanoid robots by employing active learning of multiple demonstrated tasks. This work aims to transfer motion skills from multiple human demonstrations in open surgery to robot manipulators in robot-assisted minimally invasive},
author = {Su, Hang and Mariani, Andrea and Ovur, Salih Ertug and Menciassi, Arianna and Ferrigno, Giancarlo and De Momi, Elena},
journal = {IEEE Transactions on Automation Science and Engineering},
number = {2},
pages = {484--494},
year = {2021},
publisher = {IEEE},
title = {Toward teaching by demonstration for robot-assisted minimally invasive surgery},
volume = {18}
}
@article{wang2020framework,
abstract = {Human factors and human-centered design philosophy are highly desired in today's robotics applications such as human–robot interaction (HRI). Several studies showed that endowing robots of human-like interaction skills can not only make them more likeable but also improve their performance. In particular, skill transfer by imitation learning can increase the usability and acceptability of robots by users without computer programming skills. In fact, besides positional information, muscle stiffness of the human arm and contact force with the},
author = {Wang, Ning and Chen, Chuize and Di Nuovo, Alessandro},
journal = {IEEE Transactions on Cognitive and Developmental Systems},
number = {1},
pages = {162--170},
year = {2020},
publisher = {IEEE},
title = {A framework of hybrid force/motion skills learning for robots},
volume = {13}
}
@article{monforte2020reinforcement,
abstract = {Postural synergies allow a rich set of hand configurations to be represented in lower dimension space compared to the original joint space. In our previous works, we have shown that this can be extended to trajectories thanks to the multivariate functional principal component analysis, obtaining a set of basis functions able to represent grasping movements learned from human demonstration. In this article, we introduce a human cognition-inspired approach for generalizing and improving robot grasping skills in this},
author = {Monforte, Marco and Ficuciello, Fanny},
journal = {IEEE Transactions on Cognitive and Developmental Systems},
number = {1},
pages = {132--140},
year = {2020},
publisher = {IEEE},
title = {A Reinforcement Learning Method Using Multifunctional Principal Component Analysis for Human-Like Grasping},
volume = {13}
}
@article{verner2021exploring,
abstract = {with the knowledge of smart connected robot systems. This paper presents the high-school enrichment program Intelligent Robotics and Smart Transportation, which implements an approach to teaching the concepts and skills of robot connectivity, collaborative sensing,},
author = {Verner, Igor M and Cuperman, Dan and Reitman, Michael},
journal = {Robotics},
number = {1},
pages = {13},
year = {2021},
publisher = {Multidisciplinary Digital Publishing Institute},
title = {Exploring robot connectivity and collaborative sensing in a high-school enrichment program},
volume = {10}
}
@inproceedings{omer2021model,
abstract = {Dexterous multi-fingered robotic hands represent a promising solution for robotic manipulators to perform a wide range of complex tasks, through acquiring more general purpose skills. Nevertheless, developing complex behaviours for a robot needs sophisticated control strategies, which requires domain expertise with a good understanding of mathematics, and underlying physics, this will be very difficult for such complex robots. Learning algorithms like deep reinforcement learning provide a general framework for},
author = {Omer, Muhammad and Ahmed, Rami and Rosman, Benjamin and Babikir, Sharief F},
booktitle = {2020 International Conference on Computer, Control, Electrical, and Electronics Engineering (ICCCEEE)},
organization = {IEEE},
pages = {1--6},
year = {2021},
title = {Model Predictive-Actor Critic Reinforcement Learning for Dexterous Manipulation},
}
@article{bi2021safety,
abstract = {Collaborative robots (cobots) are robots that are designed to collaborate with humans in an open workspace. In contrast to industrial robots in an enclosed environment, cobots need additional mechanisms to assure humans' safety in collaborations. It is especially true when a cobot is used in manufacturing environment; since the workload or moving mass is usually large enough to hurt human when a contact occurs. In this article, we are interested in understanding the existing studies on cobots, and especially, the safety requirements, and},
author = {Bi, ZM and Luo, Chaomin and Miao, Zhonghua and Zhang, Bing and Zhang, WJ and Wang, Lihui},
journal = {Robotics and Computer-Integrated Manufacturing},
pages = {102022},
year = {2021},
publisher = {Elsevier},
title = {Safety assurance mechanisms of collaborative robotic systems in manufacturing},
volume = {67}
}
@article{kumar2021discovering,
abstract = {Exercising sensorimotor and cognitive functions allows humans, including infants, to interact with the environment and objects within it. In particular, during everyday activities, infants continuously enrich their repertoire of actions, and by playing, they experimentally plan such actions in sequences to achieve desired goals. The latter, reflected as perceptual target states, are built on previously acquired experiences shaped by infants to predict their actions. Imitating this, in developmental robotics, we seek methods that allow autonomous},
author = {Kumar, Suresh and Giagkos, Alexandros and Shaw, Patricia and Braud, Rapha{\"e}l and Lee, Mark and Shen, Qiang},
journal = {IEEE Transactions on Cognitive and Developmental Systems},
year = {2021},
publisher = {IEEE},
title = {Discovering Schema-based Action Sequences through Play in Situated Humanoid Robots},
}
@article{martin2021generic,
abstract = {To meet the demands of a rising population greenhouses must face the challenge of producing more in a more efficient and sustainable way. Innovative mobile robotic solutions with flexible navigation and manipulation strategies can help monitor the field in real-time. Guided by Integrated Pest Management strategies, robots can perform early pest detection and selective treatment tasks autonomously. However, combining the different robotic skills is an error prone work that requires experience in many robotic fields, usually deriving on ad},
author = {Martin, Jon and Ansuategi, Ander and Maurtua, I{\~n}aki and Gutierrez, Aitor and Obreg{\'o}n, David and Casquero, Oskar and Marcos, Marga},
journal = {IEEE Access},
pages = {94981--94995},
year = {2021},
publisher = {IEEE},
title = {A Generic ROS-Based Control Architecture for Pest Inspection and Treatment in Greenhouses Using a Mobile Manipulator},
volume = {9}
}
@article{dong2021deep,
abstract = {Humanoid robots are being developed to play the role of personal assistants. With the development of artificial intelligence technology, humanoid robots are expected to perform many human tasks, such as housework, human care, and even medical treatment. However, robots cannot currently move flexibly like humans, which affects their fine motor skill performance. This is primarily because traditional robot control methods use manipulators that are difficult to articulate well. To solve this problem, we propose a nonlinear realistic},
author = {Dong, Ran and Chang, Qiong and Ikuno, Soichiro},
journal = {Neural Computing and Applications},
pages = {1--14},
year = {2021},
publisher = {Springer},
title = {A deep learning framework for realistic robot motion generation},
}
@article{ali2021lead,
abstract = {Welding is a complex manufacturing process. Its quality depends on the welder skills, especially in welding complex paths. For consistency in modern industries, the arm robot is used to accomplish this task. However, its programming and reprogramming are time consuming and costly and need an expert programmer. These limit the use of robots in medium and small industries. This paper introduces a new supervised learning technique for programming a 4-degree of freedom (DOF) welding arm robot with an automatic feeding},
author = {Ali, Mohamed Hosni Mohamed and Atia, Mostafa Rostom},
journal = {Robotica},
pages = {1--11},
year = {2021},
publisher = {Cambridge University Press},
title = {A lead through approach for programming a welding arm robot using machine vision},
}
@inproceedings{radicchi2021helping,
abstract = {The creation of robotic systems demands the formalization of how-to-do rules. However, professional workers interact with the world in a way that goes beyond formal rules, such as when facing unforeseen and context-dependent events. The solution to replace human tasks by robotic systems consists in the creation of “micro-worlds”, which presuppose controlled environments, with fixed rules, in which robots are able to operate successfully. Accordingly, such micro-worlds must be designed, built, supervised, maintained and},
author = {Radicchi, Natalia and Detoie, Luciana and Ribeiro, Rodrigo and Lima, Francisco},
booktitle = {Congress of the International Ergonomics Association},
organization = {Springer},
pages = {256--264},
year = {2021},
title = {Helping a Robot to be “Autonomous”: The Expertise of a (Human) Roboticist in a Manufacturing Plant},
}
@inproceedings{rodic2021cloud,
abstract = {The paper presents a new prototype of a bi-manual, collaborative service robot for industrial purpose designed for use in the so–called customized production within the Industry 4.0 initiative. The robot is a significant technological means of supporting Small and Medium-sized Enterprises (SMEs) engaged in the manufacturing of products intended for the market. The bi-manual collaborative robot has the features of enhanced operational versatility and flexibility within technological production, thanks to a specific mechanical construction with},
author = {Rodi{\'c}, Aleksandar and {\v{S}}umarac, Jovan and Stevanovi{\'c}, Ilija and Jovanovi{\'c}, Milo{\v{s}}},
booktitle = {International Conference on Robotics in Alpe-Adria Danube Region},
organization = {Springer},
pages = {240--249},
year = {2021},
title = {Cloud-Enabled Bi-manual Collaborative Robot with Enhanced Versatility for Customized Production},
}
@inproceedings{profanter2019hardware,
abstract = {The current trend to lot-size-one production requires reduced integration effort and easy reuse of available devices inside the production line. These devices have to offer a uniform interface to fulfill these requirements. This paper presents a hardware-agnostic skill model using the semantic modeling capabilities of OPC UA. The model provides a standardized interface to hardware or software functionality while offering an intuitive way of grouping multiple skills to a higher hierarchical abstraction. Our skill model is based on OPC UA},
author = {Profanter, Stefan and Breitkreuz, Ari and Rickert, Markus and Knoll, Alois},
booktitle = {2019 24th IEEE International Conference on Emerging Technologies and Factory Automation (ETFA)},
organization = {IEEE},
pages = {1061--1068},
year = {2019},
title = {A hardware-agnostic OPC UA skill model for robot manipulators and tools},
}
@inproceedings{calderon2010teaching,
abstract = {Learning by imitation represent a useful and promising alternative to programming robots. This paper presents an approach for learning by imitation based on two functional elements used by humans to understand and perform actions. One is the representation of the body that contains information of the body's capabilities. The other one is a snapshot of the body and its relation with the environment at a given instant. These elements are believed to interact to generate among other abilities, the ability to imitate. Learning new tasks is then},
author = {Calderon, Carlos A Acosta and Mohan, Rajesh E and Zhou, Changjiu},
booktitle = {2010 IEEE Conference on Robotics, Automation and Mechatronics},
organization = {IEEE},
pages = {256--262},
year = {2010},
title = {Teaching new tricks to a robot learning to solve a task by imitation},
}
@inproceedings{aein2013toward,
abstract = {The goal of this study is to provide an architecture for a generic definition of robot manipulation actions. We emphasize that the representation of actions presented here is “procedural”. Thus, we will define the structural elements of our action representations as execution protocols. To achieve this, manipulations are defined using three levels. The toplevel defines objects, their relations and the actions in an abstract and symbolic way. A mid-level sequencer, with which the action primitives are chained, is used to structure the},
author = {Aein, Mohamad Javad and Aksoy, Eren Erdal and Tamosiunaite, Minija and Papon, Jeremie and Ude, Ales and W{\"o}rg{\"o}tter, Florentin},
booktitle = {2013 IEEE/RSJ International Conference on Intelligent Robots and Systems},
organization = {IEEE},
pages = {4555--4562},
year = {2013},
title = {Toward a library of manipulation actions based on semantic object-action relations},
}
@inproceedings{ahad2010action,
abstract = {Human action understanding and analysis for various applications are still in infancy due to various factors. In this paper, for recognizing various complex activities, a combined cue for motion representation and later recognition is demonstrated based on the optical flow-based four directional motion history and basic energy images. Optical flow between consecutive frames are computed to create the update function and to segment the moving regions. These motion vectors are split into four different channels. From these channels, the},
author = {Ahad, Md Atiqur Rahman and Tan, J and Kim, Hyoungseop and Ishikawa, Seiji},
booktitle = {2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition-Workshops},
organization = {IEEE},
pages = {73--78},
year = {2010},
title = {Action recognition by employing combined directional motion history and energy images},
}
@article{hwang2017seamless,
abstract = {This paper investigates how adequate coordination among the different cognitive processes of a humanoid robot can be developed through end-to-end learning of direct perception of visuomotor stream. We propose a deep dynamic neural network model built on a dynamic vision network, a motor generation network, and a higher-level network. The proposed model was designed to process and to integrate direct perception of dynamic visuomotor patterns in a hierarchical model characterized by different spatial and temporal constraints},
author = {Hwang, Jungsik and Tani, Jun},
journal = {IEEE Transactions on Cognitive and Developmental Systems},
number = {2},
pages = {345--358},
year = {2017},
publisher = {IEEE},
title = {Seamless integration and coordination of cognitive skills in humanoid robots: A deep learning approach},
volume = {10}
}
@article{ji2018one,
abstract = {In a natural and accessible Human Robot Interaction (HRI), it is required to understand human activities instantly. In this paper, we present a novel approach for early recognition of human actions. Using reinforcement learning, we separate human action to several patterns and learn pattern transition maps which include temporal ordered patterns and their transition relationships in action sequences. Due to the difficulty of pattern separation and definition in large quantity of action sequences for training, we adopt one-shot learning to},
author = {Ji, Yanli and Yang, Yang and Xu, Xing and Shen, Heng Tao},
journal = {Signal Processing},
pages = {364--370},
year = {2018},
publisher = {Elsevier},
title = {One-shot learning based pattern transition map for action early recognition},
volume = {143}
}
@article{madarash2004enhancing,
abstract = {The IEEE Xplore online delivery system provides researchers at science and engineering libraries with the full text PDF files of IEEE conference proceedings. The University of Akron recently added full text PDF hotlinks to the print records of both serial and monographic IEEE},
author = {Madarash-Hill, Cherie and Hill, JB},
journal = {Science \& Technology Libraries},
number = {3-4},
pages = {389--399},
year = {2004},
publisher = {Taylor \& Francis},
title = {Enhancing access to {IEEE} conference proceedings: a case study in the application of {IEEE Xplore} full text and table of contents enhancements},
volume = {24}
}
@article{zimmer2017bootstrapping,
title={Bootstrapping $ q $-learning for robotics from neuro-evolution results},
author={Zimmer, Matthieu and Doncieux, Stephane},
journal={IEEE Transactions on Cognitive and Developmental Systems},
volume={10},
number={1},
pages={102--119},
year={2017},
publisher={IEEE}
}
@article{dindo2017hankelet,
abstract = {Powered lower-limb prostheses require a natural, and an easy-to-use, interface for communicating amputee's motor intention in order to select the appropriate motor program in any given context, or simply to commute from active (powered) to passive mode of functioning. To be widely accepted, such an interface should not put additional cognitive load at the end-user, it should be reliable and minimally invasive. In this paper we present a one such interface based on a robust method for detecting and recognizing motor actions},
author = {Dindo, Haris and Presti, Liliana Lo and La Cascia, Marco and Chella, Antonio and Dedi{\'c}, Remzo},
journal = {Robotics and Autonomous Systems},
pages = {120--133},
year = {2017},
publisher = {Elsevier},
title = {Hankelet-based action classification for motor intention recognition},
volume = {94}
}
@article{hardwick2017neural,
abstract = {There is longstanding interest in the relationship between motor imagery, action observation, and movement execution. Several models propose that these tasks recruit the same brain regions in a similar manner; however, there is no quantitative synthesis of the literature that compares their respective networks. Here we summarized data from neuroimaging experiments examining Motor Imagery (303 experiments, 4,902 participants), Action Observation (595 experiments, 11,032 participants), and related control tasks},
author = {Hardwick, Robert M and Caspers, Svenja and Eickhoff, Simon B and Swinnen, Stephan P},
journal = {BioRxiv},
pages = {198432},
year = {2017},
publisher = {Cold Spring Harbor Laboratory},
title = {Neural correlates of motor imagery, action observation, and movement execution: a comparison across quantitative meta-analyses},
}
@inproceedings{ahmadzadeh2015learning,
abstract = {In this paper, a robot learning approach is proposed which integrates Visuospatial Skill Learning, Imitation Learning, and conventional planning methods. In our approach, the sensorimotor skills (ie, actions) are learned through a learning from demonstration strategy. The sequence of performed actions is learned through demonstrations using Visuospatial Skill Learning. A standard action-level planner is used to represent a symbolic description of the skill, which allows the system to represent the skill in a discrete, symbolic form. The},
author = {Ahmadzadeh, Seyed Reza and Paikan, Ali and Mastrogiovanni, Fulvio and Natale, Lorenzo and Kormushev, Petar and Caldwell, Darwin G},
booktitle = {2015 IEEE International Conference on Robotics and Automation (ICRA)},
organization = {IEEE},
pages = {3801--3808},
year = {2015},
title = {Learning symbolic representations of actions from human demonstrations},
}
@inproceedings{grave2012incremental,
abstract = {The ability to recognize human actions is a fundamental problem in many areas of robotics research concerned with human-robot interaction or learning from human demonstration. In this paper, we present a new integrated approach to identifying and recognizing actions in human movement sequences and their reproduction in unknown situations. We propose a set of task-space features to construct probabilistic models of action classes. Based on this representation, we suggest a combined segmentation and classification algorithm which},
author = {Gr{\"a}ve, Kathrin and Behnke, Sven},
booktitle = {2012 IEEE/RSJ International Conference on Intelligent Robots and Systems},
organization = {IEEE},
pages = {751--757},
year = {2012},
title = {Incremental action recognition and generalizing motion generation based on goal-directed features},
}
@inproceedings{nakawala2018approaches,
abstract = {Robust representation of actions and its sequences for complex robotic tasks would transform robot's understanding to execute robotic tasks efficiently. The challenge is to understand action sequences for highly unstructured environments and to represent and construct action and action sequences. In this manuscript, we present a review of literature dealing with representation of action and action sequences for robot task planning and execution. The methodological review was conducted using Google Scholar and IEEE},
author = {Nakawala, Hirenkumar and Goncalves, Paulo JS and Fiorini, Paolo and Ferrigno, Giancarlo and De Momi, Elena},
booktitle = {2018 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS)},
organization = {IEEE},
pages = {5666--5671},
year = {2018},
title = {Approaches for action sequence representation in robotics: A review},
}
@article{akkaladevi2019skill,
abstract = {In recent years, a paradigm shift is underway as robots leave their typical application field and move into domains that have been untouched by robotic automation. These new kinds of automation systems allow more product variations, smaller life cycles, smaller batch sizes and pave the way from mass production to mass customization. This is due to a completely new breed of safe robot technology but also novel ways of setting up new applications like, e.g., kinesthetic programming. However, the topic of reducing the programming effort for},
author = {Akkaladevi, Sharath Chandra and Pichler, Andreas and Plasch, Matthias and Ikeda, Markus and Hofmann, Michael},
journal = {e \& i Elektrotechnik und Informationstechnik},
number = {7},
pages = {326--333},
pub_year = {2019},
publisher = {Springer},
title = {Skill-based programming of complex robotic assembly tasks for industrial application},
venue = {e & i Elektrotechnik und …},
volume = {136}
}
@article{steinmetz2019intuitive,
abstract = {Intuitive robot programming for non-experts will be essential to increasing automation in small and medium-sized enterprises. Programming by demonstration (PbD) is a fast and intuitive approach, whereas programs created with task-level programming (TLP) are easy to understand and flexible in their execution. In this letter, we propose an approach which combines these complementary advantages of PbD and TLP. Users define complete task-level programs including all parameters through PbD alone. Therefore, we call this},
author = {Steinmetz, Franz and Nitsch, Verena and Stulp, Freek},
journal = {IEEE Robotics and Automation Letters},
number = {4},
pages = {3742--3749},
pub_year = {2019},
publisher = {IEEE},
title = {Intuitive task-level programming by demonstration through semantic skill recognition},
venue = {IEEE Robotics and Automation …},
volume = {4}
}
@inproceedings{lindorfer2019towards,
abstract = {Upcoming trends in manufacturing focus on flexible configuration of products up to lot-size one. This mass customization raises a need for flexible manufacturing and production systems that should react to fast-changing market demands. As a result, it is getting more and more important to model all needed assets or skills in a user-oriented and configurable manner. Assets can represent any type of data or skill information needed in a manufacturing domain. Data information could be for example shown to a human worker (eg},
author = {Lindorfer, Ren{\'e} and Froschauer, Roman},
booktitle = {2019 IEEE 17th International Conference on Industrial Informatics (INDIN)},
organization = {IEEE},
pages = {655--660},
pub_year = {2019},
title = {Towards user-oriented programming of skill-based automation systems using a domain-specific meta-modeling approach},
venue = {2019 IEEE 17th International …},
volume = {1}
}
@article{krueger2019testing,
abstract = {In recent years, cognitive robots have started to find their way into manufacturing halls. However, the full potential of these robots can only be exploited through (a) an integration of the robots with the Manufacturing Execution System (MES), (b) a new and simpler way of programming based on robot skills, automated task planning, and knowledge modeling, and (c) enabling the robots to function in a shared human/robot workspace with the ability to handle unexpected situations. The STAMINA project has built a robotic system that meets},
author = {Krueger, Volker and Rovida, Francesco and Grossmann, Bjarne and Petrick, Ronald and Crosby, Matthew and Charzoule, Arnaud and Garcia, German Martin and Behnke, Sven and Toscano, Cesar and Veiga, Germano},
journal = {Robotics and computer-integrated manufacturing},
pages = {213--229},
pub_year = {2019},
publisher = {Elsevier},
title = {Testing the vertical and cyber-physical integration of cognitive robots in manufacturing},
venue = {Robotics and computer …},
volume = {57}
}
@inproceedings{scherzinger2019contact,
abstract = {Robotic automation is a key driver for the advancement of technology. The skills of human workers, however, are difficult to program and seem currently unmatched by technical systems. In this work we present a data-driven approach to extract and learn robot-independent contact skills from human demonstrations in simulation environments, using a Long Short Term Memory (LSTM) network. Our model learns to generate error-correcting sequences of forces and torques in task space from object-relative motion, which industrial},
author = {Scherzinger, Stefan and Roennau, Arne and Dillmann, R{\"u}diger},
booktitle = {2019 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS)},
organization = {IEEE},
pages = {4309--4316},
pub_year = {2019},
title = {Contact skill imitation learning for robot-independent assembly programming},
venue = {2019 IEEE/RSJ …}
}
@inproceedings{johannsmeier2019framework,
abstract = {In this paper we introduce a novel framework for expressing and learning force-sensitive robot manipulation skills. It is based on a formalism that extends our previous work on adaptive impedance control with meta parameter learning and compatible skill specifications. This way the system is also able to make use of abstract expert knowledge by incorporating process descriptions and quality evaluation metrics. We evaluate various state-of-the-art schemes for meta parameter learning and experimentally compare selected ones},
author = {Johannsmeier, Lars and Gerchow, Malkin and Haddadin, Sami},
booktitle = {2019 International Conference on Robotics and Automation (ICRA)},
organization = {IEEE},
pages = {5844--5850},
pub_year = {2019},
title = {A framework for robot manipulation: Skill formalism, meta learning and adaptive control},
venue = {… Conference on Robotics …}
}
@inproceedings{nagele2019composition,
abstract = {Skill-based approaches for programming robots promise many benefits such as easier reuse of functionality across applications, encapsulation and hiding of process details, and often a certain level of hardware abstraction. While many previous approaches allow to compose high-level skills, the basic skills themselves are usually atomic, non-extensible, or are supposed to be created by experts beforehand. We designed a prototype-based skill model to create force-controlled manipulation skills for robotic assembly tasks. In this paper},
author = {N{\"a}gele, Frank and Halt, Lorenz and Tenbrock, Philipp and Pott, Andreas},
booktitle = {2019 Third IEEE International Conference on Robotic Computing (IRC)},
organization = {IEEE},
pages = {177--182},
pub_year = {2019},
title = {Composition and incremental refinement of skill models for robotic assembly tasks},
venue = {2019 Third IEEE …}
}
@article{angleraud2019teaching,
abstract = {Recent advances in robotics allow for collaboration between humans and machines in performing tasks at home or in industrial settings without harming the life of the user. While humans can easily adapt to each other and work in a team, it is not as trivial for robots. In their case, interaction skills typically come at the cost of extensive programming and teaching. Besides, understanding the semantics of a task is necessary to work efficiently and react to changes in the task execution process. As a result, in order to achieve seamless},
author = {Angleraud, Alexandre and Houbre, Quentin and Pieters, Roel},
journal = {Paladyn, Journal of Behavioral Robotics},
number = {1},
pages = {318--329},
pub_year = {2019},
publisher = {Sciendo},
title = {Teaching semantics and skills for human-robot collaboration},
venue = {Paladyn, Journal of Behavioral …},
volume = {10}
}
@article{al2019symbolic,
abstract = {This paper investigates a symbolic-based CS recognition approach which combines feature transformation methods, i.e., Principal reduce the computational effort required for CS recognition. As a validation example, the PiH assembly was adopted to demonstrate the efficiency},
author = {Al-Yacoub, Ali and Zhao, Yuchen and Lohse, Niels and Goh, Mey and Kinnell, Peter and Ferreira, Pedro and Hubbard, Ella-Mae},
journal = {Frontiers in Robotics and AI},
pages = {99},
pub_year = {2019},
publisher = {Frontiers},
title = {Symbolic-based recognition of contact states for learning assembly skills},
venue = {Frontiers in Robotics …}
}
@inproceedings{pohlt2019human,
abstract = {Collaboration between robots and humans requires communicative skills on both sides. The robot has to understand the conscious and unconscious activities of human workers. Many state-of-the-art activity recognition algorithms with high performance rates on existing benchmark datasets are available for this task. This paper re-evaluates appropriate architectures in light of human work activity recognition for working cells in industrial production contexts. The specific constraints of such a domain are elaborated and used as},
author = {Pohlt, Clemens and Schlegl, Thomas and Wachsmuth, Sven},
booktitle = {2019 IEEE International Conference on Systems, Man and Cybernetics (SMC)},
organization = {IEEE},
pages = {4225--4230},
pub_year = {2019},
title = {Human work activity recognition for working cells in industrial production contexts},
venue = {2019 IEEE International …}
}
@inproceedings{ochoa2019control,
abstract = {This paper describes a computed-torque control architecture for robotic-assisted polishing tasks based on human demonstration. An impedance control approach for both robot position and orientation is investigated without explicit force sensing, relying on compliant frames relative to the end-effector and on posture optimization in the null space. For the orientation control the approach relies on rotation matrix properties connected to the axis-angle formulation, having no singularity problems. Additionally, we also propose a robust},
author = {Ochoa, H{\'e}lio and Cortes{\~a}o, Rui},
booktitle = {IECON 2019-45th Annual Conference of the IEEE Industrial Electronics Society},
organization = {IEEE},
pages = {630--637},
pub_year = {2019},
title = {Control architecture for robotic-assisted polishing tasks based on human skills},
venue = {IECON 2019-45th Annual Conference of …},
volume = {1}
}
@inproceedings{jaensch2019reinforcement,
abstract = {This paper introduces a method for automatic robot programming of industrial robots using reinforcement learning on a Software-in-the-loop simulation. The focus of the paper is on the higher levels of a hierarchical robot programming problem. While on the lower levels the skills are stored as domain specific program code, the combination of the skills into a robot control program to solve a specific task is automated. The reinforcement learning approach allows the shopfloor workers and technicians just to define the end result of the},
author = {Jaensch, Florian and Csiszar, Akos and Sarbandi, Janik and Verl, Alexander},
booktitle = {2019 Second International Conference on Artificial Intelligence for Industries (AI4I)},
organization = {IEEE},
pages = {79--84},
pub_year = {2019},
title = {Reinforcement learning of a robot cell control logic using a software-in-the-loop simulation as environment},
venue = {… Conference on Artificial …}
}
@inproceedings{zhen2019adaptive,
abstract = {Many industry manufacturing processes require a lot of manpower to accomplish tasks manually, for example, manual polishing and masking. Industrial robots can be used to replace most of the tedious and repeated tasks. However, using a robot program to generate the tool path for the manufacturing process might need programming skills and expertise. Besides, Computer Aided Design (CAD) files might not be available or accurate for the engineer to design the robot tool path. Hence, we propose an automatic way to generate the},
author = {Zhen, Xie and Seng, Josh Chen Ye and Somani, Nikhil},
booktitle = {2019 24th IEEE International Conference on Emerging Technologies and Factory Automation (ETFA)},
organization = {IEEE},
pages = {341--347},
pub_year = {2019},
title = {Adaptive automatic robot tool path generation based on point cloud projection algorithm},
venue = {2019 24th IEEE International …}
}
@article{malik2019complexity,
abstract = {Purpose Over the past years, collaborative robots have been introduced as a new generation of industrial robotics working alongside humans to share the workload. These robots have the potential to enable human–robot collaboration (HRC) for flexible automation. However, the deployment of these robots in industrial environments, particularly in assembly, still comprises several challenges, of which one is skills-based tasks distribution between humans and robots. With ever-decreasing product life cycles and high},
author = {Malik, Ali Ahmad and Bilberg, Arne},
journal = {Industrial Robot: the international journal of robotics research and application},
pub_year = {2019},
publisher = {Emerald Publishing Limited},
title = {Complexity-based task allocation in human-robot collaborative assembly},
venue = {Industrial Robot: the international journal of …}
}
@inproceedings{zhao2019robot,
abstract = {Industrial robots have been widely used in various production lines but not in the 3C assembly line, which is characterized by highly frequent change-over and still relies on labor heavily. To deal with this challenge, the Programming by Demonstration (PbD) method, which provides a simple and intuitive way for workers to transfer skills to the robot, will greatly reduce professional knowledge requirements for robot users. However, some practical factors in 3C assembly line, such as the mechanical diversities between the human},
author = {Zhao, Zhilong and Hu, Haopeng and Yang, Xiansheng and Lou, Yunjiang},
booktitle = {2019 WRC Symposium on Advanced Robotics and Automation (WRC SARA)},
organization = {IEEE},
pages = {172--177},
pub_year = {2019},
title = {A robot programming by demonstration method for precise manipulation in 3C assembly},
venue = {2019 WRC Symposium on …}
}
@inproceedings{cao2019robot,
abstract = {With the development of the Internet and computer technology, the demand for 3C products such as mobile phones has surged. At the same time, due to the continuous improvement of labor costs, it is urgent to automate the 3C assembly lines with industrial robots. The motivation of this paper is to propose an efficient off-line programming by demonstration method to automate 3C assembly lines. This process consists of two phases. In the first phase, the optical motion capture device is used to capture the position and orientation},
author = {Cao, Zhiqi and Hu, Haopeng and Yang, Xiansheng and Lou, Yunjiang},
booktitle = {2019 WRC Symposium on Advanced Robotics and Automation (WRC SARA)},
organization = {IEEE},
pages = {13--18},
pub_year = {2019},
title = {A robot 3C assembly skill learning method by intuitive human assembly demonstration},
venue = {2019 WRC Symposium on …}
}
@inproceedings{lazaro2019approach,
abstract = {One of the major objectives of international projects in the field of Industrial Automation is to achieve a proper and safe human-robot collaboration. This will permit the coexistence of both humans and robots at factory shop floors, where each one has a clear role along the industrial processes. It's a matter of fact that machines, including robots, have specific features that determine the kind of operation(s) that they can perform better. Similarly, human operators have a set of skills and knowledge that permits them to accomplish their},
author = {L{\'a}zaro, Olatz De Miguel and Mohammed, Wael M and Ferrer, Borja Ramis and Bejarano, Ronal and Lastra, Jose L Martinez},
booktitle = {2019 IEEE 17th International Conference on Industrial Informatics (INDIN)},
organization = {IEEE},
pages = {789--794},
pub_year = {2019},
title = {An approach for adapting a cobot workstation to human operator within a deep learning camera},
venue = {2019 IEEE 17th …},
volume = {1}
}
@inproceedings{yuan2019mirrored,
abstract = {With the increasing demands from industrial and daily usage, dexterous robot manipulation in unstructured environment becomes a very important technology to make robots behave smartly and interact with people intelligently. To deal with the challenge in efficient task teaching of a robot with multiple Degree-of-freedom (DoF) for dexterous manipulation, this paper develops a mirrored motion remapping method, for enabling a human operator to teach a dual-arm robot face-to-face by direct human motion through handheld HTC VIVE},
author = {Yuan, Qilong and Weng, Ching-Yen and Lim, Zhong Jin and Chen, I-Ming},
booktitle = {2019 IEEE/ASME International Conference on Advanced Intelligent Mechatronics (AIM)},
organization = {IEEE},
pages = {1086--1091},
pub_year = {2019},
title = {A mirrored motion remapping method in telemanipulation-based face-to-face dual-arm robot teaching},
venue = {2019 IEEE/ASME …}
}
@article{liu2019human,
abstract = {Purpose The purpose of this paper is to propose a new method based on three-dimensional (3D) vision technologies and human skill integrated deep learning to solve assembly positioning task such as peg-in-hole. Design/methodology/approach Hybrid camera configuration was used to provide the global and local views. Eye-in-hand mode guided the peg to be in contact with the hole plate using 3D vision in global view. When the peg was in contact with the workpiece surface, eye-to-hand mode provided the local view to accomplish},
author = {Liu, Yi and Cong, Ming and Dong, Hang and Liu, Dong},
journal = {Industrial Robot: the international journal of robotics research and application},
pub_year = {2019},
publisher = {Emerald Publishing Limited},
title = {Human skill integrated motion planning of assembly manipulation for 6R industrial robot},
venue = {Industrial Robot: the international …}
}
@techreport{gill2019evaluation,
author = {Gill, Jasprit Singh and Tomaszewski, Mark and Jia, Yunyi and Pisu, Pierluigi and Krovi, Venkat N},
institution = {SAE Technical Paper},
pub_year = {2019},
title = {Evaluation of navigation in mobile robots for long-term autonomy in automotive manufacturing environments}
}
@article{luo2019task,
abstract = {Telerobotic systems have attracted growing attention because of their superiority in the dangerous or unknown interaction tasks. It is very challenging to exploit such systems to implement complex tasks in an autonomous way. In this paper, we propose a task learning framework to represent the manipulation skill demonstrated by a remotely controlled robot. Gaussian mixture model is utilized to encode and parametrize the smooth task trajectory according to the observations from the demonstrations. After encoding the demonstrated},
author = {Luo, Jing and Yang, Chenguang and Li, Qiang and Wang, Min},
journal = {International Journal of Humanoid Robotics},
number = {02},
pages = {1950009},
pub_year = {2019},
publisher = {World Scientific},
title = {A task learning mechanism for the telerobots},
venue = {International Journal of Humanoid …},
volume = {16}
}
@article{perico2019combining,
abstract = {This letter combines an imitation learning approach with a model-based and constraint-based task specification and control methodology. Imitation learning provides an intuitive way for the end user to specify the context of a new robot application without the need of traditional programming skills. On the other hand, constraint-based robot programming allows us to define complex tasks involving different kinds of sensor input. Combination of both enables adaptation of complex tasks to new environments and new objects with a small},
author = {Perico, Cristian Alejandro Vergara and De Schutter, Joris and Aertbeli{\"e}n, Erwin},
journal = {IEEE Robotics and Automation Letters},
number = {2},
pages = {1892--1899},
pub_year = {2019},
publisher = {IEEE},
title = {Combining imitation learning with constraint-based task specification and control},
venue = {IEEE Robotics and …},
volume = {4}
}
@article{gomes2019challenges,
abstract = {Heterogeneous multirobot systems have shown significant potential in many applications. Cooperative coevolutionary algorithms (CCEAs) represent a promising approach to synthesise controllers for such systems, as they can evolve multiple co-adapted components. Although CCEAs allow for an arbitrary level of team heterogeneity, in previous works heterogeneity is typically only addressed at the behavioural level. In this paper, we study the use of CCEAs to evolve control for a heterogeneous multirobot system where the},
author = {Gomes, Jorge and Mariano, Pedro and Christensen, Anders Lyhne},
journal = {Natural Computing},
number = {1},
pages = {29--46},
pub_year = {2019},
publisher = {Springer},
title = {Challenges in cooperative coevolution of physically heterogeneous robot teams},
venue = {Natural Computing},
volume = {18}
}
@article{de2019bio,
abstract = {This paper aims to present a cooperative and distributed navigation strategy, that is an on-line path planner, for an autonomous multi-robot system. The robots are intended to navigate and explore an unknown environment in order to find and reach obligatory passage points or way-points (goals), and then achieve a known final position. All robots in the team are homogeneous, independent and have limited communication skills. However, they interact among themselves and with the environment to autonomously decide about their},
author = {de Almeida, Jo{\~a}o Paulo Lima Silva and Nakashima, Renan Taizo and Neves-Jr, Fl{\'a}vio and de Arruda, L{\'u}cia Val{\'e}ria Ramos},
journal = {Robotics and Autonomous Systems},
pages = {32--48},
pub_year = {2019},
publisher = {Elsevier},
title = {Bio-inspired on-line path planner for cooperative exploration of unknown environment by a Multi-Robot System},
venue = {Robotics and …},
volume = {112}
}
@article{malik2019advances,
abstract = {Human-robot collaboration can be used to share workload to form semi-automated production systems. Assembly operations are recognized with high potential to increase productivity by using the best skills of humans and robots in a combination. Components and parts to be assembled need to be structured and presented to the robot in a known location and orientation. The process of presenting parts to the robot for assembly tasks is referred to as parts feeding. Feeding system needs to be adaptable to dynamics of parts'},
author = {Malik, Ali Ahmad and Andersen, Martin Vejling and Bilberg, Arne},
journal = {Procedia Manufacturing},
pages = {1228--1235},
pub_year = {2019},
publisher = {Elsevier},
title = {Advances in machine vision for flexible feeding of assembly parts},
venue = {Procedia Manufacturing},
volume = {38}
}
@inproceedings{liang2019teaching,
abstract = {Robots are expected to be widely used on future construction sites to assist human workers in the performance of repetitive physically-demanding tasks. Unlike typical manufacturing assembly lines, where parts are delivered to robots and workers in stationary workstations, construction robots and human workers must accumulate all necessary resources and repeatedly navigate to desired assembly locations on-site to perform useful work. The condition of such resources and the geometry of the environment are constantly changing},
author = {Liang, Ci-Jyun and Kamat, VR and Menassa, CC},
booktitle = {ISARC. Proceedings of the International Symposium on Automation and Robotics in Construction},
organization = {IAARC Publications},
pages = {1305--1311},
pub_year = {2019},
title = {Teaching robots to perform construction tasks via learning from demonstration},
venue = {ISARC. Proceedings of the …},
volume = {36}