@inproceedings{10.1007/978-3-030-23807-0_1,
  author    = {Ard{\'o}n, Paola and Pairet, {\`E}ric and Petrick, Ron and Ramamoorthy, Subramanian and Lohan, Katrin},
  editor    = {Althoefer, Kaspar and Konstantinova, Jelizaveta and Zhang, Ketao},
  title     = {Reasoning on Grasp-Action Affordances},
  booktitle = {Towards Autonomous Robotic Systems},
  year      = {2019},
  publisher = {Springer International Publishing},
  address   = {Cham},
  pages     = {3--15},
  doi       = {10.1007/978-3-030-23807-0_1},
  abstract  = {Artificial intelligence is essential to succeed in challenging activities that involve dynamic environments, such as object manipulation tasks in indoor scenes. Most of the state-of-the-art literature explores robotic grasping methods by focusing exclusively on attributes of the target object. When it comes to human perceptual learning approaches, these physical qualities are not only inferred from the object, but also from the characteristics of the surroundings. This work proposes a method that includes environmental context to reason on an object affordance to then deduce its grasping regions. This affordance is reasoned using a ranked association of visual semantic attributes harvested in a knowledge base graph representation. The framework is assessed using standard learning evaluation metrics and the zero-shot affordance prediction scenario. The resulting grasping areas are compared with unseen labelled data to asses their accuracy matching percentage. The outcome of this evaluation suggest the autonomy capabilities of the proposed method for object interaction applications in indoor environments.},
  isbn      = {978-3-030-23807-0},
}