@article{mixedemotions-gsi-article-2018,
  author   = "Paul Buitelaar and Ian D. Wood and Mihael Arcan and John P. McCrae and Andrejs Abele and C{\'e}cile Robin and Vladimir Andryushechkin and Housam Ziad and Hesam Sagha and Maximilian Schmitt and Bj{\"o}rn W. Schuller and J. Fernando S{\'a}nchez-Rada and Carlos A. Iglesias and Carlos Navarro and Andreas Giefer and Nicolaus Heise and Vincenzo Masucci and Francesco A. Danza and Ciro Caterino and Pavel Smr\v{z} and Michal Hradi\v{s} and Filip Povoln{\'y} and Marek Klime\v{s} and Pavel Mat\v{e}jka and Giovanni Tummarello",
  abstract = "Recently, there has been an increasing tendency to embed the functionality of recognizing emotions from user-generated content in order to infer richer profiles of users or content, which can be used in various automated systems such as call-center operations, recommendations, and assistive technologies. However, to date, adding this functionality has been a tedious, costly, and time-consuming effort: one has to look for different tools that suit one's needs and work with a different interface for each tool. The MixedEmotions toolbox addresses the need for such functionality by providing tools for text, audio, video, and linked-data processing within an easily integrable plug-and-play platform. These functionalities include: (i) for text processing: emotion and sentiment recognition; (ii) for audio processing: emotion, age, and gender recognition; (iii) for video processing: face detection and tracking, emotion recognition, facial landmark localization, head pose estimation, face alignment, and body pose estimation; and (iv) for linked data: knowledge graphs. Moreover, the MixedEmotions toolbox is open source and free. In this article, we present the toolbox in the context of the existing landscape and provide a range of detailed benchmarks on standardized test beds showing its state-of-the-art performance. Furthermore, three real-world use cases show its effectiveness, namely emotion-driven smart TV, call-center monitoring, and brand reputation analysis.",
  awards   = "JCR 2018 4.292 (Q1)",
  comments = "JCR 2018 Q1 5.452, SJR 2018 Q1 1.222, Scopus 2018 Q1 9.3",
  doi      = "10.1109/TMM.2018.2798287",
  issn     = "1520-9210",
  journal  = "IEEE Transactions on Multimedia",
  keywords = "emotion analysis;open source toolbox;affective computing;linked data;audio processing;text processing;video processing",
  month    = "September",
  number   = "9",
  pages    = "2454-2465",
  title    = "{M}ixed{E}motions: {A}n {O}pen-{S}ource {T}oolbox for {M}ulti-{M}odal {E}motion {A}nalysis",
  url      = "http://ieeexplore.ieee.org/document/8269329/",
  volume   = "20",
  year     = "2018",
}