@inproceedings{voelkel2020b,
  author    = {V{\"o}lkel, Sarah Theres and Haeuslschmid, Renate and Werner, Anna and Hussmann, Heinrich and Butz, Andreas},
  title     = {How to Trick {AI}: Users' Strategies for Protecting Themselves from Automatic Personality Assessment},
  booktitle = {Proceedings of the 2020 {CHI} Conference on Human Factors in Computing Systems},
  series    = {CHI '20},
  year      = {2020},
  pages     = {1--15},
  numpages  = {15},
  publisher = {Association for Computing Machinery},
  address   = {New York, NY, USA},
  location  = {Honolulu, HI, USA},
  isbn      = {9781450367080},
  doi       = {10.1145/3313831.3376877},
  url       = {https://doi.org/10.1145/3313831.3376877},
  keywords  = {chatbot, automatic personality assessment, personality},
  abstract  = {Psychological targeting tries to influence and manipulate users' behaviour. We investigated whether users can protect themselves from being profiled by a chatbot, which automatically assesses users' personality. Participants interacted twice with the chatbot: (1) They chatted for 45 minutes in customer service scenarios and received their actual profile (baseline). (2) They then were asked to repeat the interaction and to disguise their personality by strategically tricking the chatbot into calculating a falsified profile. In interviews, participants mentioned 41 different strategies but could only apply a subset of them in the interaction. They were able to manipulate all Big Five personality dimensions by nearly 10%. Participants regarded personality as very sensitive data. As they found tricking the AI too exhaustive for everyday use, we reflect on opportunities for privacy protective designs in the context of personality-aware systems.},
}