@article{aup:/content/journals/10.5117/CCR2020.2.001.MAIE,
  author    = {Maier, Daniel and Niekler, Andreas and Wiedemann, Gregor and Stoltenberg, Daniela},
  title     = {How Document Sampling and Vocabulary Pruning Affect the Results of Topic Models},
  journal   = {Computational Communication Research},
  year      = {2020},
  volume    = {2},
  number    = {2},
  pages     = {139--152},
  doi       = {10.5117/CCR2020.2.001.MAIE},
  url       = {https://www.aup-online.com/content/journals/10.5117/CCR2020.2.001.MAIE},
  publisher = {Amsterdam University Press},
  issn      = {2665-9085},
  keywords  = {model selection, preprocessing, topic model, text analysis, latent Dirichlet allocation},
  abstract  = {Topic modeling enables researchers to explore large document corpora. Large corpora, however, can be extremely costly to model in terms of time and computing resources. In order to circumvent this problem, two techniques have been suggested: (1) to model random document samples, and (2) to prune the vocabulary of the corpus. Although frequently applied, there has been no systematic inquiry into how the application of these techniques affects the respective models. Using three empirical corpora with different characteristics (news articles, websites, and Tweets), we systematically investigated how different sample sizes and pruning affect the resulting topic models in comparison to models of the full corpora. Our inquiry provides evidence that both techniques are viable tools that will likely not impair the resulting model. Sample-based topic models closely resemble corpus-based models if the sample size is large enough (> 10,000 documents). Moreover, extensive pruning does not compromise the quality of the resultant topics.},
}