@article{Reilly:2012:1525-4011:75,
  author          = {Reilly, Bernard F.},
  title           = {{CRL} Reports},
  journal         = {The Charleston Advisor},
  year            = {2012},
  volume          = {14},
  number          = {2},
  pages           = {75--76},
  issn            = {1525-4011},
  eissn           = {1525-4003},
  doi             = {10.5260/chara.14.2.75},
  url             = {https://annurev.publisher.ingentaconnect.com/content/annurev/tca/2012/00000014/00000002/art00022},
  abstract        = {There has been a fair amount of discussion of late in information industry circles about text mining. Researchers in academia now have access to immense corpora of text that are openly available on the Web: the millions of public domain books and serials available courtesy of Google; and vast troves of government documents courtesy of open government initiatives in the U.S. and U.K. and third-party actors like WikiLeaks and the National Security Archive. The growing application of text mining techniques and technologies in many fields of research has implications that are beginning to be felt by libraries. Text mining is generally defined as the automated processing of large amounts of digital data or textual content for purposes of information retrieval, extraction, interpretation, and analysis. Modern researchers now employ proprietary and open source software and tools to process and make sense of the oceans of information at their disposal in ways never before possible. Most text mining involves downloading a fixed body of text and accompanying metadata to a local host system or platform, and running it through certain processes that can detect patterns, trends, biases, and other phenomena in the underlying content. These phenomena can then form the basis for new observations, visualizations, models, and so forth.1},
  parent_itemid   = {infobike://annurev/tca},
  publishercode   = {annurev},
  publicationdate = {2012-10-01T00:00:00},
  itemtype        = {ARTICLE},
  internal-note   = {parent_itemid, publishercode, publicationdate, itemtype are IngentaConnect provider metadata; ignored by standard styles. publicationdate renamed from the illegal field name "publication date".},
}