R: tidytext RPJP BAPPENAS


library(tidyverse)
library(tidytext)
library(tm)
directory <- "data-pdf"

# create a corpus from the PDF files and build a document-term matrix
# (readPDF() relies on a PDF extraction engine such as the pdftools package)
converted <- VCorpus(DirSource(directory), readerControl = list(reader = readPDF)) %>%
  DocumentTermMatrix()

# converted is a DocumentTermMatrix; tidy it and drop terms that contain digits
converted %>%
  tidy() %>%
  filter(!grepl("[0-9]+", term))
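
The tidied matrix is an ordinary data frame with the columns document, term, and count, so it can be summarised with the usual dplyr and tidytext verbs. A rough sketch (object names are illustrative, assuming converted was built as above) of the most frequent terms per document and their tf-idf weights:

# keep the tidied, number-free terms for further analysis
tidy_terms <- converted %>%
  tidy() %>%
  filter(!grepl("[0-9]+", term))

# ten most frequent terms in each document
tidy_terms %>%
  group_by(document) %>%
  slice_max(count, n = 10) %>%
  ungroup()

# tf-idf highlights terms that characterise one document relative to the others
tidy_terms %>%
  bind_tf_idf(term, document, count) %>%
  arrange(desc(tf_idf))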


library(RWeka)  # NGramTokenizer() and Weka_control() below come from RWeka
docs <- VCorpus(DirSource("data", recursive = TRUE))
# Get the document-term matrices: unigrams with the default tokenizer, bigrams via RWeka
BigramTokenizer <- function(x) NGramTokenizer(x, Weka_control(min = 2, max = 2))
# unigram DTM: strip punctuation, drop English stop words, stem the terms
dtm_unigram <- DocumentTermMatrix(docs, control = list(tokenize = "words",
    removePunctuation = TRUE,
    stopwords = stopwords("english"),
    stemming = TRUE))
# bigram DTM with the same preprocessing but the RWeka bigram tokenizer
dtm_bigram <- DocumentTermMatrix(docs, control = list(tokenize = BigramTokenizer,
    removePunctuation = TRUE,
    stopwords = stopwords("english"),
    stemming = TRUE))
# inspect() shows dimensions, sparsity, and a sample of the term counts
inspect(dtm_unigram)
inspect(dtm_bigram)
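
Both matrices can also be pulled back into the tidytext format for further work. A minimal sketch, assuming dtm_bigram was built as above and tidytext is loaded, that lists the most frequent bigrams across the whole corpus:

# most common bigrams over all documents; count() weights by the count column
dtm_bigram %>%
  tidy() %>%
  count(term, wt = count, sort = TRUE) %>%
  head(20)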



Interesting Links