R: tidytext RPJP BAPPENAS


install.packages("xlsx")
install.packages("tm")
install.packages("wordcloud")
install.packages("ggplot2")
library(xlsx)
library(tm)
library(wordcloud)
library(ggplot2)
library(tidyverse)
library(tidytext)
library(tm)
directory <- "data-pdf"

# create corpus from pdfs
docs <- VCorpus(DirSource(directory), readerControl = list(reader = readPDF))
# docs <- VCorpus(DirSource("data", recursive=TRUE))
# Get the document term matrices
BigramTokenizer <- function(x) NGramTokenizer(x, Weka_control(min = 2, max = 2))
dtm_unigram <- DocumentTermMatrix(docs, control = list(tokenize="words", 
    removePunctuation = TRUE, 
    stopwords = c(stopwords::stopwords("id", source = "stopwords-iso"),"tabel","pada","dan"),
    stemming = TRUE))
dtm_bigram <- DocumentTermMatrix(docs, control = list(tokenize = BigramTokenizer,
    removePunctuation = TRUE,
    stopwords = c(stopwords::stopwords("id", source = "stopwords-iso"),"tabel","pada","dan"),
    stemming = TRUE))
# without stemming
#
BigramTokenizer <- function(x) NGramTokenizer(x, Weka_control(min = 2, max = 2))
dtm_unigram <- DocumentTermMatrix(docs, control = list(tokenize = "words",
    removePunctuation = TRUE,
    stopwords = c(stopwords::stopwords("id", source = "stopwords-iso"), "tabel", "pada", "dan")))
dtm_bigram <- DocumentTermMatrix(docs, control = list(tokenize = BigramTokenizer,
    removePunctuation = TRUE,
    stopwords = c(stopwords::stopwords("id", source = "stopwords-iso"), "tabel", "pada", "dan")))
inspect(dtm_unigram)
inspect(dtm_bigram)
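ggplot2 and tidytext are loaded above but not used yet; as a sketch (assuming dtm_unigram from the steps above), the matrix can be tidied and the most frequent terms plotted:

# sketch: tidy dtm_unigram and plot the 20 most frequent terms
term_counts <- tidy(dtm_unigram) %>%
  group_by(term) %>%
  summarise(n = sum(count)) %>%
  arrange(desc(n))
term_counts %>%
  top_n(20, n) %>%
  ggplot(aes(x = reorder(term, n), y = n)) +
  geom_col() +
  coord_flip() +
  labs(x = "term", y = "frequency")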


# converted is a DocumentTermMatrix (e.g., dtm_unigram built above); drop terms that contain digits
converted %>%
  tidy() %>%
  filter(!grepl("[0-9]+", term))
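wordcloud is loaded at the top but never used; a minimal follow-up sketch, assuming converted is dtm_unigram as the comment above suggests, turns the digit-free terms into a word cloud:

# sketch: word cloud of the digit-free terms (converted is assumed to be dtm_unigram)
cleaned <- converted %>%
  tidy() %>%
  filter(!grepl("[0-9]+", term)) %>%
  count(term, wt = count, sort = TRUE)
set.seed(1234)
wordcloud(words = cleaned$term, freq = cleaned$n, max.words = 100, random.order = FALSE)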



Interesting Links