install.packages("stopwords")
# atau install.packages("devtools") devtools::install_github("quanteda/stopwords")
head(stopwords::stopwords("de", source = "snowball"), 20) head(stopwords::stopwords("id", source = "stopwords-iso"), 20)
stopwords::stopwords_getsources() stopwords::stopwords_getlanguages("snowball") stopwords::stopwords_getlanguages("stopwords-iso")
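These lists are plain character vectors, so they can be fed straight into tm's removeWords transformation. A minimal sketch, assuming tm is installed (the Indonesian sample sentence is invented for illustration):

library(tm)
docs <- Corpus(VectorSource("ini adalah sebuah contoh kalimat"))
docs <- tm_map(docs, removeWords,
               stopwords::stopwords("id", source = "stopwords-iso"))
as.character(docs[[1]])  # common words such as "ini" and "adalah" are blanked out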
==Example 1==
documents = c("She had toast for breakfast", "The coffee this morning was excellent", "For lunch let's all have pancakes", "Later in the day, there will be more talks", "The talks on the first day were great", "The second day should have good presentations too") library(tm) documents <- Corpus(VectorSource(documents)) documents = tm_map(documents, content_transformer(tolower)) documents = tm_map(documents, removePunctuation) documents = tm_map(documents, removeWords, stopwords("english")) documents
==Example 2==
# download and install the package from CRAN
install.packages("tm")
# load tm
library(tm)
# load a text file from the local computer
# ("filepath" is a placeholder; replace it with the path to your file)
newdata <- readLines(filepath)
# load the data as a corpus
# VectorSource() treats each element of a character vector as a document
mydata <- Corpus(VectorSource(newdata))
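filepath above is just a placeholder; a hypothetical usage, with the file name invented for illustration:

filepath <- "mytext.txt"  # hypothetical file in the working directory
newdata <- readLines(filepath)
mydata <- Corpus(VectorSource(newdata))
inspect(mydata)  # summary of the documents in the corpus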
# convert to lower case
mydata <- tm_map(mydata, content_transformer(tolower))
# remove URLs (before stripping punctuation, so the "http" pattern still matches)
removeURL <- function(x) gsub("http[^[:space:]]*", "", x)
mydata <- tm_map(mydata, content_transformer(removeURL))
# replace non-word characters (emojis, symbols) with a space
mydata <- tm_map(mydata, content_transformer(gsub),
                 pattern = "\\W", replacement = " ")
# remove anything other than English letters or spaces
removeNumPunct <- function(x) gsub("[^[:alpha:][:space:]]*", "", x)
mydata <- tm_map(mydata, content_transformer(removeNumPunct))
# remove stopwords
mydata <- tm_map(mydata, removeWords, stopwords("english"))
# you can build a custom stopword list instead, e.g.:
# myStopwords <- c(setdiff(stopwords("english"), c("r", "big")),
#                  "use", "see", "used", "via", "amp")
# mydata <- tm_map(mydata, removeWords, myStopwords)
# remove extra whitespace
mydata <- tm_map(mydata, stripWhitespace)
# remove numbers
mydata <- tm_map(mydata, removeNumbers)
# remove punctuation
mydata <- tm_map(mydata, removePunctuation)
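The commented-out custom list works by dropping words you want to keep ("r", "big") from the standard English list with setdiff(), then appending extra noise words. A quick check of what ends up in the list:

myStopwords <- c(setdiff(stopwords("english"), c("r", "big")),
                 "use", "see", "used", "via", "amp")
"big" %in% myStopwords  # FALSE: kept as a content word
"amp" %in% myStopwords  # TRUE: added to the list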
# stemming (reduce words to their stems)
library(SnowballC)
mydata <- tm_map(mydata, stemDocument)
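To see what the stemmer does to individual words, SnowballC's wordStem() can also be called directly on a character vector; a small sketch:

library(SnowballC)
wordStem(c("talks", "presentations", "excellent"), language = "english")
# returns the stemmed forms, e.g. "talk" for "talks"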