diff --git a/Project.toml b/Project.toml
index 441da011..6294e2e8 100644
--- a/Project.toml
+++ b/Project.toml
@@ -12,6 +12,7 @@ InteractiveUtils = "b77e0a4c-d291-57a0-90e8-8db25a27a240"
 JSON = "682c06a0-de6a-54ab-a142-c8b1cf79cde6"
 Languages = "8ef0a80b-9436-5d2c-a485-80b904378c43"
 LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
+OrderedCollections = "bac558e1-5e72-5ebc-8fee-abe8a469f55d"
 Printf = "de0858da-6303-5e67-8744-51eddeeeb8d7"
 ProgressMeter = "92933f4c-e287-5a05-a399-4b506db050ca"
 Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
@@ -29,6 +30,7 @@ DelimitedFiles = "1"
 DocStringExtensions = "0.9"
 JSON = "0.21, 1"
 Languages = "0.4"
+OrderedCollections = "1.7.0"
 ProgressMeter = "1"
 Snowball = "0.1"
 Statistics = "1"
diff --git a/docs/src/features.md b/docs/src/features.md
index eb95e6db..2e2ba289 100644
--- a/docs/src/features.md
+++ b/docs/src/features.md
@@ -102,6 +102,20 @@ julia> hash_dtv(crps[1])
  0 0 0 0 0 0 0 0 0 0 0 0 0 … 0 0 0 0 0 0 0 0 0 0 0 0
 ```
 
+## Top Features
+
+We can use the function `top_terms(x, n)` to quickly view the top features of a `Document`, `DocumentTermMatrix` or `Corpus`.
+
+```julia
+julia> top_terms(m, 5)
+OrderedCollections.OrderedDict{String, Int64} with 5 entries:
+  "To"     => 2
+  "be"     => 2
+  "become" => 2
+  "not"    => 2
+  "or"     => 2
+```
+
 ## TF (Term Frequency)
 
 Often we need to find out what proportion of a document is contributed by each term. This can be done using the term frequency function:
diff --git a/src/TextAnalysis.jl b/src/TextAnalysis.jl
index 2d763b6b..99d3a3d3 100644
--- a/src/TextAnalysis.jl
+++ b/src/TextAnalysis.jl
@@ -3,6 +3,7 @@ using SparseArrays
 using Printf
 using LinearAlgebra
 using StatsBase: countmap, addcounts!
+using OrderedCollections: OrderedDict
 using Languages
 using WordTokenizers
 using Snowball
@@ -54,6 +55,7 @@ export tf, tf_idf, bm_25, lsa, lda, summarize, cos_similarity
 export tf!, tf_idf!, bm_25!, lda!
 export remove_patterns!, remove_patterns
 export prune!
+export top_terms
 export strip_patterns, strip_corrupt_utf8, strip_case, stem_words, tag_part_of_speech, strip_whitespace, strip_punctuation
 export strip_numbers, strip_non_letters, strip_indefinite_articles, strip_definite_articles, strip_articles
 
diff --git a/src/corpus.jl b/src/corpus.jl
index 9d3b273b..4374ada7 100644
--- a/src/corpus.jl
+++ b/src/corpus.jl
@@ -298,3 +298,19 @@ function standardize!(crps::Corpus, ::Type{T}) where {T<:AbstractDocument}
         crps.documents[i] = convert(T, crps.documents[i])
     end
 end
+
+##############################################################################
+#
+# top_terms() methods
+#
+##############################################################################
+
+function top_terms(lx::Dict{String,Int}, ::Val{N}) where {N}
+    D_pairs = collect(pairs(lx))
+    n = min(N, length(D_pairs))
+    # Count decreasing, break ties alphabetically
+    idx = partialsortperm(D_pairs, 1:n; by = p -> (-p.second, p.first))
+    OrderedDict(D_pairs[idx])
+end
+top_terms(lx::Dict{String,Int}, n::Int) = top_terms(lx, Val(n))
+top_terms(crps::Corpus, n::Int) = top_terms(lexicon(crps), Val(n))
\ No newline at end of file
diff --git a/src/document.jl b/src/document.jl
index e933f9a7..bf14373f 100644
--- a/src/document.jl
+++ b/src/document.jl
@@ -398,3 +398,18 @@ Base.convert(::Type{NGramDocument}, d::NGramDocument) = d
 ##############################################################################
 
 Base.getindex(d::AbstractDocument, term::AbstractString) = ngrams(d)[term]
+
+##############################################################################
+#
+# top_terms() methods
+#
+##############################################################################
+
+function top_terms(d::AbstractDocument, ::Val{N}) where {N}
+    D_pairs = collect(pairs(countmap(tokens(d))))
+    n = min(N, length(D_pairs))
+    # Count decreasing, break ties alphabetically
+    idx = partialsortperm(D_pairs, 1:n; by = p -> (-p.second, p.first))
+    OrderedDict(D_pairs[idx])
+end
+top_terms(d::AbstractDocument, n::Int) = top_terms(d, Val(n))
\ No newline at end of file
diff --git a/src/dtm.jl b/src/dtm.jl
index 35c9cc7c..b2ab553e 100644
--- a/src/dtm.jl
+++ b/src/dtm.jl
@@ -440,3 +440,20 @@ function merge!(dtm1::DocumentTermMatrix{T}, dtm2::DocumentTermMatrix{T}) where
 
     dtm1
 end
+
+"""
+    top_terms(x, n)
+
+Return the top `n` terms of `x`, sorted in descending frequency.
+Accepts a `Corpus`, `AbstractDocument`, lexicon `Dict`, or `DocumentTermMatrix`.
+Ties are sorted alphabetically.
+"""
+function top_terms(D::DocumentTermMatrix, ::Val{N}) where {N}
+    counts = @view(sum(D.dtm; dims=1)[1, :])
+    D_pairs = D.terms .=> counts
+    n = min(N, length(D_pairs))
+    # Count decreasing, break ties alphabetically
+    idx = partialsortperm(D_pairs, 1:n; by = p -> (-p.second, p.first))
+    OrderedDict(D_pairs[idx])
+end
+top_terms(D::DocumentTermMatrix, n::Int) = top_terms(D, Val(n))
\ No newline at end of file
diff --git a/test/corpus.jl b/test/corpus.jl
index 044c89a6..3495697b 100644
--- a/test/corpus.jl
+++ b/test/corpus.jl
@@ -39,6 +39,8 @@
     update_lexicon!(crps)
 
     answer = Dict("1" => 2, "2" => 1, "4" => 1)
 
+    @test top_terms(crps, 1) == top_terms(crps[1], 1)
+
     @test answer == lexicon(crps)
 end
diff --git a/test/document.jl b/test/document.jl
index 8ffa3ef3..4af83db3 100644
--- a/test/document.jl
+++ b/test/document.jl
@@ -66,6 +66,12 @@
     @test isa(ngd, NGramDocument)
     @test "To" in keys(ngrams(ngd))
 
+    # Test top features
+    top = top_terms(sd, 5)
+    @test collect(keys(top)) == ["be", "To", "not", "or", "to"]
+    @test collect(values(top)) == [2, 1, 1, 1, 1]
+    @test top_terms(sd, 2) == OrderedDict("be" => 2, "To" => 1)
+
     sd = StringDocument(hamlet_text)
     td = TokenDocument(hamlet_text)
     ngd = NGramDocument(hamlet_text)
diff --git a/test/dtm.jl b/test/dtm.jl
index 0a2f01fd..8292152b 100644
--- a/test/dtm.jl
+++ b/test/dtm.jl
@@ -109,4 +109,13 @@
     @test dtm2.terms == ["five", "four", "three", "two"]
     @test size(dtm2.dtm) == (2, 4)
     @test sum(dtm2.dtm, dims=(1,)) == [1 2 2 1]
+
+    # Test top_terms
+    crps3 = Corpus([FileDocument(sample_file)])
+    update_lexicon!(crps3)
+    m3 = DocumentTermMatrix(crps3)
+    top5 = top_terms(m3, 5)
+    @test top5 isa OrderedDict
+    @test collect(keys(top5)) == [",", "thou", "And", "and", ";"]
+    @test collect(values(top5)) == [29, 6, 5, 5, 3]
 end
diff --git a/test/runtests.jl b/test/runtests.jl
index 3ea9e016..471b95d6 100644
--- a/test/runtests.jl
+++ b/test/runtests.jl
@@ -4,6 +4,7 @@ using Languages
 using TextAnalysis
 using WordTokenizers
 using Serialization
+using OrderedCollections: OrderedDict
 
 tests = [
     "coom.jl"