macro add_mapping(mapping)
  def initialize({{*mapping.keys.map { |id| "@#{id}".id }}})
  end

  def to_a
    return [{{*mapping.keys.map { |id| "@#{id}".id }}}]
  end

  DB.mapping({{mapping}})
end
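
# A hedged sketch of what `add_mapping` generates: for a hypothetical
# `add_mapping({foo: String, bar: Int32})`, the macro expands to roughly
#
#   def initialize(@foo, @bar)
#   end
#
#   def to_a
#     return [@foo, @bar]
#   end
#
#   DB.mapping({foo: String, bar: Int32})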

macro templated(filename)
  render "src/views/#{{{filename}}}.ecr", "src/views/layout.ecr"
end
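
# Illustrative usage: `templated "index"` renders "src/views/index.ecr"
# inside "src/views/layout.ecr".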

class Video
  module HTTPParamConverter
    def self.from_rs(rs)
      HTTP::Params.parse(rs.read(String))
    end
  end

  add_mapping({
    id:   String,
    info: {
      type:      HTTP::Params,
      default:   HTTP::Params.parse(""),
      converter: Video::HTTPParamConverter,
    },
    updated:      Time,
    title:        String,
    views:        Int64,
    likes:        Int32,
    dislikes:     Int32,
    wilson_score: Float64,
    published:    Time,
    description:  String,
  })
end

class RedditSubmit
  JSON.mapping({
    data: RedditSubmitData,
  })
end

class RedditSubmitData
  JSON.mapping({
    children: Array(RedditThread),
  })
end

class RedditThread
  JSON.mapping({
    data: RedditThreadData,
  })
end

class RedditThreadData
  JSON.mapping({
    subreddit:    String,
    id:           String,
    num_comments: Int32,
    score:        Int32,
    author:       String,
    permalink:    String,
    title:        String,
  })
end

# See http://www.evanmiller.org/how-not-to-sort-by-average-rating.html
def ci_lower_bound(pos, n)
  if n == 0
    return 0.0
  end

  # z value here represents a confidence level of 0.95
  z = 1.96
  phat = 1.0*pos/n

  return (phat + z*z/(2*n) - z * Math.sqrt((phat*(1 - phat) + z*z/(4*n))/n))/(1 + z*z/n)
end
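
# Worked example (illustrative): with 70 positive ratings out of 100,
# phat = 0.7 and the Wilson lower bound works out to roughly 0.604, i.e.
# we can be 95% confident the "true" positive rate is at least ~60%.
#
#   ci_lower_bound(70, 100) # => ~0.604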

def elapsed_text(elapsed)
  millis = elapsed.total_milliseconds
  return "#{millis.round(2)}ms" if millis >= 1

  "#{(millis * 1000).round(2)}µs"
end

def get_client(pool)
  while pool.empty?
    sleep rand(0..10).milliseconds
  end

  return pool.shift
end
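
# A minimal usage sketch, assuming a pool built with `make_client` below
# (the pool itself is hypothetical here, not defined in this file):
#
#   pool = [] of HTTP::Client
#   5.times { pool << make_client(URI.parse("https://www.youtube.com")) }
#
#   client = get_client(pool)
#   # ... use client ...
#   pool << client # hand the client back so other callers can reuse it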

def fetch_video(id, client)
  info = client.get("/get_video_info?video_id=#{id}&el=detailpage&ps=default&eurl=&gl=US&hl=en").body
  html = client.get("/watch?v=#{id}").body

  html = XML.parse_html(html)
  info = HTTP::Params.parse(info)

  if info["reason"]?
    info = client.get("/get_video_info?video_id=#{id}&ps=default&eurl=&gl=US&hl=en").body
    info = HTTP::Params.parse(info)
    if info["reason"]?
      raise info["reason"]
    end
  end

  title = info["title"]

  views = info["view_count"].to_i64

  likes = html.xpath_node(%q(//button[@title="I like this"]/span))
  likes = likes ? likes.content.delete(",").to_i : 0

  dislikes = html.xpath_node(%q(//button[@title="I dislike this"]/span))
  dislikes = dislikes ? dislikes.content.delete(",").to_i : 0

  description = html.xpath_node(%q(//p[@id="eow-description"]))
  description = description ? description.to_xml : ""

  wilson_score = ci_lower_bound(likes, likes + dislikes)

  published = html.xpath_node(%q(//strong[contains(@class,"watch-time-text")]))
  if published
    published = published.content
  else
    raise "Could not find date published"
  end

  published = published.lchop("Published ")
  published = published.lchop("Streamed live ")
  published = published.lchop("Started streaming ")
  published = published.lchop("on ")
  published = published.lchop("Scheduled for ")
  if !published.includes?("ago")
    published = Time.parse(published, "%b %-d, %Y")
  else
    # Time matches format "20 hours ago", "40 minutes ago"...
    delta = published.split(" ")[0].to_i
    case published
    when .includes? "minute"
      published = Time.now - delta.minutes
    when .includes? "hour"
      published = Time.now - delta.hours
    else
      raise "Could not parse #{published}"
    end
  end

  video = Video.new(id, info, Time.now, title, views, likes, dislikes, wilson_score, published, description)

  return video
end

def get_video(id, client, db, refresh = true)
  if db.query_one?("SELECT EXISTS (SELECT true FROM videos WHERE id = $1)", id, as: Bool)
    video = db.query_one("SELECT * FROM videos WHERE id = $1", id, as: Video)

    # If the record was last updated more than an hour ago, refresh it
    # (the expire param in the response lasts for 6 hours)
    if refresh && Time.now - video.updated > 1.hours
      video = fetch_video(id, client)
      db.exec("DELETE FROM videos * WHERE id = $1", id)
      args = arg_array(video.to_a)
      db.exec("INSERT INTO videos VALUES (#{args})", video.to_a)
    end
  else
    video = fetch_video(id, client)
    args = arg_array(video.to_a)
    db.exec("INSERT INTO videos VALUES (#{args})", video.to_a)
  end

  return video
end
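
# Hedged usage sketch (`PG_DB` and the video ID are hypothetical):
#
#   video = get_video("abcdefghijk", client, PG_DB)
#   puts video.title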

def search(query, client)
  html = client.get("https://www.youtube.com/results?q=#{query}&sp=EgIQAVAU").body

  html = XML.parse_html(html)

  html.xpath_nodes(%q(//ol[@class="item-section"]/li)).each do |item|
    root = item.xpath_node(%q(div[contains(@class,"yt-lockup-video")]/div))
    if root
      link = root.xpath_node(%q(div[contains(@class,"yt-lockup-thumbnail")]/a/@href))
      if link
        yield link.content.split("=")[1]
      end
    end
  end
end
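
# `search` yields each video ID it scrapes off the results page, so a
# caller iterates with a block (illustrative):
#
#   search("crystal lang", client) do |id|
#     puts id
#   end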

def splice(a, b)
  c = a[0]
  a[0] = a[b % a.size]
  a[b % a.size] = c
  return a
end
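
# `splice` swaps the first element with the element at index b (mod size).
# Worked example: splice(["a", "b", "c", "d"], 2) # => ["c", "b", "a", "d"]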

def decrypt_signature(a)
  a = a.split("")

  a.reverse!
  a = splice(a, 23)
  a.delete_at(0..2)
  a.reverse!
  a = splice(a, 45)
  a.reverse!
  a = splice(a, 66)

  return a.join("")
end
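
# The exact sequence of reverses, splices, and deletions above appears to
# mirror the signature scramble in YouTube's player at the time it was
# transcribed; transforms like these go stale whenever the player updates
# and have to be re-derived.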

def rank_videos(db, n)
  top = [] of {Float64, String}

  db.query("SELECT id, wilson_score, published FROM videos WHERE views > 5000 ORDER BY published DESC LIMIT 10000") do |rs|
    rs.each do
      id = rs.read(String)
      wilson_score = rs.read(Float64)
      published = rs.read(Time)

      # Exponential decay, older videos tend to rank lower
      temperature = wilson_score * Math.exp(-0.000005*((Time.now - published).total_minutes))
      top << {temperature, id}
    end
  end

  top.sort!

  # Make hottest come first
  top.reverse!
  top = top.map { |a, b| b }

  # Return the top n video IDs
  return top[0..n - 1]
end
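
# Decay math, for intuition: exp(-0.000005 * minutes) halves a score every
# ln(2)/0.000005 ≈ 138,629 minutes, i.e. roughly every 96 days.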

def make_client(url)
  context = OpenSSL::SSL::Context::Client.new
  context.verify_mode = OpenSSL::SSL::VerifyMode::NONE
  context.add_options(
    OpenSSL::SSL::Options::ALL |
    OpenSSL::SSL::Options::NO_SSL_V2 |
    OpenSSL::SSL::Options::NO_SSL_V3
  )
  client = HTTP::Client.new(url, context)
  client.read_timeout = 10.seconds
  client.connect_timeout = 10.seconds
  return client
end
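
# Illustrative call:
#
#   youtube = make_client(URI.parse("https://www.youtube.com"))
#
# Note that verify_mode = NONE disables certificate verification, trading
# security for fewer handshake failures.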

def get_reddit_comments(id, client, headers)
  query = "(url:3D#{id}%20OR%20url:#{id})%20(site:youtube.com%20OR%20site:youtu.be)"
  search_results = client.get("/search.json?q=#{query}", headers)

  if search_results.status_code == 200
    search_results = RedditSubmit.from_json(search_results.body)

    thread = search_results.data.children.sort_by { |child| child.data.score }[-1]
    result = client.get("/r/#{thread.data.subreddit}/comments/#{thread.data.id}?limit=100&sort=top", headers).body
    result = JSON.parse(result)
  elsif search_results.status_code == 302
    search_results = client.get(search_results.headers["Location"], headers).body

    result = JSON.parse(search_results)
    thread = RedditThread.from_json(result[0]["data"]["children"][0].to_json)
  else
    raise "Got error code #{search_results.status_code}"
  end

  comments = result[1]["data"]["children"]
  return comments, thread
end

def template_comments(root)
  html = ""
  root.each do |child|
    if child["data"]["body_html"]?
      author = child["data"]["author"]
      score = child["data"]["score"]
      body_html = HTML.unescape(child["data"]["body_html"].as_s)

      # Replace local links with links back to Reddit
      body_html = fill_links(body_html, "https", "www.reddit.com")

      replies_html = ""
      if child["data"]["replies"] != ""
        replies_html = template_comments(child["data"]["replies"]["data"]["children"])
      end

      content = <<-END_HTML
      <p>
        <a href="javascript:void(0)" onclick="toggle(this)">[ - ]</a> #{score} <b>#{author}</b>
      </p>
      <div>
      #{body_html}
      #{replies_html}
      </div>
      END_HTML

      if child["data"]["depth"].as_i > 0
        html += <<-END_HTML
        <div class="pure-g">
          <div class="pure-u-1-24"></div>
          <div class="pure-u-23-24">
            #{content}
          </div>
        </div>
        END_HTML
      else
        html += <<-END_HTML
        <div class="pure-g">
          <div class="pure-u-1">
            #{content}
          </div>
        </div>
        END_HTML
      end
    end
  end

  return html
end

def number_with_separator(number)
  number.to_s.reverse.gsub(/(\d{3})(?=\d)/, "\\1,").reverse
end
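
# Worked example: number_with_separator(1234567) reverses to "7654321",
# inserts a comma after every third digit, then reverses back:
#
#   number_with_separator(1234567) # => "1,234,567"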

def arg_array(array)
  args = [] of String
  (1..array.size).each { |i| args << "($#{i})," }
  args = args.join("")
  args = args.chomp(",")

  return args
end
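
# Produces the placeholder list for a parameterized INSERT, e.g.:
#
#   arg_array([1, 2, 3]) # => "($1),($2),($3)"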

def add_alt_links(html)
  alt_links = [] of {Int32, String}

  # This is painful but is likely the only way to accomplish this in Crystal,
  # as Crystagiri and others are not able to insert XML Nodes into a document.
  # The goal here is to use as little regex as possible
  html.scan(/<a[^>]*>([^<]+)<\/a>/) do |match|
    anchor = XML.parse_html(match[0])
    anchor = anchor.xpath_node("//a").not_nil!
    url = URI.parse(HTML.unescape(anchor["href"]))

    if ["www.youtube.com", "m.youtube.com"].includes?(url.host) && url.path == "/watch" || url.host == "youtu.be"
      alt_link = <<-END_HTML
      <a href="#{url.full_path}">
        <i class="fa fa-link" aria-hidden="true"></i>
      </a>
      END_HTML

      alt_links << {match.end.not_nil!, alt_link}
    end
  end

  alt_links.reverse!
  alt_links.each do |position, alt_link|
    html = html.insert(position, alt_link)
  end

  return html
end

def fill_links(html, scheme, host)
  html = XML.parse_html(html)

  html.xpath_nodes("//a").each do |match|
    url = URI.parse(match["href"])
    if !url.host # If the link is relative, e.g. a local Reddit link
      url.scheme = scheme
      url.host = host
      match["href"] = url
    end
  end

  html = html.to_xml
end
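
# Illustrative use: a path-only href gets the given scheme and host
# filled in.
#
#   fill_links(%(<a href="/r/crystal">/r/crystal</a>), "https", "www.reddit.com")
#   # => anchor now points at https://www.reddit.com/r/crystal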