path: root/plugin/hackernews.py
blob: b27be67380138641eb8b12933517478ffb8ab321 (plain)
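
"""HackerNews.vim: browse Hacker News stories and comment threads inside Vim.

Runs in Vim's Python 2 interpreter (hence urllib2, HTMLParser, and print
statements); the vim module supplies buffer and command access.
"""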
import HTMLParser
import json
import re
import textwrap
import urllib
import urllib2
import vim


# Unofficial Hacker News JSON API (node-hnapi)
API_URL = "http://node-hnapi.herokuapp.com"


def hacker_news():
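    """List the first two pages of Hacker News stories in a scratch buffer."""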
    vim.command("edit .hackernews")
    vim.command("setlocal noswapfile")
    vim.command("setlocal buftype=nofile")

    b = vim.current.buffer
    b[0] = "Hacker News"
    b.append("===========")
    b.append("")

    # /news and /news2 are the first two pages of front-page stories
    news1 = json.loads(urllib2.urlopen(API_URL+"/news").read())
    news2 = json.loads(urllib2.urlopen(API_URL+"/news2").read())
    # One numbered line per story; the bracketed item id at the end is what
    # hacker_news_link() parses back out
    for i, item in enumerate(news1+news2):
        line = "%d. %s (%d comments) [%d]"
        line %= (i+1, item['title'], item['comments_count'], item['id'])
        b.append(line)
        b.append("")


def hacker_news_link():
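    """Open whatever the current line references.

    A bracketed [http...] URL is fetched and rendered as plain text;
    otherwise the trailing [item id] is opened as a story with comments.
    """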
    line = vim.current.line
    m = re.search(r"\[(http.*)\]", line)
    if m:
        vim.command("edit .hackernews")
        b = vim.current.buffer
        # fuckyeahmarkdown.com extracts the article body and returns it as
        # plain text; quote the URL so its own query string survives intact
        url = "http://fuckyeahmarkdown.com/go/?read=1&u=" + urllib.quote(m.group(1), safe="")
        content = urllib2.urlopen(url).read()
        # Hard-wrap each line to 80 columns; the very first wrapped line
        # replaces the buffer's first line, everything else is appended
        for i, line in enumerate(content.split('\n')):
            if not line:
                b.append("")
                continue
            line = textwrap.wrap(line, width=80)
            for j, wrap in enumerate(line):
                if i == 0 and j == 0:
                    b[0] = wrap
                else:
                    b.append(wrap)
        return

    # No [http...] link on this line; fall back to the trailing [item id]
    start, end = line.rfind('['), line.rfind(']')
    if start < 0 or end < 0:
        print "HackerNews.vim Error: Could not parse [item id]"
        return
    item_id = line[start+1:end]

    item = json.loads(urllib2.urlopen(API_URL+"/item/"+item_id).read())

    vim.command("edit .hackernews")
    b = vim.current.buffer
    b[0] = item['title']
    b.append("Posted %s by %s" % (item['time_ago'], item['user']))
    b.append("%d Points / %d Comments" % (item['points'], item['comments_count']))
    b.append("[%s]" % item['url'])
    b.append("")
    b.append("")
    print_comments(item['comments'], b)


# Shared parser instance, used only for its unescape() method
html = HTMLParser.HTMLParser()

def print_comments(comments, b):
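    """Recursively append a comment tree to buffer b, indented by nesting level."""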
    for comment in comments:
        level = comment['level']
        b.append("%sComment by %s %s:" % ("\t"*level, comment.get('user','???'), comment['time_ago']))
        for p in comment['content'].split("<p>"):
            p = html.unescape(p)
            # Convert <a href="http://url/">Text</a> tags to a markdown-style
            # (Text)[http://url/] form whose bracketed URL hacker_news_link()
            # can open later
            s = p.find("a>")
            while s > 0:
                s += 2
                section = p[:s]
                m = re.search(r"<a.*href=[\"\']([^\"\']*)[\"\'].*>(.*)</a>", section)
                if not m:
                    break  # this "a>" was not part of a closing anchor tag
                # Do not bother with anchor text if it is the same as the href url
                if m.group(1)[:20] == m.group(2)[:20]:
                    p = p.replace(m.group(0), "[%s]" % m.group(1))
                else:
                    p = p.replace(m.group(0), "(%s)[%s]" % (m.group(2), m.group(1)))
                s = p.find("a>")

            # Strip any remaining HTML tags, then wrap to 80 columns with
            # four spaces of indent per nesting level
            contents = textwrap.wrap(re.sub('<[^<]+?>', '', p),
                                     width=80,
                                     initial_indent=" "*4*level,
                                     subsequent_indent=" "*4*level)
            for line in contents:
                b.append(line)
            if contents:
                b.append("")
        b.append("")
        print_comments(comment['comments'], b)
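

# A minimal sketch of how the Vimscript side might invoke these functions.
# The command and mapping names below are illustrative assumptions; the real
# bindings live in the plugin's .vim file:
#
#   command! HackerNews python hacker_news()
#   " inside the .hackernews buffer, open the item under the cursor:
#   nnoremap <buffer> <CR> :python hacker_news_link()<CR>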