From e943faa33a3c4149aab2eab103f235f99849a320 Mon Sep 17 00:00:00 2001
From: Vincent Le Gallic
Date: Wed, 10 Apr 2013 05:47:37 +0200
Subject: [PATCH] Youtube channel videos can now be retrieved via the Youtube
 API
MIME-Version: 1.0
Content-Type: text/plain; charset=utf8
Content-Transfer-Encoding: 8bit

---
 today_server.py | 27 ++++++++++++++++++++++++++-
 1 file changed, 26 insertions(+), 1 deletion(-)

diff --git a/today_server.py b/today_server.py
index 98e4844..1be4d3f 100755
--- a/today_server.py
+++ b/today_server.py
@@ -14,6 +14,7 @@ import time, datetime
 import locale
 import re
 import BeautifulSoup
+from lxml import etree
 import os
 import sys
 import urllib
@@ -118,7 +119,31 @@ def last_noob_warpzone():
     ######
     ###### ARGH, shit, there are fucking things like "1er" in the dates… :/
     return noobs, warpzones
-    
+
+def parse_youtube(username):
+    """Retrieve the video titles of a Youtube channel via the Youtube GData API."""
+    link = "https://gdata.youtube.com/feeds/api/users/%s/uploads?start-index=1&max-results=50" % (username,)
+    entries = []
+    while link:
+        p = urllib.urlopen(link)
+        t = p.read()
+        x = etree.fromstring(t)
+        # lxml does not accept an empty namespace prefix in XPath queries, so remap the default namespace
+        ns = x.nsmap
+        ns["default"] = ns[None]
+        ns.pop(None)
+        # There may be a next page of results
+        nextlinks = x.xpath("//default:link[@rel='next']", namespaces=ns)
+        if nextlinks:
+            link = nextlinks[0].attrib["href"]
+        else:
+            link = False
+        localentries = x.xpath("//default:entry", namespaces=ns)
+        entries.extend(localentries)
+    titles = [e.xpath(".//default:title", namespaces=ns)[0].text for e in entries]
+    return titles
+
+
 def get_file():
     """Retrieves the list of the latest ids of each thing, stored in the file."""
     f = open(store_published_file)
-- 
2.39.2
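
For illustration only, a minimal usage sketch of the parse_youtube() helper added by this patch. It assumes Python 2 (the patched file already relies on urllib.urlopen), and the channel name passed in is a made-up placeholder, not something taken from the patch:

    # Fetch the titles of every upload of a channel; parse_youtube() follows
    # the feed's rel="next" links, so channels with more than 50 videos are
    # paged through automatically.
    titles = parse_youtube("someuser")  # "someuser" is a placeholder channel name
    for title in titles:
        print title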