module LWN.Page
where
-import qualified Data.Map as Map
+import qualified Data.Map as Map (lookup)
import Data.Time (getCurrentTime)
-import System.IO (Handle)
import qualified Data.ByteString.Lazy as B (ByteString, hPut)
-import Data.List (isInfixOf)
import Data.String.Utils (split, strip)
import Data.Maybe (catMaybes, fromJust, isNothing)
-import Data.Tree.NTree.TypeDefs (NTree)
+import Prelude hiding (readFile)
+import System.Directory (doesFileExist)
+import System.IO (Handle, hClose, hFlush, hPutStrLn, stderr)
+import System.IO.UTF8 (readFile)
import Test.HUnit (Assertion, assertEqual)
import Test.Framework (Test, testGroup)
import Test.Framework.Providers.HUnit (testCase)
-import Text.Pandoc
+import Text.Pandoc (
+ defaultParserState,
+ defaultWriterOptions,
+ readHtml,
+ writeEPUB,
+ writerEPUBMetadata)
import Text.XML.HXT.Core (
ArrowXml,
IOSArrow,
+ IOStateArrow,
XmlTree,
- XNode,
(>>>),
(/>),
(//>),
runX,
setElemName,
xshow,
- when
- )
+ when)
import Text.HandsomeSoup (css, parseHtml)
+import Configuration (Cfg, password, use_account, username)
import LWN.Article
-import LWN.HTTP (save_image)
+import LWN.HTTP (
+ ImageMap,
+ download_image_urls,
+ get_page,
+ log_in,
+ make_cookie_jar)
import LWN.URI (URL, try_make_absolute_url)
-import XHTML
+import LWN.XHTML (XHTML, parse_lwn, to_xhtml)
+import Misc (contains)
+
+
+-- | Try to parse the given article using HXT. We try a few different
+-- methods; if none of them work, we return 'Nothing'.
+get_xml_from_article :: Cfg -> URL -> IO (Maybe (IOStateArrow s b XmlTree))
+get_xml_from_article cfg article_name = do
+ my_article <- real_article_path article_name
+ is_file <- doesFileExist my_article
+ case is_file of
+ True -> do
+ -- The "URL" names a local file: read it (via the UTF-8
+ -- 'readFile' imported from System.IO.UTF8) and parse that.
+ contents <- readFile my_article
+ return $ Just $ parse_lwn contents
+ False -> do
+ -- Download the URL and try to parse it.
+ if use_account cfg then do
+ -- 'use_account' is only True when a username and password are
+ -- present in the configuration, so the fromJusts below are safe.
+ cj <- make_cookie_jar
+ li_result <- log_in cj
+ (fromJust $ username cfg)
+ (fromJust $ password cfg)
+
+ -- A failed login is reported on stderr but is not fatal: we
+ -- still attempt to fetch the page below, using the same
+ -- cookie jar.
+ case li_result of
+ Left err -> do
+ let msg = "Failed to log in. " ++ err
+ hPutStrLn stderr msg
+ Right response_body -> do
+ hPutStrLn stderr response_body
+
+ html <- get_page (Just cj) my_article
+
+ case html of
+ Left err -> do
+ let msg = "Failed to retrieve page. " ++ err
+ hPutStrLn stderr msg
+ return Nothing
+ Right h -> return $ Just $ parse_lwn h
+ else do
+ -- No account configured: fetch the page without a cookie jar.
+ html <- get_page Nothing my_article
+ case html of
+ Left err -> do
+ let msg = "Failed to retrieve page. " ++ err
+ hPutStrLn stderr msg
+ return Nothing
+ Right h -> return $ Just $ parse_lwn h
+
--- Map absolute image URLs to local system file paths where the image
--- referenced by the URL is stored.
-type ImageMap = Map.Map URL FilePath
-- Should be called *after* preprocessing.
-download_images :: IOSArrow XmlTree (NTree XNode) -> IO ImageMap
+-- | Collect every image URL referenced by a src attribute in the
+-- given XML and hand the list to 'download_image_urls', which
+-- returns the resulting 'ImageMap' (URL -> local file path; the
+-- type now lives in LWN.HTTP).
+download_images :: IOSArrow XmlTree XmlTree -> IO ImageMap
download_images xml = do
  image_urls <- runX $ xml >>> image_srcs
- files <- mapM save_image image_urls
- let pairs = zip image_urls files
- return $ foldl my_insert empty_map pairs
- where
- empty_map = Map.empty :: ImageMap
-
- my_insert :: ImageMap -> (URL, Maybe FilePath) -> ImageMap
- my_insert dict (_, Nothing) = dict
- my_insert dict (k, Just v) = Map.insert k v dict
+ download_image_urls image_urls
data Page =
+-- | Retrieve the given URL (or local file path) as XML via
+-- 'get_xml_from_article' and parse it into a 'Page'. Returns
+-- 'Nothing' if either the retrieval or the parse fails.
+page_from_url :: Cfg -> URL -> IO (Maybe Page)
+page_from_url cfg url = do
+ maybe_html <- get_xml_from_article cfg url
+ case maybe_html of
+ Just html -> parse html
+ Nothing -> return Nothing
+
+
is_link :: (ArrowXml a) => a XmlTree XmlTree
is_link =
isElem >>> hasName "a"
remove_comment_links :: (ArrowXml a) => a XmlTree XmlTree
remove_comment_links =
processTopDown $ kill_comments `when` is_link
- where
- contains = isInfixOf
-
+ where
is_comment_link =
hasAttrValue "href" (contains "#Comments")
processAttrl $ (change_src `when` (hasName "src"))
-parse :: IOSArrow XmlTree (NTree XNode) -> IO (Maybe Page)
+parse :: IOSArrow XmlTree XmlTree -> IO (Maybe Page)
parse xml = do
let clean_xml = xml >>> preprocess
image_map <- download_images clean_xml
return $
if (isNothing appr) then
fppr
- else
+ else
appr
-parse_headline :: IOSArrow XmlTree (NTree XNode) -> IO (Maybe String)
+parse_headline :: IOSArrow XmlTree XmlTree -> IO (Maybe String)
parse_headline xml = do
let element_filter = xml >>> css "div.PageHeadline h1"
let element_text_filter = element_filter /> getText
_ -> error "Found more than one headline."
-parse_byline :: IOSArrow XmlTree (NTree XNode) -> IO (Maybe String)
+parse_byline :: IOSArrow XmlTree XmlTree -> IO (Maybe String)
parse_byline xml = do
let element_filter = xml >>> css "div.FeatureByLine"
let element_text_filter = element_filter /> getText
--
-- ArticlePage Stuff
--
-ap_parse :: IOSArrow XmlTree (NTree XNode) -> IO (Maybe Page)
+ap_parse :: IOSArrow XmlTree XmlTree -> IO (Maybe Page)
ap_parse xml = do
- arts <- ap_parse_articles xml
+ arts <- ap_parse_articles xml
case arts of
[x] -> return $ Just $ ArticlePage x
_ -> return Nothing
-
-ap_parse_body :: IOSArrow XmlTree (NTree XNode) -> IO (Maybe String)
+
+ap_parse_body :: IOSArrow XmlTree XmlTree -> IO (Maybe String)
ap_parse_body xml = do
let element_filter = xml >>> css "div.ArticleText"
let element_html_filter = xshow element_filter
_ -> error "Found more than one article."
-ap_parse_articles :: IOSArrow XmlTree (NTree XNode) -> IO [Article]
+ap_parse_articles :: IOSArrow XmlTree XmlTree -> IO [Article]
ap_parse_articles xml = do
parsed_headline <- parse_headline xml
parsed_byline <- parse_byline xml
if (isNothing parsed_headline) || (isNothing parsed_body)
then return []
- else do
+ else do
let title' = Title $ fromJust parsed_headline
let byline' = Byline parsed_byline
let body' = BodyHtml $ fromJust parsed_body
-
+
return $ [Article title' byline' body']
-- FullPage Stuff
--
-fp_parse :: IOSArrow XmlTree (NTree XNode) -> IO (Maybe Page)
+fp_parse :: IOSArrow XmlTree XmlTree -> IO (Maybe Page)
fp_parse xml = do
hl <- parse_headline xml
parsed_articles <- fp_parse_articles xml
-fp_parse_article_title :: IOSArrow XmlTree (NTree XNode) -> IO (Maybe String)
+fp_parse_article_title :: IOSArrow XmlTree XmlTree -> IO (Maybe String)
fp_parse_article_title xml = do
let element_filter = xml >>> css "h2.SummaryHL"
let element_text_filter = element_filter //> getText
-fp_parse_article_body :: IOSArrow XmlTree (NTree XNode) -> IO (Maybe String)
+fp_parse_article_body :: IOSArrow XmlTree XmlTree -> IO (Maybe String)
fp_parse_article_body xml = do
-- First, delete the article title and byline.
let clean_xml' = xml >>> remove_title >>> remove_byline
[] -> Nothing
_ -> error "Found more than one article body."
-fp_parse_article :: IOSArrow XmlTree (NTree XNode) -> IO (Maybe Article)
+fp_parse_article :: IOSArrow XmlTree XmlTree -> IO (Maybe Article)
fp_parse_article xml = do
parsed_article_title <- fp_parse_article_title xml
parsed_article_byline <- parse_byline xml
let xml = parseHtml $ wrap_in_body_div html
fp_parse_article xml
-
+
-- | In the full page, all of the article titles and bodies are
-- wrapped in one big div.ArticleText.
-parse_bodies :: IOSArrow XmlTree (NTree XNode) -> IOSArrow XmlTree (NTree XNode)
+parse_bodies :: IOSArrow XmlTree XmlTree -> IOSArrow XmlTree XmlTree
parse_bodies xml =
xml >>> css "div.ArticleText"
-fp_parse_articles :: IOSArrow XmlTree (NTree XNode) -> IO [Article]
+fp_parse_articles :: IOSArrow XmlTree XmlTree -> IO [Article]
fp_parse_articles xml = do
bodies <- runX . xshow $ parse_bodies xml
let article_separator = "<h2 class=\"SummaryHL\">"
epmd <- metadata obj
epub <- xhtml_to_epub epmd xhtml
B.hPut handle epub
-
+ hFlush handle
+ hClose handle
xhtml_to_epub :: String -> String -> IO B.ByteString
xhtml_to_epub epmd =
changeAttrValue try_make_absolute_url
make_srcs_absolute :: (ArrowXml a) => a XmlTree XmlTree
- make_srcs_absolute =
+ make_srcs_absolute =
processAttrl $ change_src `when` hasName "src"