Elasticsearch系列(十)----使用webmagic爬取数据导入到ES
来源:互联网 发布:淘宝卖办公用品 编辑:程序博客网 时间:2024/06/15 02:21
webmagic主要有两个文件
一个是对爬取页面进行处理,一个是对页面处理之后的数据进行保存:
CSDNPageProcessor
package com.fendo.webmagic;

import java.io.IOException;
import java.net.InetAddress;
import java.net.UnknownHostException;
import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;

import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.ObjectMapper;
import org.elasticsearch.action.index.IndexResponse;
import org.elasticsearch.client.transport.TransportClient;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.InetSocketTransportAddress;
import org.elasticsearch.transport.client.PreBuiltTransportClient;
import org.springframework.context.ApplicationContext;
import org.springframework.context.support.FileSystemXmlApplicationContext;
import com.fendo.common.ClientFactory;
import com.fendo.common.CommonUtils;
import com.fendo.entity.CsdnBlog;
import io.searchbox.client.JestClient;
import io.searchbox.client.JestResult;
import io.searchbox.indices.CreateIndex;
import us.codecraft.webmagic.Page;
import us.codecraft.webmagic.Site;
import us.codecraft.webmagic.Spider;
import us.codecraft.webmagic.processor.PageProcessor;

/**
 * Crawls one CSDN user's blog with webmagic.
 *
 * <p>List pages are expanded into article-page requests; for each article page the
 * extracted fields are (a) published via {@code page.putField(..)} so the
 * {@code JdbcPipeline} attached in {@link #main} can persist them to MySQL, and
 * (b) serialized to JSON and indexed into Elasticsearch ("csdnblog"/"article")
 * through a short-lived {@link TransportClient}.
 *
 * @author fendo
 */
public class CSDNPageProcessor implements PageProcessor {

    /** CSDN user name whose blog is crawled. */
    private static String username = "u011781521";

    /**
     * Number of article pages processed so far. AtomicInteger because the spider
     * runs with 5 worker threads — the original bare {@code size++} could lose
     * increments under concurrency.
     */
    private static final AtomicInteger size = new AtomicInteger(0);

    /** Shared JSON serializer; ObjectMapper is thread-safe once configured, so one instance suffices. */
    private static final ObjectMapper MAPPER = new ObjectMapper();

    // Crawl configuration: 3 retries, 1 s politeness delay, browser-like User-Agent.
    private Site site = Site.me()
            .setRetryTimes(3)
            .setSleepTime(1000)
            .setUserAgent("Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/43.0.2357.134 Safari/537.36");

    @Override
    public Site getSite() {
        return site;
    }

    /**
     * Dispatches on URL shape: anything that is NOT an article-detail URL is treated
     * as a list page and mined for further requests; article pages are scraped and
     * indexed.
     */
    @Override
    public void process(Page page) {
        if (!page.getUrl().regex("http://blog\\.csdn\\.net/" + username + "/article/details/\\d+").match()) {
            // List page: queue every article link found in the article list area.
            // The replace() trick rewrites relative URLs into absolute ones.
            page.addTargetRequests(page.getHtml().xpath("//div[@id='article_list']").links()
                    .regex("/" + username + "/article/details/\\d+")
                    .replace("/" + username + "/", "http://blog.csdn.net/" + username + "/")
                    .all());
            // Also queue the other paginated list pages.
            page.addTargetRequests(page.getHtml().xpath("//div[@id='papelist']").links()
                    .regex("/" + username + "/article/list/\\d+")
                    .replace("/" + username + "/", "http://blog.csdn.net/" + username + "/")
                    .all());
        } else {
            // Article page: extract every field the pipeline and the index need.
            int count = size.incrementAndGet();
            page.putField("key", Integer.parseInt(page.getUrl()
                    .regex("http://blog\\.csdn\\.net/" + username + "/article/details/(\\d+)").get()));
            page.putField("title", CommonUtils.replaceHTML(
                    page.getHtml().xpath("//div[@class='article_title']//span[@class='link_title']/a/text()").get()));
            page.putField("content", CommonUtils.replaceHTML(
                    page.getHtml().xpath("//div[@class='article_content']/allText()").get()));
            page.putField("dates",
                    page.getHtml().xpath("//div[@class='article_r']/span[@class='link_postdate']/text()").get());
            System.out.println("+++++++++++++++date:"
                    + page.getHtml().xpath("//div[@class='article_r']/span[@class='link_postdate']/text()").get());
            page.putField("tags", CommonUtils.replaceHTML(listToString(
                    page.getHtml().xpath("//div[@class='article_l']/span[@class='link_categories']/a/allText()").all())));
            page.putField("category", CommonUtils.replaceHTML(listToString(
                    page.getHtml().xpath("//div[@class='category_r']/label/span/text()").all())));
            page.putField("view", Integer.parseInt(page.getHtml()
                    .xpath("//div[@class='article_r']/span[@class='link_view']").regex("(\\d+)人阅读").get()));
            page.putField("comments", Integer.parseInt(page.getHtml()
                    .xpath("//div[@class='article_r']/span[@class='link_comments']").regex("\\((\\d+)\\)").get()));
            // NOTE(review): "bog_copyright" looks like a typo for "blog_copyright" —
            // kept byte-identical because it may match CSDN's actual page markup; verify.
            page.putField("copyright", page.getHtml().regex("bog_copyright").match() ? 1 : 0);
            page.putField("url", page.getUrl().get());

            CsdnBlog csdnBlog = new CsdnBlog();
            csdnBlog.setId(count);
            csdnBlog.setTags((String) page.getResultItems().get("tags"));
            csdnBlog.setKeyes((Integer) page.getResultItems().get("key"));
            csdnBlog.setTitles((String) page.getResultItems().get("title"));
            csdnBlog.setDates((String) page.getResultItems().get("dates"));
            csdnBlog.setCategory((String) page.getResultItems().get("category"));
            csdnBlog.setViews((Integer) page.getResultItems().get("view"));
            csdnBlog.setComments((Integer) page.getResultItems().get("comments"));
            csdnBlog.setCopyright((Integer) page.getResultItems().get("copyright"));
            csdnBlog.setContent((String) page.getResultItems().get("content"));
            indexToElasticsearch(csdnBlog);
        }
    }

    /**
     * Serializes the entity to JSON and indexes it into "csdnblog"/"article".
     *
     * <p>try-with-resources fixes the original's resource leak: a TransportClient
     * was opened for every article page and never closed. (Opening a client per
     * document is still expensive — consider hoisting it to a shared field.)
     */
    private static void indexToElasticsearch(CsdnBlog csdnBlog) {
        Settings settings = Settings.builder().put("cluster.name", "my-application").build();
        try (TransportClient client = new PreBuiltTransportClient(settings)
                .addTransportAddress(new InetSocketTransportAddress(InetAddress.getByName("127.0.0.1"), 9300))) {
            IndexResponse response = client.prepareIndex("csdnblog", "article")
                    .setSource(MAPPER.writeValueAsString(csdnBlog))
                    .execute()
                    .actionGet();
            System.out.println(response.toString());
        } catch (Exception e) {
            // Best-effort indexing: a failed index must not abort the crawl.
            e.printStackTrace();
        }
    }

    /**
     * Joins a list into a comma-separated string.
     *
     * @param stringList list to join; may be {@code null}
     * @return the joined string, or {@code null} if the input is {@code null}
     */
    public static String listToString(List<String> stringList) {
        if (stringList == null) {
            return null;
        }
        return String.join(",", stringList);
    }

    /**
     * Entry point: loads the Spring context, fetches the JdbcPipeline bean, and runs
     * the spider with 5 threads starting from the first list page.
     */
    public static void main(String[] args) {
        System.out.println("【爬虫开始】...");
        long startTime = System.currentTimeMillis();
        ApplicationContext applicationContext =
                new FileSystemXmlApplicationContext("classpath:applicationContext.xml");
        JdbcPipeline jdbcPipeline = (JdbcPipeline) applicationContext.getBean("jdbcPipeline");
        System.out.println(jdbcPipeline.toString());
        Spider.create(new CSDNPageProcessor())
                .addUrl("http://blog.csdn.net/u011781521/article/list/1")
                .addPipeline(jdbcPipeline)
                .thread(5)
                .run();
        long endTime = System.currentTimeMillis();
        System.out.println("【爬虫结束】共抓取" + size.get() + "篇文章,耗时约"
                + ((endTime - startTime) / 1000) + "秒,已保存到数据库,请查收!");
    }
}
注意:
在上面的代码中,不但通过 jdbcPipeline 把数据保存到了数据库,还通过 TransportClient 把数据保存到了 ES 中!
JdbcPipeline
package com.fendo.webmagic;

import java.util.Map;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Component;
import org.springframework.stereotype.Service;
import com.fendo.entity.CsdnBlog;
import com.fendo.mapper.CsdnBlogMapper;
import us.codecraft.webmagic.ResultItems;
import us.codecraft.webmagic.Task;
import us.codecraft.webmagic.pipeline.Pipeline;

/**
 * webmagic Pipeline that copies the fields collected by CSDNPageProcessor onto a
 * {@link CsdnBlog} entity and inserts it through the MyBatis mapper.
 */
@Component("jdbcPipeline")
public class JdbcPipeline implements Pipeline {

    @Autowired
    CsdnBlogMapper csdnBlogMapper;

    @Override
    public void process(ResultItems resultItems, Task task) {
        // Guard BEFORE dereferencing: the original called resultItems.getAll()
        // ahead of its own null check, so the check could never protect anything
        // (a null argument would NPE first).
        if (resultItems == null || resultItems.getAll().isEmpty()) {
            return;
        }
        Map<String, Object> items = resultItems.getAll();
        CsdnBlog csdnBlog = new CsdnBlog();
        csdnBlog.setTags((String) items.get("tags"));
        csdnBlog.setKeyes((Integer) items.get("key"));
        csdnBlog.setTitles((String) items.get("title"));
        csdnBlog.setDates((String) items.get("dates"));
        csdnBlog.setCategory((String) items.get("category"));
        csdnBlog.setViews((Integer) items.get("view"));
        csdnBlog.setComments((Integer) items.get("comments"));
        csdnBlog.setCopyright((Integer) items.get("copyright"));
        csdnBlog.setContent((String) items.get("content"));
        // NOTE(review): the processor collects a "url" field and the csdnblog table
        // has a url column, but it is never set here — confirm whether CsdnBlog
        // exposes a setter for it.
        System.out.println("-----------------------------------------------------------------------process:" + csdnBlog.toString());
        csdnBlogMapper.insert(csdnBlog);
    }
}
对应的数据库脚本:
-- Stores one row per crawled CSDN article (populated by JdbcPipeline).
CREATE TABLE `csdnblog` (
  `id`        int(11) unsigned  NOT NULL AUTO_INCREMENT,  -- surrogate key
  `keyes`     int(11) unsigned  NOT NULL,                 -- CSDN article id parsed from the URL
  `titles`    varchar(255)      NOT NULL,
  `content`   varchar(10240)    NOT NULL,
  `dates`     varchar(255)      DEFAULT NULL,             -- publish date as scraped text
  `tags`      varchar(255)      DEFAULT NULL,             -- comma-separated tag list
  `category`  varchar(255)      DEFAULT NULL,             -- comma-separated category list
  `views`     int(11) unsigned  DEFAULT NULL,
  `comments`  int(11) unsigned  DEFAULT NULL,
  `copyright` int(20) unsigned  DEFAULT NULL,             -- 1 = copyright banner detected, 0 = not
  `url`       varchar(255)      DEFAULT NULL,
  PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=3301 DEFAULT CHARSET=utf8;
完整项目: http://download.csdn.net/download/u011781521/9966717
阅读全文
0 0
- Elasticsearch系列(十)----使用webmagic爬取数据导入到ES
- Elasticsearch系列(九)----使用Logstash-input-jdbc同步数据库中的数据到ES
- webmagic爬取腾讯nba数据
- 使用webmagic 爬取天气网站
- 使用webmagic 爬取中关村评论
- Logstash 导入数据到Elasticsearch
- 使用python,批量导入数据到elasticsearch中
- java 使用webmagic 爬虫框架爬取博客园数据存入数据库
- elasticsearch bulk数据--ES批量导入json数据
- elasticsearch 使用bulk导入数据
- 【爬虫】WebMagic结合Spring mvc爬取数据进行存储
- 【爬虫】WebMagic结合Spring mvc爬取数据进行存储
- ES-MongoDB学习3_MongoDB数据同步到Elasticsearch中
- python实现MongoDB数据同步到ES(Elasticsearch)
- mongo-connector导入数据到Elasticsearch
- 将Mysql数据导入到ElasticSearch集群
- 将Mysql数据导入到ElasticSearch集群
- mongo-connector导入数据到Elasticsearch
- 后端数据展示到前端
- HBase 和 Cassandra的浅谈
- python3网络爬虫:爬取煎蛋网美女照片
- AndroidTv Home界面实现原理(二)——Leanback 库的主页卡位缩放动画源码解析
- C++学习笔记【第二部分第十一章:关联容器】
- Elasticsearch系列(十)----使用webmagic爬取数据导入到ES
- 温故知新(7)委托(一)delegate、Action、Func
- Android Api demo系列(19) (Graphics>AnimateDrawables)
- SCI期刊
- ***hdu6183
- Tomcat的配置使用
- react-native run-android 报错
- UE4:actor运动过程中的几种检测
- CentOS 7.0下使用yum安装MySQL