600字范文,内容丰富有趣,生活中的好帮手!
600字范文 > 使用htmlUnit抓取同花顺股票数据

使用htmlUnit抓取同花顺股票数据

时间:2020-11-21 09:57:18

相关推荐

使用htmlUnit抓取同花顺股票数据

网站地址

/(注:原文中的站点域名在转载时被过滤,此处仅保留路径,请自行补全同花顺行情站点的完整地址)

请求地址

/index/index/board/all/field/zdf/order/desc/page/4/ajax/1/(同样需要拼接在站点域名之后)

其中的 page/4 就是页码

访问网站 抓取每一页的股票数据

private String crawler(String url) {try {WebRequest request = new WebRequest(new URL(url), HttpMethod.GET);Map<String, String> additionalHeaders = new HashMap<>();additionalHeaders.put("User-Agent","Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/38.0.2125.104 Safari/537.36");additionalHeaders.put("Accept-Language", "zh-CN,zh;q=0.8");additionalHeaders.put("Accept", "*/*");// 设置请求头信息request.setAdditionalHeaders(additionalHeaders);Page page = webClient.getPage(url);// 将响应信息转为String并返回return page.getWebResponse().getContentAsString();} catch (Exception e) {e.printStackTrace();}return null;}

保存抓到的网页数据

private void work(String url) {try {// 打开网页webClient.getPage(url);int pageCount = 5;for (int i = 1; i <= pageCount; i++) {Thread.sleep(500);// 分页进行抓取String pageUrl = "/index/index/board/all/field/zdf/order/desc/page/" + i + "/ajax/1/";String response = crawler(pageUrl);if (!response.contains("加自选")) {return;} else {saveFile(response, PATH + i + ".html", i);}}} catch (Exception e) {e.printStackTrace();}}

抓取到的股票数据

源码:

import com.gargoylesoftware.htmlunit.*;import com.gargoylesoftware.htmlunit.html.HtmlPage;import java.io.BufferedWriter;import java.io.File;import java.io.FileWriter;import .URL;import java.util.HashMap;import java.util.Map;/*** 同花顺股票数据抓取*/public class StockCrawler {private static String PATH = "I:\\Java\\6_project\\StockSpider\\outPutFile\\";private WebClient webClient;/*** 初始化浏览器*/private StockCrawler() {// 新建一个模拟chrome的浏览器客户端对象webClient = new WebClient(BrowserVersion.CHROME);// 相应选项设置// 当JS执行出错的时候是否抛出异常webClient.getOptions().setThrowExceptionOnScriptError(false);// 当HTTP的状态非200时是否抛出异常webClient.getOptions().setThrowExceptionOnFailingStatusCode(false);webClient.getOptions().setActiveXNative(false);// 是否启用CSS, 因为不需要展现页面, 所以不需要启用webClient.getOptions().setCssEnabled(false);webClient.getOptions().setJavaScriptEnabled(true);// 很重要,设置支持AjaxwebClient.setAjaxController(new NicelyResynchronizingAjaxController());webClient.getOptions().setTimeout(30000);}private String crawler(String url) {try {WebRequest request = new WebRequest(new URL(url), HttpMethod.GET);Map<String, String> additionalHeaders = new HashMap<>();additionalHeaders.put("User-Agent","Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/38.0.2125.104 Safari/537.36");additionalHeaders.put("Accept-Language", "zh-CN,zh;q=0.8");additionalHeaders.put("Accept", "*/*");// 设置请求头信息request.setAdditionalHeaders(additionalHeaders);Page page = webClient.getPage(url);// 将响应信息转为String并返回return page.getWebResponse().getContentAsString();} catch (Exception e) {e.printStackTrace();}return null;}private void work(String url) {try {// 打开网页webClient.getPage(url);int pageCount = 5;for (int i = 1; i <= pageCount; i++) {Thread.sleep(500);// 分页进行抓取String pageUrl = "/index/index/board/all/field/zdf/order/desc/page/" + i + "/ajax/1/";String response = crawler(pageUrl);if (!response.contains("加自选")) {return;} else {saveFile(response, PATH + i + ".html", i);}}} catch (Exception e) {e.printStackTrace();}}/*** 
保存抓取到的信息** @param content* @param path*/private static void saveFile(String content, String path, int i) {File file = new File(path);if (!file.exists()) {try {file.createNewFile();} catch (Exception e) {e.printStackTrace();}} else {file.delete();}try {FileWriter fw = new FileWriter(file.getAbsoluteFile());BufferedWriter bw = new BufferedWriter(fw);bw.write(content);bw.close();System.out.println("第" + i + "页保存成功!");} catch (Exception e) {System.out.println("第" + i + "页保存失败!");e.printStackTrace();}}public static void main(String[] args) {StockCrawler crawler = new StockCrawler();String indexUrl = "/";crawler.work(indexUrl);}}

本内容不代表本网观点和政治立场,如有侵犯你的权益请联系我们处理。
网友评论
网友评论仅供其表达个人看法,并不表明网站立场。