抓取网页内容 (Fetching Web Page Content)

来源:互联网 发布:js的非阻塞sleep函数 编辑:程序博客网 时间:2024/06/06 01:47
import java.io.BufferedReader;
import java.io.BufferedWriter;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.OutputStreamWriter;
import java.net.HttpURLConnection;
import java.net.MalformedURLException;
import java.net.URL;
import java.nio.charset.StandardCharsets;
import java.util.List;
import java.util.Map;

/**
 * Fetches the content of a web site and saves it to a local file.
 *
 * Steps to scrape a web site:
 * 1. Create a {@link URL} object locating the resource.
 * 2. Call {@code URL.openConnection()} to obtain an {@link HttpURLConnection}.
 * 3. Call {@code connect()} to establish the connection.
 * 4. Read the response headers via {@code getHeaderFields()}.
 * 5. Read the response body via {@code getInputStream()}.
 */
public class CatchWebContent {

    public static void main(String[] args) {
        try {
            URL url = new URL("http://www.csdn.net/");
            // openConnection() returns a URLConnection; cast to HttpURLConnection for HTTP use.
            HttpURLConnection httpURLConnection = (HttpURLConnection) url.openConnection();
            httpURLConnection.connect();

            // Print the response headers (key -> list of values).
            Map<String, List<String>> header = httpURLConnection.getHeaderFields();
            for (Map.Entry<String, List<String>> entry : header.entrySet()) {
                System.out.println(entry.getKey() + ": " + entry.getValue());
            }

            File file = new File("d:/catch/csdn.txt");
            if (!file.exists()) {
                file.getParentFile().mkdirs();
                file.createNewFile();
            }

            // BUG FIX: the original opened a new FileOutputStream for every line inside
            // the read loop and never closed it, leaking one file handle per line.
            // The writer is now created once and both streams are closed automatically
            // via try-with-resources, even if an exception occurs mid-read.
            // Also write with UTF-8 to match the charset the response is decoded with
            // (the original used the platform default charset for output).
            try (BufferedReader bufferedReader = new BufferedReader(
                         new InputStreamReader(httpURLConnection.getInputStream(), StandardCharsets.UTF_8));
                 BufferedWriter bufferedWriter = new BufferedWriter(
                         new OutputStreamWriter(new FileOutputStream(file, true), StandardCharsets.UTF_8))) {
                String str;
                while ((str = bufferedReader.readLine()) != null) {
                    bufferedWriter.write(str);
                    bufferedWriter.newLine();
                }
            } finally {
                // Release the connection even if reading/writing failed.
                httpURLConnection.disconnect();
            }
        } catch (MalformedURLException e) {
            e.printStackTrace();
        } catch (IOException e) {
            e.printStackTrace();
        }
    }
}


返回的头信息为:


原创粉丝点击