java - Why doesn't log4j2 work?

Tags: java, gradle, log4j, slf4j, log4j2

Here is my project structure:
[screenshot of the project structure]

build.gradle

apply plugin: 'java'
apply plugin: 'eclipse'
apply plugin: 'idea'

jar {
    baseName = 'dcv_crawler_engine'
    version = '1.0.0-SNAPSHOT'
}


repositories {
    jcenter()
}

dependencies {    
    compile 'edu.uci.ics:crawler4j:4.2'
    compile 'org.apache.logging.log4j:log4j-api:2.5'

    testCompile 'junit:junit:4.12'
    testCompile 'edu.uci.ics:crawler4j:4.2'
    testCompile 'org.apache.logging.log4j:log4j-api:2.5'
}

EntryPoint.java
package com.dcvsolution.crawler;

import edu.uci.ics.crawler4j.crawler.CrawlConfig;
import edu.uci.ics.crawler4j.crawler.CrawlController;
import edu.uci.ics.crawler4j.fetcher.PageFetcher;
import edu.uci.ics.crawler4j.robotstxt.RobotstxtConfig;
import edu.uci.ics.crawler4j.robotstxt.RobotstxtServer;

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;

public class EntryPoint {

    /**
     * For logging.
     */
    private static final Logger logger = LogManager.getLogger();

    public static void main(String[] args) throws Exception {

        logger.info("Bat dau crawling.");

        String crawlStorageFolder = "/data/crawl/root";
        int numberOfCrawlers = 7;

        CrawlConfig config = new CrawlConfig();
        config.setCrawlStorageFolder(crawlStorageFolder);

        /*
         * Instantiate the controller for this crawl.
         */
        PageFetcher pageFetcher = new PageFetcher(config);
        RobotstxtConfig robotstxtConfig = new RobotstxtConfig();
        RobotstxtServer robotstxtServer = new RobotstxtServer(robotstxtConfig, pageFetcher);
        CrawlController controller = new CrawlController(config, pageFetcher, robotstxtServer);

        /*
         * For each crawl, you need to add some seed urls. These are the first
         * URLs that are fetched and then the crawler starts following links
         * which are found in these pages
         */
        controller.addSeed("http://www.ics.uci.edu/~lopes/");
        controller.addSeed("http://www.ics.uci.edu/~welling/");
        controller.addSeed("http://www.ics.uci.edu/");

        /*
         * Start the crawl. This is a blocking operation, meaning that your code
         * will reach the line after this only when crawling is finished.
         */
        logger.info("Bat dau crawling.");
        controller.start(MyCrawler.class, numberOfCrawlers);        

    }

}

MyCrawler.java
package com.dcvsolution.crawler;

import java.util.Set;
import java.util.regex.Pattern;
import edu.uci.ics.crawler4j.crawler.Page;
import edu.uci.ics.crawler4j.crawler.WebCrawler;
import edu.uci.ics.crawler4j.parser.HtmlParseData;
import edu.uci.ics.crawler4j.url.WebURL;

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;

public class MyCrawler extends WebCrawler {

    private static final Logger logger = LogManager.getLogger();

    private final static Pattern FILTERS = Pattern.compile(".*(\\.(css|js|gif|jpg|png|mp3|mp4|zip|gz))$");

    /**
     * This method receives two parameters. The first parameter is the page in
     * which we have discovered this new url and the second parameter is the new
     * url. You should implement this function to specify whether the given url
     * should be crawled or not (based on your crawling logic). In this example,
     * we are instructing the crawler to ignore urls that have css, js, gif, ...
     * extensions and to only accept urls that start with
     * "http://www.ics.uci.edu/". In this case, we didn't need the referringPage
     * parameter to make the decision.
     */
    @Override
    public boolean shouldVisit(Page referringPage, WebURL url) {
        logger.info("Quet trang.");
        String href = url.getURL().toLowerCase();
        return !FILTERS.matcher(href).matches() && href.startsWith("http://www.ics.uci.edu/");
    }

    /**
     * This function is called when a page is fetched and ready to be processed
     * by your program.
     */
    @Override
    public void visit(Page page) {
        String url = page.getWebURL().getURL();
        System.out.println("URL: " + url);

        if (page.getParseData() instanceof HtmlParseData) {
            HtmlParseData htmlParseData = (HtmlParseData) page.getParseData();
            String text = htmlParseData.getText();
            String html = htmlParseData.getHtml();
            Set<WebURL> links = htmlParseData.getOutgoingUrls();

            logger.info("Quet trang.");

            System.out.println("Text length: " + text.length());
            System.out.println("Html length: " + html.length());
            System.out.println("Number of outgoing links: " + links.size());
        }
    }
}

log4j2.properties
log4j.rootLogger=DEBUG, stdout

Please help me: why isn't log4j working?


Accepted answer

You need both the log4j-api and log4j-core dependencies.

Currently you only have the api dependency. Add this to your compile dependencies:

'org.apache.logging.log4j:log4j-core:2.5'
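
For reference, the full dependencies block would then look like the sketch below (versions taken from the question). Under the Gradle java plugin, testCompile extends compile, so the duplicated test entries in the question are redundant and can be dropped:

dependencies {
    // log4j-api is the logging interface; log4j-core supplies the
    // actual implementation that reads log4j2 configuration files
    compile 'edu.uci.ics:crawler4j:4.2'
    compile 'org.apache.logging.log4j:log4j-api:2.5'
    compile 'org.apache.logging.log4j:log4j-core:2.5'

    testCompile 'junit:junit:4.12'
}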
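
Separately, note that the log4j2.properties shown in the question uses Log4j 1.x syntax (log4j.rootLogger=...), which Log4j 2 does not read. A minimal sketch in the Log4j 2 properties format (supported since version 2.4; in 2.4/2.5 the explicit appenders list is required) might look like the following, where the appender name STDOUT and the layout pattern are illustrative:

# Log4j 2 properties syntax, not Log4j 1.x syntax
status = error
name = PropertiesConfig
appenders = console

appender.console.type = Console
appender.console.name = STDOUT
appender.console.layout.type = PatternLayout
appender.console.layout.pattern = %d{HH:mm:ss} [%t] %-5level %logger{36} - %msg%n

rootLogger.level = debug
rootLogger.appenderRefs = stdout
rootLogger.appenderRef.stdout.ref = STDOUT

Alternatively, a log4j2.xml on the classpath is the most widely documented configuration option.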

Regarding "java - Why doesn't log4j2 work?", we found a similar question on Stack Overflow: https://stackoverflow.com/questions/36273825/
