加入收藏 | 设为首页 | 会员中心 | 我要投稿 李大同 (https://www.lidatong.com.cn/)- 科技、建站、经验、云计算、5G、大数据,站长网!
当前位置: 首页 > 大数据 > 正文

Golang 处理Kafka消息

发布时间:2020-12-16 18:31:01 所属栏目:大数据 来源:网络整理
导读:package mainimport ("flag""github.com/optiopay/kafka""log""net/http""net/http/pprof""strings""time""ooxx/config""ooxx/lib""ooxx/model")const LOG_CHANNEL_COUNT = 200const LOG_BUFFER_COUNT = 100var debug = flag.String("debug","false","debug
package main

import (
	"flag"
	"github.com/optiopay/kafka"
	"log"
	"net/http"
	"net/http/pprof"
	"strings"
	"time"
	"ooxx/config"
	"ooxx/lib"
	"ooxx/model"
)

// Tunables for the log pipeline.
const LOG_CHANNEL_COUNT = 200 // capacity of the inbound message channel
const LOG_BUFFER_COUNT = 100  // initial capacity of the flush buffer

var debug = flag.String("debug", "false", "debug mode")

// queue carries raw kafka payloads from the consumers to push_message.
var queue = make(chan []byte, LOG_CHANNEL_COUNT)

// buffer accumulates log lines between flushes. Start with length 0 and a
// pre-sized capacity: the original make([]string, LOG_BUFFER_COUNT) created
// 100 permanent empty strings that save_message had to skip on every flush.
var buffer = make([]string, 0, LOG_BUFFER_COUNT)

// ticker triggers a flush of buffer to disk every 4 seconds.
var ticker = time.NewTicker(4 * time.Second)

// save_message flushes the accumulated buffer to a timestamped stats file
// (StatsLogDir + "stats_play_" + timestamp), joining lines with '\n'.
// The original wrote the literal string "n" between lines — a backslash
// escape dropped during web scraping — which corrupted the log format.
// Empty entries are skipped; the buffer is cleared only when something
// was actually written, preserving the original's behavior.
func save_message() {
	if len(buffer) == 0 {
		return
	}
	tm := lib.TimeFormat()
	// Renamed from "log" to avoid shadowing the standard log package.
	name := "stats_play_" + tm
	file := config.Config.StatsLogDir + name

	// strings.Builder replaces the original's quadratic += concatenation.
	var sb strings.Builder
	for _, line := range buffer {
		if line == "" {
			continue
		}
		sb.WriteString(line)
		sb.WriteByte('\n')
	}
	if sb.Len() > 0 {
		lib.FilePutContents2(file, sb.String())
		buffer = buffer[:0] // keep capacity, drop contents
	}
}

// push_message is the pipeline pump: it moves raw payloads from the queue
// channel into the in-memory buffer, and flushes the buffer to disk via
// save_message whenever the package-level ticker fires. It loops forever
// and is intended to run as a dedicated goroutine.
func push_message() {
	for {
		select {
		case payload := <-queue:
			buffer = append(buffer, string(payload))
		case <-ticker.C:
			save_message()
		}
	}
}

func consume_flow_message(broker kafka.Client,topic string,partition int) {
	conf := kafka.NewConsumerConf(topic,int32(partition))
	conf.StartOffset = kafka.StartOffsetNewest
	consumer,err := broker.Consumer(conf)
	if err != nil {
		log.Fatalf("cannot create kafka consumer for %s:%d: %s",topic,partition,err)
	}

	for {
		msg,err := consumer.Consume()
		if err != nil {
			if err != kafka.ErrNoData {
				log.Printf("cannot consume %s:%d message: %s",err)
			}
			break
		}

		switch partition {
		case config.Config.KafkaPartitionFlay:
			log.Printf("%s:%d,%d: %s",msg.Offset,msg.Value)
		case config.Config.KafkaPartitionShow:
			log.Printf("%s:%d,msg.Value)
		case config.Config.KafkaPartitionFlow:
			log.Printf("%s:%d,msg.Value)
			if len(msg.Value) > 0 {
				queue <- msg.Value
			}
		}

	}
	log.Print("consume_flow_message,consumer quit,%s:%d",partition)
}

// main wires the service together: a pprof HTTP endpoint on :9527, a kafka
// broker connection, the buffered log writer (push_message), and one
// consumer goroutine per partition (the last consumer runs on the main
// goroutine and keeps the process alive).
func main() {
	defer func() {
		if err := recover(); err != nil {
			// "\nstack:" — the backslash was dropped during web scraping.
			lib.P("panic:", err, "\nstack:"+lib.Stack(false))
		}
	}()

	defer model.Db.Close()

	flag.Parse()

	// Expose pprof endpoints for live profiling.
	go func() {
		profServeMux := http.NewServeMux()
		profServeMux.HandleFunc("/debug/pprof/", pprof.Index)
		profServeMux.HandleFunc("/debug/pprof/cmdline", pprof.Cmdline)
		profServeMux.HandleFunc("/debug/pprof/profile", pprof.Profile)
		profServeMux.HandleFunc("/debug/pprof/symbol", pprof.Symbol)
		if err := http.ListenAndServe(":9527", profServeMux); err != nil {
			panic(err)
		}
	}()

	kafkaAddrs := strings.Split(config.Config.KafkaBrokers, ",")
	conf := kafka.NewBrokerConf("xktv")
	conf.DialTimeout = 1 * time.Second
	conf.DialRetryLimit = 1
	broker, err := kafka.Dial(kafkaAddrs, conf)
	if err != nil {
		log.Fatalf("cannot connect to kafka cluster: %s", err)
	}
	defer broker.Close()

	go push_message()

	// The original dropped the topic argument on the last two calls;
	// consume_flow_message requires (broker, topic, partition). Assumes
	// all three partitions belong to KafkaTopicFlow as on the first call
	// — TODO confirm there is no per-partition topic in config.
	go consume_flow_message(broker, config.Config.KafkaTopicFlow, config.Config.KafkaPartitionFlay)
	go consume_flow_message(broker, config.Config.KafkaTopicFlow, config.Config.KafkaPartitionShow)
	consume_flow_message(broker, config.Config.KafkaTopicFlow, config.Config.KafkaPartitionFlow)
}

优化:

使用 bytes.Buffer 逐段写入,代替循环中的字符串拼接(每次拼接都会重新分配并复制整个字符串),更高效。

// save_message (article's optimized variant): flushes the buffer to a
// timestamped stats file using bytes.Buffer instead of the quadratic
// string += concatenation of the first version.
func save_message() {
	if len(buffer) > 0 {
		tm := lib.TimeFormat()
		// Renamed from "log" to avoid shadowing the standard log package.
		name := "stats_play_" + tm
		file := config.Config.StatsLogDir + name

		buf := bytes.Buffer{}
		for _, v := range buffer {
			if v == "" {
				continue
			}
			buf.WriteString(v)
			// "\n", not the literal "n" — escape dropped during scraping.
			buf.WriteString("\n")
		}

		if content := buf.String(); content != "" {
			lib.FilePutContents2(file, content)
			buffer = buffer[0:0]
		}
	}
}

(编辑:李大同)

【声明】本站内容均来自网络,其相关言论仅代表作者个人观点,不代表本站立场。若无意侵犯到您的权利,请及时与联系站长删除相关内容!

    推荐文章
      热点阅读