# Lane-line fitting: quadratic polynomial fit, Hough transform (probabilistic Hough) and the RANSAC algorithm
# The quadratic polynomial fit is the method finally adopted
import pickle
import matplotlib.pyplot as plt
import numpy as np
import cv2 as cv
from sklearn.linear_model import RANSACRegressor
from sklearn.linear_model import LinearRegression


def line_class(x, y):  # x, y: pixel coordinates of a single lane line (left or right)
    # Build a 720x1280 binary mask with ones at the lane-line pixels
    line = np.zeros((720, 1280))
    for i, j in zip(x, y):
        if int(i) == 1280:
            i = 1279
        if int(j) == 720:
            j = 719
        line[int(j)][int(i)] = 1  # row index first (y), then column (x)
    line = line.astype(np.uint8)  # convert float64 to uint8 so OpenCV can use it
    return line
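
# A minimal usage sketch with made-up coordinates: scattered lane-pixel positions
# become a 720x1280 uint8 mask that cv.HoughLinesP can consume.
# demo_x = np.array([100.0, 101.5, 103.2])
# demo_y = np.array([700.0, 690.0, 680.0])
# demo_mask = line_class(demo_x, demo_y)  # ones at (y, x) of each point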


# Remove redundant Hough lines: repeatedly drop the line whose slope deviates from the
# mean slope by more than threshold, then return the remaining line closest to the mean.
def clean_lines(lines, threshold=0.1):
    slope = []
    kept = []  # keep segments and slopes index-aligned so popping stays consistent
    for line in lines:
        for lx1, ly1, lx2, ly2 in line:
            if lx1 != lx2:  # skip vertical segments (undefined slope)
                slope.append((ly2 - ly1) / (lx2 - lx1))
                kept.append((lx1, ly1, lx2, ly2))
    idx_min = 0
    while len(slope) > 0:
        mean = np.mean(slope)
        diff = [abs(s - mean) for s in slope]  # deviation of each slope from the mean
        idx = np.argmax(diff)  # index of the largest deviation
        if diff[idx] > threshold:
            slope.pop(idx)
            kept.pop(idx)
        else:
            idx_min = np.argmin(diff)
            break
    x1, y1, x2, y2 = kept[idx_min]
    x11 = x1 - (x2 - x1) * 30  # extend the segment so it is long enough to draw
    y11 = y1 - (y2 - y1) * 30
    x22 = x2 + (x2 - x1) * 20
    y22 = y2 + (y2 - y1) * 20
    return x11, y11, x22, y22
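
# Hedged illustration of the slope filtering in clean_lines on made-up segments:
# the third segment's slope (5.0) is far from the mean and gets dropped, so the
# returned line is an extension of one of the two consistent segments.
# demo_lines = np.array([[[0, 0, 10, 10]],    # slope 1.0
#                        [[0, 1, 10, 11]],    # slope 1.0
#                        [[0, 0, 10, 50]]])   # slope 5.0 -> filtered out
# print(clean_lines(demo_lines, threshold=0.1))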


def cal_MSE(x1, y1, x2, y2, line_x, line_y):
    # Despite the name, this returns the mean perpendicular distance of the lane
    # points to the line through (x1, y1) and (x2, y2), not a squared error.
    slope_line = (y2 - y1) / (x2 - x1)  # slope of the reference line
    distance = []
    for x, y in zip(line_x, line_y):
        A = slope_line  # line in the form Ax + By + C = 0
        B = -1
        C = (x2 * y1 - x1 * y2) / (x2 - x1)
        d = np.abs(A * x + B * y + C) / np.sqrt(A ** 2 + B ** 2)  # point-to-line distance
        distance.append(d)
    MSE = np.array(distance).sum() / len(line_x)
    return MSE
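
# Quick sanity check of the point-to-line distance formula above, with
# hypothetical numbers: the line through (0, 0) and (10, 10) is y = x, and the
# point (0, 2) lies 2/sqrt(2) = sqrt(2) away from it.
# print(cal_MSE(0, 0, 10, 10, [0], [2]))  # -> approximately 1.4142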


# Load the binarised bird's-eye-view image
dist_pickle = "./Advanced-Lane-Detection-master/mytest/warped_binary01.p"
with open(dist_pickle, mode="rb") as f:
    CalData = pickle.load(f)
warped_binary = CalData["warped_binary"]  # preprocessed perspective-transformed image, a 2-D array
left_x = CalData["left_x"]  # coordinates of the left and right lane lines
left_y = CalData["left_y"]
right_x = CalData["right_x"]
right_y = CalData["right_y"]
left_fit = CalData["left_fit"]  # quadratic fit coefficients A, B, C for the left lane line
right_fit = CalData["right_fit"]  # quadratic fit coefficients A, B, C for the right lane line
# plt.imshow(warped_binary)
# plt.show()

# Convert the 2-D array into a 3-channel image
img_stack = np.dstack((warped_binary*255, warped_binary*255, warped_binary*255))  # warped_binary is 0/1, so scale to 0/255

# Binary masks of the left and right lane lines
left_line = line_class(left_x, left_y)
right_line = line_class(right_x, right_y)

# Hough-transform line detection  ######
img_hough = img_stack.copy()
Hough_lines_left = cv.HoughLinesP(left_line, 1, np.pi / 180, 110)  # left lane
left_x1, left_y1, left_x2, left_y2 = clean_lines(Hough_lines_left, threshold=0.1)  # reduce the detected segments to a single line
scope_hough_left = (left_y2-left_y1)/(left_x2-left_x1)
intercept_hough_left = left_y1-left_x1*(left_y2-left_y1)/(left_x2-left_x1)
print("left lane slope:", scope_hough_left)
print("left lane intercept:", intercept_hough_left)
cv.line(img_hough, (left_x1, left_y1), (left_x2, left_y2), (0, 255, 0), 2)

# Draw all segments returned by the Hough transform (for debugging)
# for line in Hough_lines_right:
#     for x1, y1, x2, y2 in line:
#         x11 = x1-(x2-x1)*30
#         y11 = y1-(y2-y1)*30
#         x22 = x2+(x2-x1)*20
#         y22 = y2+(y2-y1)*20
#         cv.line(img_stack, (x11, y11), (x22, y22), (0, 0, 255), 2)
# cv.imshow('iimg_hough', img_hough)
# cv.waitKey(0)

Hough_lines_right = cv.HoughLinesP(right_line, 1, np.pi / 180, 300)  # right lane
right_x1, right_y1, right_x2, right_y2 = clean_lines(Hough_lines_right, threshold=0.1)  # reduce the detected segments to a single line
scope_hough_right = (right_y2-right_y1)/(right_x2-right_x1)
intercept_hough_right = right_y1-right_x1*(right_y2-right_y1)/(right_x2-right_x1)
print("right lane slope:", scope_hough_right)
print("right lane intercept:", intercept_hough_right)
cv.line(img_hough, (right_x1, right_y1), (right_x2, right_y2), (0, 255, 0), 2)
# Overlay the fitted equations as text
font = cv.FONT_HERSHEY_SIMPLEX  # font face
linetype = cv.LINE_AA  # anti-aliased line type
# arguments: text, bottom-left corner, font face, font scale, BGR colour, thickness, line type
str_left = 'y={:.3f}x{:+.3f}'.format(scope_hough_left, intercept_hough_left)
str_right = 'y={:.3f}x{:+.3f}'.format(scope_hough_right, intercept_hough_right)
cv.putText(img_hough, str_left, (180, 530), font, 1, (0, 0, 255), 2, linetype)
cv.putText(img_hough, str_right, (700, 380), font, 1, (0, 0, 255), 2, linetype)
cv.imshow('img_hough', img_hough)  # lines obtained with the Hough method
cv.waitKey(0)

# Compute the mean point-to-line distance (called MSE above)
left_MSE = cal_MSE(left_x1, left_y1, left_x2, left_y2, left_x, left_y)
right_MSE = cal_MSE(right_x1, right_y1, right_x2, right_y2, right_x, right_y)
print('left line MSE:', left_MSE, 'right line MSE:', right_MSE)
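
# If the probabilistic Hough step returns too many short fragments, OpenCV's
# optional minLineLength / maxLineGap arguments of cv.HoughLinesP can help;
# the values below are illustrative guesses, not tuned settings.
# Hough_lines_left = cv.HoughLinesP(left_line, 1, np.pi / 180, 110,
#                                   minLineLength=40, maxLineGap=20)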



# RANSAC line detection  ######
# max_trials=100: at most 100 iterations
# min_samples: minimum number of points drawn per iteration
# loss='absolute_loss': absolute-error loss (renamed 'absolute_error' in newer scikit-learn)
# residual_threshold=18: only points within 18 units of vertical distance to the fitted line count as inliers
# left lane
ransac_left = RANSACRegressor(LinearRegression(),
                         max_trials=100,
                         min_samples=int(len(left_x)/6),  # use 1/6 of the points as the minimum sample count
                         loss='absolute_loss',
                         residual_threshold=18)
left_x_ransac = left_x.reshape(-1, 1)
left_y_ransac = left_y.reshape(-1, 1)
ransac_left.fit(left_x_ransac, left_y_ransac)
inlier_mask_left = ransac_left.inlier_mask_  # boolean inlier mask; True marks an inlier at that index
outlier_mask_left = np.logical_not(inlier_mask_left)  # outlier mask
scope_ransac_left = ransac_left.estimator_.coef_[0][0]  # slope
intercept_left = ransac_left.estimator_.intercept_[0]  # intercept
print('left RANSAC linear-regression slope: {}'.format(scope_ransac_left))
print('left RANSAC linear-regression intercept: {}'.format(intercept_left))
# right lane
ransac_right = RANSACRegressor(LinearRegression(),
                         max_trials=100,
                         min_samples=int(len(right_x)/6),
                         loss='absolute_loss',
                         residual_threshold=18)
right_x_ransac = right_x.reshape(-1, 1)
right_y_ransac = right_y.reshape(-1, 1)
ransac_right.fit(right_x_ransac, right_y_ransac)
inlier_mask_right = ransac_right.inlier_mask_  # boolean inlier mask; True marks an inlier at that index
outlier_mask_right = np.logical_not(inlier_mask_right)  # outlier mask
scope_ransac_right = ransac_right.estimator_.coef_[0][0]  # slope
intercept_right = ransac_right.estimator_.intercept_[0]  # intercept
print('right RANSAC linear-regression slope: {}'.format(scope_ransac_right))
print('right RANSAC linear-regression intercept: {}'.format(intercept_right))
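
# Alternative endpoint computation (a sketch): let the fitted RANSAC estimator
# predict y for chosen x positions instead of rearranging slope and intercept by hand.
# xs = np.array([[0.0], [1279.0]])
# ys_left = ransac_left.predict(xs)    # y = slope * x + intercept for each x
# ys_right = ransac_right.predict(xs)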

# Draw the fitted lines
img_ransac = img_stack.copy()
# left lane
ransac_x1_left, ransac_y1_left, ransac_x2_left, ransac_y2_left = 0.0, intercept_left, (-intercept_left/scope_ransac_left), 0.0  # two endpoints derived from the intercept and slope
cv.line(img_ransac, (int(ransac_x1_left), int(ransac_y1_left)), (int(ransac_x2_left), int(ransac_y2_left)), (255, 0, 0), 2)  # pixel coordinates must be integers
# right lane
ransac_x1_right, ransac_y1_right, ransac_x2_right, ransac_y2_right = (720-intercept_right)/scope_ransac_right, 720, (-intercept_right/scope_ransac_right), 0.0
cv.line(img_ransac, (int(ransac_x1_right), int(ransac_y1_right)), (int(ransac_x2_right), int(ransac_y2_right)), (255, 0, 0), 2)
# Overlay the fitted equations as text
font = cv.FONT_HERSHEY_SIMPLEX  # font face
linetype = cv.LINE_AA  # anti-aliased line type
# arguments: text, bottom-left corner, font face, font scale, BGR colour, thickness, line type
str_left = 'y={:.3f}x{:+.3f}'.format(scope_ransac_left, intercept_left)
str_right = 'y={:.3f}x{:+.3f}'.format(scope_ransac_right, intercept_right)
cv.putText(img_ransac, str_left, (180, 530), font, 1, (0, 0, 255), 2, linetype)
cv.putText(img_ransac, str_right, (700, 380), font, 1, (0, 0, 255), 2, linetype)
cv.imshow('img_ransac', img_ransac)  # lines obtained with RANSAC
cv.waitKey(0)
# Compute the mean point-to-line distance
left_ransac_MSE = cal_MSE(ransac_x1_left, ransac_y1_left, ransac_x2_left, ransac_y2_left, left_x, left_y)
right_ransac_MSE = cal_MSE(ransac_x1_right, ransac_y1_right, ransac_x2_right, ransac_y2_right, right_x, right_y)
print('RANSAC left line MSE:', left_ransac_MSE, 'RANSAC right line MSE:', right_ransac_MSE)

# Plot inliers and outliers
img_ransac_map = np.zeros(img_stack.shape, dtype=np.uint8)
# left lane
inlier_index_left = np.array(np.nonzero(inlier_mask_left))[0]  # indices of the inliers
outlier_index_left = np.array(np.nonzero(outlier_mask_left))[0]  # indices of the outliers
for i in inlier_index_left:
    cv.circle(img_ransac_map, (int(left_x[i]), int(left_y[i])), 1, (0, 0, 255))  # cv.circle centre coordinates cannot be floats
for i in outlier_index_left:
    cv.circle(img_ransac_map, (int(left_x[i]), int(left_y[i])), 1, (0, 255, 0))
# right lane
inlier_index_right = np.array(np.nonzero(inlier_mask_right))[0]  # indices of the inliers
outlier_index_right = np.array(np.nonzero(outlier_mask_right))[0]  # indices of the outliers
for i in inlier_index_right:
    cv.circle(img_ransac_map, (int(right_x[i]), int(right_y[i])), 1, (0, 0, 255))  # cv.circle centre coordinates cannot be floats
for i in outlier_index_right:
    cv.circle(img_ransac_map, (int(right_x[i]), int(right_y[i])), 1, (0, 255, 0))
cv.imshow('img_ransac_map', img_ransac_map)  # inliers and outliers found by RANSAC
cv.waitKey(0)


# Quadratic polynomial fit ######  x = A*y^2 + B*y + C
# img_quadratic = warped01_lab.copy()  # quadratic-fit image
img_quadratic = img_stack.copy()  # image used to draw the quadratic fit
b, g, r = cv.split(img_quadratic)
A_left, B_left, C_left = left_fit
A_right, B_right, C_right = right_fit
for y in range(720):  # mark the fitted x for every row (assumes the fit stays inside the 1280-pixel width)
    x1 = int(A_left*(y**2)+B_left*y+C_left)
    b[y][x1], g[y][x1], r[y][x1] = 255, 0, 0
    x2 = int(A_right*(y**2)+B_right*y+C_right)
    b[y][x2], g[y][x2], r[y][x2] = 255, 0, 0
img_quadratic = cv.merge([b, g, r])
# Overlay the fitted equations as text
font = cv.FONT_HERSHEY_SIMPLEX  # font face
linetype = cv.LINE_AA  # anti-aliased line type
# arguments: text, bottom-left corner, font face, font scale, BGR colour, thickness, line type
str_left = 'x={:.5f}y^2{:+.3f}y{:+.3f}'.format(A_left, B_left, C_left)
str_right = 'x={:.5f}y^2{:+.3f}y{:+.3f}'.format(A_right, B_right, C_right)
cv.putText(img_quadratic, str_left, (100, 430), font, 1, (0, 0, 255), 2, linetype)
cv.putText(img_quadratic, str_right, (700, 380), font, 1, (0, 0, 255), 2, linetype)
cv.imshow("img_quadratic", img_quadratic)
cv.waitKey(0)
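
# For reference, coefficients in the x = A*y^2 + B*y + C convention can be
# reproduced from the same pixel coordinates with np.polyfit (a sketch; the
# pickled left_fit/right_fit were computed upstream):
# left_fit_check = np.polyfit(left_y, left_x, 2)    # -> array([A, B, C])
# right_fit_check = np.polyfit(right_y, right_x, 2)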

quadratic_fit.py

# Fit the input 1280x720 image directly with a quadratic polynomial
# file name quadratic_fit.py
import cv2
import numpy as np
import matplotlib.pyplot as plt
from collections import deque
from sklearn.linear_model import RANSACRegressor
from sklearn.linear_model import LinearRegression


class Line:
    def __init__(self):
        # was the line detected in the last iteration?
        self.detected = False

        # x and y values in last frame
        self.x = None
        self.y = None

        # x intercepts for average smoothing
        self.bottom_x = deque(maxlen=frame_num)
        self.top_x = deque(maxlen=frame_num)

        # Record last x intercept
        self.current_bottom_x = None
        self.current_top_x = None

        # Polynomial coefficients: x = A*y**2 + B*y + C
        self.A = deque(maxlen=frame_num)
        self.B = deque(maxlen=frame_num)
        self.C = deque(maxlen=frame_num)
        self.fit = None
        self.fitx = None
        self.fity = None

    def get_intercepts(self):
        bottom = self.fit[0] * 720 ** 2 + self.fit[1] * 720 + self.fit[2]
        top = self.fit[2]
        return bottom, top

    def quick_search(self, nonzerox, nonzeroy):
        """
        Assuming in last frame, lane has been detected. Based on last x/y coordinates, quick search current lane.
        """
        x_inds = []
        y_inds = []
        if self.detected:
            win_bottom = 720
            win_top = 630
            while win_top >= 0:
                yval = np.mean([win_top, win_bottom])
                xval = (np.median(self.A)) * yval ** 2 + (np.median(self.B)) * yval + (np.median(self.C))
                x_idx = np.where((((xval - 50) < nonzerox)
                                  & (nonzerox < (xval + 50))
                                  & ((nonzeroy > win_top) & (nonzeroy < win_bottom))))
                x_window, y_window = nonzerox[x_idx], nonzeroy[x_idx]
                if np.sum(x_window) != 0:
                    x_inds.extend(x_window)  # np.append does not modify in place, so extend the lists instead
                    y_inds.extend(y_window)
                win_top -= 90
                win_bottom -= 90
        if np.sum(x_inds) == 0:
            self.detected = False  # If no lane pixels were detected then perform blind search
        return x_inds, y_inds, self.detected
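
    # Tiny np.where demo of the window predicate used above (made-up coordinates):
    # with a predicted xval of 300, only nonzero pixels whose x lies in (250, 350)
    # and whose y falls inside the 630-720 row band are kept.
    # nzx = np.array([240, 300, 360]); nzy = np.array([700, 700, 700])
    # keep = np.where((nzx > 250) & (nzx < 350) & (nzy > 630) & (nzy < 720))
    # print(nzx[keep])  # -> [300]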

    def blind_search(self, nonzerox, nonzeroy, image):
        """
        Sliding window search method, start from blank.
        """
        x_inds = []
        y_inds = []
        if self.detected is False:
            win_bottom = 720
            win_top = 640
            while win_top >= 0:
                histogram = np.sum(image[win_top:win_bottom, :], axis=0)
                if self == right:  # the global right-lane instance searches the right half of the histogram
                    base = np.argmax(histogram[640:]) + 640
                else:
                    base = np.argmax(histogram[:640])
                x_idx = np.where((((base - 50) < nonzerox) & (nonzerox < (base + 50))
                                  & ((nonzeroy > win_top) & (nonzeroy < win_bottom))))
                x_window, y_window = nonzerox[x_idx], nonzeroy[x_idx]
                if np.sum(x_window) != 0:
                    x_inds.extend(x_window)
                    y_inds.extend(y_window)
                win_top -= 80
                win_bottom -= 80
        if np.sum(x_inds) > 0:
            self.detected = True
        else:
            y_inds = self.y
            x_inds = self.x
        return x_inds, y_inds, self.detected
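
    # Small illustration of the histogram step in blind_search (hypothetical 4x8
    # patch): summing the binary image along axis 0 and taking argmax picks the
    # column with the most lane pixels as the window base.
    # patch = np.zeros((4, 8), dtype=np.uint8)
    # patch[:, 2] = 1                            # fake lane column
    # print(np.argmax(np.sum(patch, axis=0)))    # -> 2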

    def sort_idx(self):
        """
        Sort x and y according to y index
        """
        sorted_idx = np.argsort(self.y)
        sorted_x_inds = self.x[sorted_idx]
        sorted_y_inds = self.y[sorted_idx]

        return sorted_x_inds, sorted_y_inds

    def get_fit(self):
        """
        Based on searched x and y coordinates, polyfit with second order.
        Take median value in previous frames to smooth.
        """
        self.fit = np.polyfit(self.y, self.x, 2)

        self.current_bottom_x, self.current_top_x = self.get_intercepts()

        self.bottom_x.append(self.current_bottom_x)
        self.top_x.append(self.current_top_x)
        self.current_bottom_x = np.median(self.bottom_x)
        self.current_top_x = np.median(self.top_x)

        self.x = np.append(self.x, self.current_bottom_x)
        self.x = np.append(self.x, self.current_top_x)
        self.y = np.append(self.y, 720)
        self.y = np.append(self.y, 0)

        self.x, self.y = self.sort_idx()
        self.fit = np.polyfit(self.y, self.x, 2)
        self.A.append(self.fit[0])
        self.B.append(self.fit[1])
        self.C.append(self.fit[2])
        self.fity = self.y
        self.fit = [np.median(self.A), np.median(self.B), np.median(self.C)]
        self.fitx = self.fit[0] * self.fity ** 2 + self.fit[1] * self.fity + self.fit[2]

        return self.fit, self.fitx, self.fity
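
    # Sketch of the temporal smoothing idea with a hypothetical coefficient history:
    # the median over the last frames damps single-frame jitter in A, B and C.
    # A_hist = deque([1.0e-4, 1.2e-4, 0.9e-4], maxlen=15)
    # print(np.median(A_hist))  # -> 1e-04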


def process(warped_binary):
    nonzerox, nonzeroy = np.nonzero(np.transpose(warped_binary))  # indices of the non-zero elements of warped_binary (transposed, so x comes first)
    print('left.detected', left.detected)
    print('right.detected', right.detected)
    # nonzerox/nonzeroy hold every non-zero pixel of the binary image, but they still have to be split into left-lane and right-lane points
    if left.detected is True:
        leftx, lefty, left.detected = left.quick_search(nonzerox, nonzeroy)
    if right.detected is True:
        rightx, righty, right.detected = right.quick_search(nonzerox, nonzeroy)
    if left.detected is False:
        leftx, lefty, left.detected = left.blind_search(nonzerox, nonzeroy, warped_binary)
    if right.detected is False:
        rightx, righty, right.detected = right.blind_search(nonzerox, nonzeroy, warped_binary)

    # reset here if every frame should be searched from scratch; otherwise detected stays True for the next call
    # left.detected = False
    # right.detected = False

    left.y = np.array(lefty).astype(np.float32)
    left.x = np.array(leftx).astype(np.float32)
    right.y = np.array(righty).astype(np.float32)
    right.x = np.array(rightx).astype(np.float32)
    left_x, left_y, right_x, right_y = left.x, left.y, right.x, right.y  # lane-line coordinates

    # Quadratic fit of the lane-line coordinates:
    # left_fit holds the coefficients A, B, C; left_fity are the y values of the window endpoints; left_fitx are the fitted x values for those y
    left_fit, left_fitx, left_fity = left.get_fit()
    right_fit, right_fitx, right_fity = right.get_fit()
    return left_fit, right_fit, left_x, left_y, right_x, right_y


def lab_luv(img):
    b_thresh = (180, 255)  # yellow line (b channel of Lab)
    # l_thresh = (225, 255)  # white line
    l_thresh = (160, 255)  # white line (L channel of Luv)

    img_luv = cv2.cvtColor(img, cv2.COLOR_BGR2Luv)
    img_lab = cv2.cvtColor(img, cv2.COLOR_BGR2Lab)
    l_channel = img_luv[:, :, 0]  # extract the L channel
    l_bin_luv = np.zeros_like(l_channel)
    l_bin_luv[(l_channel >= l_thresh[0]) & (l_channel <= l_thresh[1])] = 1  # binary image

    b_channel = img_lab[:, :, 2]  # extract the b channel
    b_bin_lab = np.zeros_like(b_channel)
    b_bin_lab[(b_channel >= b_thresh[0]) & (b_channel <= b_thresh[1])] = 1

    combine = np.zeros_like(l_channel)
    combine[(l_bin_luv == 1) | (b_bin_lab == 1)] = 1  # keep both the yellow and the white line pixels
    res = np.dstack((combine * 255, combine * 255, combine * 255))
    return res, combine
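
# Minimal usage sketch for lab_luv (the file name here is hypothetical):
# frame = cv2.imread('some_road_frame.jpg')
# frame = cv2.resize(frame, (1280, 720))
# vis, mask = lab_luv(frame)  # vis: 3-channel 0/255 image, mask: 2-D 0/1 array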


class QuadraticFit:
    def __init__(self, image):
        self.img = image

    def my_fit(self):
        img0 = self.img
        img0 = cv2.resize(img0, (1280, 720))
        _, warped01_binary = lab_luv(img0)  # warped01_binary is the 2-D 0/1 image that gets fitted; the 3-channel 0/255 version is discarded here
        plt.imshow(warped01_binary)
        plt.show()

        left_fit, right_fit, _, _, _, _ = process(warped01_binary)  # fit coefficients

        # Quadratic fit  ###### x = A*y^2 + B*y + C; the fitted lane could also be drawn on a black image and blended back with a weighted sum
        # img_quadratic = np.zeros(img0.shape, np.uint8)
        img_quadratic = img0
        A_left, B_left, C_left = left_fit
        A_right, B_right, C_right = right_fit
        # # alternative: draw by setting the RGB channels directly
        # b, g, r = cv2.split(img_quadratic)
        # for y in range(720):
        #     x1 = int(A_left * (y ** 2) + B_left * y + C_left)
        #     b[y][x1], g[y][x1], r[y][x1] = 255, 0, 0
        #     x2 = int(A_right * (y ** 2) + B_right * y + C_right)
        #     b[y][x2], g[y][x2], r[y][x2] = 255, 0, 0
        # img_quadratic = cv2.merge([b, g, r])
        # """
        y = np.arange(0, 720)
        left_fitx = A_left * (y ** 2) + B_left * y + C_left
        left_fitx = left_fitx.astype('int')
        pts_left = np.array([np.flipud(np.transpose(np.vstack([left_fitx, y])))])
        right_fitx = A_right * (y ** 2) + B_right * y + C_right
        right_fitx = right_fitx.astype('int')
        pts_right = np.array([np.transpose(np.vstack([right_fitx, y]))])
        pts = np.hstack((pts_left, pts_right))
        cv2.polylines(img_quadratic, np.int_([pts]),
                      isClosed=False, color=(200, 0, 0), thickness=30)  # draw line
        cv2.fillPoly(img_quadratic, np.int_([pts]), (0, 255, 0))  # Draw the lane onto the warped blank image
        # cv2.polylines(img0, np.int_([pts]),
        #               isClosed=False, color=(200, 0, 0), thickness=30)  # draw line
        # cv2.fillPoly(img0, np.int_([pts]), (0, 255, 0))  # Draw the lane onto the warped blank image
        # """
        return img_quadratic


# Initial parameters
frame_num = 15  # number of recent frames kept for smoothing
right = Line()  # right and left lane-line objects
left = Line()

# img0 = cv2.imread('warped_roi01.jpg')
# img0 = cv2.resize(img0, (1280, 720))
# img0 = cv2.medianBlur(img0, 5)  # remove salt-and-pepper noise
# fit = QuadraticFit(img0)
# fit_res = fit.my_fit()
# cv2.imshow("img_quadratic", fit_res)
# cv2.waitKey(0)

"""
# Read the image
img0 = cv2.imread('warped_roi.jpg')
img0 = cv2.resize(img0, (1280, 720))
img0 = cv2.medianBlur(img0, 5)  # remove salt-and-pepper noise

warped01_lab, warped01_binary = lab_luv(img0)  # warped01_lab: 3-channel 0/255 image; warped01_binary: 2-D 0/1 image, the one that gets fitted

left_fit, right_fit, _, _, _, _ = process(warped01_binary)  # fit coefficients
left_x, left_y, right_x, right_y = left.x, left.y, right.x, right.y  # lane-line coordinates


# Quadratic fit  ###### x = A*y^2 + B*y + C
img_quadratic = warped01_lab.copy()  # image used to draw the quadratic fit
# b, g, r = cv2.split(img_quadratic)
A_left, B_left, C_left = left_fit
A_right, B_right, C_right = right_fit
y = np.arange(0, 720)
left_fitx = A_left * (y ** 2) + B_left * y + C_left
left_fitx = left_fitx.astype('int')
pts_left = np.array([np.flipud(np.transpose(np.vstack([left_fitx, y])))])
right_fitx = A_right * (y ** 2) + B_right * y + C_right
right_fitx = right_fitx.astype('int')
pts_right = np.array([np.transpose(np.vstack([right_fitx, y]))])
pts = np.hstack((pts_left, pts_right))

# Draw lines
cv2.polylines(img_quadratic, np.int_([pts]),
              isClosed=False, color=(200, 0, 0), thickness=30)
# Draw the lane onto the warped blank image
cv2.fillPoly(img_quadratic, np.int_([pts]), (0, 255, 0))
# cv2.imshow('img_quadratic', img_quadratic)
# cv2.waitKey(0)

# for y in range(720):  # alternative: draw by setting the RGB channels directly
#     x1 = int(A_left * (y ** 2) + B_left * y + C_left)
#     b[y][x1], g[y][x1], r[y][x1] = 255, 0, 0
#     x2 = int(A_right * (y ** 2) + B_right * y + C_right)
#     b[y][x2], g[y][x2], r[y][x2] = 255, 0, 0
# img_quadratic = cv2.merge([b, g, r])
cv2.imshow("img_quadratic", img_quadratic)
cv2.waitKey(0)
"""