Commit db243daa authored by wangjianfeng.yz's avatar wangjianfeng.yz

2.0.60

parent b26cc4b9
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
{
"name": "fyge",
"version": "2.0.59",
"version": "2.0.60",
"description": "canvas渲染引擎",
"main": "./build/fyge.min.js",
"module": "./build/fyge.esm.js",
......
......@@ -562,11 +562,17 @@
2.0.59 tbminAdpate暂时恢复方法initedByCanvas和destroyCanvasContent,为了老版本维护
2.0.60 FrameAni改成AnimationNode的形式,和lottie和svga类似
SvgaAni注释声明的animationClip,不知道有啥用(父级上protect有),待测试
SvgaAni的静态方法deepCopyFrames添加返回值类型FrameEntity[]
Object3D添加getPosition2d的方法,stagePos的get方法也用这个,待测试
对于lottie和svga是纯json情况下,到时用weakMap存各自的纹理
D3Renderer文件里的influencesList和attrChannelHash是按照geometry的id缓存的,这样多stage用了一个geometry会有问题
......
......@@ -7,7 +7,7 @@
* @name VERSION
* @type {string}
*/
export const VERSION = "2.0.59";
export const VERSION = "2.0.60";
/**
......
......@@ -62,7 +62,7 @@ export default class Filter {
*/
enabled: boolean;
/**
* If enabled, PixiJS will fit the filter area into boundaries for better performance.
* If enabled, will fit the filter area into boundaries for better performance.
* Switch it off if it does not work for specific shader.
*
* @member {boolean}
......
......@@ -495,8 +495,8 @@ function buildNonNativeLine(graphicsData: GraphicsData, graphicsGeometry: Graphi
*
* @ignore
* @private
* @param {PIXI.GraphicsData} graphicsData - The graphics object containing all the necessary properties
* @param {PIXI.GraphicsGeometry} graphicsGeometry - Geometry where to append output
* @param {GraphicsData} graphicsData - The graphics object containing all the necessary properties
* @param {Graphics} graphicsGeometry - Geometry where to append output
*/
function buildNativeLine(graphicsData: GraphicsData, graphicsGeometry: Graphics): void {
let i = 0;
......
import Texture from "../texture/Texture";
import { Event } from "../events/Event";
import Sprite from "../display/Sprite";
import { Container } from "../display";
import { AnimationNode } from "./AnimationNode";
import { HashObject } from "../HashObject";
import { AnimationClip, IAnimationTrack } from "../AnimationClip";
import { clamp } from "../utils";
/**
* 直接通过替换texture替换图片的动画帧
* 暂时这些动画帧只播放一次,所以简化,要循环时再修改
* 默认按时间间隔播放
* 帧动画
* 图片都以素材为中心为原点
*/
export class FrameAni extends Container {
private showImage: Sprite
export class FrameAni extends AnimationNode {
protected rawData: Texture[]
/**
* 所有的纹理
*/
private texturesAll: Texture[];
/**
* 从0开始 锁步会跳帧
* 每一帧就是一张图片,从0开始
*/
// private _currentFrame: number;//暂时只有不锁步时才需要,但是已经注释,需要时加get方法
private set currentFrame(value: number) {
if (this.texturesAll[value]) {
this.showImage.texture = this.texturesAll[value];
} else {
this.showImage.texture = null;
}
}
/**
* 不开放currentFrame的get方法,因为权限要一致的话set也要变public,
* 但其实外部set赋值是无效的,用reset才有效,所以这里同步给一个方法
* 返回0到totalFrames-1
* 获取当前图片帧
*/
getCurrentFrame(): number {
return this.texturesAll.indexOf(this.showImage.texture)
console.warn("method getCurrentFrame will be abandoned soon,use property currentFrame instead")
return this.currentFrame//this.texturesAll.indexOf(this.showImage.texture)
}
/**
* 所有帧数
* 总时间,秒计
*/
get totalFrames(): number {
return this.texturesAll.length
get totalTime(): number {
return this.rawData && this.rawData.length * (1 / this.fps) || 0;
};
/**
* 是否运行中
*/
private isPlay: boolean;
/**
* 循环次数
*/
private loop: number = 1;
/**
* 开始时间
* 总帧数
*/
private startTime: number
get totalFrames(): number {
return this.rawData && this.rawData.length || 0;
};
/**
* 所有时间,播完所用时间
* 动画显示宽度,取最大的纹理宽度
*/
private allTime: number
get videoWidth(): number {
if (!this.rawData || !this.rawData.length) return 0;
//上面已经判断过rawData的长度,所以不多加参数0
return Math.max(...this.rawData.map(t => t.width))
};
/**
* 播放完的回调
* 动画显示高度,取最大的纹理高度
*/
private callback: Function;
get videoHeight(): number {
if (!this.rawData || !this.rawData.length) return 0;
return Math.max(...this.rawData.map(t => t.height))
};
/**
* 每秒的帧数,默认30帧
* 每秒刷新帧数,默认30;
*/
private _frameRate: number;
public fps = 30;
get frameRate() {
return this._frameRate
console.warn("method frameRate will be abandoned soon,use property fps instead")
return this._fps
}
set frameRate(value: number) {
// if (value == this._frameRate) return//注释掉吧,否则frameRate修改相同,但是texturesAll变了就执行不到了
this._frameRate = value;
this.allTime = this.texturesAll.length / this._frameRate * 1000;
console.warn("method frameRate will be abandoned soon,use property fps instead")
this._fps = value;
}
private showImage: Sprite;
private frameTrack: FrameTrack;
/**
*
* @param texturesAll 所有的资源数组
* @param data 所有的纹理资源数组
*/
constructor(texturesAll: Texture[]) {
super()
this.showImage = new Sprite();
this.showImage.anchorTexture.set(0.5, 0.5);
this.addChild(this.showImage);
this.texturesAll = texturesAll;
this.currentFrame = 0;
this.addEventListener(Event.ENTER_FRAME, this.onEnterFrame, this)
this.frameRate = 30;
constructor(data: Texture[]) {
super(data);
this._instanceType = "FrameAni"
}
//需要做锁步
private count = 0;
private onEnterFrame() {
if (!this.isPlay) {
// this.count = 0
return
init(data: Texture[]) {
if (!data || !data.length || data == this.rawData) return;
//记录源数据
this.rawData = data;
//可以复用,只要一个
if (!this.showImage) {
this.showImage = this.addChild(new Sprite());
this.showImage.anchorTexture.set(0.5, 0.5);
}
// this.count++;
// if (this.count % 2 == 0) {
// this.currentFrame++;
// if (this.currentFrame == this.totalFrames/*-1*/) {
// this.currentFrame = 0;
// this.isPlay = false
// this.callback && this.callback();
// }
// this.texture = RES.getRes(this.sourceAll[this.currentFrame])
// this.x = -this.texture.textureWidth / 2;
// this.y = -this.texture.textureHeight / 2;
// }
var dataNow = Date.now();
var deltaTime = dataNow - this.startTime;
var scale = deltaTime / this.allTime;
if (scale >= 1) {
this.loop--;
if (this.loop == 0) {
this.isPlay = false
this.callback && this.callback();
} else {
this.startTime = Date.now()
this.currentFrame = 0;
}
//可以复用,因为只需要一个
if (!this.frameTrack) {
this.frameTrack = new FrameTrack(this.showImage, data)
} else {
this.currentFrame = (scale * this.texturesAll.length) >> 0;
this.frameTrack.textures = data;
}
this.frameTrack.resetValue();
//
let tracks = [this.frameTrack];
//合成所有时间轴,总时间按总帧数传,其实可能应该用this.totalFrames-1的,无所谓了,只是最后一帧停留了一帧
if (!this.animationClip) {
this.animationClip = new AnimationClip(tracks, this.totalFrames);
} else {
this.animationClip.init(tracks, this.totalFrames)
}
//数据更新
this._onRawDataUpdate();
}
/**
* 从0开始播放
*/
play(loop: number = 1, callback?: Function) {
this.startTime = Date.now();
this.isPlay = true;
this.currentFrame = 0;
this.loop = loop;
this.callback = callback;
}
/**
* 重置为frame,不播放
* 重置为frame,不播放,即将废弃,用gotoAndStop代替
*/
reset(frame: number = 0) {
this.isPlay = false;
this.currentFrame = frame;
console.warn("method reset will be abandoned soon,use method gotoAndStop instead")
this.gotoAndStop(frame);
}
/**
* 重置所有贴图,会置0停止,不设置播放,否则原先的play里的startTime及loop不好维护
* 重置所有贴图,即将废弃,用init代替
* @param texturesAll
*/
resetTexturesAll(texturesAll: Texture[]) {
this.texturesAll = texturesAll;
this.currentFrame = 0;
this.frameRate = 30;
console.warn("method resetTexturesAll will be abandoned soon,use method init instead")
this.init(texturesAll)
}
}
/**
 * Animation track that drives a Sprite's texture from a list of frame
 * textures. Time is measured in frames (possibly fractional); it is
 * rounded and clamped into the valid frame index range before use.
 */
class FrameTrack extends HashObject implements IAnimationTrack {
    constructor(
        private sprite: Sprite,
        public textures: Texture[],
    ) {
        super();
        this._instanceType = "FrameTrack";
    }
    /**
     * Applies the frame at the given time to the sprite.
     * @param time frame index, may be fractional
     */
    setValue(time: number) {
        // Clamp into [0, last frame] and snap to the nearest whole frame.
        const lastIndex = this.textures.length - 1;
        const frameIndex = Math.round(clamp(time, 0, lastIndex));
        // Look up the matching texture and show it.
        this.sprite.texture = this.textures[frameIndex];
    }
    /** Rewinds the track to frame 0. */
    resetValue() {
        this.setValue(0);
    }
    /** Drops references so the sprite and textures can be collected. */
    destroy() {
        this.sprite = null;
        this.textures = null;
    }
}
\ No newline at end of file
import Texture from "../texture/Texture";
import { Event } from "../events/Event";
import Sprite from "../display/Sprite";
import { Container } from "../display";
/**
 * Frame-by-frame animation that plays by swapping the texture shown by an
 * internal Sprite.
 * For now these animations are only played straight through; the code is kept
 * simple and will be extended when looping variants are needed.
 * Plays on a time interval by default: the current frame index is derived from
 * elapsed wall-clock time each ENTER_FRAME tick, so playback skips frames to
 * stay in sync ("lock-step") rather than advancing one frame per tick.
 * Every frame image is drawn centered on this node's origin (sprite anchor 0.5/0.5).
 */
export class FrameAni extends Container {
    // Sprite that displays the current frame's texture.
    private showImage: Sprite
    /**
     * All textures, one per frame.
     */
    private texturesAll: Texture[];
    /**
     * Current frame, starting at 0; lock-step playback may skip frames.
     * Each frame is exactly one image, indexed from 0.
     */
    // private _currentFrame: number;// only needed for non-lock-step playback (commented out below); add a getter when required
    private set currentFrame(value: number) {
        if (this.texturesAll[value]) {
            this.showImage.texture = this.texturesAll[value];
        } else {
            // Out-of-range frame index clears the displayed texture.
            this.showImage.texture = null;
        }
    }
    /**
     * No getter for currentFrame is exposed: matching accessor visibility would
     * force the setter public too, yet external assignment is conceptually a
     * no-op — use reset() instead. This method is provided as the read side.
     * Returns a value from 0 to totalFrames - 1.
     */
    getCurrentFrame(): number {
        return this.texturesAll.indexOf(this.showImage.texture)
    }
    /**
     * Total number of frames.
     */
    get totalFrames(): number {
        return this.texturesAll.length
    };
    /**
     * Whether the animation is currently playing.
     */
    private isPlay: boolean;
    /**
     * Remaining loop count.
     */
    private loop: number = 1;
    /**
     * Playback start timestamp in ms (from Date.now()).
     */
    private startTime: number
    /**
     * Duration of one full loop in ms (time to play all frames once).
     */
    private allTime: number
    /**
     * Callback invoked when playback finishes.
     */
    private callback: Function;
    /**
     * Frames per second, 30 by default.
     */
    private _frameRate: number;
    get frameRate() {
        return this._frameRate
    }
    set frameRate(value: number) {
        // Deliberately no early-return on an unchanged value: texturesAll may
        // have been swapped, in which case allTime must be recomputed even
        // for the same frame rate.
        this._frameRate = value;
        this.allTime = this.texturesAll.length / this._frameRate * 1000;
    }
    /**
     *
     * @param texturesAll all frame textures, in playback order
     */
    constructor(texturesAll: Texture[]) {
        super()
        this.showImage = new Sprite();
        this.showImage.anchorTexture.set(0.5, 0.5);
        this.addChild(this.showImage);
        this.texturesAll = texturesAll;
        this.currentFrame = 0;
        this.addEventListener(Event.ENTER_FRAME, this.onEnterFrame, this)
        this.frameRate = 30;
    }
    // Lock-step advance: the frame index is recomputed from elapsed time each tick.
    private count = 0;
    private onEnterFrame() {
        if (!this.isPlay) {
            // this.count = 0
            return
        }
        // this.count++;
        // if (this.count % 2 == 0) {
        // this.currentFrame++;
        // if (this.currentFrame == this.totalFrames/*-1*/) {
        // this.currentFrame = 0;
        // this.isPlay = false
        // this.callback && this.callback();
        // }
        // this.texture = RES.getRes(this.sourceAll[this.currentFrame])
        // this.x = -this.texture.textureWidth / 2;
        // this.y = -this.texture.textureHeight / 2;
        // }
        var dataNow = Date.now();
        var deltaTime = dataNow - this.startTime;
        var scale = deltaTime / this.allTime;
        if (scale >= 1) {
            // One full loop elapsed.
            this.loop--;
            if (this.loop == 0) {
                this.isPlay = false
                this.callback && this.callback();
            } else {
                // Restart timing for the next loop.
                this.startTime = Date.now()
                this.currentFrame = 0;
            }
        } else {
            // Map elapsed-time fraction to a whole frame index (>>0 truncates).
            this.currentFrame = (scale * this.texturesAll.length) >> 0;
        }
    }
    /**
     * Starts playback from frame 0.
     * @param loop number of loops to play (default 1)
     * @param callback invoked once all loops have finished
     */
    play(loop: number = 1, callback?: Function) {
        this.startTime = Date.now();
        this.isPlay = true;
        this.currentFrame = 0;
        this.loop = loop;
        this.callback = callback;
    }
    /**
     * Resets to the given frame without playing.
     */
    reset(frame: number = 0) {
        this.isPlay = false;
        this.currentFrame = frame;
    }
    /**
     * Replaces all textures; resets to frame 0 and stops. Playback is not
     * restarted here because the startTime/loop state from a previous play()
     * would be hard to keep consistent.
     * @param texturesAll
     */
    resetTexturesAll(texturesAll: Texture[]) {
        this.texturesAll = texturesAll;
        this.currentFrame = 0;
        // Re-assign the frame rate so allTime is recomputed for the new length.
        this.frameRate = 30;
    }
}
\ No newline at end of file
......@@ -50,10 +50,10 @@ export class SvgaAni extends AnimationNode {
set fps(v: number) {
this._fps = v;
}
/**
* 用于控制动画,这里面的按帧数计,animationClip.totalTime是总帧数,因为文件标记的是帧,而不是时间
*/
animationClip: AnimationClip;
// /**
// * 用于控制动画,这里面的按帧数计,animationClip.totalTime是总帧数,因为文件标记的是帧,而不是时间
// */
// animationClip: AnimationClip;
constructor(data: VideoEntity) {
super(data);//里面执行了init
this._instanceType = "SvgaAni";
......@@ -145,7 +145,7 @@ export class SvgaAni extends AnimationNode {
}
}
if (!oriFrames) {
console.warn( `no matched data for ${imageKey}`)
console.warn(`no matched data for ${imageKey}`)
return;
}
var frames = SvgaAni.deepCopyFrames(oriFrames, x, y, scaleX, scaleY, rotation, anchorX, anchorY);
......@@ -260,7 +260,7 @@ export class SvgaAni extends AnimationNode {
rotation: number = 0,
anchorX: number = 0,
anchorY: number = 0
) {
): FrameEntity[] {
var cf = [];
rotation *= Math.PI / 180;
//@ts-ignore
......
......@@ -240,12 +240,26 @@ export class Object3D extends EventDispatcher {
return vector
};
/**
* 获取该物体的舞台坐标
* 获取该物体的舞台坐标,
* 其实应该是场景scene3D父级容器中的2d坐标,
* 如需进一步获取舞台或全局坐标,自行用2d方法继续转换
*/
get stagePos(): { x: number, y: number } {
return this.getPosition2d()
}
/**
* 获取该物体在场景scene3D父级容器中的2d坐标,
* 如需进一步获取舞台或全局坐标,自行用2d方法继续转换
* @param {*} vec3 相对自身的3d坐标,存在字段x,y,z的对象,不传表示xyz=0;
* @returns 返回xy的对象
*/
getPosition2d(vec3?: { x: number, y: number, z: number }): { x: number, y: number } {
//没有场景返回null
if (!this.scene) return null;
//不传就是0,0,0
vec3 = vec3 || { x: 0, y: 0, z: 0 };
const scene = this.scene;
this.localToGlobal(tempVector3.set(0, 0, 0));
this.localToGlobal(tempVector3.copy(vec3 as Vector3));
tempVector3.project(scene.camera);
var a = scene.viewWidth / 2;
var b = scene.viewHeight / 2;
......
newmtl material1
Ka 1.000000 1.000000 1.000000
Kd 1.000000 1.000000 1.000000
Ks 1.000000 1.000000 1.000000
illum 2
Ns 8
map_Kd head3d.jpg
\ No newline at end of file
This diff is collapsed.
( function () {

	/**
	 * Loader for DirectDraw Surface (.dds) compressed textures.
	 * Registers itself on the global THREE namespace as THREE.DDSLoader.
	 * Supports DXT1/DXT3/DXT5/ETC1 FourCC formats plus 32-bit uncompressed
	 * BGRA data, including cubemaps (6 faces).
	 */
	class DDSLoader extends THREE.CompressedTextureLoader {

		constructor( manager ) {

			super( manager );

		}

		/**
		 * Parses a raw .dds file buffer into a texture descriptor.
		 * @param {ArrayBuffer} buffer raw file contents
		 * @param {boolean} loadMipmaps when false, only mip level 0 is read
		 * @returns {Object} dds descriptor: { mipmaps, width, height, format, mipmapCount, isCubemap }
		 *   (returned partially filled, with an error logged, on unsupported input)
		 */
		parse( buffer, loadMipmaps ) {

			const dds = {
				mipmaps: [],
				width: 0,
				height: 0,
				format: null,
				mipmapCount: 1
			}; // Adapted from @toji's DDS utils
			// https://github.com/toji/webgl-texture-utils/blob/master/texture-util/dds.js
			// All values and structures referenced from:
			// http://msdn.microsoft.com/en-us/library/bb943991.aspx/

			const DDS_MAGIC = 0x20534444; // let DDSD_CAPS = 0x1;
			// let DDSD_HEIGHT = 0x2;
			// let DDSD_WIDTH = 0x4;
			// let DDSD_PITCH = 0x8;
			// let DDSD_PIXELFORMAT = 0x1000;

			const DDSD_MIPMAPCOUNT = 0x20000; // let DDSD_LINEARSIZE = 0x80000;
			// let DDSD_DEPTH = 0x800000;
			// let DDSCAPS_COMPLEX = 0x8;
			// let DDSCAPS_MIPMAP = 0x400000;
			// let DDSCAPS_TEXTURE = 0x1000;

			const DDSCAPS2_CUBEMAP = 0x200;
			const DDSCAPS2_CUBEMAP_POSITIVEX = 0x400;
			const DDSCAPS2_CUBEMAP_NEGATIVEX = 0x800;
			const DDSCAPS2_CUBEMAP_POSITIVEY = 0x1000;
			const DDSCAPS2_CUBEMAP_NEGATIVEY = 0x2000;
			const DDSCAPS2_CUBEMAP_POSITIVEZ = 0x4000;
			const DDSCAPS2_CUBEMAP_NEGATIVEZ = 0x8000; // let DDSCAPS2_VOLUME = 0x200000;
			// let DDPF_ALPHAPIXELS = 0x1;
			// let DDPF_ALPHA = 0x2;

			const DDPF_FOURCC = 0x4; // let DDPF_RGB = 0x40;
			// let DDPF_YUV = 0x200;
			// let DDPF_LUMINANCE = 0x20000;

			// Packs a 4-character code string into a little-endian int32.
			function fourCCToInt32( value ) {

				return value.charCodeAt( 0 ) + ( value.charCodeAt( 1 ) << 8 ) + ( value.charCodeAt( 2 ) << 16 ) + ( value.charCodeAt( 3 ) << 24 );

			}

			// Inverse of fourCCToInt32, for error messages.
			function int32ToFourCC( value ) {

				return String.fromCharCode( value & 0xff, value >> 8 & 0xff, value >> 16 & 0xff, value >> 24 & 0xff );

			}

			// Reads one uncompressed 32-bit mip level, swizzling BGRA -> RGBA.
			function loadARGBMip( buffer, dataOffset, width, height ) {

				const dataLength = width * height * 4;
				const srcBuffer = new Uint8Array( buffer, dataOffset, dataLength );
				const byteArray = new Uint8Array( dataLength );
				let dst = 0;
				let src = 0;

				for ( let y = 0; y < height; y ++ ) {

					for ( let x = 0; x < width; x ++ ) {

						const b = srcBuffer[ src ];
						src ++;
						const g = srcBuffer[ src ];
						src ++;
						const r = srcBuffer[ src ];
						src ++;
						const a = srcBuffer[ src ];
						src ++;
						byteArray[ dst ] = r;
						dst ++; //r
						byteArray[ dst ] = g;
						dst ++; //g
						byteArray[ dst ] = b;
						dst ++; //b
						byteArray[ dst ] = a;
						dst ++; //a

					}

				}

				return byteArray;

			}

			const FOURCC_DXT1 = fourCCToInt32( 'DXT1' );
			const FOURCC_DXT3 = fourCCToInt32( 'DXT3' );
			const FOURCC_DXT5 = fourCCToInt32( 'DXT5' );
			const FOURCC_ETC1 = fourCCToInt32( 'ETC1' );
			const headerLengthInt = 31; // The header length in 32 bit ints

			// Offsets into the header array
			const off_magic = 0;
			const off_size = 1;
			const off_flags = 2;
			const off_height = 3;
			const off_width = 4;
			const off_mipmapCount = 7;
			const off_pfFlags = 20;
			const off_pfFourCC = 21;
			const off_RGBBitCount = 22;
			const off_RBitMask = 23;
			const off_GBitMask = 24;
			const off_BBitMask = 25;
			const off_ABitMask = 26; // let off_caps = 27;

			const off_caps2 = 28; // let off_caps3 = 29;
			// let off_caps4 = 30;
			// Parse header

			const header = new Int32Array( buffer, 0, headerLengthInt );

			if ( header[ off_magic ] !== DDS_MAGIC ) {

				console.error( 'THREE.DDSLoader.parse: Invalid magic number in DDS header.' );
				return dds;

			}

			// NOTE(review): `!` binds tighter than `&`, so this evaluates
			// (!header[off_pfFlags]) & DDPF_FOURCC, which is 0 whenever the
			// flags word is non-zero — the guard effectively never fires.
			// The intended check was likely ((header[off_pfFlags] & DDPF_FOURCC) === 0),
			// but enforcing that would also reject uncompressed RGB DDS files
			// handled by the switch default below — confirm before changing.
			if ( ! header[ off_pfFlags ] & DDPF_FOURCC ) {

				console.error( 'THREE.DDSLoader.parse: Unsupported format, must contain a FourCC code.' );
				return dds;

			}

			let blockBytes;
			const fourCC = header[ off_pfFourCC ];
			let isRGBAUncompressed = false;

			switch ( fourCC ) {

				case FOURCC_DXT1:
					blockBytes = 8;
					dds.format = THREE.RGB_S3TC_DXT1_Format;
					break;

				case FOURCC_DXT3:
					blockBytes = 16;
					dds.format = THREE.RGBA_S3TC_DXT3_Format;
					break;

				case FOURCC_DXT5:
					blockBytes = 16;
					dds.format = THREE.RGBA_S3TC_DXT5_Format;
					break;

				case FOURCC_ETC1:
					blockBytes = 8;
					dds.format = THREE.RGB_ETC1_Format;
					break;

				default:
					// No recognized FourCC: accept 32-bit uncompressed data if
					// the channel masks match the standard B8G8R8A8 layout.
					if ( header[ off_RGBBitCount ] === 32 && header[ off_RBitMask ] & 0xff0000 && header[ off_GBitMask ] & 0xff00 && header[ off_BBitMask ] & 0xff && header[ off_ABitMask ] & 0xff000000 ) {

						isRGBAUncompressed = true;
						blockBytes = 64;
						dds.format = THREE.RGBAFormat;

					} else {

						console.error( 'THREE.DDSLoader.parse: Unsupported FourCC code ', int32ToFourCC( fourCC ) );
						return dds;

					}

			}

			dds.mipmapCount = 1;

			if ( header[ off_flags ] & DDSD_MIPMAPCOUNT && loadMipmaps !== false ) {

				dds.mipmapCount = Math.max( 1, header[ off_mipmapCount ] );

			}

			const caps2 = header[ off_caps2 ];
			dds.isCubemap = caps2 & DDSCAPS2_CUBEMAP ? true : false;

			// A cubemap must declare all six faces or it cannot be uploaded.
			if ( dds.isCubemap && ( ! ( caps2 & DDSCAPS2_CUBEMAP_POSITIVEX ) || ! ( caps2 & DDSCAPS2_CUBEMAP_NEGATIVEX ) || ! ( caps2 & DDSCAPS2_CUBEMAP_POSITIVEY ) || ! ( caps2 & DDSCAPS2_CUBEMAP_NEGATIVEY ) || ! ( caps2 & DDSCAPS2_CUBEMAP_POSITIVEZ ) || ! ( caps2 & DDSCAPS2_CUBEMAP_NEGATIVEZ ) ) ) {

				console.error( 'THREE.DDSLoader.parse: Incomplete cubemap faces' );
				return dds;

			}

			dds.width = header[ off_width ];
			dds.height = header[ off_height ];
			let dataOffset = header[ off_size ] + 4; // Extract mipmaps buffers

			const faces = dds.isCubemap ? 6 : 1;

			for ( let face = 0; face < faces; face ++ ) {

				let width = dds.width;
				let height = dds.height;

				for ( let i = 0; i < dds.mipmapCount; i ++ ) {

					let byteArray, dataLength;

					if ( isRGBAUncompressed ) {

						byteArray = loadARGBMip( buffer, dataOffset, width, height );
						dataLength = byteArray.length;

					} else {

						// Compressed formats store 4x4 texel blocks of blockBytes each.
						dataLength = Math.max( 4, width ) / 4 * Math.max( 4, height ) / 4 * blockBytes;
						byteArray = new Uint8Array( buffer, dataOffset, dataLength );

					}

					const mipmap = {
						'data': byteArray,
						'width': width,
						'height': height
					};
					dds.mipmaps.push( mipmap );
					dataOffset += dataLength;

					// Each successive mip level halves in each dimension, min 1.
					width = Math.max( width >> 1, 1 );
					height = Math.max( height >> 1, 1 );

				}

			}

			return dds;

		}

	}

	THREE.DDSLoader = DDSLoader;

} )();
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
<!DOCTYPE html>
<html lang="en">
<head>
<title>test</title>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, user-scalable=no, minimum-scale=1.0, maximum-scale=1.0">
<style>
body {
color: #cccccc;
font-family: Monospace;
font-size: 13px;
text-align: center;
background-color: #999999;
margin: 0px;
overflow: hidden;
}
#info {
position: absolute;
top: 0px;
width: 100%;
padding: 5px;
}
a {
color: #0080ff;
}
</style>
</head>
<body>
<div id="container"></div>
<!-- <div id="info"><a href="http://threejs.org" target="_blank" rel="noopener">three.js</a> webgl - buffergeometry -
lines - indexed</div> -->
<script src="./js/three.js"></script>
<script src="./js/OrbitControls.js"></script>
<script src="./js/MTLLoader.js"></script>
<script src="./js/OBJLoader.js"></script>
<script src="./js/DDSLoader.js"></script>
<script src="https://cdn.jsdelivr.net/npm/@tensorflow/tfjs"></script>
<script src="https://cdn.jsdelivr.net/npm/@tensorflow-models/blazeface"></script>
<!-- <script src="js/Detector.js"></script>
<script src="js/libs/stats.min.js"></script> -->
<!-- 按照前端实现(3d渲染引擎threejs,识别库tensorflow)
1、加载模型obj、材质mtl,
2、调整相机角度截取渲染平面得到base64(模型正面)
3、图片img传base64的路径得到图片
4、图片传入识别库api,得到特征点标识(该库模型加载不到未实现,且识别点较少(只有眼睛,鼻子,嘴巴,耳朵))
4-1、图片传入百度人脸特征识别api,可获取脸部72个特征点(百度接口需后端调用)
5、特征点坐标转换成0到1坐标(坐标/图片(渲染)尺寸,为了下面的射线检测)
6、射线检测传入转换后的特征坐标,相机参数,人脸模型数组,返回特征点的3d空间坐标
7、在模型上红球标记得到的3d空间坐标 -->
<script>
// if ( ! Detector.webgl ) Detector.addGetWebGLMessage();
var container, stats;
var camera, scene, renderer;
var mesh, parent_node;
var testObjs = [];
init();
render();
/**
 * Sets up the scene: camera, renderer, lights, helpers, orbit controls, then
 * loads the head model (MTL + OBJ). After the model loads it renders once,
 * captures the canvas as a base64 JPEG, runs face detection on it, raycasts
 * a test point back into the scene and drops a red marker at the hit point.
 * Populates the module-level globals: container, camera, scene, renderer, testObjs.
 */
function init() {
    container = document.getElementById('container');
    camera = new THREE.PerspectiveCamera(27, window.innerWidth / window.innerHeight, 1, 10000);
    // camera.position.set(10, 10, 10);
    camera.position.set(0, 0, 10);
    scene = new THREE.Scene();
    renderer = new THREE.WebGLRenderer({ alpha: true });
    renderer.setPixelRatio(window.devicePixelRatio);
    renderer.setSize(window.innerWidth, window.innerHeight);
    renderer.gammaInput = true;
    renderer.gammaOutput = true;
    // Camera controls, to make adjusting the viewing angle easy.
    var orbit = new THREE.OrbitControls(camera, renderer.domElement);
    // orbit.enableZoom = false;
    container.appendChild(renderer.domElement);
    // Directional light
    var dl = new THREE.DirectionalLight()
    dl.position.set(1, -2, 3)
    scene.add(dl);
    scene.add(new THREE.AmbientLight(0xffffff, 0.2));
    // Ground grid
    scene.add(new THREE.GridHelper(100, 100, 100, 0x888888, 0x888888))
    // Coordinate axes
    scene.add(new THREE.AxesHelper(1000))
    const manager = new THREE.LoadingManager();
    manager.addHandler(/\.dds$/i, new THREE.DDSLoader());
    // Load the model (materials first, then geometry)
    new THREE.MTLLoader(manager)
        .load('assets/head3d.mtl', function (materials) {
            console.log(materials)
            materials.preload();
            new THREE.OBJLoader(manager)
                .setMaterials(materials)
                .load('assets/head3d.obj', function (object) {
                    console.log(object)
                    console.log(materials["material1"])
                    // This model has no normals, so a lit Phong material cannot
                    // be used — switch to an unlit basic material instead.
                    object.children[0].material = new THREE.MeshBasicMaterial()//materials.materials["material1"]
                    // Re-attach the diffuse texture map
                    object.children[0].material.map = materials.materials["material1"].map
                    // object.position.y = - 95;
                    // Scale down to display size
                    object.scale.set(0.01, 0.01, 0.01)
                    // object.rotation.set(0,1,0)
                    scene.add(object);
                    // Register as a raycast target
                    testObjs.push(object)
                    // Render once so the canvas contains the frame to capture
                    renderer.render(scene, camera);
                    // Convert the canvas to a base64 JPEG
                    var base64 = renderer.domElement.toDataURL("image/jpeg")
                    // Face detection (not working yet — detection model fails to load)
                    imgTest(base64)
                    // Get the 3d-space point; test point at (0.5, 0.5)
                    var globalPos = rayTest(0.5, 0.5, camera, testObjs)
                    // Mark the hit point with a red ball.
                    // NOTE(review): rayTest returns null when nothing is hit;
                    // position.copy(null) would throw — confirm a hit is guaranteed here.
                    var ball = creatBall()
                    ball.position.copy(globalPos)
                    scene.add(ball)
                }, () => { }, () => { });
        });
}
/**
 * Render loop: draws the scene once per animation frame using the
 * module-level `renderer`, `scene` and `camera` created in init().
 */
function render() {
    // Removed dead code: a `time` local (Date.now() * 0.001) was computed but
    // only referenced by the commented-out rotation line below.
    // parent_node.rotation.z = Date.now() * 0.001 * 0.5;
    renderer.render(scene, camera);
    requestAnimationFrame(render);
}
/**
 * Runs blazeface face detection on a base64-encoded image and logs the
 * detected faces (bounding boxes, probabilities and landmarks).
 * @param {string} base64 data-URL of the captured render
 */
async function imgTest(base64) {
    // Load the model.
    const model = await blazeface.load().catch((e) => {
        console.log(e)
    });
    // Bail out if loading failed: the .catch above resolves to undefined,
    // and calling estimateFaces on undefined would throw.
    if (!model) return;
    // BUG FIX: previously `img.onload = r` resolved the promise with the
    // `load` Event object, so the outer variable held an Event rather than
    // the HTMLImageElement that estimateFaces expects. Resolve with the
    // image element itself instead.
    var img = await new Promise((resolve) => {
        let image = new Image();
        image.onload = () => resolve(image);
        image.src = base64;
    })
    console.log(img)
    // Pass in an image or video to the model. The model returns an array of
    // bounding boxes, probabilities, and landmarks, one for each detected face.
    const returnTensors = false; // Pass in `true` to get tensors back, rather than values.
    const predictions = await model.estimateFaces(img, returnTensors);
    if (predictions.length > 0) {
        console.log(predictions)
        /*
        `predictions` is an array of objects describing each detected face, for example:
        [
            {
                topLeft: [232.28, 145.26],
                bottomRight: [449.75, 308.36],
                probability: [0.998],
                landmarks: [
                    [295.13, 177.64], // right eye
                    [382.32, 175.56], // left eye
                    [341.18, 205.03], // nose
                    [345.12, 250.61], // mouth
                    [252.76, 211.37], // right ear
                    [431.20, 204.93] // left ear
                ]
            }
        ]
        */
        // for (let i = 0; i < predictions.length; i++) {
        // const start = predictions[i].topLeft;
        // const end = predictions[i].bottomRight;
        // const size = [end[0] - start[0], end[1] - start[1]];
        // // Render a rectangle over each detected face.
        // ctx.fillRect(start[0], start[1], size[0], size[1]);
        // }
    }
}
/**
 * Raycasts from the camera through a normalized image position and returns
 * the first intersection point with the given meshes.
 * @param {number} x relative image position, 0 to 1 (left to right)
 * @param {number} y relative image position, 0 to 1 (top to bottom)
 * @param {THREE.Camera} camera the camera used to render the image
 * @param {THREE.Mesh[]} testObjs meshes to test (descendants included)
 * @returns {THREE.Vector3} intersection point, or null when nothing is hit
 */
function rayTest(x, y, camera, testObjs) {
    // Convert to normalized device coordinates (canvas center is the origin,
    // y axis pointing up).
    x = x * 2 - 1;
    y = -y * 2 + 1;
    let standardVector = new THREE.Vector3(x, y, 1);
    // Unproject NDC into world coordinates
    let worldVector = standardVector.unproject(camera);
    // Ray direction unit vector (world point minus camera position)
    let ray = worldVector.sub(camera.position).normalize();
    // Build the raycaster
    let rayCaster = new THREE.Raycaster(camera.position, ray);
    // Intersections, sorted nearest-first (second argument `true` means
    // recurse into all descendants of each object; it defaults to false)
    let intersects = rayCaster.intersectObjects(testObjs, true);
    if (intersects.length > 0) {
        // Take the nearest hit
        let currObj = intersects[0];
        console.log(currObj);
        return currObj.point
    }
    return null
}
/**
 * Builds a small solid-red marker sphere (radius 0.05) used to tag a
 * picked point in the scene.
 * @returns {THREE.Mesh} the marker mesh
 */
function creatBall() {
    const markerGeometry = new THREE.SphereGeometry(0.05);
    const markerMaterial = new THREE.MeshBasicMaterial({ color: 0xff0000 });
    return new THREE.Mesh(markerGeometry, markerMaterial);
}
</script>
</body>
</html>
\ No newline at end of file
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment